Diffstat (limited to 'gcc-4.4.3/gcc/config/rs6000')
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/40x.md  120
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/440.md  133
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/603.md  143
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/6xx.md  275
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/7450.md  185
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/750cl.h  30
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/7xx.md  184
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/8540.md  250
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix.h  262
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix.opt  24
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix41.h  101
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix41.opt  24
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix43.h  189
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix51.h  193
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix52.h  203
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix53.h  199
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix61.h  204
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/aix64.opt  32
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/altivec.h  477
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/altivec.md  3099
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/biarch64.h  26
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/cell.md  400
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/constraints.md  161
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/crtresfpr.asm  79
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/crtresgpr.asm  79
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/crtresxfpr.asm  84
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/crtresxgpr.asm  82
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/crtsavfpr.asm  79
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/crtsavgpr.asm  79
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-asm.h  51
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-fallback.c  487
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-fpsave.asm  92
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-ldouble-format  84
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-ldouble.c  438
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.4.ver  76
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.5.ver  89
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-tramp.asm  125
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-unwind.h  30
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-vecsave.asm  155
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin-world.asm  259
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin.h  439
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin.md  438
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin.opt  32
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin64.h  35
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin7.h  30
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/darwin8.h  32
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/default64.h  24
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/dfp.md  687
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/driver-rs6000.c  426
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e300c2c3.md  189
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500-double.h  24
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500.h  45
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtres32gpr.asm  73
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtres64gpr.asm  73
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtres64gprctr.asm  72
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtrest32gpr.asm  75
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtrest64gpr.asm  74
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtresx32gpr.asm  75
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtresx64gpr.asm  75
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtsav32gpr.asm  73
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtsav64gpr.asm  72
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtsav64gprctr.asm  91
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtsavg32gpr.asm  73
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gpr.asm  73
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gprctr.asm  90
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/e500mc.md  200
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabi-ci.asm  113
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabi-cn.asm  104
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabi.asm  289
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabi.h  44
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabialtivec.h  30
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabisim.h  54
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/eabispe.h  54
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/freebsd.h  74
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/gnu.h  37
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/host-darwin.c  156
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/host-ppc64-darwin.c  30
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/libgcc-ppc-glibc.ver  55
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/libgcc-ppc64.ver  7
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/linux-unwind.h  358
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/linux.h  130
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/linux64.h  534
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/linux64.opt  24
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/linuxaltivec.h  30
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/linuxspe.h  44
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/lynx.h  123
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/milli.exp  7
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/mpc.md  111
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/netbsd.h  91
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/paired.h  75
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/paired.md  519
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/power4.md  410
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/power5.md  321
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/power6.md  568
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/ppc-asm.h  178
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/ppc64-fp.c  239
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/ppu_intrinsics.h  727
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/predicates.md  1338
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rios1.md  191
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rios2.md  129
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000-c.c  3272
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000-modes.def  46
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000-protos.h  192
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000.c  22885
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000.h  3196
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000.md  14897
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs6000.opt  296
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rs64.md  154
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/rtems.h  56
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/secureplt.h  20
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sfp-machine.h  68
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/si2vmx.h  2048
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/singlefp.h  40
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sol-ci.asm  94
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sol-cn.asm  72
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/spe.h  1107
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/spe.md  3190
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/spu2vmx.h  2415
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sync.md  622
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sysv4.h  1129
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sysv4.opt  144
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/sysv4le.h  36
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-aix43  76
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-aix52  56
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-darwin  36
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-darwin8  3
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-fprules  19
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-fprules-fpbit  11
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-fprules-softfp  6
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-linux64  26
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-lynx  38
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-netbsd  72
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-newas  37
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-ppccomm  56
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-ppcendian  12
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-ppcgas  14
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-ppcos  8
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-rs6000  18
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-rtems  64
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-spe  68
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-vxworks  16
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/t-vxworksae  5
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/tramp.asm  107
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/vec_types.h  52
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/vxworks.h  139
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/vxworksae.h  23
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/x-aix  6
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/x-darwin  4
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/x-darwin64  4
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/x-linux-relax  2
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/x-rs6000  3
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/xcoff.h  334
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/xfpu.h  26
-rw-r--r--  gcc-4.4.3/gcc/config/rs6000/xfpu.md  140
154 files changed, 77526 insertions, 0 deletions
diff --git a/gcc-4.4.3/gcc/config/rs6000/40x.md b/gcc-4.4.3/gcc/config/rs6000/40x.md
new file mode 100644
index 000000000..e11c6539f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/40x.md
@@ -0,0 +1,120 @@
+;; Scheduling description for IBM PowerPC 403 and PowerPC 405 processors.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppc40x,ppc40xiu")
+(define_cpu_unit "bpu_40x,fpu_405" "ppc40x")
+(define_cpu_unit "iu_40x" "ppc40xiu")
+
+;; PPC401 / PPC403 / PPC405 32-bit integer only IU BPU
+;; Embedded PowerPC controller
+;; In-order execution
+;; Max issue two insns/cycle (includes one branch)
+(define_insn_reservation "ppc403-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,iu_40x")
+
+(define_insn_reservation "ppc403-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,iu_40x,iu_40x")
+
+(define_insn_reservation "ppc403-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x,nothing,bpu_40x")
+
+(define_insn_reservation "ppc403-imul" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc403"))
+ "iu_40x*4")
+
+(define_insn_reservation "ppc405-imul" 5
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc405"))
+ "iu_40x*4")
+
+(define_insn_reservation "ppc405-imul2" 3
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc405"))
+ "iu_40x*2")
+
+(define_insn_reservation "ppc405-imul3" 2
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-idiv" 33
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x*33")
+
+(define_insn_reservation "ppc403-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "iu_40x")
+
+(define_insn_reservation "ppc403-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "bpu_40x")
+
+(define_insn_reservation "ppc403-cr" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc403,ppc405"))
+ "bpu_40x")
+
+(define_insn_reservation "ppc405-float" 11
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,\
+ fpcompare,fp,dmul,sdiv,ddiv")
+ (eq_attr "cpu" "ppc405"))
+ "fpu_405*10")
diff --git a/gcc-4.4.3/gcc/config/rs6000/440.md b/gcc-4.4.3/gcc/config/rs6000/440.md
new file mode 100644
index 000000000..b146222ac
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/440.md
@@ -0,0 +1,133 @@
+;; Scheduling description for IBM PowerPC 440 processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; PPC440 Embedded PowerPC controller
+;; dual issue
+;; i_pipe - complex integer / compare / branch
+;; j_pipe - simple integer arithmetic
+;; l_pipe - load-store
+;; f_pipe - floating point arithmetic
+
+(define_automaton "ppc440_core,ppc440_apu")
+(define_cpu_unit "ppc440_i_pipe,ppc440_j_pipe,ppc440_l_pipe" "ppc440_core")
+(define_cpu_unit "ppc440_f_pipe" "ppc440_apu")
+(define_cpu_unit "ppc440_issue_0,ppc440_issue_1" "ppc440_core")
+
+(define_reservation "ppc440_issue" "ppc440_issue_0|ppc440_issue_1")
+
+
+(define_insn_reservation "ppc440-load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-store" 3
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-fpload" 4
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_l_pipe")
+
+(define_insn_reservation "ppc440-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\
+ trap,var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue_0+ppc440_issue_1,\
+ ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue_0+ppc440_issue_1,ppc440_i_pipe|ppc440_j_pipe,\
+ ppc440_i_pipe|ppc440_j_pipe,ppc440_i_pipe|ppc440_j_pipe")
+
+(define_insn_reservation "ppc440-imul" 3
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-imul2" 2
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-idiv" 34
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe*33")
+
+(define_insn_reservation "ppc440-branch" 1
+ (and (eq_attr "type" "branch,jmpreg,isync")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-compare" 2
+ (and (eq_attr "type" "cmp,fast_compare,compare,cr_logical,delayed_cr,mfcr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-fpcompare" 3 ; 2
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe+ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-fp" 5
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe")
+
+(define_insn_reservation "ppc440-sdiv" 19
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe*15")
+
+(define_insn_reservation "ppc440-ddiv" 33
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_f_pipe*29")
+
+(define_insn_reservation "ppc440-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
+(define_insn_reservation "ppc440-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc440"))
+ "ppc440_issue,ppc440_i_pipe")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/603.md b/gcc-4.4.3/gcc/config/rs6000/603.md
new file mode 100644
index 000000000..c5fea3148
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/603.md
@@ -0,0 +1,143 @@
+;; Scheduling description for PowerPC 603 processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppc603,ppc603fp")
+(define_cpu_unit "iu_603" "ppc603")
+(define_cpu_unit "fpu_603" "ppc603fp")
+(define_cpu_unit "lsu_603,bpu_603,sru_603" "ppc603")
+
+;; PPC603/PPC603e 32-bit IU, LSU, FPU, BPU, SRU
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+
+;; Branches go straight to the BPU. All other insns are handled
+;; by a dispatch unit which can issue a max of 2 insns per cycle.
+
+;; The PPC603e user's manual recommends that to reduce branch mispredictions,
+;; the insn that sets CR bits should be separated from the branch insn
+;; that evaluates them; separation by more than 9 insns ensures that the CR
+;; bits will be immediately available for execution.
+;; This could be artificially achieved by exaggerating the latency of
+;; compare insns but at the expense of a poorer schedule.
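+;;
+;; As an illustrative sketch only (this file keeps the accurate latency),
+;; that exaggeration would amount to stretching the compare reservation
+;; below to roughly match the manual's 9-insn separation, e.g.
+;;   (define_insn_reservation "ppc603-compare" 10 ... "iu_603,nothing*8,bpu_603")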
+
+;; CR insns get executed in the SRU. Not modelled.
+
+(define_insn_reservation "ppc603-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ux,load_u,load_l")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
+(define_insn_reservation "ppc603-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603*2")
+
+(define_insn_reservation "ppc603-fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
+(define_insn_reservation "ppc603-storec" 8
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "ppc603"))
+ "lsu_603")
+
+(define_insn_reservation "ppc603-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603")
+
+(define_insn_reservation "ppc603-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,iu_603")
+
+(define_insn_reservation "ppc603-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,iu_603,iu_603")
+
+; This takes 2 or 3 cycles
+(define_insn_reservation "ppc603-imul" 3
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603*2")
+
+(define_insn_reservation "ppc603-imul2" 2
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603*2")
+
+(define_insn_reservation "ppc603-idiv" 37
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603*37")
+
+(define_insn_reservation "ppc603-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "ppc603"))
+ "iu_603,nothing,bpu_603")
+
+(define_insn_reservation "ppc603-fpcompare" 3
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc603"))
+ "(fpu_603+iu_603*2),bpu_603")
+
+(define_insn_reservation "ppc603-fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603")
+
+(define_insn_reservation "ppc603-dmul" 4
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603*2")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc603-sdiv" 18
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603*18")
+
+(define_insn_reservation "ppc603-ddiv" 33
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc603"))
+ "fpu_603*33")
+
+(define_insn_reservation "ppc603-crlogical" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr,mfcr,mtcr")
+ (eq_attr "cpu" "ppc603"))
+ "sru_603")
+
+(define_insn_reservation "ppc603-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc603"))
+ "sru_603")
+
+(define_insn_reservation "ppc603-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr,isync,sync")
+ (eq_attr "cpu" "ppc603"))
+ "sru_603")
+
+(define_insn_reservation "ppc603-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "ppc603"))
+ "bpu_603")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/6xx.md b/gcc-4.4.3/gcc/config/rs6000/6xx.md
new file mode 100644
index 000000000..88c15ae39
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/6xx.md
@@ -0,0 +1,275 @@
+;; Scheduling description for PowerPC 604, PowerPC 604e, PowerPC 620,
+;; and PowerPC 630 processors.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppc6xx,ppc6xxfp,ppc6xxfp2")
+(define_cpu_unit "iu1_6xx,iu2_6xx,mciu_6xx" "ppc6xx")
+(define_cpu_unit "fpu_6xx" "ppc6xxfp")
+(define_cpu_unit "fpu1_6xx,fpu2_6xx" "ppc6xxfp2")
+(define_cpu_unit "lsu_6xx,bpu_6xx,cru_6xx" "ppc6xx")
+
+;; PPC604 32-bit 2xSCIU, MCIU, LSU, FPU, BPU
+;; PPC604e 32-bit 2xSCIU, MCIU, LSU, FPU, BPU, CRU
+;; MCIU used for imul/idiv and moves from/to spr
+;; LSU 2 stage pipelined
+;; FPU 3 stage pipelined
+;; Max issue 4 insns/clock cycle
+
+;; PPC604e is PPC604 with larger caches and a CRU. In the 604
+;; the CR logical operations are handled in the BPU.
+;; In the 604e, the CRU shares a bus with the BPU, so only one condition
+;; register or branch insn can be issued per clock. Not modelled.
+
+;; PPC620 64-bit 2xSCIU, MCIU, LSU, FPU, BPU, CRU
+;; PPC630 64-bit 2xSCIU, MCIU, LSU, 2xFPU, BPU, CRU
+;; Max issue 4 insns/clock cycle
+;; Out-of-order execution, in-order completion
+
+;; No following instruction can dispatch in the same cycle as a branch
+;; instruction. Not modelled. This is no problem if RCSP is not
+;; enabled since the scheduler stops a schedule when it gets to a branch.
+
+;; Four insns can be dispatched per cycle.
+
+(define_insn_reservation "ppc604-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-fpload" 3
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-store" 3
+ (and (eq_attr "type" "store,fpstore,store_ux,store_u,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-llsc" 3
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc630-llsc" 4
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc604-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx,iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-imul" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc604"))
+ "mciu_6xx*2")
+
+(define_insn_reservation "ppc604e-imul" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc604e"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc620-imul" 5
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*3")
+
+(define_insn_reservation "ppc620-imul2" 4
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*3")
+
+(define_insn_reservation "ppc620-imul3" 3
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*3")
+
+(define_insn_reservation "ppc620-lmul" 7
+ (and (eq_attr "type" "lmul,lmul_compare")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*5")
+
+(define_insn_reservation "ppc604-idiv" 20
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "mciu_6xx*19")
+
+(define_insn_reservation "ppc620-idiv" 37
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc620"))
+ "mciu_6xx*36")
+
+(define_insn_reservation "ppc630-idiv" 21
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc630"))
+ "mciu_6xx*20")
+
+(define_insn_reservation "ppc620-ldiv" 37
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "mciu_6xx*36")
+
+(define_insn_reservation "ppc604-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "(iu1_6xx|iu2_6xx)")
+
+; FPU PPC604{,e},PPC620
+(define_insn_reservation "ppc604-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx")
+
+(define_insn_reservation "ppc604-fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx")
+
+(define_insn_reservation "ppc604-dmul" 3
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc604-sdiv" 18
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx*18")
+
+(define_insn_reservation "ppc604-ddiv" 32
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "fpu_6xx*32")
+
+(define_insn_reservation "ppc620-ssqrt" 31
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc620"))
+ "fpu_6xx*31")
+
+(define_insn_reservation "ppc620-dsqrt" 31
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc620"))
+ "fpu_6xx*31")
+
+
+; 2xFPU PPC630
+(define_insn_reservation "ppc630-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx|fpu2_6xx")
+
+(define_insn_reservation "ppc630-fp" 3
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx|fpu2_6xx")
+
+(define_insn_reservation "ppc630-sdiv" 17
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*17|fpu2_6xx*17")
+
+(define_insn_reservation "ppc630-ddiv" 21
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*21|fpu2_6xx*21")
+
+(define_insn_reservation "ppc630-ssqrt" 18
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*18|fpu2_6xx*18")
+
+(define_insn_reservation "ppc630-dsqrt" 25
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc630"))
+ "fpu1_6xx*25|fpu2_6xx*25")
+
+(define_insn_reservation "ppc604-mfcr" 3
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc604-mtcr" 2
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "iu1_6xx|iu2_6xx")
+
+(define_insn_reservation "ppc604-crlogical" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc604"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604e-crlogical" 2
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc604e,ppc620,ppc630"))
+ "cru_6xx")
+
+(define_insn_reservation "ppc604-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc604-mfjmpr" 3
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc630-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc630"))
+ "mciu_6xx")
+
+(define_insn_reservation "ppc604-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604-isync" 0
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc630-isync" 6
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "bpu_6xx")
+
+(define_insn_reservation "ppc604-sync" 35
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ "lsu_6xx")
+
+(define_insn_reservation "ppc630-sync" 26
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ "lsu_6xx")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/7450.md b/gcc-4.4.3/gcc/config/rs6000/7450.md
new file mode 100644
index 000000000..6f2775744
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/7450.md
@@ -0,0 +1,185 @@
+;; Scheduling description for Motorola PowerPC 7450 processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppc7450,ppc7450mciu,ppc7450fp,ppc7450vec")
+(define_cpu_unit "iu1_7450,iu2_7450,iu3_7450" "ppc7450")
+(define_cpu_unit "mciu_7450" "ppc7450mciu")
+(define_cpu_unit "fpu_7450" "ppc7450fp")
+(define_cpu_unit "lsu_7450,bpu_7450" "ppc7450")
+(define_cpu_unit "du1_7450,du2_7450,du3_7450" "ppc7450")
+(define_cpu_unit "vecsmpl_7450,veccmplx_7450,vecflt_7450,vecperm_7450" "ppc7450vec")
+(define_cpu_unit "vdu1_7450,vdu2_7450" "ppc7450vec")
+
+
+;; PPC7450 32-bit 3xIU, MCIU, LSU, SRU, FPU, BPU, 4xVEC
+;; IU1,IU2,IU3 can perform all integer operations
+;; MCIU performs imul and idiv, cr logical, SPR moves
+;; LSU 2 stage pipelined
+;; FPU 3 stage pipelined
+;; It also has 4 vector units, one for each type of vector instruction.
+;; However, we can only dispatch 2 instructions per cycle.
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+;; In-order execution
+
+;; Branches go straight to the BPU. All other insns are handled
+;; by a dispatch unit which can issue a max of 3 insns per cycle.
+(define_reservation "ppc7450_du" "du1_7450|du2_7450|du3_7450")
+(define_reservation "ppc7450_vec_du" "vdu1_7450|vdu2_7450")
+
+(define_insn_reservation "ppc7450-load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
+ load_ux,load_u,vecload")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-store" 3
+ (and (eq_attr "type" "store,store_ux,store_u,vecstore")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-fpload" 4
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450*3")
+
+(define_insn_reservation "ppc7450-llsc" 3
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-sync" 35
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,lsu_7450")
+
+(define_insn_reservation "ppc7450-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\
+ trap,var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,iu1_7450|iu2_7450|iu3_7450,\
+ iu1_7450|iu2_7450|iu3_7450,iu1_7450|iu2_7450|iu3_7450")
+
+(define_insn_reservation "ppc7450-imul" 4
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450*2")
+
+(define_insn_reservation "ppc7450-imul2" 3
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450")
+
+(define_insn_reservation "ppc7450-idiv" 23
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450*23")
+
+(define_insn_reservation "ppc7450-compare" 2
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,(iu1_7450|iu2_7450|iu3_7450)")
+
+(define_insn_reservation "ppc7450-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450")
+
+(define_insn_reservation "ppc7450-fp" 5
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc7450-sdiv" 21
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450*21")
+
+(define_insn_reservation "ppc7450-ddiv" 35
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,fpu_7450*35")
+
+(define_insn_reservation "ppc7450-mfcr" 2
+ (and (eq_attr "type" "mfcr,mtcr")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450")
+
+(define_insn_reservation "ppc7450-crlogical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,mciu_7450")
+
+(define_insn_reservation "ppc7450-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc7450"))
+ "nothing,mciu_7450*2")
+
+(define_insn_reservation "ppc7450-mfjmpr" 3
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc7450"))
+ "nothing,mciu_7450*2")
+
+(define_insn_reservation "ppc7450-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc7450"))
+ "nothing,bpu_7450")
+
+;; Altivec
+(define_insn_reservation "ppc7450-vecsimple" 1
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,vecsmpl_7450")
+
+(define_insn_reservation "ppc7450-veccomplex" 4
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,veccmplx_7450")
+
+(define_insn_reservation "ppc7450-veccmp" 2
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,veccmplx_7450")
+
+(define_insn_reservation "ppc7450-vecfloat" 4
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,vecflt_7450")
+
+(define_insn_reservation "ppc7450-vecperm" 2
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc7450"))
+ "ppc7450_du,ppc7450_vec_du,vecperm_7450")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/750cl.h b/gcc-4.4.3/gcc/config/rs6000/750cl.h
new file mode 100644
index 000000000..0fa169845
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/750cl.h
@@ -0,0 +1,30 @@
+/* Enable 750cl paired single support.
+ Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+ Contributed by Revital Eres (eres@il.ibm.com)
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_PAIRED_FLOAT
+#define TARGET_PAIRED_FLOAT rs6000_paired_float
+
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC "-m750cl"
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/7xx.md b/gcc-4.4.3/gcc/config/rs6000/7xx.md
new file mode 100644
index 000000000..0129048c0
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/7xx.md
@@ -0,0 +1,184 @@
+;; Scheduling description for Motorola PowerPC 750 and PowerPC 7400 processors.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppc7xx,ppc7xxfp")
+(define_cpu_unit "iu1_7xx,iu2_7xx" "ppc7xx")
+(define_cpu_unit "fpu_7xx" "ppc7xxfp")
+(define_cpu_unit "lsu_7xx,bpu_7xx,sru_7xx" "ppc7xx")
+(define_cpu_unit "du1_7xx,du2_7xx" "ppc7xx")
+(define_cpu_unit "veccmplx_7xx,vecperm_7xx,vdu_7xx" "ppc7xx")
+
+;; PPC740/PPC750/PPC7400 32-bit 2xIU, LSU, SRU, FPU, BPU
+;; IU1 can perform all integer operations
+;; IU2 can perform all integer operations except imul and idiv
+;; LSU 2 stage pipelined
+;; FPU 3 stage pipelined
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+;; In-order execution
+
+
+;; The PPC750 user's manual recommends that to reduce branch mispredictions,
+;; the insn that sets CR bits should be separated from the branch insn
+;; that evaluates them. There is no advantage to having more than 10 cycles
+;; of separation.
+;; This could be artificially achieved by exaggerating the latency of
+;; compare insns but at the expense of a poorer schedule.
+
+;; Branches go straight to the BPU. All other insns are handled
+;; by a dispatch unit which can issue a max of 2 insns per cycle.
+(define_reservation "ppc750_du" "du1_7xx|du2_7xx")
+(define_reservation "ppc7400_vec_du" "vdu_7xx")
+
+(define_insn_reservation "ppc750-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
+ load_ux,load_u,fpload,fpload_ux,fpload_u,\
+ vecload,load_l")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
+(define_insn_reservation "ppc750-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,\
+ fpstore,fpstore_ux,fpstore_u,vecstore")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
+(define_insn_reservation "ppc750-storec" 8
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,lsu_7xx")
+
+(define_insn_reservation "ppc750-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\
+ trap,var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx,iu1_7xx|iu2_7xx")
+
+(define_insn_reservation "ppc750-imul" 4
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx*4")
+
+(define_insn_reservation "ppc750-imul2" 3
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx*2")
+
+(define_insn_reservation "ppc750-imul3" 2
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx")
+
+(define_insn_reservation "ppc750-idiv" 19
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx*19")
+
+(define_insn_reservation "ppc750-compare" 2
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,(iu1_7xx|iu2_7xx)")
+
+(define_insn_reservation "ppc750-fpcompare" 2
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx")
+
+(define_insn_reservation "ppc750-fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx")
+
+(define_insn_reservation "ppc750-dmul" 4
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc750"))
+ "ppc750_du,fpu_7xx*2")
+
+(define_insn_reservation "ppc7400-dmul" 3
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,fpu_7xx")
+
+; Divides are not pipelined
+(define_insn_reservation "ppc750-sdiv" 17
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx*17")
+
+(define_insn_reservation "ppc750-ddiv" 31
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,fpu_7xx*31")
+
+(define_insn_reservation "ppc750-mfcr" 2
+ (and (eq_attr "type" "mfcr,mtcr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "ppc750_du,iu1_7xx")
+
+(define_insn_reservation "ppc750-crlogical" 3
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,sru_7xx*2")
+
+(define_insn_reservation "ppc750-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr,isync,sync")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,sru_7xx*2")
+
+(define_insn_reservation "ppc750-mfjmpr" 3
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,sru_7xx*2")
+
+(define_insn_reservation "ppc750-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ "nothing,bpu_7xx")
+
+;; Altivec
+(define_insn_reservation "ppc7400-vecsimple" 1
+ (and (eq_attr "type" "vecsimple,veccmp")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,veccmplx_7xx")
+
+(define_insn_reservation "ppc7400-veccomplex" 4
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,veccmplx_7xx")
+
+(define_insn_reservation "ppc7400-vecfloat" 4
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,veccmplx_7xx")
+
+(define_insn_reservation "ppc7400-vecperm" 2
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc7400"))
+ "ppc750_du,ppc7400_vec_du,vecperm_7xx")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/8540.md b/gcc-4.4.3/gcc/config/rs6000/8540.md
new file mode 100644
index 000000000..2d44b3af9
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/8540.md
@@ -0,0 +1,250 @@
+;; Pipeline description for Motorola PowerPC 8540 processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppc8540_most,ppc8540_long,ppc8540_retire")
+(define_cpu_unit "ppc8540_decode_0,ppc8540_decode_1" "ppc8540_most")
+
+;; We don't simulate the general issue queue (GIC). If we have an SU
+;; insn followed by an SU1 insn, they cannot be issued on the same cycle
+;; (although an SU1 insn followed by an SU insn can be) because the SU
+;; insn would go to SU1 from the GIC0 entry. Fortunately, first-cycle
+;; multipass insn scheduling will detect the situation and issue the SU1
+;; insn before the SU insn.
+(define_cpu_unit "ppc8540_issue_0,ppc8540_issue_1" "ppc8540_most")
+
+;; We could describe completion buffer slots in combination with the
+;; retirement units and the order of completion, but the resulting
+;; automaton would behave in the same way because we cannot describe
+;; the real latency while taking in-order completion into account.
+;; Actually we could define the real latency by querying reserved
+;; automaton units, but the current scheduler uses latency before
+;; issuing insns and making any reservations.
+;;
+;; So our description aims for an insn schedule in which the
+;; insns would not wait in the completion buffer.
+(define_cpu_unit "ppc8540_retire_0,ppc8540_retire_1" "ppc8540_retire")
+
+;; Branch unit:
+(define_cpu_unit "ppc8540_bu" "ppc8540_most")
+
+;; SU:
+(define_cpu_unit "ppc8540_su0_stage0,ppc8540_su1_stage0" "ppc8540_most")
+
+;; We could describe MU subunits here for float multiply, float add,
+;; etc. But the resulting automaton would behave the same way as the
+;; single pipeline described below, because the MU can start only one
+;; insn per cycle. Actually we could simplify the automaton further by
+;; not describing stages 1-3; the resulting automaton would be the same.
+(define_cpu_unit "ppc8540_mu_stage0,ppc8540_mu_stage1" "ppc8540_most")
+(define_cpu_unit "ppc8540_mu_stage2,ppc8540_mu_stage3" "ppc8540_most")
+
+;; The following unit is used to describe non-pipelined division.
+(define_cpu_unit "ppc8540_mu_div" "ppc8540_long")
+
+;; Here we simplify the LSU unit description by not describing its stages.
+(define_cpu_unit "ppc8540_lsu" "ppc8540_most")
+
+;; The following units are used to make automata deterministic
+(define_cpu_unit "present_ppc8540_decode_0" "ppc8540_most")
+(define_cpu_unit "present_ppc8540_issue_0" "ppc8540_most")
+(define_cpu_unit "present_ppc8540_retire_0" "ppc8540_retire")
+(define_cpu_unit "present_ppc8540_su0_stage0" "ppc8540_most")
+
+;; The following presence sets make the automata deterministic when the ndfa option is used.
+(presence_set "present_ppc8540_decode_0" "ppc8540_decode_0")
+(presence_set "present_ppc8540_issue_0" "ppc8540_issue_0")
+(presence_set "present_ppc8540_retire_0" "ppc8540_retire_0")
+(presence_set "present_ppc8540_su0_stage0" "ppc8540_su0_stage0")
+
+;; Some useful abbreviations.
+(define_reservation "ppc8540_decode"
+ "ppc8540_decode_0|ppc8540_decode_1+present_ppc8540_decode_0")
+(define_reservation "ppc8540_issue"
+ "ppc8540_issue_0|ppc8540_issue_1+present_ppc8540_issue_0")
+(define_reservation "ppc8540_retire"
+ "ppc8540_retire_0|ppc8540_retire_1+present_ppc8540_retire_0")
+(define_reservation "ppc8540_su_stage0"
+ "ppc8540_su0_stage0|ppc8540_su1_stage0+present_ppc8540_su0_stage0")
+
+;; Simple SU insns
+(define_insn_reservation "ppc8540_su" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,cmp,compare,\
+ delayed_compare,var_delayed_compare,fast_compare,\
+ shift,trap,var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+(define_insn_reservation "ppc8540_two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+(define_insn_reservation "ppc8540_three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire,\
+ ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Branch. Actually this latency time is not used by the scheduler.
+(define_insn_reservation "ppc8540_branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_bu,ppc8540_retire")
+
+;; Multiply
+(define_insn_reservation "ppc8540_multiply" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Divide. We use the average latency time here. We omit reserving a
+;; retire unit because the resulting automata would be huge. We ignore
+;; reservation of mu_stage3 here because we use the average latency
+;; time.
+(define_insn_reservation "ppc8540_divide" 14
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\
+ ppc8540_mu_div*13")
+
+;; CR logical
+(define_insn_reservation "ppc8540_cr_logical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_bu,ppc8540_retire")
+
+;; Mfcr
+(define_insn_reservation "ppc8540_mfcr" 1
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Mtcrf
+(define_insn_reservation "ppc8540_mtcrf" 1
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Mtjmpr
+(define_insn_reservation "ppc8540_mtjmpr" 1
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Loads
+(define_insn_reservation "ppc8540_load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,sync")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
+
+;; Stores.
+(define_insn_reservation "ppc8540_store" 3
+ (and (eq_attr "type" "store,store_ux,store_u,store_c")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
+
+;; Simple FP
+(define_insn_reservation "ppc8540_simple_float" 1
+ (and (eq_attr "type" "fpsimple")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; FP
+(define_insn_reservation "ppc8540_float" 4
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Float divides. We omit reserving a retire unit and mu_stage3
+;; because the resulting automata would be huge.
+(define_insn_reservation "ppc8540_float_vector_divide" 29
+ (and (eq_attr "type" "vecfdiv")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\
+ ppc8540_mu_div*28")
+
+;; Brinc
+(define_insn_reservation "ppc8540_brinc" 1
+ (and (eq_attr "type" "brinc")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Simple vector
+(define_insn_reservation "ppc8540_simple_vector" 1
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Simple vector compare
+(define_insn_reservation "ppc8540_simple_vector_compare" 1
+ (and (eq_attr "type" "veccmpsimple")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su_stage0+ppc8540_retire")
+
+;; Vector compare
+(define_insn_reservation "ppc8540_vector_compare" 1
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; evsplatfi evsplati
+(define_insn_reservation "ppc8540_vector_perm" 1
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_su1_stage0+ppc8540_retire")
+
+;; Vector float
+(define_insn_reservation "ppc8540_float_vector" 4
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Vector divides: use the average latency. We omit reserving a retire
+;; unit because the resulting automata would be huge. We ignore the
+;; reservation of mu_stage3 here because we use the average latency time.
+(define_insn_reservation "ppc8540_vector_divide" 14
+ (and (eq_attr "type" "vecdiv")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0+ppc8540_mu_div,\
+ ppc8540_mu_div*13")
+
+;; Complex vector.
+(define_insn_reservation "ppc8540_complex_vector" 4
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_mu_stage0,ppc8540_mu_stage1,\
+ ppc8540_mu_stage2,ppc8540_mu_stage3+ppc8540_retire")
+
+;; Vector load
+(define_insn_reservation "ppc8540_vector_load" 3
+ (and (eq_attr "type" "vecload")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
+
+;; Vector store
+(define_insn_reservation "ppc8540_vector_store" 3
+ (and (eq_attr "type" "vecstore")
+ (eq_attr "cpu" "ppc8540"))
+ "ppc8540_decode,ppc8540_issue+ppc8540_lsu,nothing,ppc8540_retire")
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix.h b/gcc-4.4.3/gcc/config/rs6000/aix.h
new file mode 100644
index 000000000..4e736ce3f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix.h
@@ -0,0 +1,262 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Yes! We are AIX! */
+#define DEFAULT_ABI ABI_AIX
+#undef TARGET_AIX
+#define TARGET_AIX 1
+
+/* AIX always has a TOC. */
+#define TARGET_NO_TOC 0
+#define TARGET_TOC 1
+#define FIXED_R2 1
+
+/* AIX allows r13 to be used in 32-bit mode. */
+#define FIXED_R13 0
+
+/* 32-bit and 64-bit AIX stack boundary is 128. */
+#undef STACK_BOUNDARY
+#define STACK_BOUNDARY 128
+
+#undef TARGET_IEEEQUAD
+#define TARGET_IEEEQUAD 0
+
+/* The AIX linker will discard static constructors in object files before
+ collect has a chance to see them, so scan the object files directly. */
+#define COLLECT_EXPORT_LIST
+
+/* Handle #pragma weak and #pragma pack. */
+#define HANDLE_SYSV_PRAGMA 1
+
+/* This is the only version of nm that collect2 can work with. */
+#define REAL_NM_FILE_NAME "/usr/ucb/nm"
+
+#define USER_LABEL_PREFIX ""
+
+/* Don't turn -B into -L if the argument specifies a relative file name. */
+#define RELATIVE_PREFIX_NOT_LINKDIR
+
+/* Because of the above, we must have gcc search itself to find libgcc.a. */
+#define LINK_LIBGCC_SPECIAL_1
+
+#define MFWRAP_SPEC " %{static: %{fmudflap|fmudflapth: \
+ -brename:malloc,__wrap_malloc -brename:__real_malloc,malloc \
+ -brename:free,__wrap_free -brename:__real_free,free \
+ -brename:calloc,__wrap_calloc -brename:__real_calloc,calloc \
+ -brename:realloc,__wrap_realloc -brename:__real_realloc,realloc \
+ -brename:mmap,__wrap_mmap -brename:__real_mmap,mmap \
+ -brename:munmap,__wrap_munmap -brename:__real_munmap,munmap \
+ -brename:alloca,__wrap_alloca -brename:__real_alloca,alloca \
+} %{fmudflapth: \
+ -brename:pthread_create,__wrap_pthread_create \
+ -brename:__real_pthread_create,pthread_create \
+ -brename:pthread_join,__wrap_pthread_join \
+ -brename:__real_pthread_join,pthread_join \
+ -brename:pthread_exit,__wrap_pthread_exit \
+ -brename:__real_pthread_exit,pthread_exit \
+}} %{fmudflap|fmudflapth: \
+ -brename:main,__wrap_main -brename:__real_main,main \
+}"
+
+#define MFLIB_SPEC " %{fmudflap: -lmudflap \
+ %{static:%(link_gcc_c_sequence) -lmudflap}} \
+ %{fmudflapth: -lmudflapth -lpthread \
+ %{static:%(link_gcc_c_sequence) -lmudflapth}} "
+
+/* Names to predefine in the preprocessor for this target machine. */
+#define TARGET_OS_AIX_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_IBMR2"); \
+ builtin_define ("_POWER"); \
+ builtin_define ("_AIX"); \
+ builtin_define ("_AIX32"); \
+ builtin_define ("_AIX41"); \
+ builtin_define ("_LONG_LONG"); \
+ if (TARGET_LONG_DOUBLE_128) \
+ builtin_define ("__LONGDOUBLE128"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=aix"); \
+ } \
+ while (0)
+
+/* Define appropriate architecture macros for preprocessor depending on
+ target switches. */
+
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}"
+
+#define CC1_SPEC "%(cc1_cpu)"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC ""
+
+/* Tell the assembler to assume that all undefined names are external.
+
+ Don't do this until the fixed IBM assembler is more generally available.
+ When this becomes permanently defined, the ASM_OUTPUT_EXTERNAL,
+ ASM_OUTPUT_EXTERNAL_LIBCALL, and RS6000_OUTPUT_BASENAME macros will no
+ longer be needed. Also, the extern declaration of mcount in
+ rs6000_xcoff_file_start will no longer be needed. */
+
+/* #define ASM_SPEC "-u %(asm_cpu)" */
+
+/* Default location of syscalls.exp under AIX */
+#ifndef CROSS_DIRECTORY_STRUCTURE
+#define LINK_SYSCALLS_SPEC "-bI:/lib/syscalls.exp"
+#else
+#define LINK_SYSCALLS_SPEC ""
+#endif
+
+/* Default location of libg.exp under AIX */
+#ifndef CROSS_DIRECTORY_STRUCTURE
+#define LINK_LIBG_SPEC "-bexport:/usr/lib/libg.exp"
+#else
+#define LINK_LIBG_SPEC ""
+#endif
+
+/* Define the options for the binder: Start text at 512, align all segments
+ to 512 bytes, and warn if there is text relocation.
+
+ The -bhalt:4 option supposedly changes the level at which ld will abort,
+ but it also suppresses warnings about multiply defined symbols and is
+ used by the AIX cc command. So we use it here.
+
+ -bnodelcsect undoes a poor choice of default relating to multiply-defined
+ csects. See AIX documentation for more information about this.
+
+ -bM:SRE tells the linker that the output file is Shared REusable. Note
+ that to actually build a shared library you will also need to specify an
+ export list with the -Wl,-bE option. */
+
+#define LINK_SPEC "-T512 -H512 %{!r:-btextro} -bhalt:4 -bnodelcsect\
+%{static:-bnso %(link_syscalls) } \
+%{!shared:%{g*: %(link_libg) }} %{shared:-bM:SRE}"
+
+/* Profiled library versions are selected by adding special directories to the link path. */
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+%{p:-L/lib/profiled -L/usr/lib/profiled} %{!shared:%{g*:-lg}} -lc"
+
+/* Static linking with shared libstdc++ requires libsupc++ as well. */
+#define LIBSTDCXX_STATIC "-lstdc++ -lsupc++"
+
+/* This now supports a natural alignment mode. */
+/* AIX word-aligns FP doubles but doubleword-aligns 64-bit ints. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ ((TARGET_ALIGN_NATURAL == 0 \
+ && TYPE_MODE (strip_array_types (TREE_TYPE (FIELD))) == DFmode) \
+ ? MIN ((COMPUTED), 32) \
+ : (COMPUTED))
+
+/* AIX increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TARGET_ALIGN_NATURAL == 0 \
+ ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
+ : MAX ((COMPUTED), (SPECIFIED)))
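+
+/* A sketch of the effect under the default power alignment
+   (TARGET_ALIGN_NATURAL == 0):
+     struct { int i; double d; }  places D at offset 4, since
+       ADJUST_FIELD_ALIGN caps a non-first double at word alignment;
+     struct { double d; int i; }  is doubleword-aligned as a whole,
+       via the special case in ROUND_TYPE_ALIGN.
+   -malign-natural turns both overrides off. */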
+
+/* The AIX ABI isn't explicit on whether aggregates smaller than a
+ word/doubleword should be padded upward or downward. One could
+ reasonably assume that they follow the normal rules for structure
+ layout treating the parameter area as any other block of memory,
+ then map the reg param area to registers, i.e., pad upward, which
+ is the way IBM Compilers for AIX behave.
+ Setting both of the following defines results in this behavior. */
+#define AGGREGATE_PADDING_FIXED 1
+#define AGGREGATES_PAD_UPWARD_ALWAYS 1
+
+/* Specify padding for the last element of a block move between
+ registers and memory. FIRST is nonzero if this is the only
+ element. */
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE))
+
+/* Indicate that jump tables go in the text section. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Define any extra SPECS that the compiler needs to generate. */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "link_syscalls", LINK_SYSCALLS_SPEC }, \
+ { "link_libg", LINK_LIBG_SPEC }
+
+/* Define cutoff for using external functions to save floating point. */
+#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) == 62 || (FIRST_REG) == 63)
+/* And similarly for general purpose registers. */
+#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32)
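+
+/* Read literally: FPR saves are inlined only when just f31, or f30 and
+   f31, need saving (out-of-line library routines handle longer runs),
+   while the first saved GPR is always below 32, so GPR saves are
+   always emitted inline on AIX. */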
+
+/* __throw will restore its own return address to be the same as the
+ return address of the function that the throw is being made to.
+ This is unfortunate, because we want to check the original
+ return address to see if we need to restore the TOC.
+ So we have to squirrel it away with this. */
+#define SETUP_FRAME_ADDRESSES() rs6000_aix_emit_builtin_unwind_init ()
+
+/* If the current unwind info (FS) does not contain explicit info
+ saving R2, then we have to do a minor amount of code reading to
+ figure out if it was saved. The big problem here is that the
+ code that does the save/restore is generated by the linker, so
+ we have no good way to determine at compile time what to do. */
+
+#define R_LR 65
+
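+/* 0xE8410028 is the encoding of "ld r2,40(r1)" and 0x80410014 of
+   "lwz r2,20(r1)": the TOC-restore instruction that the linker places
+   after an inter-module call. If the word at the saved return address
+   is that restore, R2 can be recovered from its stack slot. */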
+#ifdef __64BIT__
+#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
+ do { \
+ if ((FS)->regs.reg[2].how == REG_UNSAVED) \
+ { \
+ unsigned int *insn \
+ = (unsigned int *) \
+ _Unwind_GetGR ((CTX), R_LR); \
+ if (*insn == 0xE8410028) \
+ _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 40); \
+ } \
+ } while (0)
+#else
+#define MD_FROB_UPDATE_CONTEXT(CTX, FS) \
+ do { \
+ if ((FS)->regs.reg[2].how == REG_UNSAVED) \
+ { \
+ unsigned int *insn \
+ = (unsigned int *) \
+ _Unwind_GetGR ((CTX), R_LR); \
+ if (*insn == 0x80410014) \
+ _Unwind_SetGRPtr ((CTX), 2, (CTX)->cfa + 20); \
+ } \
+ } while (0)
+#endif
+
+#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION ;
+
+/* No version of AIX fully supports AltiVec or 64-bit instructions in
+ 32-bit mode. */
+#define OS_MISSING_POWERPC64 1
+#define OS_MISSING_ALTIVEC 1
+
+/* WINT_TYPE */
+#define WINT_TYPE "int"
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix.opt b/gcc-4.4.3/gcc/config/rs6000/aix.opt
new file mode 100644
index 000000000..d00017298
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix.opt
@@ -0,0 +1,24 @@
+; AIX options.
+;
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mxl-compat
+Target Var(has_xl_compat_option)
+Conform more closely to IBM XLC semantics
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix41.h b/gcc-4.4.3/gcc/config/rs6000/aix41.h
new file mode 100644
index 000000000..a10908499
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix41.h
@@ -0,0 +1,101 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX version 4.1.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004,
+ 2005, 2007
+ Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %(asm_cpu)"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mcom"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_NEW_MNEMONICS
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC601
+
+/* AIX does not support AltiVec. */
+#undef TARGET_ALTIVEC
+#define TARGET_ALTIVEC 0
+#undef TARGET_ALTIVEC_ABI
+#define TARGET_ALTIVEC_ABI 0
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=common" }
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled} %{!shared:%{g*:-lg}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread: -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a}\
+ %{!pthread: -lc}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{!shared: %{g*: %(link_libg) }}\
+ %{shared:-bM:SRE %{!e:-bnoentry}}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{mpe:%{pg:/usr/lpp/ppe.poe/lib/gcrt0.o}\
+ %{!pg:%{p:/usr/lpp/ppe.poe/lib/mcrt0.o}\
+ %{!p:/usr/lpp/ppe.poe/lib/crt0.o}}}\
+ %{!mpe:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX 4 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* The IBM AIX 4.x assembler doesn't support forward references in
+ .set directives. We handle this by deferring the output of .set
+ directives to the end of the compilation unit. */
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
+
+#undef TARGET_64BIT
+#define TARGET_64BIT 0
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix41.opt b/gcc-4.4.3/gcc/config/rs6000/aix41.opt
new file mode 100644
index 000000000..62e37679f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix41.opt
@@ -0,0 +1,24 @@
+; Options for AIX4.1.
+;
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mpe
+Target Report RejectNegative Var(internal_nothing_1)
+Support message passing with the Parallel Environment
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix43.h b/gcc-4.4.3/gcc/config/rs6000/aix43.h
new file mode 100644
index 000000000..56b24a363
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix43.h
@@ -0,0 +1,189 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX version 4.3.
+ Copyright (C) 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006,
+ 2007 Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \
+ { \
+ rs6000_long_double_type_size = 64; \
+ if (rs6000_explicit_options.long_double) \
+ warning (0, "soft-float and long-double-128 are incompatible"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwr2} \
+ %{mpowerpc*: %{!mpowerpc64: -mppc}} \
+ %{mpowerpc64: -mppc64} \
+ %{!mpower*: %{!mpowerpc*: %(asm_default)}}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwr2} \
+%{mcpu=power3: -m620} \
+%{mcpu=power4: -m620} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwr2} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -m620} \
+%{mcpu=630: -m620}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mcom"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}\
+ %{maix64: -D__64BIT__}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_ALL_SOURCE \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_NEW_MNEMONICS
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC604e
+
+/* AIX does not support AltiVec. */
+#undef TARGET_ALTIVEC
+#define TARGET_ALTIVEC 0
+#undef TARGET_ALTIVEC_ABI
+#define TARGET_ALTIVEC_ABI 0
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=common" }
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a}\
+ %{!pthread:-lc}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\
+ %{mpe:-binitfini:poe_remote_main}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX 4.3 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* AIX 4 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from the linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
+
+/* The IBM AIX 4.x assembler doesn't support forward references in
+ .set directives. We handle this by deferring the output of .set
+ directives to the end of the compilation unit. */
+#define TARGET_DEFERRED_OUTPUT_DEFS(DECL,TARGET) true
+
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix51.h b/gcc-4.4.3/gcc/config/rs6000/aix51.h
new file mode 100644
index 000000000..dbec0fb2a
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix51.h
@@ -0,0 +1,193 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX V5.
+ Copyright (C) 2001, 2003, 2004, 2005, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwr2} \
+ %{mpowerpc*: %{!mpowerpc64: -mppc}} \
+ %{mpowerpc64: -mppc64} \
+ %{!mpower*: %{!mpowerpc*: %(asm_default)}}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwr2} \
+%{mcpu=power3: -m620} \
+%{mcpu=power4: -m620} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwr2} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -m620} \
+%{mcpu=630: -m620} \
+%{mcpu=970: -m620} \
+%{mcpu=G5: -m620}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mcom"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ builtin_define ("_AIX51"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \
+ %{ansi: -D_ANSI_C_SOURCE} \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_ALL_SOURCE \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_NEW_MNEMONICS
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC604e
+
+/* AIX does not support AltiVec. */
+#undef TARGET_ALTIVEC
+#define TARGET_ALTIVEC 0
+#undef TARGET_ALTIVEC_ABI
+#define TARGET_ALTIVEC_ABI 0
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=common" }
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-lpthreads} -lc"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\
+ %{mpe:-binitfini:poe_remote_main}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int")
+
+/* Width of wchar_t in bits. */
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
+
+/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from the linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
+
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
+
+/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION,
+ but does not have crtbegin/end. */
+
+#define TARGET_USE_JCR_SECTION 0
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix52.h b/gcc-4.4.3/gcc/config/rs6000/aix52.h
new file mode 100644
index 000000000..8fab3f4a2
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix52.h
@@ -0,0 +1,203 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX V5.2.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \
+ { \
+ rs6000_long_double_type_size = 64; \
+ if (rs6000_explicit_options.long_double) \
+ warning (0, "soft-float and long-double-128 are incompatible"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpowerpc64: -mppc64} \
+ %{!mpower64: %(asm_default)}}} \
+%{mcpu=power3: -m620} \
+%{mcpu=power4: -m620} \
+%{mcpu=power5: -m620} \
+%{mcpu=power5+: -m620} \
+%{mcpu=power6: -m620} \
+%{mcpu=power6x: -m620} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -m620} \
+%{mcpu=630: -m620} \
+%{mcpu=970: -m620} \
+%{mcpu=G5: -m620}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ builtin_define ("_AIX51"); \
+ builtin_define ("_AIX52"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \
+ %{ansi: -D_ANSI_C_SOURCE} \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. Synchronize with libstdc++ os_defines.h. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_ALL_SOURCE \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_POWER4
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4
+
+#undef TARGET_POWER
+#define TARGET_POWER 0
+
+/* AIX does not support AltiVec. */
+#undef TARGET_ALTIVEC
+#define TARGET_ALTIVEC 0
+#undef TARGET_ALTIVEC_ABI
+#define TARGET_ALTIVEC_ABI 0
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-lpthreads} -lc"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\
+ %{mpe:-binitfini:poe_remote_main}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int")
+
+/* Width of wchar_t in bits. */
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
+
+/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from the linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
+
+/* AIX 5.2 has the float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
+
+#ifndef _AIX52
+extern long long int atoll(const char *);
+#endif
+
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
+
+/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION,
+ but does not have crtbegin/end. */
+
+#define TARGET_USE_JCR_SECTION 0
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix53.h b/gcc-4.4.3/gcc/config/rs6000/aix53.h
new file mode 100644
index 000000000..80f1038fa
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix53.h
@@ -0,0 +1,199 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX V5.3.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \
+ { \
+ rs6000_long_double_type_size = 64; \
+ if (rs6000_explicit_options.long_double) \
+ warning (0, "soft-float and long-double-128 are incompatible"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpowerpc64: -mppc64} \
+ %{maltivec: -m970} \
+ %{!maltivec: %{!mpower64: %(asm_default)}}}} \
+%{mcpu=power3: -m620} \
+%{mcpu=power4: -mpwr4} \
+%{mcpu=power5: -mpwr5} \
+%{mcpu=power5+: -mpwr5x} \
+%{mcpu=power6: -mpwr6} \
+%{mcpu=power6x: -mpwr6} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -m620} \
+%{mcpu=630: -m620} \
+%{mcpu=970: -m970} \
+%{mcpu=G5: -m970}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ builtin_define ("_AIX51"); \
+ builtin_define ("_AIX52"); \
+ builtin_define ("_AIX53"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \
+ %{ansi: -D_ANSI_C_SOURCE} \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. Synchronize with libstdc++ os_defines.h. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_ALL_SOURCE \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_POWER5
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER5
+
+#undef TARGET_POWER
+#define TARGET_POWER 0
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-lpthreads} -lc"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\
+ %{mpe:-binitfini:poe_remote_main}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int")
+
+/* Width of wchar_t in bits. */
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
+
+/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from the linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
+
+/* AIX 5.2 has the float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
+
+#ifndef _AIX52
+extern long long int atoll(const char *);
+#endif
+
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
+
+/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION,
+ but does not have crtbegin/end. */
+
+#define TARGET_USE_JCR_SECTION 0
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix61.h b/gcc-4.4.3/gcc/config/rs6000/aix61.h
new file mode 100644
index 000000000..e62a775c7
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix61.h
@@ -0,0 +1,204 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX V6.1.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning (0, "-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning (0, "-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_SOFT_FLOAT && TARGET_LONG_DOUBLE_128) \
+ { \
+ rs6000_long_double_type_size = 64; \
+ if (rs6000_explicit_options.long_double) \
+ warning (0, "soft-float and long-double-128 are incompatible"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 %{!mcpu*:-mppc64}} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpowerpc64: -mppc64} \
+ %{maltivec: -m970} \
+ %{!maltivec: %{!mpower64: %(asm_default)}}}} \
+%{mcpu=power3: -m620} \
+%{mcpu=power4: -mpwr4} \
+%{mcpu=power5: -mpwr5} \
+%{mcpu=power5+: -mpwr5x} \
+%{mcpu=power6: -mpwr6} \
+%{mcpu=power6x: -mpwr6} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -m620} \
+%{mcpu=630: -m620} \
+%{mcpu=970: -m970} \
+%{mcpu=G5: -m970}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc"
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("_AIX43"); \
+ builtin_define ("_AIX51"); \
+ builtin_define ("_AIX52"); \
+ builtin_define ("_AIX53"); \
+ builtin_define ("_AIX61"); \
+ TARGET_OS_AIX_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \
+ %{ansi: -D_ANSI_C_SOURCE} \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. Synchronize with libstdc++ os_defines.h. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_ALL_SOURCE -D__COMPATMATH__ \
+ %{maix64: -D__64BIT__} \
+ %{mpe: -I/usr/lpp/ppe.poe/include} \
+ %{pthread: -D_THREAD_SAFE}"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_POWER5
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER5
+
+#undef TARGET_POWER
+#define TARGET_POWER 0
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-lpthreads} -lc"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}\
+ %{mpe:-binitfini:poe_remote_main}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (!TARGET_64BIT ? "short unsigned int" : "unsigned int")
+
+/* Width of wchar_t in bits. */
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
+
+/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from the linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
+
+/* AIX 5.2 has the float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS 1
+
+#ifndef _AIX52
+extern long long int atoll(const char *);
+#endif
+
+/* This target uses the aix64.opt file. */
+#define TARGET_USES_AIX64_OPT 1
+
+/* This target defines SUPPORTS_WEAK and TARGET_ASM_NAMED_SECTION,
+ but does not have crtbegin/end. */
+
+#define TARGET_USE_JCR_SECTION 0
+
+/* Default to 128 bit long double. */
+
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
diff --git a/gcc-4.4.3/gcc/config/rs6000/aix64.opt b/gcc-4.4.3/gcc/config/rs6000/aix64.opt
new file mode 100644
index 000000000..2ffcff61e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/aix64.opt
@@ -0,0 +1,32 @@
+; Options for the 64-bit flavor of AIX.
+;
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+maix64
+Target Report RejectNegative Negative(maix32) Mask(64BIT)
+Compile for 64-bit pointers
+
+maix32
+Target Report RejectNegative Negative(maix64) InverseMask(64BIT)
+Compile for 32-bit pointers
+
+mpe
+Target Report RejectNegative Var(internal_nothing_1)
+Support message passing with the Parallel Environment
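+
+; Each record above is the option name, a line of properties, then the
+; help text shown by --help. The Negative() chain makes -maix64 and
+; -maix32 override one another, and Mask(64BIT) stores the setting in
+; target_flags.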
diff --git a/gcc-4.4.3/gcc/config/rs6000/altivec.h b/gcc-4.4.3/gcc/config/rs6000/altivec.h
new file mode 100644
index 000000000..7b397997d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/altivec.h
@@ -0,0 +1,477 @@
+/* PowerPC AltiVec include file.
+ Copyright (C) 2002, 2003, 2004, 2005, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+ Rewritten by Paolo Bonzini (bonzini@gnu.org).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Implemented to conform to the specification included in the AltiVec
+ Technology Programming Interface Manual (ALTIVECPIM/D 6/1999 Rev 0). */
+
+#ifndef _ALTIVEC_H
+#define _ALTIVEC_H 1
+
+#if !defined(__VEC__) || !defined(__ALTIVEC__)
+#error Use the "-maltivec" flag to enable PowerPC AltiVec support
+#endif
+
+/* If __APPLE_ALTIVEC__ is defined, the compiler supports 'vector',
+ 'pixel' and 'bool' as context-sensitive AltiVec keywords (in
+ non-AltiVec contexts, they revert to their original meanings,
+ if any), so we do not need to define them as macros. */
+
+#if !defined(__APPLE_ALTIVEC__)
+/* You are allowed to undef these for C++ compatibility. */
+#define vector __vector
+#define pixel __pixel
+#define bool __bool
+#endif
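+
+/* So, when __APPLE_ALTIVEC__ is not defined, a declaration such as
+     vector bool short b;   or   vector pixel vp;
+   reaches the compiler as __vector __bool short / __vector __pixel. */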
+
+/* Condition register codes for AltiVec predicates. */
+
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
+/* Synonyms. */
+#define vec_vaddcuw vec_addc
+#define vec_vand vec_and
+#define vec_vandc vec_andc
+#define vec_vrfip vec_ceil
+#define vec_vcmpbfp vec_cmpb
+#define vec_vcmpgefp vec_cmpge
+#define vec_vctsxs vec_cts
+#define vec_vctuxs vec_ctu
+#define vec_vexptefp vec_expte
+#define vec_vrfim vec_floor
+#define vec_lvx vec_ld
+#define vec_lvxl vec_ldl
+#define vec_vlogefp vec_loge
+#define vec_vmaddfp vec_madd
+#define vec_vmhaddshs vec_madds
+#define vec_vmladduhm vec_mladd
+#define vec_vmhraddshs vec_mradds
+#define vec_vnmsubfp vec_nmsub
+#define vec_vnor vec_nor
+#define vec_vor vec_or
+#define vec_vpkpx vec_packpx
+#define vec_vperm vec_perm
+#define vec_vrefp vec_re
+#define vec_vrfin vec_round
+#define vec_vrsqrtefp vec_rsqrte
+#define vec_vsel vec_sel
+#define vec_vsldoi vec_sld
+#define vec_vsl vec_sll
+#define vec_vslo vec_slo
+#define vec_vspltisb vec_splat_s8
+#define vec_vspltish vec_splat_s16
+#define vec_vspltisw vec_splat_s32
+#define vec_vsr vec_srl
+#define vec_vsro vec_sro
+#define vec_stvx vec_st
+#define vec_stvxl vec_stl
+#define vec_vsubcuw vec_subc
+#define vec_vsum2sws vec_sum2s
+#define vec_vsumsws vec_sums
+#define vec_vrfiz vec_trunc
+#define vec_vxor vec_xor
+
+/* Functions that are resolved by the backend to one of the
+ typed builtins. */
+#define vec_vaddfp __builtin_vec_vaddfp
+#define vec_addc __builtin_vec_addc
+#define vec_vaddsws __builtin_vec_vaddsws
+#define vec_vaddshs __builtin_vec_vaddshs
+#define vec_vaddsbs __builtin_vec_vaddsbs
+#define vec_vavgsw __builtin_vec_vavgsw
+#define vec_vavguw __builtin_vec_vavguw
+#define vec_vavgsh __builtin_vec_vavgsh
+#define vec_vavguh __builtin_vec_vavguh
+#define vec_vavgsb __builtin_vec_vavgsb
+#define vec_vavgub __builtin_vec_vavgub
+#define vec_ceil __builtin_vec_ceil
+#define vec_cmpb __builtin_vec_cmpb
+#define vec_vcmpeqfp __builtin_vec_vcmpeqfp
+#define vec_cmpge __builtin_vec_cmpge
+#define vec_vcmpgtfp __builtin_vec_vcmpgtfp
+#define vec_vcmpgtsw __builtin_vec_vcmpgtsw
+#define vec_vcmpgtuw __builtin_vec_vcmpgtuw
+#define vec_vcmpgtsh __builtin_vec_vcmpgtsh
+#define vec_vcmpgtuh __builtin_vec_vcmpgtuh
+#define vec_vcmpgtsb __builtin_vec_vcmpgtsb
+#define vec_vcmpgtub __builtin_vec_vcmpgtub
+#define vec_vcfsx __builtin_vec_vcfsx
+#define vec_vcfux __builtin_vec_vcfux
+#define vec_cts __builtin_vec_cts
+#define vec_ctu __builtin_vec_ctu
+#define vec_expte __builtin_vec_expte
+#define vec_floor __builtin_vec_floor
+#define vec_loge __builtin_vec_loge
+#define vec_madd __builtin_vec_madd
+#define vec_madds __builtin_vec_madds
+#define vec_mtvscr __builtin_vec_mtvscr
+#define vec_vmaxfp __builtin_vec_vmaxfp
+#define vec_vmaxsw __builtin_vec_vmaxsw
+#define vec_vmaxsh __builtin_vec_vmaxsh
+#define vec_vmaxsb __builtin_vec_vmaxsb
+#define vec_vminfp __builtin_vec_vminfp
+#define vec_vminsw __builtin_vec_vminsw
+#define vec_vminsh __builtin_vec_vminsh
+#define vec_vminsb __builtin_vec_vminsb
+#define vec_mradds __builtin_vec_mradds
+#define vec_vmsumshm __builtin_vec_vmsumshm
+#define vec_vmsumuhm __builtin_vec_vmsumuhm
+#define vec_vmsummbm __builtin_vec_vmsummbm
+#define vec_vmsumubm __builtin_vec_vmsumubm
+#define vec_vmsumshs __builtin_vec_vmsumshs
+#define vec_vmsumuhs __builtin_vec_vmsumuhs
+#define vec_vmulesb __builtin_vec_vmulesb
+#define vec_vmulesh __builtin_vec_vmulesh
+#define vec_vmuleuh __builtin_vec_vmuleuh
+#define vec_vmuleub __builtin_vec_vmuleub
+#define vec_vmulosh __builtin_vec_vmulosh
+#define vec_vmulouh __builtin_vec_vmulouh
+#define vec_vmulosb __builtin_vec_vmulosb
+#define vec_vmuloub __builtin_vec_vmuloub
+#define vec_nmsub __builtin_vec_nmsub
+#define vec_packpx __builtin_vec_packpx
+#define vec_vpkswss __builtin_vec_vpkswss
+#define vec_vpkuwus __builtin_vec_vpkuwus
+#define vec_vpkshss __builtin_vec_vpkshss
+#define vec_vpkuhus __builtin_vec_vpkuhus
+#define vec_vpkswus __builtin_vec_vpkswus
+#define vec_vpkshus __builtin_vec_vpkshus
+#define vec_re __builtin_vec_re
+#define vec_round __builtin_vec_round
+#define vec_rsqrte __builtin_vec_rsqrte
+#define vec_vsubfp __builtin_vec_vsubfp
+#define vec_subc __builtin_vec_subc
+#define vec_vsubsws __builtin_vec_vsubsws
+#define vec_vsubshs __builtin_vec_vsubshs
+#define vec_vsubsbs __builtin_vec_vsubsbs
+#define vec_sum4s __builtin_vec_sum4s
+#define vec_vsum4shs __builtin_vec_vsum4shs
+#define vec_vsum4sbs __builtin_vec_vsum4sbs
+#define vec_vsum4ubs __builtin_vec_vsum4ubs
+#define vec_sum2s __builtin_vec_sum2s
+#define vec_sums __builtin_vec_sums
+#define vec_trunc __builtin_vec_trunc
+#define vec_vupkhpx __builtin_vec_vupkhpx
+#define vec_vupkhsh __builtin_vec_vupkhsh
+#define vec_vupkhsb __builtin_vec_vupkhsb
+#define vec_vupklpx __builtin_vec_vupklpx
+#define vec_vupklsh __builtin_vec_vupklsh
+#define vec_vupklsb __builtin_vec_vupklsb
+#define vec_abs __builtin_vec_abs
+#define vec_abss __builtin_vec_abss
+#define vec_add __builtin_vec_add
+#define vec_adds __builtin_vec_adds
+#define vec_and __builtin_vec_and
+#define vec_andc __builtin_vec_andc
+#define vec_avg __builtin_vec_avg
+#define vec_cmpeq __builtin_vec_cmpeq
+#define vec_cmpgt __builtin_vec_cmpgt
+#define vec_ctf __builtin_vec_ctf
+#define vec_dst __builtin_vec_dst
+#define vec_dstst __builtin_vec_dstst
+#define vec_dststt __builtin_vec_dststt
+#define vec_dstt __builtin_vec_dstt
+#define vec_ld __builtin_vec_ld
+#define vec_lde __builtin_vec_lde
+#define vec_ldl __builtin_vec_ldl
+#define vec_lvebx __builtin_vec_lvebx
+#define vec_lvehx __builtin_vec_lvehx
+#define vec_lvewx __builtin_vec_lvewx
+/* Cell-only intrinsics. */
+#ifdef __PPU__
+#define vec_lvlx __builtin_vec_lvlx
+#define vec_lvlxl __builtin_vec_lvlxl
+#define vec_lvrx __builtin_vec_lvrx
+#define vec_lvrxl __builtin_vec_lvrxl
+#endif
+#define vec_lvsl __builtin_vec_lvsl
+#define vec_lvsr __builtin_vec_lvsr
+#define vec_max __builtin_vec_max
+#define vec_mergeh __builtin_vec_mergeh
+#define vec_mergel __builtin_vec_mergel
+#define vec_min __builtin_vec_min
+#define vec_mladd __builtin_vec_mladd
+#define vec_msum __builtin_vec_msum
+#define vec_msums __builtin_vec_msums
+#define vec_mule __builtin_vec_mule
+#define vec_mulo __builtin_vec_mulo
+#define vec_nor __builtin_vec_nor
+#define vec_or __builtin_vec_or
+#define vec_pack __builtin_vec_pack
+#define vec_packs __builtin_vec_packs
+#define vec_packsu __builtin_vec_packsu
+#define vec_perm __builtin_vec_perm
+#define vec_rl __builtin_vec_rl
+#define vec_sel __builtin_vec_sel
+#define vec_sl __builtin_vec_sl
+#define vec_sld __builtin_vec_sld
+#define vec_sll __builtin_vec_sll
+#define vec_slo __builtin_vec_slo
+#define vec_splat __builtin_vec_splat
+#define vec_sr __builtin_vec_sr
+#define vec_sra __builtin_vec_sra
+#define vec_srl __builtin_vec_srl
+#define vec_sro __builtin_vec_sro
+#define vec_st __builtin_vec_st
+#define vec_ste __builtin_vec_ste
+#define vec_stl __builtin_vec_stl
+#define vec_stvebx __builtin_vec_stvebx
+#define vec_stvehx __builtin_vec_stvehx
+#define vec_stvewx __builtin_vec_stvewx
+/* Cell-only intrinsics. */
+#ifdef __PPU__
+#define vec_stvlx __builtin_vec_stvlx
+#define vec_stvlxl __builtin_vec_stvlxl
+#define vec_stvrx __builtin_vec_stvrx
+#define vec_stvrxl __builtin_vec_stvrxl
+#endif
+#define vec_sub __builtin_vec_sub
+#define vec_subs __builtin_vec_subs
+#define vec_sum __builtin_vec_sum
+#define vec_unpackh __builtin_vec_unpackh
+#define vec_unpackl __builtin_vec_unpackl
+#define vec_vaddubm __builtin_vec_vaddubm
+#define vec_vaddubs __builtin_vec_vaddubs
+#define vec_vadduhm __builtin_vec_vadduhm
+#define vec_vadduhs __builtin_vec_vadduhs
+#define vec_vadduwm __builtin_vec_vadduwm
+#define vec_vadduws __builtin_vec_vadduws
+#define vec_vcmpequb __builtin_vec_vcmpequb
+#define vec_vcmpequh __builtin_vec_vcmpequh
+#define vec_vcmpequw __builtin_vec_vcmpequw
+#define vec_vmaxub __builtin_vec_vmaxub
+#define vec_vmaxuh __builtin_vec_vmaxuh
+#define vec_vmaxuw __builtin_vec_vmaxuw
+#define vec_vminub __builtin_vec_vminub
+#define vec_vminuh __builtin_vec_vminuh
+#define vec_vminuw __builtin_vec_vminuw
+#define vec_vmrghb __builtin_vec_vmrghb
+#define vec_vmrghh __builtin_vec_vmrghh
+#define vec_vmrghw __builtin_vec_vmrghw
+#define vec_vmrglb __builtin_vec_vmrglb
+#define vec_vmrglh __builtin_vec_vmrglh
+#define vec_vmrglw __builtin_vec_vmrglw
+#define vec_vpkuhum __builtin_vec_vpkuhum
+#define vec_vpkuwum __builtin_vec_vpkuwum
+#define vec_vrlb __builtin_vec_vrlb
+#define vec_vrlh __builtin_vec_vrlh
+#define vec_vrlw __builtin_vec_vrlw
+#define vec_vslb __builtin_vec_vslb
+#define vec_vslh __builtin_vec_vslh
+#define vec_vslw __builtin_vec_vslw
+#define vec_vspltb __builtin_vec_vspltb
+#define vec_vsplth __builtin_vec_vsplth
+#define vec_vspltw __builtin_vec_vspltw
+#define vec_vsrab __builtin_vec_vsrab
+#define vec_vsrah __builtin_vec_vsrah
+#define vec_vsraw __builtin_vec_vsraw
+#define vec_vsrb __builtin_vec_vsrb
+#define vec_vsrh __builtin_vec_vsrh
+#define vec_vsrw __builtin_vec_vsrw
+#define vec_vsububs __builtin_vec_vsububs
+#define vec_vsububm __builtin_vec_vsububm
+#define vec_vsubuhm __builtin_vec_vsubuhm
+#define vec_vsubuhs __builtin_vec_vsubuhs
+#define vec_vsubuwm __builtin_vec_vsubuwm
+#define vec_vsubuws __builtin_vec_vsubuws
+#define vec_xor __builtin_vec_xor
+
+#define vec_extract __builtin_vec_extract
+#define vec_insert __builtin_vec_insert
+#define vec_splats __builtin_vec_splats
+#define vec_promote __builtin_vec_promote
+
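+/* A brief sketch of the type-generic forms above, assuming a compiler
+   invoked with -maltivec:
+
+     __vector signed int v = vec_splats (7);   splat: {7, 7, 7, 7}
+     int e = vec_extract (v, 0);               first element: 7
+
+   vec_splats and vec_promote resolve on the scalar argument's type;
+   vec_extract and vec_insert take a vector and an element index.  */
+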
+/* Predicates.
+   For C++, we use templates in order to allow non-parenthesized arguments.
+   For C, instead, we use macros, since non-parenthesized arguments were
+   not allowed even in older GCC implementations of AltiVec.
+
+   In the future, we may add more magic to the back-end, so that no
+   one- or two-argument macros are needed.  */
+
+#ifdef __cplusplus
+#define __altivec_unary_pred(NAME, CALL) \
+template <class T> int NAME (T a1) { return CALL; }
+
+#define __altivec_scalar_pred(NAME, CALL) \
+template <class T, class U> int NAME (T a1, U a2) { return CALL; }
+
+/* Given the vec_step of a type, return the corresponding bool type. */
+template <int STEP> struct __altivec_bool_ret { };
+template <> struct __altivec_bool_ret <4> {
+  typedef __vector __bool int __ret;
+};
+template <> struct __altivec_bool_ret <8> {
+  typedef __vector __bool short __ret;
+};
+template <> struct __altivec_bool_ret <16> {
+  typedef __vector __bool char __ret;
+};
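+/* For example, vec_step (__vector signed int) is 4, so a V4SI
+   comparison such as vec_cmplt gets __vector __bool int as its
+   return type.  */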
+
+/* Be very liberal in the pairs we accept.  Mistakes such as passing
+   a `vector char' and a `vector short' will be caught by the middle-end,
+   while any attempt to detect them here would produce hard-to-understand
+   error messages involving the implementation details of AltiVec.  */
+#define __altivec_binary_pred(NAME, CALL) \
+template <class T, class U> \
+typename __altivec_bool_ret <vec_step (T)>::__ret \
+NAME (T a1, U a2) \
+{ \
+ return CALL; \
+}
+
+__altivec_binary_pred(vec_cmplt,
+ __builtin_vec_cmpgt (a2, a1))
+__altivec_binary_pred(vec_cmple,
+ __builtin_vec_cmpge (a2, a1))
+
+__altivec_scalar_pred(vec_all_in,
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_any_out,
+ __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, a1, a2))
+
+__altivec_unary_pred(vec_all_nan,
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ, a1, a1))
+__altivec_unary_pred(vec_any_nan,
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, a1, a1))
+
+__altivec_unary_pred(vec_all_numeric,
+ __builtin_altivec_vcmpeqfp_p (__CR6_LT, a1, a1))
+__altivec_unary_pred(vec_any_numeric,
+ __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, a1, a1))
+
+__altivec_scalar_pred(vec_all_eq,
+ __builtin_vec_vcmpeq_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_all_ne,
+ __builtin_vec_vcmpeq_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_any_eq,
+ __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a1, a2))
+__altivec_scalar_pred(vec_any_ne,
+ __builtin_vec_vcmpeq_p (__CR6_LT_REV, a1, a2))
+
+__altivec_scalar_pred(vec_all_gt,
+ __builtin_vec_vcmpgt_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_all_lt,
+ __builtin_vec_vcmpgt_p (__CR6_LT, a2, a1))
+__altivec_scalar_pred(vec_any_gt,
+ __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a1, a2))
+__altivec_scalar_pred(vec_any_lt,
+ __builtin_vec_vcmpgt_p (__CR6_EQ_REV, a2, a1))
+
+__altivec_scalar_pred(vec_all_ngt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_all_nlt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_EQ, a2, a1))
+__altivec_scalar_pred(vec_any_ngt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a1, a2))
+__altivec_scalar_pred(vec_any_nlt,
+ __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, a2, a1))
+
+/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types,
+ while for integer types it is converted to __builtin_vec_vcmpgt_p,
+ with inverted args and condition code. */
+__altivec_scalar_pred(vec_all_le,
+ __builtin_vec_vcmpge_p (__CR6_LT, a2, a1))
+__altivec_scalar_pred(vec_all_ge,
+ __builtin_vec_vcmpge_p (__CR6_LT, a1, a2))
+__altivec_scalar_pred(vec_any_le,
+ __builtin_vec_vcmpge_p (__CR6_EQ_REV, a2, a1))
+__altivec_scalar_pred(vec_any_ge,
+ __builtin_vec_vcmpge_p (__CR6_EQ_REV, a1, a2))
+
+__altivec_scalar_pred(vec_all_nge,
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, a1, a2))
+__altivec_scalar_pred(vec_all_nle,
+ __builtin_altivec_vcmpgefp_p (__CR6_EQ, a2, a1))
+__altivec_scalar_pred(vec_any_nge,
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a1, a2))
+__altivec_scalar_pred(vec_any_nle,
+ __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, a2, a1))
+
+#undef __altivec_scalar_pred
+#undef __altivec_unary_pred
+#undef __altivec_binary_pred
+#else
+#define vec_cmplt(a1, a2) __builtin_vec_cmpgt ((a2), (a1))
+#define vec_cmple(a1, a2) __builtin_vec_cmpge ((a2), (a1))
+
+#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ, (a1), (a2))
+#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (__CR6_EQ_REV, (a1), (a2))
+
+#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ, (a1), (a1))
+#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT_REV, (a1), (a1))
+
+#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_LT, (a1), (a1))
+#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (__CR6_EQ_REV, (a1), (a1))
+
+#define vec_all_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT, (a1), (a2))
+#define vec_all_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ, (a1), (a2))
+#define vec_any_eq(a1, a2) __builtin_vec_vcmpeq_p (__CR6_EQ_REV, (a1), (a2))
+#define vec_any_ne(a1, a2) __builtin_vec_vcmpeq_p (__CR6_LT_REV, (a1), (a2))
+
+#define vec_all_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a1), (a2))
+#define vec_all_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_LT, (a2), (a1))
+#define vec_any_gt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a1), (a2))
+#define vec_any_lt(a1, a2) __builtin_vec_vcmpgt_p (__CR6_EQ_REV, (a2), (a1))
+
+#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a1), (a2))
+#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_EQ, (a2), (a1))
+#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a1), (a2))
+#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (__CR6_LT_REV, (a2), (a1))
+
+/* __builtin_vec_vcmpge_p is vcmpgefp for floating-point vector types,
+ while for integer types it is converted to __builtin_vec_vcmpgt_p,
+ with inverted args and condition code. */
+#define vec_all_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a2), (a1))
+#define vec_all_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_LT, (a1), (a2))
+#define vec_any_le(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a2), (a1))
+#define vec_any_ge(a1, a2) __builtin_vec_vcmpge_p (__CR6_EQ_REV, (a1), (a2))
+
+#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a1), (a2))
+#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_EQ, (a2), (a1))
+#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a1), (a2))
+#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (__CR6_LT_REV, (a2), (a1))
+#endif
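+
+/* Example: given __vector float a, b, the predicate vec_all_gt (a, b)
+   expands (in either branch above) to
+   __builtin_vec_vcmpgt_p (__CR6_LT, a, b) and yields nonzero iff every
+   element of a is greater than the corresponding element of b.  */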
+
+/* These do not accept vectors, so they do not have a __builtin_vec_*
+ counterpart. */
+#define vec_dss(x) __builtin_altivec_dss((x))
+#define vec_dssall() __builtin_altivec_dssall ()
+#define vec_mfvscr() ((__vector unsigned short) __builtin_altivec_mfvscr ())
+#define vec_splat_s8(x) __builtin_altivec_vspltisb ((x))
+#define vec_splat_s16(x) __builtin_altivec_vspltish ((x))
+#define vec_splat_s32(x) __builtin_altivec_vspltisw ((x))
+#define vec_splat_u8(x) ((__vector unsigned char) vec_splat_s8 ((x)))
+#define vec_splat_u16(x) ((__vector unsigned short) vec_splat_s16 ((x)))
+#define vec_splat_u32(x) ((__vector unsigned int) vec_splat_s32 ((x)))
+
+/* vec_step also accepts a type as its argument, so it is not enough
+   to #define vec_step to __builtin_vec_step.  */
+#define vec_step(x) __builtin_vec_step (* (__typeof__ (x) *) 0)
+
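+/* A minimal usage sketch, assuming a PowerPC compiler invoked with
+   -maltivec (everything below is illustrative, built only from the
+   definitions above):
+
+     #include <altivec.h>
+
+     __vector float
+     vsum (__vector float a, __vector float b)
+     {
+       return vec_add (a, b);     <-- maps to vaddfp
+     }
+
+     int
+     all_equal (__vector signed int a, __vector signed int b)
+     {
+       return vec_all_eq (a, b);  <-- __builtin_vec_vcmpeq_p (__CR6_LT, a, b)
+     }
+
+   vec_step (__vector signed short) evaluates to 8, the number of
+   halfword elements in a 128-bit vector register.  */
+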
+#endif /* _ALTIVEC_H */
diff --git a/gcc-4.4.3/gcc/config/rs6000/altivec.md b/gcc-4.4.3/gcc/config/rs6000/altivec.md
new file mode 100644
index 000000000..9c6245ae8
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/altivec.md
@@ -0,0 +1,3099 @@
+;; AltiVec patterns.
+;; Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+;; Free Software Foundation, Inc.
+;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constants
+ [(UNSPEC_VCMPBFP 50)
+ (UNSPEC_VCMPEQUB 51)
+ (UNSPEC_VCMPEQUH 52)
+ (UNSPEC_VCMPEQUW 53)
+ (UNSPEC_VCMPEQFP 54)
+ (UNSPEC_VCMPGEFP 55)
+ (UNSPEC_VCMPGTUB 56)
+ (UNSPEC_VCMPGTSB 57)
+ (UNSPEC_VCMPGTUH 58)
+ (UNSPEC_VCMPGTSH 59)
+ (UNSPEC_VCMPGTUW 60)
+ (UNSPEC_VCMPGTSW 61)
+ (UNSPEC_VCMPGTFP 62)
+ (UNSPEC_VMSUMU 65)
+ (UNSPEC_VMSUMM 66)
+ (UNSPEC_VMSUMSHM 68)
+ (UNSPEC_VMSUMUHS 69)
+ (UNSPEC_VMSUMSHS 70)
+ (UNSPEC_VMHADDSHS 71)
+ (UNSPEC_VMHRADDSHS 72)
+ (UNSPEC_VMLADDUHM 73)
+ (UNSPEC_VADDCUW 75)
+ (UNSPEC_VADDU 76)
+ (UNSPEC_VADDS 77)
+ (UNSPEC_VAVGU 80)
+ (UNSPEC_VAVGS 81)
+ (UNSPEC_VMULEUB 83)
+ (UNSPEC_VMULESB 84)
+ (UNSPEC_VMULEUH 85)
+ (UNSPEC_VMULESH 86)
+ (UNSPEC_VMULOUB 87)
+ (UNSPEC_VMULOSB 88)
+ (UNSPEC_VMULOUH 89)
+ (UNSPEC_VMULOSH 90)
+ (UNSPEC_VPKUHUM 93)
+ (UNSPEC_VPKUWUM 94)
+ (UNSPEC_VPKPX 95)
+ (UNSPEC_VPKSHSS 97)
+ (UNSPEC_VPKSWSS 99)
+ (UNSPEC_VPKUHUS 100)
+ (UNSPEC_VPKSHUS 101)
+ (UNSPEC_VPKUWUS 102)
+ (UNSPEC_VPKSWUS 103)
+ (UNSPEC_VRL 104)
+ (UNSPEC_VSLV4SI 110)
+ (UNSPEC_VSLO 111)
+ (UNSPEC_VSR 118)
+ (UNSPEC_VSRO 119)
+ (UNSPEC_VSUBCUW 124)
+ (UNSPEC_VSUBU 125)
+ (UNSPEC_VSUBS 126)
+ (UNSPEC_VSUM4UBS 131)
+ (UNSPEC_VSUM4S 132)
+ (UNSPEC_VSUM2SWS 134)
+ (UNSPEC_VSUMSWS 135)
+ (UNSPEC_VPERM 144)
+ (UNSPEC_VRFIP 148)
+ (UNSPEC_VRFIN 149)
+ (UNSPEC_VRFIM 150)
+ (UNSPEC_VCFUX 151)
+ (UNSPEC_VCFSX 152)
+ (UNSPEC_VCTUXS 153)
+ (UNSPEC_VCTSXS 154)
+ (UNSPEC_VLOGEFP 155)
+ (UNSPEC_VEXPTEFP 156)
+ (UNSPEC_VRSQRTEFP 157)
+ (UNSPEC_VREFP 158)
+ (UNSPEC_VSEL4SI 159)
+ (UNSPEC_VSEL4SF 160)
+ (UNSPEC_VSEL8HI 161)
+ (UNSPEC_VSEL16QI 162)
+ (UNSPEC_VLSDOI 163)
+ (UNSPEC_VUPKHSB 167)
+ (UNSPEC_VUPKHPX 168)
+ (UNSPEC_VUPKHSH 169)
+ (UNSPEC_VUPKLSB 170)
+ (UNSPEC_VUPKLPX 171)
+ (UNSPEC_VUPKLSH 172)
+ (UNSPEC_PREDICATE 173)
+ (UNSPEC_DST 190)
+ (UNSPEC_DSTT 191)
+ (UNSPEC_DSTST 192)
+ (UNSPEC_DSTSTT 193)
+ (UNSPEC_LVSL 194)
+ (UNSPEC_LVSR 195)
+ (UNSPEC_LVE 196)
+ (UNSPEC_STVX 201)
+ (UNSPEC_STVXL 202)
+ (UNSPEC_STVE 203)
+ (UNSPEC_SET_VSCR 213)
+ (UNSPEC_GET_VRSAVE 214)
+ (UNSPEC_REALIGN_LOAD 215)
+ (UNSPEC_REDUC_PLUS 217)
+ (UNSPEC_VECSH 219)
+ (UNSPEC_EXTEVEN_V4SI 220)
+ (UNSPEC_EXTEVEN_V8HI 221)
+ (UNSPEC_EXTEVEN_V16QI 222)
+ (UNSPEC_EXTEVEN_V4SF 223)
+ (UNSPEC_EXTODD_V4SI 224)
+ (UNSPEC_EXTODD_V8HI 225)
+ (UNSPEC_EXTODD_V16QI 226)
+ (UNSPEC_EXTODD_V4SF 227)
+ (UNSPEC_INTERHI_V4SI 228)
+ (UNSPEC_INTERHI_V8HI 229)
+ (UNSPEC_INTERHI_V16QI 230)
+ (UNSPEC_INTERHI_V4SF 231)
+ (UNSPEC_INTERLO_V4SI 232)
+ (UNSPEC_INTERLO_V8HI 233)
+ (UNSPEC_INTERLO_V16QI 234)
+ (UNSPEC_INTERLO_V4SF 235)
+ (UNSPEC_LVLX 236)
+ (UNSPEC_LVLXL 237)
+ (UNSPEC_LVRX 238)
+ (UNSPEC_LVRXL 239)
+ (UNSPEC_STVLX 240)
+ (UNSPEC_STVLXL 241)
+ (UNSPEC_STVRX 242)
+ (UNSPEC_STVRXL 243)
+ (UNSPEC_VMULWHUB 308)
+ (UNSPEC_VMULWLUB 309)
+ (UNSPEC_VMULWHSB 310)
+ (UNSPEC_VMULWLSB 311)
+ (UNSPEC_VMULWHUH 312)
+ (UNSPEC_VMULWLUH 313)
+ (UNSPEC_VMULWHSH 314)
+ (UNSPEC_VMULWLSH 315)
+ (UNSPEC_VUPKHUB 316)
+ (UNSPEC_VUPKHUH 317)
+ (UNSPEC_VUPKLUB 318)
+ (UNSPEC_VUPKLUH 319)
+ (UNSPEC_VPERMSI 320)
+ (UNSPEC_VPERMHI 321)
+ (UNSPEC_INTERHI 322)
+ (UNSPEC_INTERLO 323)
+ (UNSPEC_VUPKHS_V4SF 324)
+ (UNSPEC_VUPKLS_V4SF 325)
+ (UNSPEC_VUPKHU_V4SF 326)
+ (UNSPEC_VUPKLU_V4SF 327)
+])
+
+(define_constants
+ [(UNSPECV_SET_VRSAVE 30)
+ (UNSPECV_MTVSCR 186)
+ (UNSPECV_MFVSCR 187)
+ (UNSPECV_DSSALL 188)
+ (UNSPECV_DSS 189)
+ ])
+
+;; Vec int modes
+(define_mode_iterator VI [V4SI V8HI V16QI])
+;; Short vec int modes
+(define_mode_iterator VIshort [V8HI V16QI])
+;; Vec float modes
+(define_mode_iterator VF [V4SF])
+;; Vec modes; a pity that mode iterators are not composable
+(define_mode_iterator V [V4SI V8HI V16QI V4SF])
+
+(define_mode_attr VI_char [(V4SI "w") (V8HI "h") (V16QI "b")])
+
+;; Generic LVX load instruction.
+(define_insn "altivec_lvx_<mode>"
+ [(set (match_operand:V 0 "altivec_register_operand" "=v")
+ (match_operand:V 1 "memory_operand" "Z"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+;; Generic STVX store instruction.
+(define_insn "altivec_stvx_<mode>"
+ [(set (match_operand:V 0 "memory_operand" "=Z")
+ (match_operand:V 1 "altivec_register_operand" "v"))]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+;; Vector move instructions.
+(define_expand "mov<mode>"
+ [(set (match_operand:V 0 "nonimmediate_operand" "")
+ (match_operand:V 1 "any_operand" ""))]
+ "TARGET_ALTIVEC"
+{
+ rs6000_emit_move (operands[0], operands[1], <MODE>mode);
+ DONE;
+})
+
+(define_insn "*mov<mode>_internal"
+ [(set (match_operand:V 0 "nonimmediate_operand" "=Z,v,v,o,r,r,v")
+ (match_operand:V 1 "input_operand" "v,Z,v,r,o,r,W"))]
+ "TARGET_ALTIVEC
+ && (register_operand (operands[0], <MODE>mode)
+ || register_operand (operands[1], <MODE>mode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "stvx %1,%y0";
+ case 1: return "lvx %0,%y1";
+ case 2: return "vor %0,%1,%1";
+ case 3: return "#";
+ case 4: return "#";
+ case 5: return "#";
+ case 6: return output_vec_const_move (operands);
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "vecstore,vecload,vecsimple,store,load,*,*")])
+
+(define_split
+ [(set (match_operand:V4SI 0 "nonimmediate_operand" "")
+ (match_operand:V4SI 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+})
+
+(define_split
+ [(set (match_operand:V8HI 0 "nonimmediate_operand" "")
+ (match_operand:V8HI 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+(define_split
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
+ (match_operand:V16QI 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+(define_split
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "")
+ (match_operand:V4SF 1 "input_operand" ""))]
+ "TARGET_ALTIVEC && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+})
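+
+;; The four splits above catch vector moves that reload has assigned
+;; to GPRs or to GPR/memory pairs; rs6000_split_multireg_move rewrites
+;; such a move as a sequence of word-sized moves.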
+
+(define_split
+ [(set (match_operand:V 0 "altivec_register_operand" "")
+ (match_operand:V 1 "easy_vector_constant_add_self" ""))]
+ "TARGET_ALTIVEC && reload_completed"
+ [(set (match_dup 0) (match_dup 3))
+ (set (match_dup 0) (match_dup 4))]
+{
+ rtx dup = gen_easy_altivec_constant (operands[1]);
+ rtx const_vec;
+ enum machine_mode op_mode = <MODE>mode;
+
+  /* Halve the operand of the resulting VEC_DUPLICATE (an arithmetic
+     shift right by one), and use simplify_rtx to make a CONST_VECTOR.  */
+ XEXP (dup, 0) = simplify_const_binary_operation (ASHIFTRT, QImode,
+ XEXP (dup, 0), const1_rtx);
+ const_vec = simplify_rtx (dup);
+
+ if (op_mode == V4SFmode)
+ {
+ op_mode = V4SImode;
+ operands[0] = gen_lowpart (op_mode, operands[0]);
+ }
+ if (GET_MODE (const_vec) == op_mode)
+ operands[3] = const_vec;
+ else
+ operands[3] = gen_lowpart (op_mode, const_vec);
+ operands[4] = gen_rtx_PLUS (op_mode, operands[0], operands[0]);
+})
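+
+;; For example, a V16QI splat of 24 does not fit a vspltisb immediate
+;; (the range is -16..15), but 24 = 12 + 12, so the split emits
+;; vspltisb of 12 followed by an add of the register to itself.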
+
+(define_insn "get_vrsave_internal"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(reg:SI 109)] UNSPEC_GET_VRSAVE))]
+ "TARGET_ALTIVEC"
+{
+ if (TARGET_MACHO)
+ return "mfspr %0,256";
+ else
+ return "mfvrsave %0";
+}
+ [(set_attr "type" "*")])
+
+(define_insn "*set_vrsave_internal"
+ [(match_parallel 0 "vrsave_operation"
+ [(set (reg:SI 109)
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
+ (reg:SI 109)] UNSPECV_SET_VRSAVE))])]
+ "TARGET_ALTIVEC"
+{
+ if (TARGET_MACHO)
+ return "mtspr 256,%1";
+ else
+ return "mtvrsave %1";
+}
+ [(set_attr "type" "*")])
+
+(define_insn "*save_world"
+ [(match_parallel 0 "save_world_operation"
+ [(clobber (reg:SI 65))
+ (use (match_operand:SI 1 "call_operand" "s"))])]
+ "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
+ "bl %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*restore_world"
+ [(match_parallel 0 "restore_world_operation"
+ [(return)
+ (use (reg:SI 65))
+ (use (match_operand:SI 1 "call_operand" "s"))
+ (clobber (match_operand:SI 2 "gpc_reg_operand" "=r"))])]
+ "TARGET_MACHO && (DEFAULT_ABI == ABI_DARWIN) && TARGET_32BIT"
+ "b %z1")
+
+;; Simple binary operations.
+
+;; add
+(define_insn "add<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (plus:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vaddu<VI_char>m %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "addv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vaddfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vaddcuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VADDCUW))]
+ "TARGET_ALTIVEC"
+ "vaddcuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vaddu<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VADDU))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vaddu<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vadds<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VADDS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vadds<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
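+
+;; In the two saturating patterns above, the parallel set of register
+;; 110 (VSCR) models the SAT bit that vaddu<VI_char>s and
+;; vadds<VI_char>s may set, so the instructions are not treated as
+;; pure, removable arithmetic.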
+
+;; sub
+(define_insn "sub<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (minus:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubu<VI_char>m %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "subv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vsubcuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUBCUW))]
+ "TARGET_ALTIVEC"
+ "vsubcuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubu<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSUBU))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsubu<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubs<VI_char>s"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VSUBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsubs<VI_char>s %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; averages and comparisons
+(define_insn "altivec_vavgu<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VAVGU))]
+ "TARGET_ALTIVEC"
+ "vavgu<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavgs<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VAVGS))]
+ "TARGET_ALTIVEC"
+ "vavgs<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpbfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPBFP))]
+ "TARGET_ALTIVEC"
+ "vcmpbfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpequb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUB))]
+ "TARGET_ALTIVEC"
+ "vcmpequb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpequh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUH))]
+ "TARGET_ALTIVEC"
+ "vcmpequh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpequw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPEQUW))]
+ "TARGET_ALTIVEC"
+ "vcmpequw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpeqfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPEQFP))]
+ "TARGET_ALTIVEC"
+ "vcmpeqfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgefp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPGEFP))]
+ "TARGET_ALTIVEC"
+ "vcmpgefp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtub"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUB))]
+ "TARGET_ALTIVEC"
+ "vcmpgtub %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSB))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtuh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUH))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSH))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTUW))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VCMPGTSW))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")]
+ UNSPEC_VCMPGTFP))]
+ "TARGET_ALTIVEC"
+ "vcmpgtfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+;; Fused multiply add
+(define_insn "altivec_vmaddfp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 3 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaddfp %0,%1,%2,%3"
+ [(set_attr "type" "vecfloat")])
+
+;; We do V4SF multiply as a fused multiply-add with an addend of -0.0,
+;; which preserves the sign of a zero product (x + -0.0 == x, whereas
+;; an addend of +0.0 would turn -0.0 into +0.0).
+
+(define_expand "mulv4sf3"
+ [(use (match_operand:V4SF 0 "register_operand" ""))
+ (use (match_operand:V4SF 1 "register_operand" ""))
+ (use (match_operand:V4SF 2 "register_operand" ""))]
+ "TARGET_ALTIVEC && TARGET_FUSED_MADD"
+ "
+{
+ rtx neg0;
+
+ /* Generate [-0.0, -0.0, -0.0, -0.0]. */
+ neg0 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
+ emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
+
+ /* Use the multiply-add. */
+ emit_insn (gen_altivec_vmaddfp (operands[0], operands[1], operands[2],
+ gen_lowpart (V4SFmode, neg0)));
+ DONE;
+}")
+
+;; 32-bit integer multiplication
+;; A_high = (Operand_1 & 0xFFFF0000) >> 16
+;; A_low  =  Operand_1 & 0xFFFF
+;; B_high = (Operand_2 & 0xFFFF0000) >> 16
+;; B_low  =  Operand_2 & 0xFFFF
+;; result = A_low * B_low + ((A_high * B_low + B_high * A_low) << 16)
+
+;; (define_insn "mulv4si3"
+;; [(set (match_operand:V4SI 0 "register_operand" "=v")
+;; (mult:V4SI (match_operand:V4SI 1 "register_operand" "v")
+;; (match_operand:V4SI 2 "register_operand" "v")))]
+(define_expand "mulv4si3"
+ [(use (match_operand:V4SI 0 "register_operand" ""))
+ (use (match_operand:V4SI 1 "register_operand" ""))
+ (use (match_operand:V4SI 2 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+ {
+ rtx zero;
+ rtx swap;
+ rtx small_swap;
+ rtx sixteen;
+ rtx one;
+ rtx two;
+ rtx low_product;
+ rtx high_product;
+
+ zero = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (zero, const0_rtx));
+
+ sixteen = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (sixteen, gen_rtx_CONST_INT (V4SImode, -16)));
+
+ swap = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vrlw (swap, operands[2], sixteen));
+
+ one = gen_reg_rtx (V8HImode);
+ convert_move (one, operands[1], 0);
+
+ two = gen_reg_rtx (V8HImode);
+ convert_move (two, operands[2], 0);
+
+ small_swap = gen_reg_rtx (V8HImode);
+ convert_move (small_swap, swap, 0);
+
+ low_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmulouh (low_product, one, two));
+
+ high_product = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vmsumuhm (high_product, one, small_swap, zero));
+
+ emit_insn (gen_vashlv4si3 (high_product, high_product, sixteen));
+
+ emit_insn (gen_addv4si3 (operands[0], high_product, low_product));
+
+ DONE;
+ }")
+
+(define_expand "mulv8hi3"
+ [(use (match_operand:V8HI 0 "register_operand" ""))
+ (use (match_operand:V8HI 1 "register_operand" ""))
+ (use (match_operand:V8HI 2 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx odd = gen_reg_rtx (V4SImode);
+ rtx even = gen_reg_rtx (V4SImode);
+ rtx high = gen_reg_rtx (V4SImode);
+ rtx low = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vmulesh (even, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulosh (odd, operands[1], operands[2]));
+
+ emit_insn (gen_altivec_vmrghw (high, even, odd));
+ emit_insn (gen_altivec_vmrglw (low, even, odd));
+
+ emit_insn (gen_altivec_vpkuwum (operands[0], high, low));
+
+ DONE;
+}")
+
+;; Fused negative multiply-subtract: -((op1 * op2) - op3)
+(define_insn "altivec_vnmsubfp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (neg:V4SF (minus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 3 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnmsubfp %0,%1,%2,%3"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vmsumu<VI_char>m"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMU))]
+ "TARGET_ALTIVEC"
+ "vmsumu<VI_char>m %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumm<VI_char>m"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMM))]
+ "TARGET_ALTIVEC"
+ "vmsumm<VI_char>m %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumshm"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMSHM))]
+ "TARGET_ALTIVEC"
+ "vmsumshm %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumuhs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMUHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmsumuhs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumshs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VMSUMSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmsumshs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+;; max and min
+
+(define_insn "umax<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (umax:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxu<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxs<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "umin<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (umin:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminu<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smin<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (smin:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmins<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "sminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vmhaddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMHADDSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmhaddshs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmhraddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMHRADDSHS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vmhraddshs %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmladduhm"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VMLADDUHM))]
+ "TARGET_ALTIVEC"
+ "vmladduhm %0,%1,%2,%3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmrghb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 8)
+ (const_int 1)
+ (const_int 9)
+ (const_int 2)
+ (const_int 10)
+ (const_int 3)
+ (const_int 11)
+ (const_int 4)
+ (const_int 12)
+ (const_int 5)
+ (const_int 13)
+ (const_int 6)
+ (const_int 14)
+ (const_int 7)
+ (const_int 15)]))
+ (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 0)
+ (const_int 9)
+ (const_int 1)
+ (const_int 10)
+ (const_int 2)
+ (const_int 11)
+ (const_int 3)
+ (const_int 12)
+ (const_int 4)
+ (const_int 13)
+ (const_int 5)
+ (const_int 14)
+ (const_int 6)
+ (const_int 15)
+ (const_int 7)]))
+ (const_int 21845)))]
+ "TARGET_ALTIVEC"
+ "vmrghb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrghh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
+ (const_int 5)
+ (const_int 2)
+ (const_int 6)
+ (const_int 3)
+ (const_int 7)]))
+ (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
+ (const_int 1)
+ (const_int 6)
+ (const_int 2)
+ (const_int 7)
+ (const_int 3)]))
+ (const_int 85)))]
+ "TARGET_ALTIVEC"
+ "vmrghh %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrghw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (const_int 5)))]
+ "TARGET_ALTIVEC"
+ "vmrghw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrghsf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (const_int 5)))]
+ "TARGET_ALTIVEC"
+ "vmrghw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 0)
+ (const_int 9)
+ (const_int 1)
+ (const_int 10)
+ (const_int 2)
+ (const_int 11)
+ (const_int 3)
+ (const_int 12)
+ (const_int 4)
+ (const_int 13)
+ (const_int 5)
+ (const_int 14)
+ (const_int 6)
+ (const_int 15)
+ (const_int 7)]))
+ (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 8)
+ (const_int 1)
+ (const_int 9)
+ (const_int 2)
+ (const_int 10)
+ (const_int 3)
+ (const_int 11)
+ (const_int 4)
+ (const_int 12)
+ (const_int 5)
+ (const_int 13)
+ (const_int 6)
+ (const_int 14)
+ (const_int 7)
+ (const_int 15)]))
+ (const_int 21845)))]
+ "TARGET_ALTIVEC"
+ "vmrglb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 0)
+ (const_int 5)
+ (const_int 1)
+ (const_int 6)
+ (const_int 2)
+ (const_int 7)
+ (const_int 3)]))
+ (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 4)
+ (const_int 1)
+ (const_int 5)
+ (const_int 2)
+ (const_int 6)
+ (const_int 3)
+ (const_int 7)]))
+ (const_int 85)))]
+ "TARGET_ALTIVEC"
+ "vmrglh %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (const_int 5)))]
+ "TARGET_ALTIVEC"
+ "vmrglw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglsf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (vec_merge:V4SF (vec_select:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 0)
+ (const_int 3)
+ (const_int 1)]))
+ (vec_select:V4SF (match_operand:V4SF 2 "register_operand" "v")
+ (parallel [(const_int 0)
+ (const_int 2)
+ (const_int 1)
+ (const_int 3)]))
+ (const_int 5)))]
+ "TARGET_ALTIVEC"
+ "vmrglw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmuleub"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULEUB))]
+ "TARGET_ALTIVEC"
+ "vmuleub %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulesb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULESB))]
+ "TARGET_ALTIVEC"
+ "vmulesb %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmuleuh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULEUH))]
+ "TARGET_ALTIVEC"
+ "vmuleuh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulesh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULESH))]
+ "TARGET_ALTIVEC"
+ "vmulesh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmuloub"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULOUB))]
+ "TARGET_ALTIVEC"
+ "vmuloub %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulosb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULOSB))]
+ "TARGET_ALTIVEC"
+ "vmulosb %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulouh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULOUH))]
+ "TARGET_ALTIVEC"
+ "vmulouh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulosh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULOSH))]
+ "TARGET_ALTIVEC"
+ "vmulosh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+
+;; logical ops
+
+(define_insn "and<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (and:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vand %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ior:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "xor<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (xor:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "xorv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (xor:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (not:VI (match_operand:VI 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%1"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_nor<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (not:VI (ior:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "andc<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (and:VI (not:VI (match_operand:VI 2 "register_operand" "v"))
+ (match_operand:VI 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "*andc3_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (and:V4SF (not:V4SF (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vpkuhum"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUM))]
+ "TARGET_ALTIVEC"
+ "vpkuhum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwum"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUM))]
+ "TARGET_ALTIVEC"
+ "vpkuwum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkpx"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKPX))]
+ "TARGET_ALTIVEC"
+ "vpkpx %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkshss"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKSHSS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkshss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkswss"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKSWSS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkswss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuhus"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkuhus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkshus"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKSHUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkshus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwus"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkuwus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkswus"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKSWUS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vpkswus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vrl<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v")]
+ UNSPEC_VRL))]
+ "TARGET_ALTIVEC"
+ "vrl<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsl"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSLV4SI))]
+ "TARGET_ALTIVEC"
+ "vsl %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vslo"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSLO))]
+ "TARGET_ALTIVEC"
+ "vslo %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "vashl<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ashift:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
+ "TARGET_ALTIVEC"
+ "vsl<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "vlshr<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (lshiftrt:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
+ "TARGET_ALTIVEC"
+ "vsr<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "vashr<mode>3"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (ashiftrt:VI (match_operand:VI 1 "register_operand" "v")
+ (match_operand:VI 2 "register_operand" "v") ))]
+ "TARGET_ALTIVEC"
+ "vsra<VI_char> %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsr"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSR))]
+ "TARGET_ALTIVEC"
+ "vsr %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsro"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSRO))]
+ "TARGET_ALTIVEC"
+ "vsro %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsum4ubs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM4UBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsum4ubs %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum4s<VI_char>s"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM4S))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsum4s<VI_char>s %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum2sws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUM2SWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsum2sws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsumsws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUMSWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsumsws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vspltb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_duplicate:V16QI
+ (vec_select:QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
+ "TARGET_ALTIVEC"
+ "vspltb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsplth"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_duplicate:V8HI
+ (vec_select:HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "")]))))]
+ "TARGET_ALTIVEC"
+ "vsplth %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_duplicate:V4SI
+ (vec_select:SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
+ "TARGET_ALTIVEC"
+ "vspltw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "*altivec_vspltsf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (vec_duplicate:V4SF
+ (vec_select:SF (match_operand:V4SF 1 "register_operand" "v")
+ (parallel
+ [(match_operand:QI 2 "u5bit_cint_operand" "i")]))))]
+ "TARGET_ALTIVEC"
+ "vspltw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltis<VI_char>"
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (vec_duplicate:VI
+ (match_operand:QI 1 "s5bit_cint_operand" "i")))]
+ "TARGET_ALTIVEC"
+ "vspltis<VI_char> %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "ftruncv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vrfiz %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vperm_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VPERM))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vrfip"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIP))]
+ "TARGET_ALTIVEC"
+ "vrfip %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrfin"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIN))]
+ "TARGET_ALTIVEC"
+ "vrfin %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrfim"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRFIM))]
+ "TARGET_ALTIVEC"
+ "vrfim %0,%1"
+ [(set_attr "type" "vecfloat")])
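+
+;; vrfip, vrfin and vrfim round toward +infinity, to nearest and
+;; toward -infinity, respectively; vrfiz (ftruncv4sf2 above) rounds
+;; toward zero.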
+
+(define_insn "altivec_vcfux"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCFUX))]
+ "TARGET_ALTIVEC"
+ "vcfux %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vcfsx"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCFSX))]
+ "TARGET_ALTIVEC"
+ "vcfsx %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vctuxs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCTUXS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vctuxs %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vctsxs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ UNSPEC_VCTSXS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vctsxs %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vlogefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VLOGEFP))]
+ "TARGET_ALTIVEC"
+ "vlogefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vexptefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VEXPTEFP))]
+ "TARGET_ALTIVEC"
+ "vexptefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrsqrtefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VRSQRTEFP))]
+ "TARGET_ALTIVEC"
+ "vrsqrtefp %0,%1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
+ UNSPEC_VREFP))]
+ "TARGET_ALTIVEC"
+ "vrefp %0,%1"
+ [(set_attr "type" "vecfloat")])
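+
+;; vrefp and vrsqrtefp are estimate instructions: the architecture
+;; guarantees only a bounded relative error, so callers that need
+;; full precision refine the estimate, typically with Newton-Raphson
+;; iterations.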
+
+(define_expand "vcondv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (if_then_else:V4SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V4SI 4 "register_operand" "v")
+ (match_operand:V4SI 5 "register_operand" "v")])
+ (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (if_then_else:V4SI
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V4SI 4 "register_operand" "v")
+ (match_operand:V4SI 5 "register_operand" "v")])
+ (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (if_then_else:V4SF
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V4SF 4 "register_operand" "v")
+ (match_operand:V4SF 5 "register_operand" "v")])
+ (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (if_then_else:V8HI
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V8HI 4 "register_operand" "v")
+ (match_operand:V8HI 5 "register_operand" "v")])
+ (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (if_then_else:V8HI
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V8HI 4 "register_operand" "v")
+ (match_operand:V8HI 5 "register_operand" "v")])
+ (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vcondv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (if_then_else:V16QI
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V16QI 4 "register_operand" "v")
+ (match_operand:V16QI 5 "register_operand" "v")])
+ (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+(define_expand "vconduv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (if_then_else:V16QI
+ (match_operator 3 "comparison_operator"
+ [(match_operand:V16QI 4 "register_operand" "v")
+ (match_operand:V16QI 5 "register_operand" "v")])
+ (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "
+{
+ if (rs6000_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}
+ ")
+
+
+(define_insn "altivec_vsel_v4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VSEL4SI))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_v4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")]
+ UNSPEC_VSEL4SF))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_v8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")]
+ UNSPEC_VSEL8HI))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VSEL16QI))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsldoi_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")]
+ UNSPEC_VLSDOI))]
+ "TARGET_ALTIVEC"
+ "vsldoi %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhsb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSB))]
+ "TARGET_ALTIVEC"
+ "vupkhsb %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhpx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHPX))]
+ "TARGET_ALTIVEC"
+ "vupkhpx %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhsh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSH))]
+ "TARGET_ALTIVEC"
+ "vupkhsh %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklsb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSB))]
+ "TARGET_ALTIVEC"
+ "vupklsb %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklpx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLPX))]
+ "TARGET_ALTIVEC"
+ "vupklpx %0,%1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklsh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSH))]
+ "TARGET_ALTIVEC"
+ "vupklsh %0,%1"
+ [(set_attr "type" "vecperm")])
+
+;; AltiVec predicates.
+
+(define_expand "cr6_test_for_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg:CC 74)
+ (const_int 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+(define_expand "cr6_test_for_zero_reverse"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg:CC 74)
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+(define_expand "cr6_test_for_lt"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lt:SI (reg:CC 74)
+ (const_int 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+(define_expand "cr6_test_for_lt_reverse"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lt:SI (reg:CC 74)
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (const_int 1) (match_dup 0)))]
+ "TARGET_ALTIVEC"
+ "")
+
+;; We can get away with generating the opcode on the fly (%3 below)
+;; because all the predicates have the same scheduling parameters.
+
+(define_insn "altivec_predicate_<mode>"
+ [(set (reg:CC 74)
+ (unspec:CC [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand 3 "any_operand" "")] UNSPEC_PREDICATE))
+ (clobber (match_scratch:V 0 "=v"))]
+ "TARGET_ALTIVEC"
+ "%3 %0,%1,%2"
+  [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_mtvscr"
+ [(set (reg:SI 110)
+ (unspec_volatile:SI
+ [(match_operand:V4SI 0 "register_operand" "v")] UNSPECV_MTVSCR))]
+ "TARGET_ALTIVEC"
+ "mtvscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_mfvscr"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec_volatile:V8HI [(reg:SI 110)] UNSPECV_MFVSCR))]
+ "TARGET_ALTIVEC"
+ "mfvscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dssall"
+ [(unspec_volatile [(const_int 0)] UNSPECV_DSSALL)]
+ "TARGET_ALTIVEC"
+ "dssall"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dss"
+ [(unspec_volatile [(match_operand:QI 0 "immediate_operand" "i")]
+ UNSPECV_DSS)]
+ "TARGET_ALTIVEC"
+ "dss %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dst"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DST)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dst %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dstt"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTT)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dstt %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dstst"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTST)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dstst %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dststt"
+ [(unspec [(match_operand 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] UNSPEC_DSTSTT)]
+ "TARGET_ALTIVEC && GET_MODE (operands[0]) == Pmode"
+ "dststt %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_lvsl"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] UNSPEC_LVSL))]
+ "TARGET_ALTIVEC"
+ "lvsl %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvsr"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")] UNSPEC_LVSR))]
+ "TARGET_ALTIVEC"
+ "lvsr %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_expand "build_vector_mask_for_load"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "")] UNSPEC_LVSR))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx addr;
+ rtx temp;
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+
+ addr = XEXP (operands[1], 0);
+ temp = gen_reg_rtx (GET_MODE (addr));
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_NEG (GET_MODE (addr), addr)));
+ emit_insn (gen_altivec_lvsr (operands[0],
+ replace_equiv_address (operands[1], temp)));
+ DONE;
+}")
+
+;; Parallel some of the LVE* and STV*'s with unspecs because some have
+;; identical rtl but different instructions, and gcc gets confused.
+
+(define_insn "altivec_lve<VI_char>x"
+ [(parallel
+ [(set (match_operand:VI 0 "register_operand" "=v")
+ (match_operand:VI 1 "memory_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_LVE)])]
+ "TARGET_ALTIVEC"
+ "lve<VI_char>x %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "*altivec_lvesfx"
+ [(parallel
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (match_operand:V4SF 1 "memory_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_LVE)])]
+ "TARGET_ALTIVEC"
+ "lvewx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvxl"
+ [(parallel
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V4SI 1 "memory_operand" "Z"))
+ (unspec [(const_int 0)] UNSPEC_SET_VSCR)])]
+ "TARGET_ALTIVEC"
+ "lvxl %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V4SI 1 "memory_operand" "Z"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_stvx"
+ [(parallel
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVX)])]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvxl"
+ [(parallel
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVXL)])]
+ "TARGET_ALTIVEC"
+ "stvxl %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stve<VI_char>x"
+ [(parallel
+ [(set (match_operand:VI 0 "memory_operand" "=Z")
+ (match_operand:VI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVE)])]
+ "TARGET_ALTIVEC"
+ "stve<VI_char>x %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "*altivec_stvesfx"
+ [(parallel
+ [(set (match_operand:V4SF 0 "memory_operand" "=Z")
+ (match_operand:V4SF 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVE)])]
+ "TARGET_ALTIVEC"
+ "stvewx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_expand "vec_init<mode>"
+ [(match_operand:V 0 "register_operand" "")
+ (match_operand 1 "" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+(define_expand "vec_setv4si"
+ [(match_operand:V4SI 0 "register_operand" "")
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_setv8hi"
+ [(match_operand:V8HI 0 "register_operand" "")
+ (match_operand:HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_setv16qi"
+ [(match_operand:V16QI 0 "register_operand" "")
+ (match_operand:QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_setv4sf"
+ [(match_operand:V4SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_set (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4si"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:V4SI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv8hi"
+ [(match_operand:HI 0 "register_operand" "")
+ (match_operand:V8HI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv16qi"
+ [(match_operand:QI 0 "register_operand" "")
+ (match_operand:V16QI 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+(define_expand "vec_extractv4sf"
+ [(match_operand:SF 0 "register_operand" "")
+ (match_operand:V4SF 1 "register_operand" "")
+ (match_operand 2 "const_int_operand" "")]
+ "TARGET_ALTIVEC"
+{
+ rs6000_expand_vector_extract (operands[0], operands[1], INTVAL (operands[2]));
+ DONE;
+})
+
+;; Generate
+;;    vspltis? SCRATCH1,0
+;;    vsubu?m SCRATCH2,SCRATCH1,%1
+;;    vmaxs? %0,%1,SCRATCH2
+(define_expand "abs<mode>2"
+ [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
+ (set (match_dup 3)
+ (minus:VI (match_dup 2)
+ (match_operand:VI 1 "register_operand" "v")))
+ (set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_dup 1) (match_dup 3)))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
+ operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
+})
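+
+;; Worked example (a sketch, V4SI lanes shown as signed ints): for
+;; %1 = {-1, 2, -3, 4} the expansion emits
+;;    vspltisw t2,0        t2 = {0,0,0,0}
+;;    vsubuwm  t3,t2,%1    t3 = {1,-2,3,-4}
+;;    vmaxsw   %0,%1,t3    %0 = {1,2,3,4}
+;; Note INT_MIN stays INT_MIN, since the subtraction is modular.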
+
+;; Generate
+;; vspltisw SCRATCH1,-1
+;; vslw SCRATCH2,SCRATCH1,SCRATCH1
+;; vandc %0,%1,SCRATCH2
+(define_expand "absv4sf2"
+ [(set (match_dup 2)
+ (vec_duplicate:V4SI (const_int -1)))
+ (set (match_dup 3)
+ (ashift:V4SI (match_dup 2) (match_dup 2)))
+ (set (match_operand:V4SF 0 "register_operand" "=v")
+ (and:V4SF (not:V4SF (subreg:V4SF (match_dup 3) 0))
+ (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (V4SImode);
+ operands[3] = gen_reg_rtx (V4SImode);
+})
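+
+;; How the mask is formed (a sketch): vspltisw puts -1 (all ones) in
+;; every word, and vslw shifts each word left by the low 5 bits of the
+;; corresponding word of the shift operand (31 here), so every lane
+;; becomes 0x80000000.  The vandc then clears exactly the sign bit:
+;; e.g. -2.5f (0xC0200000) & ~0x80000000 = 0x40200000 = 2.5f.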
+
+;; Generate
+;;    vspltis? SCRATCH1,0
+;;    vsubs?s SCRATCH2,SCRATCH1,%1
+;;    vmaxs? %0,%1,SCRATCH2
+(define_expand "altivec_abss_<mode>"
+ [(set (match_dup 2) (vec_duplicate:VI (const_int 0)))
+ (parallel [(set (match_dup 3)
+ (unspec:VI [(match_dup 2)
+ (match_operand:VI 1 "register_operand" "v")]
+ UNSPEC_VSUBS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))])
+ (set (match_operand:VI 0 "register_operand" "=v")
+ (smax:VI (match_dup 1) (match_dup 3)))]
+ "TARGET_ALTIVEC"
+{
+ operands[2] = gen_reg_rtx (GET_MODE (operands[0]));
+ operands[3] = gen_reg_rtx (GET_MODE (operands[0]));
+})
+
+;; Vector shift left in bits.  Currently supported only for shift
+;; amounts that can be expressed as byte shifts (divisible by 8).
+;; General shift amounts can be supported using vslo + vsl. We're
+;; not expecting to see these yet (the vectorizer currently
+;; generates only shifts divisible by byte_size).
+(define_expand "vec_shl_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:QI 2 "reg_or_short_operand" "")]
+ UNSPEC_VECSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx bitshift = operands[2];
+ rtx byteshift = gen_reg_rtx (QImode);
+ HOST_WIDE_INT bitshift_val;
+ HOST_WIDE_INT byteshift_val;
+
+ if (! CONSTANT_P (bitshift))
+ FAIL;
+ bitshift_val = INTVAL (bitshift);
+ if (bitshift_val & 0x7)
+ FAIL;
+ byteshift_val = bitshift_val >> 3;
+ byteshift = gen_rtx_CONST_INT (QImode, byteshift_val);
+ emit_insn (gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
+ byteshift));
+ DONE;
+}")
+
+;; Vector shift right in bits.  Currently supported only for shift
+;; amounts that can be expressed as byte shifts (divisible by 8).
+;; General shift amounts can be supported using vsro + vsr. We're
+;; not expecting to see these yet (the vectorizer currently
+;; generates only shifts divisible by byte_size).
+(define_expand "vec_shr_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:QI 2 "reg_or_short_operand" "")]
+ UNSPEC_VECSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx bitshift = operands[2];
+ rtx byteshift = gen_reg_rtx (QImode);
+ HOST_WIDE_INT bitshift_val;
+ HOST_WIDE_INT byteshift_val;
+
+ if (! CONSTANT_P (bitshift))
+ FAIL;
+ bitshift_val = INTVAL (bitshift);
+ if (bitshift_val & 0x7)
+ FAIL;
+ byteshift_val = 16 - (bitshift_val >> 3);
+ byteshift = gen_rtx_CONST_INT (QImode, byteshift_val);
+ emit_insn (gen_altivec_vsldoi_<mode> (operands[0], operands[1], operands[1],
+ byteshift));
+ DONE;
+}")
+
+(define_insn "altivec_vsumsws_nomode"
+ [(set (match_operand 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VSUMSWS))
+ (set (reg:SI 110) (unspec:SI [(const_int 0)] UNSPEC_SET_VSCR))]
+ "TARGET_ALTIVEC"
+ "vsumsws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_expand "reduc_splus_<mode>"
+ [(set (match_operand:VIshort 0 "register_operand" "=v")
+ (unspec:VIshort [(match_operand:VIshort 1 "register_operand" "v")]
+ UNSPEC_REDUC_PLUS))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx vtmp1 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+ emit_insn (gen_altivec_vsum4s<VI_char>s (vtmp1, operands[1], vzero));
+ emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero));
+ DONE;
+}")
+
+(define_expand "reduc_uplus_v16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_REDUC_PLUS))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx vtmp1 = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+ emit_insn (gen_altivec_vsum4ubs (vtmp1, operands[1], vzero));
+ emit_insn (gen_altivec_vsumsws_nomode (operands[0], vtmp1, vzero));
+ DONE;
+}")
+
+(define_insn "vec_realign_load_<mode>"
+ [(set (match_operand:V 0 "register_operand" "=v")
+ (unspec:V [(match_operand:V 1 "register_operand" "v")
+ (match_operand:V 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_REALIGN_LOAD))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_expand "neg<mode>2"
+ [(use (match_operand:VI 0 "register_operand" ""))
+ (use (match_operand:VI 1 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero;
+
+ vzero = gen_reg_rtx (GET_MODE (operands[0]));
+ emit_insn (gen_altivec_vspltis<VI_char> (vzero, const0_rtx));
+ emit_insn (gen_sub<mode>3 (operands[0], vzero, operands[1]));
+
+ DONE;
+}")
+
+(define_expand "udot_prod<mode>"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")
+ (match_operand:VIshort 2 "register_operand" "v")]
+ UNSPEC_VMSUMU)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_expand "sdot_prodv8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 3 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMSUMSHM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_expand "widen_usum<mode>3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:VIshort 1 "register_operand" "v")]
+ UNSPEC_VMSUMU)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (GET_MODE (operands[1]));
+
+ emit_insn (gen_altivec_vspltis<VI_char> (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsumu<VI_char>m (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "widen_ssumv16qi3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VMSUMM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (V16QImode);
+
+ emit_insn (gen_altivec_vspltisb (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsummbm (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "widen_ssumv8hi3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VMSUMSHM)))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vones = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vspltish (vones, const1_rtx));
+ emit_insn (gen_altivec_vmsumshm (operands[0], operands[1], vones, operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_unpacks_hi_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vupkhsb (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_expand "vec_unpacks_hi_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vupkhsh (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_expand "vec_unpacks_lo_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vupklsb (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_expand "vec_unpacks_lo_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vupklsh (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "vperm_v8hiv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VPERMSI))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "vperm_v16qiv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")]
+ UNSPEC_VPERMHI))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+
+(define_expand "vec_unpacku_hi_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKHUB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V8HImode);
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ emit_insn (gen_altivec_vspltish (vzero, const0_rtx));
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 0);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 1);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 2);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 3);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 4);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 5);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 6);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 7);
+
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
+ DONE;
+}")
+
+(define_expand "vec_unpacku_hi_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKHUH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 0);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 1);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 2);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 3);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 4);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 5);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 6);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 7);
+
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
+ DONE;
+}")
+
+(define_expand "vec_unpacku_lo_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")]
+ UNSPEC_VUPKLUB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V8HImode);
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ emit_insn (gen_altivec_vspltish (vzero, const0_rtx));
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 8);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 9);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 10);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 12);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 13);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 14);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 15);
+
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_vperm_v16qiv8hi (operands[0], operands[1], vzero, mask));
+ DONE;
+}")
+
+(define_expand "vec_unpacku_lo_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")]
+ UNSPEC_VUPKLUH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx vzero = gen_reg_rtx (V4SImode);
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ emit_insn (gen_altivec_vspltisw (vzero, const0_rtx));
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 8);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 9);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 10);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 12);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 13);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 14);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 15);
+
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_vperm_v8hiv4si (operands[0], operands[1], vzero, mask));
+ DONE;
+}")
+
+(define_expand "vec_widen_umult_hi_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULWHUB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V8HImode);
+ rtx vo = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrghh (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_umult_lo_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULWLUB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V8HImode);
+ rtx vo = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vmuleub (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmuloub (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrglh (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_smult_hi_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULWHSB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V8HImode);
+ rtx vo = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrghh (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_smult_lo_v16qi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")]
+ UNSPEC_VMULWLSB))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V8HImode);
+ rtx vo = gen_reg_rtx (V8HImode);
+
+ emit_insn (gen_altivec_vmulesb (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulosb (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrglh (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_umult_hi_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULWHUH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V4SImode);
+ rtx vo = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrghw (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_umult_lo_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULWLUH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V4SImode);
+ rtx vo = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vmuleuh (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulouh (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrglw (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_smult_hi_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULWHSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V4SImode);
+ rtx vo = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrghw (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_widen_smult_lo_v8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VMULWLSH))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx ve = gen_reg_rtx (V4SImode);
+ rtx vo = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_altivec_vmulesh (ve, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmulosh (vo, operands[1], operands[2]));
+ emit_insn (gen_altivec_vmrglw (operands[0], ve, vo));
+ DONE;
+}")
+
+(define_expand "vec_pack_trunc_v8hi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")]
+ UNSPEC_VPKUHUM))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vpkuhum (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_pack_trunc_v4si"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")]
+ UNSPEC_VPKUWUM))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vpkuwum (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "negv4sf2"
+ [(use (match_operand:V4SF 0 "register_operand" ""))
+ (use (match_operand:V4SF 1 "register_operand" ""))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx neg0;
+
+ /* Generate [-0.0, -0.0, -0.0, -0.0]. */
+ neg0 = gen_reg_rtx (V4SImode);
+ emit_insn (gen_altivec_vspltisw (neg0, constm1_rtx));
+ emit_insn (gen_vashlv4si3 (neg0, neg0, neg0));
+
+ /* XOR */
+ emit_insn (gen_xorv4sf3 (operands[0],
+ gen_lowpart (V4SFmode, neg0), operands[1]));
+
+ DONE;
+}")
+
+;; The Vector SIMD PEM v2.06c defines LVLX, LVLXL, LVRX, LVRXL,
+;; STVLX, STVLXL, STVRX, and STVRXL, which are available only on Cell.
+(define_insn "altivec_lvlx"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
+ UNSPEC_LVLX))]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "lvlx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvlxl"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
+ UNSPEC_LVLXL))]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "lvlxl %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvrx"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
+ UNSPEC_LVRX))]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "lvrx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvrxl"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "memory_operand" "Z")]
+ UNSPEC_LVRXL))]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "lvrxl %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_stvlx"
+ [(parallel
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVLX)])]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "stvlx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvlxl"
+ [(parallel
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVLXL)])]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "stvlxl %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvrx"
+ [(parallel
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVRX)])]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "stvrx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvrxl"
+ [(parallel
+ [(set (match_operand:V4SI 0 "memory_operand" "=Z")
+ (match_operand:V4SI 1 "register_operand" "v"))
+ (unspec [(const_int 0)] UNSPEC_STVRXL)])]
+ "TARGET_ALTIVEC && rs6000_cpu == PROCESSOR_CELL"
+ "stvrxl %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_expand "vec_extract_evenv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")]
+ UNSPEC_EXTEVEN_V4SI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 1);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 2);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 3);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 9);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 10);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 18);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 19);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 25);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 26);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 27);
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_altivec_vperm_v4si (operands[0], operands[1], operands[2], mask));
+
+ DONE;
+}")
+
+(define_expand "vec_extract_evenv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "register_operand" "")]
+ UNSPEC_EXTEVEN_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 1);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 2);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 3);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 9);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 10);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 11);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 18);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 19);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 25);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 26);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 27);
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_altivec_vperm_v4sf (operands[0], operands[1], operands[2], mask));
+
+ DONE;
+}")
+
+(define_expand "vec_extract_evenv8hi"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "")
+ (match_operand:V8HI 2 "register_operand" "")]
+ UNSPEC_EXTEVEN_V8HI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 1);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 4);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 5);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 9);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 12);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 13);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 17);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 20);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 21);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 25);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 28);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 29);
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_altivec_vperm_v8hi (operands[0], operands[1], operands[2], mask));
+
+ DONE;
+}")
+
+(define_expand "vec_extract_evenv16qi"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "")
+ (match_operand:V16QI 2 "register_operand" "")]
+ UNSPEC_EXTEVEN_V16QI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 0);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 2);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 4);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 6);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 8);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 10);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 12);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 14);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 16);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 18);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 20);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 22);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 24);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 26);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 28);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 30);
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_altivec_vperm_v16qi (operands[0], operands[1], operands[2], mask));
+
+ DONE;
+}")
+
+(define_expand "vec_extract_oddv4si"
+ [(set (match_operand:V4SI 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "")
+ (match_operand:V4SI 2 "register_operand" "")]
+ UNSPEC_EXTODD_V4SI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 4);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 5);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 6);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 7);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 12);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 13);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 14);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 15);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 20);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 21);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 22);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 23);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 28);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 29);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 30);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 31);
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_altivec_vperm_v4si (operands[0], operands[1], operands[2], mask));
+
+ DONE;
+}")
+
+(define_expand "vec_extract_oddv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "register_operand" "")]
+ UNSPEC_EXTODD_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx mask = gen_reg_rtx (V16QImode);
+ rtvec v = rtvec_alloc (16);
+
+ RTVEC_ELT (v, 0) = gen_rtx_CONST_INT (QImode, 4);
+ RTVEC_ELT (v, 1) = gen_rtx_CONST_INT (QImode, 5);
+ RTVEC_ELT (v, 2) = gen_rtx_CONST_INT (QImode, 6);
+ RTVEC_ELT (v, 3) = gen_rtx_CONST_INT (QImode, 7);
+ RTVEC_ELT (v, 4) = gen_rtx_CONST_INT (QImode, 12);
+ RTVEC_ELT (v, 5) = gen_rtx_CONST_INT (QImode, 13);
+ RTVEC_ELT (v, 6) = gen_rtx_CONST_INT (QImode, 14);
+ RTVEC_ELT (v, 7) = gen_rtx_CONST_INT (QImode, 15);
+ RTVEC_ELT (v, 8) = gen_rtx_CONST_INT (QImode, 20);
+ RTVEC_ELT (v, 9) = gen_rtx_CONST_INT (QImode, 21);
+ RTVEC_ELT (v, 10) = gen_rtx_CONST_INT (QImode, 22);
+ RTVEC_ELT (v, 11) = gen_rtx_CONST_INT (QImode, 23);
+ RTVEC_ELT (v, 12) = gen_rtx_CONST_INT (QImode, 28);
+ RTVEC_ELT (v, 13) = gen_rtx_CONST_INT (QImode, 29);
+ RTVEC_ELT (v, 14) = gen_rtx_CONST_INT (QImode, 30);
+ RTVEC_ELT (v, 15) = gen_rtx_CONST_INT (QImode, 31);
+ emit_insn (gen_vec_initv16qi (mask, gen_rtx_PARALLEL (V16QImode, v)));
+ emit_insn (gen_altivec_vperm_v4sf (operands[0], operands[1], operands[2], mask));
+
+ DONE;
+}")
+
+(define_insn "vpkuhum_nomode"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand 1 "register_operand" "v")
+ (match_operand 2 "register_operand" "v")]
+ UNSPEC_VPKUHUM))]
+ "TARGET_ALTIVEC"
+ "vpkuhum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "vpkuwum_nomode"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand 1 "register_operand" "v")
+ (match_operand 2 "register_operand" "v")]
+ UNSPEC_VPKUWUM))]
+ "TARGET_ALTIVEC"
+ "vpkuwum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_expand "vec_extract_oddv8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "")
+ (match_operand:V8HI 2 "register_operand" "")]
+ UNSPEC_EXTODD_V8HI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_vpkuwum_nomode (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_extract_oddv16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "")
+ (match_operand:V16QI 2 "register_operand" "")]
+ UNSPEC_EXTODD_V16QI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_vpkuhum_nomode (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+(define_expand "vec_interleave_highv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "register_operand" "")]
+ UNSPEC_INTERHI_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmrghsf (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_interleave_lowv4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "")
+ (match_operand:V4SF 2 "register_operand" "")]
+ UNSPEC_INTERLO_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmrglsf (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_interleave_high<mode>"
+ [(set (match_operand:VI 0 "register_operand" "")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "")
+ (match_operand:VI 2 "register_operand" "")]
+ UNSPEC_INTERHI))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmrgh<VI_char> (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_interleave_low<mode>"
+ [(set (match_operand:VI 0 "register_operand" "")
+ (unspec:VI [(match_operand:VI 1 "register_operand" "")
+ (match_operand:VI 2 "register_operand" "")]
+ UNSPEC_INTERLO))]
+ "TARGET_ALTIVEC"
+ "
+{
+ emit_insn (gen_altivec_vmrgl<VI_char> (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_unpacks_float_hi_v8hi"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
+ UNSPEC_VUPKHS_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_unpacks_hi_v8hi (tmp, operands[1]));
+ emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
+ DONE;
+}")
+
+(define_expand "vec_unpacks_float_lo_v8hi"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
+ UNSPEC_VUPKLS_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_unpacks_lo_v8hi (tmp, operands[1]));
+ emit_insn (gen_altivec_vcfsx (operands[0], tmp, const0_rtx));
+ DONE;
+}")
+
+(define_expand "vec_unpacku_float_hi_v8hi"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
+ UNSPEC_VUPKHU_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_unpacku_hi_v8hi (tmp, operands[1]));
+ emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
+ DONE;
+}")
+
+(define_expand "vec_unpacku_float_lo_v8hi"
+ [(set (match_operand:V4SF 0 "register_operand" "")
+ (unspec:V4SF [(match_operand:V8HI 1 "register_operand" "")]
+ UNSPEC_VUPKLU_V4SF))]
+ "TARGET_ALTIVEC"
+ "
+{
+ rtx tmp = gen_reg_rtx (V4SImode);
+
+ emit_insn (gen_vec_unpacku_lo_v8hi (tmp, operands[1]));
+ emit_insn (gen_altivec_vcfux (operands[0], tmp, const0_rtx));
+ DONE;
+}")
diff --git a/gcc-4.4.3/gcc/config/rs6000/biarch64.h b/gcc-4.4.3/gcc/config/rs6000/biarch64.h
new file mode 100644
index 000000000..29e5b029b
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/biarch64.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for 32/64 bit powerpc.
+ Copyright (C) 2003, 2007, 2009 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Specify this in a cover file to provide bi-architecture (32/64) support. */
+#define RS6000_BI_ARCH 1
diff --git a/gcc-4.4.3/gcc/config/rs6000/cell.md b/gcc-4.4.3/gcc/config/rs6000/cell.md
new file mode 100644
index 000000000..3fffd2740
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/cell.md
@@ -0,0 +1,400 @@
+;; Scheduling description for the Cell processor.
+;; Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007
+;; Free Software Foundation, Inc.
+;; Contributed by Sony Computer Entertainment, Inc.
+
+
+;; This file is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3 of the License, or (at your option)
+;; any later version.
+
+;; This file is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Sources: BE BOOK4 (/sfs/enc/doc/PPU_BookIV_DD3.0_latest.pdf)
+
+;; BE Architecture *DD3.0 and DD3.1*
+;; This file simulates the PPU processor unit pipeline backend; see manual
+;; P24.  Manual P27 describes the stall and flush points.
+;; IU, XU, VSU: the dispatcher decodes and dispatches 2 insns per cycle in
+;; program order; the grouped addresses are aligned by 8.
+;; This file simulates only the single-thread situation.
+;; XU executes all fixed-point insns (3 units: a simple ALU, a complex
+;; unit, and a load/store unit).
+;; VSU executes all scalar floating-point insns (a float unit) and VMX
+;; insns (VMX unit, 4 sub-units: simple, permute, complex, floating point).
+
+;; Dual issue combination
+
+;; FXU LSU BR VMX VMX
+;; (sx,cx,vsu_fp,fp_arith) (perm,vsu_ls,fp_ls)
+;;FXU X
+;;LSU X X X
+;;BR X
+;;VMX(sx,cx,vsu_fp,fp_arith) X
+;;VMX(perm,vsu_ls,fp_ls) X
+;; X are illegal combination.
+
+;; Dual issue exceptions:
+;;(1) nop-pipelined FXU instr in slot 0
+;;(2) non-pipelined FPU inst in slot 0
+;; CSI instr (context-synchronizing insn)
+;; Microcode insn
+
+;; BRU unit: bru (no register stall), bru_cr (cr register stall)
+;; VSU unit: vus (vmx simple), vup (vmx permute), vuc (vmx complex),
+;; vuf (vmx float), fpu (floats).  fpu_div is hypothetical; it is used
+;; for non-pipelined simulation.
+;; Microcoded insns stall at least 7 cycles to fetch the first instr from
+;; ROM; micro instructions are not dual issued.
+
+;; slot0 is older than slot1;
+;; a non-pipelined insn needs to be in slot1 to avoid a 1-cycle stall
+
+;; There are several stall points:
+;; IB2: only one thread stalls if we stall here, so try to stall here as
+;; much as we can
+;; condition (1): insert nop, OR and ORI instruction forms
+;; condition (2): a flush happens, in case of RAW, WAW, D-ERAT miss, or
+;; CR0 access during stdcx or stwcx
+;; IS2 stall ;; see Page 91 for details
+;; VQ8 stall
+;; An IS2 stall can be activated by a VQ8 stall together with trying to
+;; issue a vsu instr to the vsu issue queue
+
+;;(define_automaton "cellxu")
+
+;;(define_cpu_unit "fxu_cell,lsu_cell,bru_cell,vsu1_cell,vsu2_cell" "cellxu")
+
+;; ndfa
+(define_automaton "cellxu,cellvsu,cellbru,cell_mis")
+
+(define_cpu_unit "fxu_cell,lsu_cell" "cellxu")
+(define_cpu_unit "bru_cell" "cellbru")
+(define_cpu_unit "vsu1_cell,vsu2_cell" "cellvsu")
+
+(define_cpu_unit "slot0,slot1" "cell_mis")
+
+(absence_set "slot0" "slot1")
+
+(define_reservation "nonpipeline" "fxu_cell+lsu_cell+vsu1_cell+vsu2_cell")
+(define_reservation "slot01" "slot0|slot1")
+
+
+;; Load/store
+;; lmw, lswi, lswx are generated only when optimizing for space (MC);
+;; these instrs are not simulated
+(define_insn_reservation "cell-load" 2
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "cell"))
+ "slot01,lsu_cell")
+
+;; ldux, ldu, lbzux, lbzu: hardware breaks these down into two instrs;
+;; if with 32-byte alignment, CMC
+(define_insn_reservation "cell-load-ux" 2
+ (and (eq_attr "type" "load_ux,load_u")
+ (eq_attr "cpu" "cell"))
+ "slot01,fxu_cell+lsu_cell")
+
+;; lha, lhax, lhau, lhaux, lwa, lwax, lwaux, MC, latency unknown
+;; 11/7, 11/8, 11/12
+(define_insn_reservation "cell-load-ext" 2
+ (and (eq_attr "type" "load_ext,load_ext_u,load_ext_ux")
+ (eq_attr "cpu" "cell"))
+ "slot01,fxu_cell+lsu_cell")
+
+;;lfs,lfsx,lfd,lfdx, 1 cycle
+(define_insn_reservation "cell-fpload" 1
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "cell"))
+ "vsu2_cell+lsu_cell+slot01")
+
+;; lfsu, lfsux, lfdu, lfdux: 1 cycle (fpr), 2 cycles (gpr)
+(define_insn_reservation "cell-fpload-update" 1
+ (and (eq_attr "type" "fpload,fpload_u,fpload_ux")
+ (eq_attr "cpu" "cell"))
+ "fxu_cell+vsu2_cell+lsu_cell+slot01")
+
+(define_insn_reservation "cell-vecload" 2
+ (and (eq_attr "type" "vecload")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu2_cell+lsu_cell")
+
+;;st? stw(MC)
+(define_insn_reservation "cell-store" 1
+ (and (eq_attr "type" "store")
+ (eq_attr "cpu" "cell"))
+ "lsu_cell+slot01")
+
+;; stdux, stdu (hardware breaks these into store and add); 2 for the update reg
+(define_insn_reservation "cell-store-update" 1
+ (and (eq_attr "type" "store_ux,store_u")
+ (eq_attr "cpu" "cell"))
+ "fxu_cell+lsu_cell+slot01")
+
+(define_insn_reservation "cell-fpstore" 1
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "cell"))
+ "vsu2_cell+lsu_cell+slot01")
+
+(define_insn_reservation "cell-fpstore-update" 1
+ (and (eq_attr "type" "fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "cell"))
+ "vsu2_cell+fxu_cell+lsu_cell+slot01")
+
+(define_insn_reservation "cell-vecstore" 1
+ (and (eq_attr "type" "vecstore")
+ (eq_attr "cpu" "cell"))
+ "vsu2_cell+lsu_cell+slot01")
+
+;; Integer latency is 2 cycles
+(define_insn_reservation "cell-integer" 2
+ (and (eq_attr "type" "integer,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "cell"))
+ "slot01,fxu_cell")
+
+;; Insns of type "two" (two integer ops) have a latency of 4 cycles
+(define_insn_reservation "cell-two" 4
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "cell"))
+ "slot01,fxu_cell,fxu_cell*2")
+
+;; Insns of type "three" (three integer ops) have a latency of 6 cycles
+(define_insn_reservation "cell-three" 6
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "cell"))
+ "slot01,fxu_cell,fxu_cell*4")
+
+;; rlwimi, alter cr0
+(define_insn_reservation "cell-insert" 2
+ (and (eq_attr "type" "insert_word")
+ (eq_attr "cpu" "cell"))
+ "slot01,fxu_cell")
+
+;; cmpi, cmpli, cmpla, add, addo, sub, subo, alter cr0
+(define_insn_reservation "cell-cmp" 1
+ (and (eq_attr "type" "cmp")
+ (eq_attr "cpu" "cell"))
+ "fxu_cell+slot01")
+
+;; add, addo, sub, subo, alter cr0, rldcli, rlwinm
+(define_insn_reservation "cell-fast-cmp" 2
+ (and (and (eq_attr "type" "fast_compare,delayed_compare,compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "cell"))
+ (eq_attr "cell_micro" "not"))
+ "slot01,fxu_cell")
+
+(define_insn_reservation "cell-cmp-microcoded" 9
+ (and (and (eq_attr "type" "fast_compare,delayed_compare,compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "cell"))
+ (eq_attr "cell_micro" "always"))
+ "slot0+slot1,fxu_cell,fxu_cell*7")
+
+;; mulld
+(define_insn_reservation "cell-lmul" 15
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*13")
+
+;; mulld. is microcoded
+(define_insn_reservation "cell-lmul-cmp" 22
+ (and (eq_attr "type" "lmul_compare")
+ (eq_attr "cpu" "cell"))
+ "slot0+slot1,nonpipeline,nonpipeline*20")
+
+;; mulli, 6 cycles
+(define_insn_reservation "cell-imul23" 6
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*4")
+
+;; mullw, 9
+(define_insn_reservation "cell-imul" 9
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*7")
+
+;; divide
+(define_insn_reservation "cell-idiv" 32
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*30")
+
+(define_insn_reservation "cell-ldiv" 64
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*62")
+
+;;mflr and mfctr are pipelined
+(define_insn_reservation "cell-mfjmpr" 1
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "cell"))
+ "slot01+bru_cell")
+
+;;mtlr and mtctr,
+;;mtspr fully pipelined
+(define_insn_reservation "cell-mtjmpr" 1
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "cell"))
+ "bru_cell+slot01")
+
+;; Branches
+;; b, ba, bl, bla: unconditional branches always predict correctly, so the
+;; latency is n/a
+;; bcctr, bcctrl: latency 2, in practice adjusted to 4
+(define_insn_reservation "cell-branch" 1
+ (and (eq_attr "type" "branch")
+ (eq_attr "cpu" "cell"))
+ "bru_cell+slot1")
+
+(define_insn_reservation "cell-branchreg" 1
+ (and (eq_attr "type" "jmpreg")
+ (eq_attr "cpu" "cell"))
+ "bru_cell+slot1")
+
+;; cr hazard
+;; page 90: special cases for CR hazards; only one instr can access the cr
+;; per cycle
+;; if an insn reads the CR following a stwcx, the pipeline stalls until the
+;; stwcx finishes
+(define_insn_reservation "cell-crlogical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "cell"))
+ "bru_cell+slot01")
+
+;; mfcrf and mfcr take about 34 cycles and are not pipelined
+(define_insn_reservation "cell-mfcr" 34
+ (and (eq_attr "type" "mfcrf,mfcr")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*32")
+
+;; mtcrf (1 field)
+(define_insn_reservation "cell-mtcrf" 1
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "cell"))
+ "fxu_cell+slot01")
+
+; Basic FP latency is 10 cycles, throughput is 1/cycle
+(define_insn_reservation "cell-fp" 10
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu1_cell,vsu1_cell*8")
+
+(define_insn_reservation "cell-fpcompare" 1
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "cell"))
+ "vsu1_cell+slot01")
+
+;; sdiv throughput 1/74, not pipelined but only in the FPU
+(define_insn_reservation "cell-sdiv" 74
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*72")
+
+;; fsqrt throughput 1/84, not pipelined but only in the FPU
+(define_insn_reservation "cell-sqrt" 84
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "cell"))
+ "slot1,nonpipeline,nonpipeline*82")
+
+; VMX
+(define_insn_reservation "cell-vecsimple" 4
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu1_cell,vsu1_cell*2")
+
+;; mult, div, madd
+(define_insn_reservation "cell-veccomplex" 10
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu1_cell,vsu1_cell*8")
+
+;; TODO: add support for recording instructions
+(define_insn_reservation "cell-veccmp" 4
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu1_cell,vsu1_cell*2")
+
+(define_insn_reservation "cell-vecfloat" 12
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu1_cell,vsu1_cell*10")
+
+(define_insn_reservation "cell-vecperm" 4
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "cell"))
+ "slot01,vsu2_cell,vsu2_cell*2")
+
+;; New for 4.2, syncs
+
+(define_insn_reservation "cell-sync" 11
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "cell"))
+ "slot01,lsu_cell,lsu_cell*9")
+
+(define_insn_reservation "cell-isync" 11
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "cell"))
+ "slot01,lsu_cell,lsu_cell*9")
+
+(define_insn_reservation "cell-load_l" 11
+ (and (eq_attr "type" "load_l")
+ (eq_attr "cpu" "cell"))
+ "slot01,lsu_cell,lsu_cell*9")
+
+(define_insn_reservation "cell-store_c" 11
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "cell"))
+ "slot01,lsu_cell,lsu_cell*9")
+
+;; RAW register dependency
+
+;; addi r3, r3, 1
+;; lwz r4,offset(r3)
+;; there is a 5-cycle delay for r3 bypassing
+;; there is a 5-cycle delay for a dependent load after a load
+(define_bypass 5 "cell-integer" "cell-load")
+(define_bypass 5 "cell-integer" "cell-load-ext")
+(define_bypass 5 "cell-load,cell-load-ext" "cell-load,cell-load-ext")
+
+;; there is a 6 cycle delay after a fp compare until you can use the cr.
+(define_bypass 6 "cell-fpcompare" "cell-branch,cell-branchreg,cell-mfcr,cell-crlogical")
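+;; Illustrative dependent pair for this bypass (hypothetical code, not
+;; taken from this file):
+;;   fcmpu cr0,f1,f2
+;;   beq cr0,target    ; reads cr0, so it waits ~6 cycles on the compare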
+
+;; VXU float RAW
+(define_bypass 11 "cell-vecfloat" "cell-vecfloat")
+
+;; VXU and FPU
+(define_bypass 6 "cell-veccomplex" "cell-vecsimple")
+;;(define_bypass 6 "cell-veccompare" "cell-branch,cell-branchreg")
+(define_bypass 3 "cell-vecfloat" "cell-veccomplex")
+;; this is not correct: it is a stall in general, not dependent on the result
+(define_bypass 13 "cell-vecstore" "cell-fpstore")
+;; this is not correct: it can never be true; it is not dependent on the result
+(define_bypass 7 "cell-fp" "cell-fpload")
+;; vsu1 should avoid writing to the same target register as vsu2 insn
+;; within 12 cycles.
+
+;; WAW hazard
+
+;; the target of VSU estimate should not be reused within 10 dispatch groups
+;; the target of VSU float should not be reused within 8 dispatch groups
+;; the target of VSU complex should not be reused within 5 dispatch groups
+;; FP LOAD should not reuse an FPU Arithmetic target within 6 dispatch groups
+
+;; mtctr-bcctr/bcctrl: the branch target ctr register shadow updates at
+;; the ex4 stage (10 cycles)
+(define_bypass 10 "cell-mtjmpr" "cell-branchreg")
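+;; Illustrative pair (hypothetical) that pays this 10-cycle bypass:
+;;   mtctr r3
+;;   bctr              ; waits for the ctr shadow update from the mtctr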
+
+;; Things that are not simulated:
+;; update instructions: the update of the address gpr is not simulated
+;; vrefp, vrsqrtefp have latency 14; currently simulated as 12-cycle float
+;; insns
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/constraints.md b/gcc-4.4.3/gcc/config/rs6000/constraints.md
new file mode 100644
index 000000000..7f551994b
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/constraints.md
@@ -0,0 +1,161 @@
+;; Constraint definitions for RS6000
+;; Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "f" "TARGET_HARD_FLOAT && TARGET_FPRS
+ ? FLOAT_REGS : NO_REGS"
+ "@internal")
+
+(define_register_constraint "b" "BASE_REGS"
+ "@internal")
+
+(define_register_constraint "h" "SPECIAL_REGS"
+ "@internal")
+
+(define_register_constraint "q" "MQ_REGS"
+ "@internal")
+
+(define_register_constraint "c" "CTR_REGS"
+ "@internal")
+
+(define_register_constraint "l" "LINK_REGS"
+ "@internal")
+
+(define_register_constraint "v" "ALTIVEC_REGS"
+ "@internal")
+
+(define_register_constraint "x" "CR0_REGS"
+ "@internal")
+
+(define_register_constraint "y" "CR_REGS"
+ "@internal")
+
+(define_register_constraint "z" "XER_REGS"
+ "@internal")
+
+;; Integer constraints
+
+(define_constraint "I"
+ "A signed 16-bit constant"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) (ival + 0x8000) < 0x10000")))
+
+(define_constraint "J"
+ "high-order 16 bits nonzero"
+ (and (match_code "const_int")
+ (match_test "(ival & (~ (unsigned HOST_WIDE_INT) 0xffff0000)) == 0")))
+
+(define_constraint "K"
+ "low-order 16 bits nonzero"
+ (and (match_code "const_int")
+ (match_test "(ival & (~ (HOST_WIDE_INT) 0xffff)) == 0")))
+
+(define_constraint "L"
+ "signed 16-bit constant shifted left 16 bits"
+ (and (match_code "const_int")
+ (match_test "((ival & 0xffff) == 0
+ && (ival >> 31 == -1 || ival >> 31 == 0))")))
+
+(define_constraint "M"
+ "constant greater than 31"
+ (and (match_code "const_int")
+ (match_test "ival > 31")))
+
+(define_constraint "N"
+ "positive constant that is an exact power of two"
+ (and (match_code "const_int")
+ (match_test "ival > 0 && exact_log2 (ival) >= 0")))
+
+(define_constraint "O"
+ "constant zero"
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "P"
+ "constant whose negation is signed 16-bit constant"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT) ((- ival) + 0x8000) < 0x10000")))
+
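+;; For example (hypothetical operand, not from this file), an add pattern
+;; can accept a register or a signed 16-bit immediate with "r,I", letting
+;; the compiler choose between add and addi:
+;;   (match_operand:SI 2 "reg_or_short_operand" "r,I")
+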
+;; Floating-point constraints
+
+(define_constraint "G"
+ "Constant that can be copied into GPR with two insns for DF/DI
+ and one for SF."
+ (and (match_code "const_double")
+ (match_test "num_insns_constant (op, mode)
+ == (mode == SFmode ? 1 : 2)")))
+
+(define_constraint "H"
+ "DF/DI constant that takes three insns."
+ (and (match_code "const_double")
+ (match_test "num_insns_constant (op, mode) == 3")))
+
+;; Memory constraints
+
+(define_memory_constraint "Q"
+ "Memory operand that is just an offset from a reg"
+ (and (match_code "mem")
+ (match_test "GET_CODE (XEXP (op, 0)) == REG")))
+
+(define_memory_constraint "Y"
+ "Indexed or word-aligned displacement memory operand"
+ (match_operand 0 "word_offset_memref_operand"))
+
+(define_memory_constraint "Z"
+ "Indexed or indirect memory operand"
+ (match_operand 0 "indexed_or_indirect_operand"))
+
+;; Address constraints
+
+(define_address_constraint "a"
+ "Indexed or indirect address operand"
+ (match_operand 0 "indexed_or_indirect_address"))
+
+(define_constraint "R"
+ "AIX TOC entry"
+ (match_test "legitimate_constant_pool_address_p (op)"))
+
+;; General constraints
+
+(define_constraint "S"
+ "Constant that can be placed into a 64-bit mask operand"
+ (match_operand 0 "mask64_operand"))
+
+(define_constraint "T"
+ "Constant that can be placed into a 32-bit mask operand"
+ (match_operand 0 "mask_operand"))
+
+(define_constraint "U"
+ "V.4 small data reference"
+ (and (match_test "DEFAULT_ABI == ABI_V4")
+ (match_operand 0 "small_data_operand")))
+
+(define_constraint "t"
+ "AND masks that can be performed by two rldic{l,r} insns
+ (but excluding those that could match other constraints of anddi3)"
+ (and (and (and (match_operand 0 "mask64_2_operand")
+ (match_test "(fixed_regs[CR0_REGNO]
+ || !logical_operand (op, DImode))"))
+ (not (match_operand 0 "mask_operand")))
+ (not (match_operand 0 "mask64_operand"))))
+
+(define_constraint "W"
+ "vector constant that does not require memory"
+ (match_operand 0 "easy_vector_constant"))
diff --git a/gcc-4.4.3/gcc/config/rs6000/crtresfpr.asm b/gcc-4.4.3/gcc/config/rs6000/crtresfpr.asm
new file mode 100644
index 000000000..7adc9c1e7
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/crtresfpr.asm
@@ -0,0 +1,79 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+HIDDEN_FUNC(_restfpr_14) lfd 14,-144(11) /* restore fp registers */
+HIDDEN_FUNC(_restfpr_15) lfd 15,-136(11)
+HIDDEN_FUNC(_restfpr_16) lfd 16,-128(11)
+HIDDEN_FUNC(_restfpr_17) lfd 17,-120(11)
+HIDDEN_FUNC(_restfpr_18) lfd 18,-112(11)
+HIDDEN_FUNC(_restfpr_19) lfd 19,-104(11)
+HIDDEN_FUNC(_restfpr_20) lfd 20,-96(11)
+HIDDEN_FUNC(_restfpr_21) lfd 21,-88(11)
+HIDDEN_FUNC(_restfpr_22) lfd 22,-80(11)
+HIDDEN_FUNC(_restfpr_23) lfd 23,-72(11)
+HIDDEN_FUNC(_restfpr_24) lfd 24,-64(11)
+HIDDEN_FUNC(_restfpr_25) lfd 25,-56(11)
+HIDDEN_FUNC(_restfpr_26) lfd 26,-48(11)
+HIDDEN_FUNC(_restfpr_27) lfd 27,-40(11)
+HIDDEN_FUNC(_restfpr_28) lfd 28,-32(11)
+HIDDEN_FUNC(_restfpr_29) lfd 29,-24(11)
+HIDDEN_FUNC(_restfpr_30) lfd 30,-16(11)
+HIDDEN_FUNC(_restfpr_31) lfd 31,-8(11)
+ blr
+FUNC_END(_restfpr_31)
+FUNC_END(_restfpr_30)
+FUNC_END(_restfpr_29)
+FUNC_END(_restfpr_28)
+FUNC_END(_restfpr_27)
+FUNC_END(_restfpr_26)
+FUNC_END(_restfpr_25)
+FUNC_END(_restfpr_24)
+FUNC_END(_restfpr_23)
+FUNC_END(_restfpr_22)
+FUNC_END(_restfpr_21)
+FUNC_END(_restfpr_20)
+FUNC_END(_restfpr_19)
+FUNC_END(_restfpr_18)
+FUNC_END(_restfpr_17)
+FUNC_END(_restfpr_16)
+FUNC_END(_restfpr_15)
+FUNC_END(_restfpr_14)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/crtresgpr.asm b/gcc-4.4.3/gcc/config/rs6000/crtresgpr.asm
new file mode 100644
index 000000000..4ed3d8e21
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/crtresgpr.asm
@@ -0,0 +1,79 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+HIDDEN_FUNC(_restgpr_14) lwz 14,-72(11) /* restore gp registers */
+HIDDEN_FUNC(_restgpr_15) lwz 15,-68(11)
+HIDDEN_FUNC(_restgpr_16) lwz 16,-64(11)
+HIDDEN_FUNC(_restgpr_17) lwz 17,-60(11)
+HIDDEN_FUNC(_restgpr_18) lwz 18,-56(11)
+HIDDEN_FUNC(_restgpr_19) lwz 19,-52(11)
+HIDDEN_FUNC(_restgpr_20) lwz 20,-48(11)
+HIDDEN_FUNC(_restgpr_21) lwz 21,-44(11)
+HIDDEN_FUNC(_restgpr_22) lwz 22,-40(11)
+HIDDEN_FUNC(_restgpr_23) lwz 23,-36(11)
+HIDDEN_FUNC(_restgpr_24) lwz 24,-32(11)
+HIDDEN_FUNC(_restgpr_25) lwz 25,-28(11)
+HIDDEN_FUNC(_restgpr_26) lwz 26,-24(11)
+HIDDEN_FUNC(_restgpr_27) lwz 27,-20(11)
+HIDDEN_FUNC(_restgpr_28) lwz 28,-16(11)
+HIDDEN_FUNC(_restgpr_29) lwz 29,-12(11)
+HIDDEN_FUNC(_restgpr_30) lwz 30,-8(11)
+HIDDEN_FUNC(_restgpr_31) lwz 31,-4(11)
+ blr
+FUNC_END(_restgpr_31)
+FUNC_END(_restgpr_30)
+FUNC_END(_restgpr_29)
+FUNC_END(_restgpr_28)
+FUNC_END(_restgpr_27)
+FUNC_END(_restgpr_26)
+FUNC_END(_restgpr_25)
+FUNC_END(_restgpr_24)
+FUNC_END(_restgpr_23)
+FUNC_END(_restgpr_22)
+FUNC_END(_restgpr_21)
+FUNC_END(_restgpr_20)
+FUNC_END(_restgpr_19)
+FUNC_END(_restgpr_18)
+FUNC_END(_restgpr_17)
+FUNC_END(_restgpr_16)
+FUNC_END(_restgpr_15)
+FUNC_END(_restgpr_14)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/crtresxfpr.asm b/gcc-4.4.3/gcc/config/rs6000/crtresxfpr.asm
new file mode 100644
index 000000000..5a87a9805
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/crtresxfpr.asm
@@ -0,0 +1,84 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+/* In addition to restoring the fp registers, it will return to the caller's */
+/* caller */
+
+HIDDEN_FUNC(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */
+HIDDEN_FUNC(_restfpr_15_x) lfd 15,-136(11)
+HIDDEN_FUNC(_restfpr_16_x) lfd 16,-128(11)
+HIDDEN_FUNC(_restfpr_17_x) lfd 17,-120(11)
+HIDDEN_FUNC(_restfpr_18_x) lfd 18,-112(11)
+HIDDEN_FUNC(_restfpr_19_x) lfd 19,-104(11)
+HIDDEN_FUNC(_restfpr_20_x) lfd 20,-96(11)
+HIDDEN_FUNC(_restfpr_21_x) lfd 21,-88(11)
+HIDDEN_FUNC(_restfpr_22_x) lfd 22,-80(11)
+HIDDEN_FUNC(_restfpr_23_x) lfd 23,-72(11)
+HIDDEN_FUNC(_restfpr_24_x) lfd 24,-64(11)
+HIDDEN_FUNC(_restfpr_25_x) lfd 25,-56(11)
+HIDDEN_FUNC(_restfpr_26_x) lfd 26,-48(11)
+HIDDEN_FUNC(_restfpr_27_x) lfd 27,-40(11)
+HIDDEN_FUNC(_restfpr_28_x) lfd 28,-32(11)
+HIDDEN_FUNC(_restfpr_29_x) lfd 29,-24(11)
+HIDDEN_FUNC(_restfpr_30_x) lfd 30,-16(11)
+HIDDEN_FUNC(_restfpr_31_x) lwz 0,4(11)
+ lfd 31,-8(11)
+ mtlr 0
+ mr 1,11
+ blr
+FUNC_END(_restfpr_31_x)
+FUNC_END(_restfpr_30_x)
+FUNC_END(_restfpr_29_x)
+FUNC_END(_restfpr_28_x)
+FUNC_END(_restfpr_27_x)
+FUNC_END(_restfpr_26_x)
+FUNC_END(_restfpr_25_x)
+FUNC_END(_restfpr_24_x)
+FUNC_END(_restfpr_23_x)
+FUNC_END(_restfpr_22_x)
+FUNC_END(_restfpr_21_x)
+FUNC_END(_restfpr_20_x)
+FUNC_END(_restfpr_19_x)
+FUNC_END(_restfpr_18_x)
+FUNC_END(_restfpr_17_x)
+FUNC_END(_restfpr_16_x)
+FUNC_END(_restfpr_15_x)
+FUNC_END(_restfpr_14_x)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/crtresxgpr.asm b/gcc-4.4.3/gcc/config/rs6000/crtresxgpr.asm
new file mode 100644
index 000000000..9317131c8
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/crtresxgpr.asm
@@ -0,0 +1,82 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+HIDDEN_FUNC(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */
+HIDDEN_FUNC(_restgpr_15_x) lwz 15,-68(11)
+HIDDEN_FUNC(_restgpr_16_x) lwz 16,-64(11)
+HIDDEN_FUNC(_restgpr_17_x) lwz 17,-60(11)
+HIDDEN_FUNC(_restgpr_18_x) lwz 18,-56(11)
+HIDDEN_FUNC(_restgpr_19_x) lwz 19,-52(11)
+HIDDEN_FUNC(_restgpr_20_x) lwz 20,-48(11)
+HIDDEN_FUNC(_restgpr_21_x) lwz 21,-44(11)
+HIDDEN_FUNC(_restgpr_22_x) lwz 22,-40(11)
+HIDDEN_FUNC(_restgpr_23_x) lwz 23,-36(11)
+HIDDEN_FUNC(_restgpr_24_x) lwz 24,-32(11)
+HIDDEN_FUNC(_restgpr_25_x) lwz 25,-28(11)
+HIDDEN_FUNC(_restgpr_26_x) lwz 26,-24(11)
+HIDDEN_FUNC(_restgpr_27_x) lwz 27,-20(11)
+HIDDEN_FUNC(_restgpr_28_x) lwz 28,-16(11)
+HIDDEN_FUNC(_restgpr_29_x) lwz 29,-12(11)
+HIDDEN_FUNC(_restgpr_30_x) lwz 30,-8(11)
+HIDDEN_FUNC(_restgpr_31_x) lwz 0,4(11)
+ lwz 31,-4(11)
+ mtlr 0
+ mr 1,11
+ blr
+FUNC_END(_restgpr_31_x)
+FUNC_END(_restgpr_30_x)
+FUNC_END(_restgpr_29_x)
+FUNC_END(_restgpr_28_x)
+FUNC_END(_restgpr_27_x)
+FUNC_END(_restgpr_26_x)
+FUNC_END(_restgpr_25_x)
+FUNC_END(_restgpr_24_x)
+FUNC_END(_restgpr_23_x)
+FUNC_END(_restgpr_22_x)
+FUNC_END(_restgpr_21_x)
+FUNC_END(_restgpr_20_x)
+FUNC_END(_restgpr_19_x)
+FUNC_END(_restgpr_18_x)
+FUNC_END(_restgpr_17_x)
+FUNC_END(_restgpr_16_x)
+FUNC_END(_restgpr_15_x)
+FUNC_END(_restgpr_14_x)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/crtsavfpr.asm b/gcc-4.4.3/gcc/config/rs6000/crtsavfpr.asm
new file mode 100644
index 000000000..fe40a9e13
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/crtsavfpr.asm
@@ -0,0 +1,79 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for saving floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+HIDDEN_FUNC(_savefpr_14) stfd 14,-144(11) /* save fp registers */
+HIDDEN_FUNC(_savefpr_15) stfd 15,-136(11)
+HIDDEN_FUNC(_savefpr_16) stfd 16,-128(11)
+HIDDEN_FUNC(_savefpr_17) stfd 17,-120(11)
+HIDDEN_FUNC(_savefpr_18) stfd 18,-112(11)
+HIDDEN_FUNC(_savefpr_19) stfd 19,-104(11)
+HIDDEN_FUNC(_savefpr_20) stfd 20,-96(11)
+HIDDEN_FUNC(_savefpr_21) stfd 21,-88(11)
+HIDDEN_FUNC(_savefpr_22) stfd 22,-80(11)
+HIDDEN_FUNC(_savefpr_23) stfd 23,-72(11)
+HIDDEN_FUNC(_savefpr_24) stfd 24,-64(11)
+HIDDEN_FUNC(_savefpr_25) stfd 25,-56(11)
+HIDDEN_FUNC(_savefpr_26) stfd 26,-48(11)
+HIDDEN_FUNC(_savefpr_27) stfd 27,-40(11)
+HIDDEN_FUNC(_savefpr_28) stfd 28,-32(11)
+HIDDEN_FUNC(_savefpr_29) stfd 29,-24(11)
+HIDDEN_FUNC(_savefpr_30) stfd 30,-16(11)
+HIDDEN_FUNC(_savefpr_31) stfd 31,-8(11)
+ blr
+FUNC_END(_savefpr_31)
+FUNC_END(_savefpr_30)
+FUNC_END(_savefpr_29)
+FUNC_END(_savefpr_28)
+FUNC_END(_savefpr_27)
+FUNC_END(_savefpr_26)
+FUNC_END(_savefpr_25)
+FUNC_END(_savefpr_24)
+FUNC_END(_savefpr_23)
+FUNC_END(_savefpr_22)
+FUNC_END(_savefpr_21)
+FUNC_END(_savefpr_20)
+FUNC_END(_savefpr_19)
+FUNC_END(_savefpr_18)
+FUNC_END(_savefpr_17)
+FUNC_END(_savefpr_16)
+FUNC_END(_savefpr_15)
+FUNC_END(_savefpr_14)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/crtsavgpr.asm b/gcc-4.4.3/gcc/config/rs6000/crtsavgpr.asm
new file mode 100644
index 000000000..6c5f67209
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/crtsavgpr.asm
@@ -0,0 +1,79 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+/* On PowerPC64 Linux, these functions are provided by the linker. */
+#ifndef __powerpc64__
+
+/* Routines for saving integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+
+HIDDEN_FUNC(_savegpr_14) stw 14,-72(11) /* save gp registers */
+HIDDEN_FUNC(_savegpr_15) stw 15,-68(11)
+HIDDEN_FUNC(_savegpr_16) stw 16,-64(11)
+HIDDEN_FUNC(_savegpr_17) stw 17,-60(11)
+HIDDEN_FUNC(_savegpr_18) stw 18,-56(11)
+HIDDEN_FUNC(_savegpr_19) stw 19,-52(11)
+HIDDEN_FUNC(_savegpr_20) stw 20,-48(11)
+HIDDEN_FUNC(_savegpr_21) stw 21,-44(11)
+HIDDEN_FUNC(_savegpr_22) stw 22,-40(11)
+HIDDEN_FUNC(_savegpr_23) stw 23,-36(11)
+HIDDEN_FUNC(_savegpr_24) stw 24,-32(11)
+HIDDEN_FUNC(_savegpr_25) stw 25,-28(11)
+HIDDEN_FUNC(_savegpr_26) stw 26,-24(11)
+HIDDEN_FUNC(_savegpr_27) stw 27,-20(11)
+HIDDEN_FUNC(_savegpr_28) stw 28,-16(11)
+HIDDEN_FUNC(_savegpr_29) stw 29,-12(11)
+HIDDEN_FUNC(_savegpr_30) stw 30,-8(11)
+HIDDEN_FUNC(_savegpr_31) stw 31,-4(11)
+ blr
+FUNC_END(_savegpr_31)
+FUNC_END(_savegpr_30)
+FUNC_END(_savegpr_29)
+FUNC_END(_savegpr_28)
+FUNC_END(_savegpr_27)
+FUNC_END(_savegpr_26)
+FUNC_END(_savegpr_25)
+FUNC_END(_savegpr_24)
+FUNC_END(_savegpr_23)
+FUNC_END(_savegpr_22)
+FUNC_END(_savegpr_21)
+FUNC_END(_savegpr_20)
+FUNC_END(_savegpr_19)
+FUNC_END(_savegpr_18)
+FUNC_END(_savegpr_17)
+FUNC_END(_savegpr_16)
+FUNC_END(_savegpr_15)
+FUNC_END(_savegpr_14)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-asm.h b/gcc-4.4.3/gcc/config/rs6000/darwin-asm.h
new file mode 100644
index 000000000..837b7a33e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-asm.h
@@ -0,0 +1,51 @@
+/* Macro definitions used to support 32/64-bit code in Darwin's
+ * assembly files.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* These are donated from /usr/include/architecture/ppc . */
+
+#if defined(__ppc64__)
+#define MODE_CHOICE(x, y) y
+#else
+#define MODE_CHOICE(x, y) x
+#endif
+
+#define cmpg MODE_CHOICE(cmpw, cmpd)
+#define lg MODE_CHOICE(lwz, ld)
+#define stg MODE_CHOICE(stw, std)
+#define lgx MODE_CHOICE(lwzx, ldx)
+#define stgx MODE_CHOICE(stwx, stdx)
+#define lgu MODE_CHOICE(lwzu, ldu)
+#define stgu MODE_CHOICE(stwu, stdu)
+#define lgux MODE_CHOICE(lwzux, ldux)
+#define stgux MODE_CHOICE(stwux, stdux)
+#define lgwa MODE_CHOICE(lwz, lwa)
+
+#define g_long MODE_CHOICE(long, quad) /* usage is ".g_long" */
+
+#define GPR_BYTES MODE_CHOICE(4,8) /* size of a GPR in bytes */
+#define LOG2_GPR_BYTES MODE_CHOICE(2,3) /* log2(GPR_BYTES) */
+
+#define SAVED_LR_OFFSET MODE_CHOICE(8,16) /* position of saved
+ LR in frame */
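+
+/* Illustrative expansion (not part of this header): with the macros
+   above, a single source line such as
+
+        lg r0,SAVED_LR_OFFSET(r1)
+
+   assembles as "lwz r0,8(r1)" in 32-bit code and as "ld r0,16(r1)"
+   when __ppc64__ is defined.  */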
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-fallback.c b/gcc-4.4.3/gcc/config/rs6000/darwin-fallback.c
new file mode 100644
index 000000000..4591071ea
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-fallback.c
@@ -0,0 +1,487 @@
+/* Fallback frame-state unwinder for Darwin.
+ Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifdef __ppc__
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "dwarf2.h"
+#include "unwind.h"
+#include "unwind-dw2.h"
+#include <stdint.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <signal.h>
+
+#define R_LR 65
+#define R_CTR 66
+#define R_CR2 70
+#define R_XER 76
+#define R_VR0 77
+#define R_VRSAVE 109
+#define R_VSCR 110
+#define R_SPEFSCR 112
+
+typedef unsigned long reg_unit;
+
+/* Place in GPRS the parameters to the first 'sc' instruction that would
+ have been executed if we were returning from this CONTEXT, or
+ return false if an unexpected instruction is encountered. */
+
+static bool
+interpret_libc (reg_unit gprs[32], struct _Unwind_Context *context)
+{
+ uint32_t *pc = (uint32_t *)_Unwind_GetIP (context);
+ uint32_t cr;
+ reg_unit lr = (reg_unit) pc;
+ reg_unit ctr = 0;
+ uint32_t *invalid_address = NULL;
+
+ int i;
+
+ for (i = 0; i < 13; i++)
+ gprs[i] = 1;
+ gprs[1] = _Unwind_GetCFA (context);
+ for (; i < 32; i++)
+ gprs[i] = _Unwind_GetGR (context, i);
+ cr = _Unwind_GetGR (context, R_CR2);
+
+ /* For each supported Libc, we have to track the code flow
+ all the way back into the kernel.
+
+ This code is believed to support all released Libc/Libsystem builds since
+ Jaguar 6C115, including all the security updates. To be precise,
+
+ Libc Libsystem Build(s)
+ 262~1 60~37 6C115
+ 262~1 60.2~4 6D52
+ 262~1 61~3 6F21-6F22
+ 262~1 63~24 6G30-6G37
+ 262~1 63~32 6I34-6I35
+ 262~1 63~64 6L29-6L60
+ 262.4.1~1 63~84 6L123-6R172
+
+ 320~1 71~101 7B85-7D28
+ 320~1 71~266 7F54-7F56
+ 320~1 71~288 7F112
+ 320~1 71~289 7F113
+ 320.1.3~1 71.1.1~29 7H60-7H105
+ 320.1.3~1 71.1.1~30 7H110-7H113
+ 320.1.3~1 71.1.1~31 7H114
+
+ That's a big table! It would be insane to try to keep track of
+ every little detail, so we just read the code itself and do what
+ it would do.
+ */
+
+ for (;;)
+ {
+ uint32_t ins = *pc++;
+
+ if ((ins & 0xFC000003) == 0x48000000) /* b instruction */
+ {
+ pc += ((((int32_t) ins & 0x3FFFFFC) ^ 0x2000000) - 0x2000004) / 4;
+ continue;
+ }
+ if ((ins & 0xFC600000) == 0x2C000000) /* cmpwi */
+ {
+ int32_t val1 = (int16_t) ins;
+ int32_t val2 = gprs[ins >> 16 & 0x1F];
+ /* Only beq and bne instructions are supported, so we only
+ need to set the EQ bit. */
+ uint32_t mask = 0xF << ((ins >> 21 & 0x1C) ^ 0x1C);
+ if (val1 == val2)
+ cr |= mask;
+ else
+ cr &= ~mask;
+ continue;
+ }
+ if ((ins & 0xFEC38003) == 0x40820000) /* forwards beq/bne */
+ {
+ if ((cr >> ((ins >> 16 & 0x1F) ^ 0x1F) & 1) == (ins >> 24 & 1))
+ pc += (ins & 0x7FFC) / 4 - 1;
+ continue;
+ }
+ if ((ins & 0xFC0007FF) == 0x7C000378) /* or, including mr */
+ {
+ gprs [ins >> 16 & 0x1F] = (gprs [ins >> 11 & 0x1F]
+ | gprs [ins >> 21 & 0x1F]);
+ continue;
+ }
+ if (ins >> 26 == 0x0E) /* addi, including li */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ gprs [ins >> 21 & 0x1F] = src + (int16_t) ins;
+ continue;
+ }
+ if (ins >> 26 == 0x0F) /* addis, including lis */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ gprs [ins >> 21 & 0x1F] = src + ((int16_t) ins << 16);
+ continue;
+ }
+ if (ins >> 26 == 0x20) /* lwz */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ if (p == invalid_address)
+ return false;
+ gprs [ins >> 21 & 0x1F] = *p;
+ continue;
+ }
+ if (ins >> 26 == 0x21) /* lwzu */
+ {
+ uint32_t *p = (uint32_t *)(gprs [ins >> 16 & 0x1F] += (int16_t) ins);
+ if (p == invalid_address)
+ return false;
+ gprs [ins >> 21 & 0x1F] = *p;
+ continue;
+ }
+ if (ins >> 26 == 0x24) /* stw */
+ /* What we hope this is doing is '--in_sigtramp'. We don't want
+ to actually store to memory, so just make a note of the
+ address and refuse to load from it. */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ if (p == NULL || invalid_address != NULL)
+ return false;
+ invalid_address = p;
+ continue;
+ }
+ if (ins >> 26 == 0x2E) /* lmw */
+ {
+ reg_unit src = (ins >> 16 & 0x1F) == 0 ? 0 : gprs [ins >> 16 & 0x1F];
+ uint32_t *p = (uint32_t *)(src + (int16_t) ins);
+ int i;
+
+ for (i = (ins >> 21 & 0x1F); i < 32; i++)
+ {
+ if (p == invalid_address)
+ return false;
+ gprs[i] = *p++;
+ }
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0803a6) /* mtlr */
+ {
+ lr = gprs [ins >> 21 & 0x1F];
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0802a6) /* mflr */
+ {
+ gprs [ins >> 21 & 0x1F] = lr;
+ continue;
+ }
+ if ((ins & 0xFC1FFFFF) == 0x7c0903a6) /* mtctr */
+ {
+ ctr = gprs [ins >> 21 & 0x1F];
+ continue;
+ }
+ /* The PowerPC User's Manual says that bit 11 of the mtcrf
+ instruction is reserved and should be set to zero, but it
+ looks like the Darwin assembler doesn't do that... */
+ if ((ins & 0xFC000FFF) == 0x7c000120) /* mtcrf */
+ {
+ int i;
+ uint32_t mask = 0;
+ for (i = 0; i < 8; i++)
+ mask |= ((-(ins >> (12 + i) & 1)) & 0xF) << 4 * i;
+ cr = (cr & ~mask) | (gprs [ins >> 21 & 0x1F] & mask);
+ continue;
+ }
+ if (ins == 0x429f0005) /* bcl- 20,4*cr7+so,.+4, loads pc into LR */
+ {
+ lr = (reg_unit) pc;
+ continue;
+ }
+ if (ins == 0x4e800420) /* bctr */
+ {
+ pc = (uint32_t *) ctr;
+ continue;
+ }
+ if (ins == 0x44000002) /* sc */
+ return true;
+
+ return false;
+ }
+}
+
+/* We used to include <ucontext.h> and <mach/thread_status.h>,
+ but they change so much between different Darwin system versions
+ that it's much easier to just write the structures involved here
+ directly. */
+
+/* These defines are from the kernel's bsd/dev/ppc/unix_signal.c. */
+#define UC_TRAD 1
+#define UC_TRAD_VEC 6
+#define UC_TRAD64 20
+#define UC_TRAD64_VEC 25
+#define UC_FLAVOR 30
+#define UC_FLAVOR_VEC 35
+#define UC_FLAVOR64 40
+#define UC_FLAVOR64_VEC 45
+#define UC_DUAL 50
+#define UC_DUAL_VEC 55
+
+struct gcc_ucontext
+{
+ int onstack;
+ sigset_t sigmask;
+ void * stack_sp;
+ size_t stack_sz;
+ int stack_flags;
+ struct gcc_ucontext *link;
+ size_t mcsize;
+ struct gcc_mcontext32 *mcontext;
+};
+
+struct gcc_float_vector_state
+{
+ double fpregs[32];
+ uint32_t fpscr_pad;
+ uint32_t fpscr;
+ uint32_t save_vr[32][4];
+ uint32_t save_vscr[4];
+};
+
+struct gcc_mcontext32 {
+ uint32_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[5];
+ uint32_t srr0;
+ uint32_t srr1;
+ uint32_t gpr[32];
+ uint32_t cr;
+ uint32_t xer;
+ uint32_t lr;
+ uint32_t ctr;
+ uint32_t mq;
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
+/* These are based on /usr/include/ppc/ucontext.h and
+ /usr/include/mach/ppc/thread_status.h, but rewritten to be more
+ convenient, to compile on Jaguar, and to work around Radar 3712064
+ on Panther, which is that the 'es' field of 'struct mcontext64' has
+ the wrong type (doh!). */
+
+struct gcc_mcontext64 {
+ uint64_t dar;
+ uint32_t dsisr;
+ uint32_t exception;
+ uint32_t padding1[4];
+ uint64_t srr0;
+ uint64_t srr1;
+ uint32_t gpr[32][2];
+ uint32_t cr;
+ uint32_t xer[2]; /* These are arrays because the original structure has them misaligned. */
+ uint32_t lr[2];
+ uint32_t ctr[2];
+ uint32_t vrsave;
+ struct gcc_float_vector_state fvs;
+};
+
+#define UC_FLAVOR_SIZE \
+ (sizeof (struct gcc_mcontext32) - 33*16)
+
+#define UC_FLAVOR_VEC_SIZE (sizeof (struct gcc_mcontext32))
+
+#define UC_FLAVOR64_SIZE \
+ (sizeof (struct gcc_mcontext64) - 33*16)
+
+#define UC_FLAVOR64_VEC_SIZE (sizeof (struct gcc_mcontext64))
+
+/* Given GPRS as input to a 'sc' instruction, and OLD_CFA, update FS
+ to represent the execution of a signal return; or, if not a signal
+ return, return false. */
+
+static bool
+handle_syscall (_Unwind_FrameState *fs, const reg_unit gprs[32],
+ _Unwind_Ptr old_cfa)
+{
+ struct gcc_ucontext *uctx;
+ bool is_64, is_vector;
+ struct gcc_float_vector_state * float_vector_state;
+ _Unwind_Ptr new_cfa;
+ int i;
+ static _Unwind_Ptr return_addr;
+
+ /* Yay! We're in a Libc that we understand, and it's made a
+ system call. In Jaguar, this is a direct system call with value 103;
+ in Panther and Tiger it is a SYS_syscall call for system call number 184,
+ and in Leopard it is a direct syscall with number 184. */
+
+ if (gprs[0] == 0x67 /* SYS_SIGRETURN */)
+ {
+ uctx = (struct gcc_ucontext *) gprs[3];
+ is_vector = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR_VEC_SIZE);
+ is_64 = (uctx->mcsize == UC_FLAVOR64_VEC_SIZE
+ || uctx->mcsize == UC_FLAVOR64_SIZE);
+ }
+ else if (gprs[0] == 0 /* SYS_syscall */ && gprs[3] == 184)
+ {
+ int ctxstyle = gprs[5];
+ uctx = (struct gcc_ucontext *) gprs[4];
+ is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC
+ || ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC);
+ is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC
+ || ctxstyle == UC_FLAVOR64 || ctxstyle == UC_TRAD64);
+ }
+ else if (gprs[0] == 184 /* SYS_sigreturn */)
+ {
+ int ctxstyle = gprs[4];
+ uctx = (struct gcc_ucontext *) gprs[3];
+ is_vector = (ctxstyle == UC_FLAVOR_VEC || ctxstyle == UC_FLAVOR64_VEC
+ || ctxstyle == UC_TRAD_VEC || ctxstyle == UC_TRAD64_VEC);
+ is_64 = (ctxstyle == UC_FLAVOR64_VEC || ctxstyle == UC_TRAD64_VEC
+ || ctxstyle == UC_FLAVOR64 || ctxstyle == UC_TRAD64);
+ }
+ else
+ return false;
+
+#define set_offset(r, addr) \
+ (fs->regs.reg[r].how = REG_SAVED_OFFSET, \
+ fs->regs.reg[r].loc.offset = (_Unwind_Ptr)(addr) - new_cfa)
+
+ /* Restore even the registers that are not call-saved, since they
+ might be being used in the prologue to save other registers,
+ for instance GPR0 is sometimes used to save LR. */
+
+ /* Handle the GPRs, and produce the information needed to do the rest. */
+ if (is_64)
+ {
+ /* The context is 64-bit, but it doesn't carry any extra information
+ for us because only the low 32 bits of the registers are
+ call-saved. */
+ struct gcc_mcontext64 *m64 = (struct gcc_mcontext64 *)uctx->mcontext;
+ int i;
+
+ float_vector_state = &m64->fvs;
+
+ new_cfa = m64->gpr[1][1];
+
+ set_offset (R_CR2, &m64->cr);
+ for (i = 0; i < 32; i++)
+ set_offset (i, m64->gpr[i] + 1);
+ set_offset (R_XER, m64->xer + 1);
+ set_offset (R_LR, m64->lr + 1);
+ set_offset (R_CTR, m64->ctr + 1);
+ if (is_vector)
+ set_offset (R_VRSAVE, &m64->vrsave);
+
+ /* Sometimes, srr0 points to the instruction that caused the exception,
+ and sometimes to the next instruction to be executed; we want
+ the latter. */
+ if (m64->exception == 3 || m64->exception == 4
+ || m64->exception == 6
+ || (m64->exception == 7 && !(m64->srr1 & 0x10000)))
+ return_addr = m64->srr0 + 4;
+ else
+ return_addr = m64->srr0;
+ }
+ else
+ {
+ struct gcc_mcontext32 *m = uctx->mcontext;
+ int i;
+
+ float_vector_state = &m->fvs;
+
+ new_cfa = m->gpr[1];
+
+ set_offset (R_CR2, &m->cr);
+ for (i = 0; i < 32; i++)
+ set_offset (i, m->gpr + i);
+ set_offset (R_XER, &m->xer);
+ set_offset (R_LR, &m->lr);
+ set_offset (R_CTR, &m->ctr);
+
+ if (is_vector)
+ set_offset (R_VRSAVE, &m->vrsave);
+
+ /* Sometimes, srr0 points to the instruction that caused the exception,
+ and sometimes to the next instruction to be executed; we want
+ the latter. */
+ if (m->exception == 3 || m->exception == 4
+ || m->exception == 6
+ || (m->exception == 7 && !(m->srr1 & 0x10000)))
+ return_addr = m->srr0 + 4;
+ else
+ return_addr = m->srr0;
+ }
+
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = STACK_POINTER_REGNUM;
+ fs->regs.cfa_offset = new_cfa - old_cfa;
+
+ /* The choice of column for the return address is somewhat tricky.
+ Fortunately, the actual choice is private to this file, and
+ the space it's reserved from is the GCC register space, not the
+ DWARF2 numbering. So any free element of the right size is an OK
+ choice. Thus: */
+ fs->retaddr_column = ARG_POINTER_REGNUM;
+ /* FIXME: this should really be done using a DWARF2 location expression,
+ not using a static variable. In fact, this entire file should
+ be implemented in DWARF2 expressions. */
+ set_offset (ARG_POINTER_REGNUM, &return_addr);
+
+ for (i = 0; i < 32; i++)
+ set_offset (32 + i, float_vector_state->fpregs + i);
+ set_offset (R_SPEFSCR, &float_vector_state->fpscr);
+
+ if (is_vector)
+ {
+ for (i = 0; i < 32; i++)
+ set_offset (R_VR0 + i, float_vector_state->save_vr + i);
+ set_offset (R_VSCR, float_vector_state->save_vscr);
+ }
+
+ return true;
+}
+
+/* This is also prototyped in rs6000/darwin.h, inside the
+ MD_FALLBACK_FRAME_STATE_FOR macro. */
+extern bool _Unwind_fallback_frame_state_for (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs);
+
+/* Implement the MD_FALLBACK_FRAME_STATE_FOR macro,
+ returning true iff the frame was a sigreturn() frame that we
+ can understand. */
+
+bool
+_Unwind_fallback_frame_state_for (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ reg_unit gprs[32];
+
+ if (!interpret_libc (gprs, context))
+ return false;
+ return handle_syscall (fs, gprs, _Unwind_GetCFA (context));
+}
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-fpsave.asm b/gcc-4.4.3/gcc/config/rs6000/darwin-fpsave.asm
new file mode 100644
index 000000000..47fdc92f8
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-fpsave.asm
@@ -0,0 +1,92 @@
+/* This file contains the floating-point save and restore routines.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for Vector regs, 2 instructions are
+ required (8 bytes.)
+
+ MORAL: DO NOT MESS AROUND WITH THESE FUNCTIONS! */
+
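+/* Illustrative caller (hypothetical prologue fragment): each FP store
+   is 4 bytes, so saving only f29..f31 means skipping the 15 stores for
+   f14..f28 and entering 60 bytes past the label:
+
+        mflr r0
+        bl saveFP+60        ; stores f29..f31, then r0 (the saved LR)
+*/
+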
+#include "darwin-asm.h"
+
+.text
+ .align 2
+
+/* saveFP saves R0 -- assumed to be the caller's LR -- to 8/16(R1). */
+
+.private_extern saveFP
+saveFP:
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stg r0,SAVED_LR_OFFSET(r1)
+ blr
+
+/* restFP restores the caller's LR from 8/16(R1). Note that the code for
+ this starts at the offset of F30 restoration, so calling this
+ routine in an attempt to restore only F31 WILL NOT WORK (it would
+ be a stupid thing to do, anyway.) */
+
+.private_extern restFP
+restFP:
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+ /* <OFFSET OF F30 RESTORE> restore caller's LR */
+ lg r0,SAVED_LR_OFFSET(r1)
+ lfd f30,-16(r1)
+ /* and prepare for return to caller */
+ mtlr r0
+ lfd f31,-8(r1)
+ blr
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-ldouble-format b/gcc-4.4.3/gcc/config/rs6000/darwin-ldouble-format
new file mode 100644
index 000000000..0012a332d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-ldouble-format
@@ -0,0 +1,84 @@
+Long double format
+==================
+
+ Each long double is made up of two IEEE doubles. The value of the
+long double is the sum of the values of the two parts (except for
+-0.0). The most significant part is required to be the value of the
+long double rounded to the nearest double, as specified by IEEE. For
+Inf values, the least significant part is required to be one of +0.0
+or -0.0. No other requirements are made; so, for example, 1.0 may be
+represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a NaN
+is don't-care.
+
+Classification
+--------------
+
+A long double can represent any value of the form
+ s * 2^e * sum(k=0...105: f_k * 2^(-k))
+where 's' is +1 or -1, 'e' is between 1022 and -968 inclusive, f_0 is
+1, and f_k for k>0 is 0 or 1. These are the 'normal' long doubles.
+
+A long double can also represent any value of the form
+ s * 2^-968 * sum(k=0...105: f_k * 2^(-k))
+where 's' is +1 or -1, f_0 is 0, and f_k for k>0 is 0 or 1. These are
+the 'subnormal' long doubles.
+
+There are four long doubles that represent zero, two that represent
++0.0 and two that represent -0.0. The sign of the high part is the
+sign of the long double, and the sign of the low part is ignored.
+
+Likewise, there are four long doubles that represent infinities, two
+for +Inf and two for -Inf.
+
+Each NaN, quiet or signalling, that can be represented as a 'double'
+can be represented as a 'long double'. In fact, there are 2^64
+equivalent representations for each one.
+
+There are certain other valid long doubles where both parts are
+nonzero but the low part represents a value which has a bit set below
+2^(e-105). These, together with the subnormal long doubles, make up
+the denormal long doubles.
+
+Many possible long double bit patterns are not valid long doubles.
+These do not represent any value.
+
+Limits
+------
+
+The maximum representable long double is 2^1024-2^918. The smallest
+*normal* positive long double is 2^-968. The smallest denormalised
+positive long double is 2^-1074 (this is the same as for 'double').
+
+Conversions
+-----------
+
+A double can be converted to a long double by adding a zero low part.
+
+A long double can be converted to a double by removing the low part.
+
+Comparisons
+-----------
+
+Two long doubles can be compared by comparing the high parts, and if
+those compare equal, comparing the low parts.
+
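+A sketch of that comparison in C (illustrative only; it assumes finite,
+canonical values and the two-double layout described above):
+
+    typedef union { long double ld; double d[2]; } ldu;
+
+    static int ldbl_lt (long double x, long double y)
+    {
+      ldu a = { .ld = x }, b = { .ld = y };
+      if (a.d[0] != b.d[0])
+        return a.d[0] < b.d[0];   /* high parts differ: they decide */
+      return a.d[1] < b.d[1];     /* otherwise the low parts decide */
+    }
+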
+Arithmetic
+----------
+
+Unary negation operates by negating both the low and high parts.
+
+An absolute or absolute-negate operation must be done by comparing
+against zero and negating if necessary.
+
+Addition and subtraction are performed using library routines. They
+are not at present performed perfectly accurately; the result produced
+will be within 1 ulp of the range generated by adding or subtracting
+1 ulp from the input values, where a 'ulp' is 2^(e-106) given the
+exponent 'e'. In the presence of cancellation, this may be
+arbitrarily inaccurate. Subtraction is done by negation and addition.
+
+Multiplication is also performed using a library routine. Its result
+will be within 2ulp of the correct result.
+
+Division is also performed using a library routine. Its result will
+be within 3ulp of the correct result.
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-ldouble.c b/gcc-4.4.3/gcc/config/rs6000/darwin-ldouble.c
new file mode 100644
index 000000000..d76c1b184
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-ldouble.c
@@ -0,0 +1,438 @@
+/* 128-bit long double support routines for Darwin.
+ Copyright (C) 1993, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Implementations of floating-point long double basic arithmetic
+ functions called by the IBM C compiler when generating code for
+ PowerPC platforms. In particular, the following functions are
+ implemented: __gcc_qadd, __gcc_qsub, __gcc_qmul, and __gcc_qdiv.
+ Double-double algorithms are based on the paper "Doubled-Precision
+ IEEE Standard 754 Floating-Point Arithmetic" by W. Kahan, February 26,
+ 1987. An alternative published reference is "Software for
+ Doubled-Precision Floating-Point Computations", by Seppo Linnainmaa,
+ ACM TOMS vol 7 no 3, September 1981, pages 272-283. */
+
+/* Each long double is made up of two IEEE doubles. The value of the
+ long double is the sum of the values of the two parts. The most
+ significant part is required to be the value of the long double
+ rounded to the nearest double, as specified by IEEE. For Inf
+ values, the least significant part is required to be one of +0.0 or
+ -0.0. No other requirements are made; so, for example, 1.0 may be
+ represented as (1.0, +0.0) or (1.0, -0.0), and the low part of a
+ NaN is don't-care.
+
+ This code currently assumes big-endian. */
+
+#if (!defined (__LITTLE_ENDIAN__) \
+ && (defined (__MACH__) || defined (__powerpc__) || defined (_AIX)))
+
+#define fabs(x) __builtin_fabs(x)
+#define isless(x, y) __builtin_isless (x, y)
+#define inf() __builtin_inf()
+
+#define unlikely(x) __builtin_expect ((x), 0)
+
+#define nonfinite(a) unlikely (! isless (fabs (a), inf ()))
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+
+/* All these routines actually take two long doubles as parameters,
+ but GCC currently generates poor code when a union is used to turn
+ a long double into a pair of doubles. */
+
+long double __gcc_qadd (double, double, double, double);
+long double __gcc_qsub (double, double, double, double);
+long double __gcc_qmul (double, double, double, double);
+long double __gcc_qdiv (double, double, double, double);
+
+#if defined __ELF__ && defined SHARED \
+ && (defined __powerpc64__ || !(defined __linux__ || defined __gnu_hurd__))
+/* Provide definitions of the old symbol names to satisfy apps and
+ shared libs built against an older libgcc. To access the _xlq
+ symbols an explicit version reference is needed, so these won't
+ satisfy an unadorned reference like _xlqadd. If dot symbols are
+ not needed, the assembler will remove the aliases from the symbol
+ table. */
+__asm__ (".symver __gcc_qadd,_xlqadd@GCC_3.4\n\t"
+ ".symver __gcc_qsub,_xlqsub@GCC_3.4\n\t"
+ ".symver __gcc_qmul,_xlqmul@GCC_3.4\n\t"
+ ".symver __gcc_qdiv,_xlqdiv@GCC_3.4\n\t"
+ ".symver .__gcc_qadd,._xlqadd@GCC_3.4\n\t"
+ ".symver .__gcc_qsub,._xlqsub@GCC_3.4\n\t"
+ ".symver .__gcc_qmul,._xlqmul@GCC_3.4\n\t"
+ ".symver .__gcc_qdiv,._xlqdiv@GCC_3.4");
+#endif
+
+typedef union
+{
+ long double ldval;
+ double dval[2];
+} longDblUnion;
+
+/* Add two 'long double' values and return the result. */
+long double
+__gcc_qadd (double a, double aa, double c, double cc)
+{
+ longDblUnion x;
+ double z, q, zz, xh;
+
+ z = a + c;
+
+ if (nonfinite (z))
+ {
+ z = cc + aa + c + a;
+ if (nonfinite (z))
+ return z;
+ x.dval[0] = z; /* Will always be DBL_MAX. */
+ zz = aa + cc;
+ if (fabs(a) > fabs(c))
+ x.dval[1] = a - z + c + zz;
+ else
+ x.dval[1] = c - z + a + zz;
+ }
+ else
+ {
+ q = a - z;
+ zz = q + c + (a - (q + z)) + aa + cc;
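+      /* (Added note: the first three terms above recover the rounding
+	 error of the double addition a + c -- a standard two-sum step --
+	 then the low-order parts aa and cc are folded in before the
+	 (z, zz) pair is renormalized below.) */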
+
+ /* Keep -0 result. */
+ if (zz == 0.0)
+ return z;
+
+ xh = z + zz;
+ if (nonfinite (xh))
+ return xh;
+
+ x.dval[0] = xh;
+ x.dval[1] = z - xh + zz;
+ }
+ return x.ldval;
+}
+
+long double
+__gcc_qsub (double a, double b, double c, double d)
+{
+ return __gcc_qadd (a, b, -c, -d);
+}
+
+#ifdef __NO_FPRS__
+static double fmsub (double, double, double);
+#endif
+
+long double
+__gcc_qmul (double a, double b, double c, double d)
+{
+ longDblUnion z;
+ double t, tau, u, v, w;
+
+ t = a * c; /* Highest order double term. */
+
+ if (unlikely (t == 0) /* Preserve -0. */
+ || nonfinite (t))
+ return t;
+
+ /* Sum terms of two highest orders. */
+
+ /* Use fused multiply-add to get low part of a * c. */
+#ifndef __NO_FPRS__
+ asm ("fmsub %0,%1,%2,%3" : "=f"(tau) : "f"(a), "f"(c), "f"(t));
+#else
+ tau = fmsub (a, c, t);
+#endif
+ v = a*d;
+ w = b*c;
+ tau += v + w; /* Add in other second-order terms. */
+ u = t + tau;
+
+ /* Construct long double result. */
+ if (nonfinite (u))
+ return u;
+ z.dval[0] = u;
+ z.dval[1] = (t - u) + tau;
+ return z.ldval;
+}
+
+long double
+__gcc_qdiv (double a, double b, double c, double d)
+{
+ longDblUnion z;
+ double s, sigma, t, tau, u, v, w;
+
+ t = a / c; /* highest order double term */
+
+ if (unlikely (t == 0) /* Preserve -0. */
+ || nonfinite (t))
+ return t;
+
+ /* Finite nonzero result requires corrections to the highest order term. */
+
+ s = c * t; /* (s,sigma) = c*t exactly. */
+ w = -(-b + d * t); /* Written to get fnmsub for speed, but not
+ numerically necessary. */
+
+ /* Use fused multiply-add to get low part of c * t. */
+#ifndef __NO_FPRS__
+ asm ("fmsub %0,%1,%2,%3" : "=f"(sigma) : "f"(c), "f"(t), "f"(s));
+#else
+ sigma = fmsub (c, t, s);
+#endif
+ v = a - s;
+
+ tau = ((v-sigma)+w)/c; /* Correction to t. */
+ u = t + tau;
+
+ /* Construct long double result. */
+ if (nonfinite (u))
+ return u;
+ z.dval[0] = u;
+ z.dval[1] = (t - u) + tau;
+ return z.ldval;
+}
+
+#if defined (_SOFT_DOUBLE) && defined (__LONG_DOUBLE_128__)
+
+long double __gcc_qneg (double, double);
+int __gcc_qeq (double, double, double, double);
+int __gcc_qne (double, double, double, double);
+int __gcc_qge (double, double, double, double);
+int __gcc_qle (double, double, double, double);
+long double __gcc_stoq (float);
+long double __gcc_dtoq (double);
+float __gcc_qtos (double, double);
+double __gcc_qtod (double, double);
+int __gcc_qtoi (double, double);
+unsigned int __gcc_qtou (double, double);
+long double __gcc_itoq (int);
+long double __gcc_utoq (unsigned int);
+
+extern int __eqdf2 (double, double);
+extern int __ledf2 (double, double);
+extern int __gedf2 (double, double);
+
+/* Negate 'long double' value and return the result. */
+long double
+__gcc_qneg (double a, double aa)
+{
+ longDblUnion x;
+
+ x.dval[0] = -a;
+ x.dval[1] = -aa;
+ return x.ldval;
+}
+
+/* Compare two 'long double' values for equality. */
+int
+__gcc_qeq (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __eqdf2 (aa, cc);
+ return 1;
+}
+
+strong_alias (__gcc_qeq, __gcc_qne);
+
+/* Compare two 'long double' values for less than or equal. */
+int
+__gcc_qle (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __ledf2 (aa, cc);
+ return __ledf2 (a, c);
+}
+
+strong_alias (__gcc_qle, __gcc_qlt);
+
+/* Compare two 'long double' values for greater than or equal. */
+int
+__gcc_qge (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __gedf2 (aa, cc);
+ return __gedf2 (a, c);
+}
+
+strong_alias (__gcc_qge, __gcc_qgt);
+
+/* Convert single to long double. */
+long double
+__gcc_stoq (float a)
+{
+ longDblUnion x;
+
+ x.dval[0] = (double) a;
+ x.dval[1] = 0.0;
+
+ return x.ldval;
+}
+
+/* Convert double to long double. */
+long double
+__gcc_dtoq (double a)
+{
+ longDblUnion x;
+
+ x.dval[0] = a;
+ x.dval[1] = 0.0;
+
+ return x.ldval;
+}
+
+/* Convert long double to single. */
+float
+__gcc_qtos (double a, double aa __attribute__ ((__unused__)))
+{
+ return (float) a;
+}
+
+/* Convert long double to double. */
+double
+__gcc_qtod (double a, double aa __attribute__ ((__unused__)))
+{
+ return a;
+}
+
+/* Convert long double to int. */
+int
+__gcc_qtoi (double a, double aa)
+{
+ double z = a + aa;
+ return (int) z;
+}
+
+/* Convert long double to unsigned int. */
+unsigned int
+__gcc_qtou (double a, double aa)
+{
+ double z = a + aa;
+ return (unsigned int) z;
+}
+
+/* Convert int to long double. */
+long double
+__gcc_itoq (int a)
+{
+ return __gcc_dtoq ((double) a);
+}
+
+/* Convert unsigned int to long double. */
+long double
+__gcc_utoq (unsigned int a)
+{
+ return __gcc_dtoq ((double) a);
+}
+
+#endif
+
+#ifdef __NO_FPRS__
+
+int __gcc_qunord (double, double, double, double);
+
+extern int __eqdf2 (double, double);
+extern int __unorddf2 (double, double);
+
+/* Compare two 'long double' values for unordered. */
+int
+__gcc_qunord (double a, double aa, double c, double cc)
+{
+ if (__eqdf2 (a, c) == 0)
+ return __unorddf2 (aa, cc);
+ return __unorddf2 (a, c);
+}
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+#include "config/soft-fp/quad.h"
+
+/* Compute floating point multiply-subtract with higher (quad) precision. */
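+/* (Added note: the product of two 53-bit doubles needs at most
+   2 * 53 = 106 significand bits, and quad precision provides 113, so
+   A * B below is computed exactly and the final subtraction rounds
+   only once -- a true fused multiply-subtract.) */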
+static double
+fmsub (double a, double b, double c)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A);
+ FP_DECL_D(B);
+ FP_DECL_D(C);
+ FP_DECL_Q(X);
+ FP_DECL_Q(Y);
+ FP_DECL_Q(Z);
+ FP_DECL_Q(U);
+ FP_DECL_Q(V);
+ FP_DECL_D(R);
+ double r;
+ long double u, x, y, z;
+
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_RAW_D (A, a);
+ FP_UNPACK_RAW_D (B, b);
+ FP_UNPACK_RAW_D (C, c);
+
+ /* Extend double to quad. */
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ FP_EXTEND(Q,D,4,2,X,A);
+ FP_EXTEND(Q,D,4,2,Y,B);
+ FP_EXTEND(Q,D,4,2,Z,C);
+#else
+ FP_EXTEND(Q,D,2,1,X,A);
+ FP_EXTEND(Q,D,2,1,Y,B);
+ FP_EXTEND(Q,D,2,1,Z,C);
+#endif
+ FP_PACK_RAW_Q(x,X);
+ FP_PACK_RAW_Q(y,Y);
+ FP_PACK_RAW_Q(z,Z);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Multiply. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_Q(X,x);
+ FP_UNPACK_Q(Y,y);
+ FP_MUL_Q(U,X,Y);
+ FP_PACK_Q(u,U);
+ FP_HANDLE_EXCEPTIONS;
+
+ /* Subtract. */
+ FP_INIT_ROUNDMODE;
+ FP_UNPACK_SEMIRAW_Q(U,u);
+ FP_UNPACK_SEMIRAW_Q(Z,z);
+ FP_SUB_Q(V,U,Z);
+
+ /* Truncate quad to double. */
+#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
+ V_f[3] &= 0x0007ffff;
+ FP_TRUNC(D,Q,2,4,R,V);
+#else
+ V_f1 &= 0x0007ffffffffffffL;
+ FP_TRUNC(D,Q,1,2,R,V);
+#endif
+ FP_PACK_SEMIRAW_D(r,R);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r;
+}
+
+#endif
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.4.ver b/gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.4.ver
new file mode 100644
index 000000000..019218dd6
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.4.ver
@@ -0,0 +1,76 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdi3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldi3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.5.ver b/gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.5.ver
new file mode 100644
index 000000000..7e0dd52b0
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-libgcc.10.5.ver
@@ -0,0 +1,89 @@
+__Unwind_Backtrace
+__Unwind_DeleteException
+__Unwind_FindEnclosingFunction
+__Unwind_Find_FDE
+__Unwind_ForcedUnwind
+__Unwind_GetCFA
+__Unwind_GetDataRelBase
+__Unwind_GetGR
+__Unwind_GetIP
+__Unwind_GetIPInfo
+__Unwind_GetLanguageSpecificData
+__Unwind_GetRegionStart
+__Unwind_GetTextRelBase
+__Unwind_RaiseException
+__Unwind_Resume
+__Unwind_Resume_or_Rethrow
+__Unwind_SetGR
+__Unwind_SetIP
+___absvdi2
+___absvsi2
+___addvdi3
+___addvsi3
+___ashldi3
+___ashrdi3
+___clear_cache
+___clzdi2
+___clzsi2
+___cmpdi2
+___ctzdi2
+___ctzsi2
+___deregister_frame
+___deregister_frame_info
+___deregister_frame_info_bases
+___divdc3
+___divdi3
+___divsc3
+___divtc3
+___enable_execute_stack
+___ffsdi2
+___fixdfdi
+___fixsfdi
+___fixtfdi
+___fixunsdfdi
+___fixunsdfsi
+___fixunssfdi
+___fixunssfsi
+___fixunstfdi
+___floatdidf
+___floatdisf
+___floatditf
+___floatundidf
+___floatundisf
+___floatunditf
+___gcc_personality_v0
+___gcc_qadd
+___gcc_qdiv
+___gcc_qmul
+___gcc_qsub
+___lshrdi3
+___moddi3
+___muldc3
+___muldi3
+___mulsc3
+___multc3
+___mulvdi3
+___mulvsi3
+___negdi2
+___negvdi2
+___negvsi2
+___paritydi2
+___paritysi2
+___popcountdi2
+___popcountsi2
+___powidf2
+___powisf2
+___powitf2
+___register_frame
+___register_frame_info
+___register_frame_info_bases
+___register_frame_info_table
+___register_frame_info_table_bases
+___register_frame_table
+___subvdi3
+___subvsi3
+___trampoline_setup
+___ucmpdi2
+___udivdi3
+___udivmoddi4
+___umoddi3
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-tramp.asm b/gcc-4.4.3/gcc/config/rs6000/darwin-tramp.asm
new file mode 100644
index 000000000..5188c98ef
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-tramp.asm
@@ -0,0 +1,125 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000, 2004, 2005, 2009 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+#include "darwin-asm.h"
+
+/* Set up trampolines. */
+
+.text
+ .align LOG2_GPR_BYTES
+Ltrampoline_initial:
+ mflr r0
+ bl 1f
+Lfunc = .-Ltrampoline_initial
+ .g_long 0 /* will be replaced with function address */
+Lchain = .-Ltrampoline_initial
+ .g_long 0 /* will be replaced with static chain */
+1: mflr r11
+ lg r12,0(r11) /* function address */
+ mtlr r0
+ mtctr r12
+ lg r11,GPR_BYTES(r11) /* static chain */
+ bctr
+
+trampoline_size = .-Ltrampoline_initial
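+
+/* Layout note (added): "bl 1f" leaves the address of the Lfunc word
+ in LR, so the code at "1:" can locate the data PC-relatively; Lfunc
+ sits at offset 8 and Lchain at offset 8+GPR_BYTES from the start of
+ the trampoline. */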
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
+
+ .globl ___trampoline_setup
+___trampoline_setup:
+ mflr r0 /* save return address */
+ bcl 20,31,LCF0 /* load up __trampoline_initial into r7 */
+LCF0:
+ mflr r11
+ addis r7,r11,ha16(LTRAMP-LCF0)
+ lg r7,lo16(LTRAMP-LCF0)(r7)
+ subi r7,r7,4
+ li r8,trampoline_size /* verify trampoline big enough */
+ cmpg cr1,r8,r4
+ srwi r4,r4,2 /* # words to move (insns always 4-byte) */
+ addi r9,r3,-4 /* adjust pointer for lgu */
+ mtctr r4
+ blt cr1,Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz Lmove
+
+ /* Store correct function and static chain */
+ stg r5,Lfunc(r3)
+ stg r6,Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz Lcache
+
+ /* Ensure cache-flushing has finished. */
+ sync
+ isync
+
+ /* Make the stack (trampoline area) executable. */
+ b ___enable_execute_stack
+
+Labort:
+#ifdef __DYNAMIC__
+ bl L_abort$stub
+.data
+.section __TEXT,__picsymbolstub1,symbol_stubs,pure_instructions,32
+ .align 2
+L_abort$stub:
+ .indirect_symbol _abort
+ mflr r0
+ bcl 20,31,L0$_abort
+L0$_abort:
+ mflr r11
+ addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort)
+ mtlr r0
+ lgu r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
+ mtctr r12
+ bctr
+.data
+.lazy_symbol_pointer
+L_abort$lazy_ptr:
+ .indirect_symbol _abort
+ .g_long dyld_stub_binding_helper
+#else
+ bl _abort
+#endif
+.data
+ .align LOG2_GPR_BYTES
+LTRAMP:
+ .g_long Ltrampoline_initial
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-unwind.h b/gcc-4.4.3/gcc/config/rs6000/darwin-unwind.h
new file mode 100644
index 000000000..9fdc115be
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-unwind.h
@@ -0,0 +1,30 @@
+/* DWARF2 EH unwinding support for Darwin.
+ Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+extern bool _Unwind_fallback_frame_state_for
+ (struct _Unwind_Context *context, _Unwind_FrameState *fs);
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS) \
+ (_Unwind_fallback_frame_state_for (CONTEXT, FS) \
+ ? _URC_NO_REASON : _URC_END_OF_STACK)
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-vecsave.asm b/gcc-4.4.3/gcc/config/rs6000/darwin-vecsave.asm
new file mode 100644
index 000000000..0a46be20c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-vecsave.asm
@@ -0,0 +1,155 @@
+/* This file contains the vector save and restore routines.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Vector save/restore routines for Darwin. Note that each vector
+ save/restore requires 2 instructions (8 bytes).
+
+ THE SAVE AND RESTORE ROUTINES CAN HAVE ONLY ONE GLOBALLY VISIBLE
+ ENTRY POINT - callers have to jump to "saveFP+60" to save f29..f31,
+ for example. For FP reg saves/restores, it takes one instruction
+ (4 bytes) to do the operation; for vector regs, 2 instructions are
+ required (8 bytes). */
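+
+/* (Added example, derived from the 8-bytes-per-register spacing
+ below: to save only v29..v31 a caller would enter at saveVEC+72,
+ skipping the nine earlier li/stvx pairs.) */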
+
+ .machine ppc7400
+.text
+ .align 2
+
+.private_extern saveVEC
+saveVEC:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ blr
+
+.private_extern restVEC
+restVEC:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ blr
+
+/* saveVEC_vr11 -- as saveVEC but VRsave is returned in R11. */
+
+.private_extern saveVEC_vr11
+saveVEC_vr11:
+ li r11,-192
+ stvx v20,r11,r0
+ li r11,-176
+ stvx v21,r11,r0
+ li r11,-160
+ stvx v22,r11,r0
+ li r11,-144
+ stvx v23,r11,r0
+ li r11,-128
+ stvx v24,r11,r0
+ li r11,-112
+ stvx v25,r11,r0
+ li r11,-96
+ stvx v26,r11,r0
+ li r11,-80
+ stvx v27,r11,r0
+ li r11,-64
+ stvx v28,r11,r0
+ li r11,-48
+ stvx v29,r11,r0
+ li r11,-32
+ stvx v30,r11,r0
+ li r11,-16
+ stvx v31,r11,r0
+ mfspr r11,VRsave
+ blr
+
+/* As restVEC, but the original VRsave value is passed in R10. */
+
+.private_extern restVEC_vr10
+restVEC_vr10:
+ li r11,-192
+ lvx v20,r11,r0
+ li r11,-176
+ lvx v21,r11,r0
+ li r11,-160
+ lvx v22,r11,r0
+ li r11,-144
+ lvx v23,r11,r0
+ li r11,-128
+ lvx v24,r11,r0
+ li r11,-112
+ lvx v25,r11,r0
+ li r11,-96
+ lvx v26,r11,r0
+ li r11,-80
+ lvx v27,r11,r0
+ li r11,-64
+ lvx v28,r11,r0
+ li r11,-48
+ lvx v29,r11,r0
+ li r11,-32
+ lvx v30,r11,r0
+ li r11,-16
+ lvx v31,r11,r0
+ /* restore VRsave from R10. */
+ mtspr VRsave,r10
+ blr
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin-world.asm b/gcc-4.4.3/gcc/config/rs6000/darwin-world.asm
new file mode 100644
index 000000000..c0b1bf1a2
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin-world.asm
@@ -0,0 +1,259 @@
+/* This file contains the exception-handling save_world and
+ * restore_world routines, which need to do a run-time check to see if
+ * they should save and restore the vector registers.
+ *
+ * Copyright (C) 2004, 2009 Free Software Foundation, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .machine ppc7400
+.data
+ .align 2
+
+#ifdef __DYNAMIC__
+
+.non_lazy_symbol_pointer
+L_has_vec$non_lazy_ptr:
+ .indirect_symbol __cpu_has_altivec
+#ifdef __ppc64__
+ .quad 0
+#else
+ .long 0
+#endif
+
+#else
+
+/* For static, "pretend" we have a non-lazy-pointer. */
+
+L_has_vec$non_lazy_ptr:
+ .long __cpu_has_altivec
+
+#endif
+
+
+.text
+ .align 2
+
+/* save_world and rest_world save/restore F14-F31 and possibly V20-V31
+ (assuming you have a CPU with vector registers; we use a global var
+ provided by the System Framework to determine this).
+
+ SAVE_WORLD takes R0 (the caller's caller's return address) and R11
+ (the stack frame size) as parameters. It returns VRsave in R0 if
+ we're on a CPU with vector regs.
+
+ With gcc3, we now need to save and restore CR as well, since gcc3's
+ scheduled prologs can cause comparisons to be moved before calls to
+ save_world!
+
+ USES: R0 R11 R12 */
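+
+/* Save-area layout (added note, as implied by the code below):
+ relative to the entry R1, F14-F31 are stored at -144..-8, R13-R31
+ at -220..-148, and CR is stashed at 4(R1); with AltiVec present,
+ VRsave lands at -224 and V20-V31 at -416..-240. */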
+
+.private_extern save_world
+save_world:
+ stw r0,8(r1)
+ mflr r0
+ bcl 20,31,Ls$pb
+Ls$pb: mflr r12
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Ls$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Ls$pb)(r12)
+ mtlr r0
+ lwz r12,0(r12)
+ /* grab CR */
+ mfcr r0
+ /* test HAS_VEC */
+ cmpwi r12,0
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stmw r13,-220(r1)
+ /* stash CR */
+ stw r0,4(r1)
+ /* set R12 pointing at Vector Reg save area */
+ addi r12,r1,-224
+ /* allocate stack frame */
+ stwux r1,r1,r11
+ /* ...but return if HAS_VEC is zero */
+ bne+ L$saveVMX
+ /* Not forgetting to restore CR. */
+ mtcr r0
+ blr
+
+L$saveVMX:
+ /* We're saving Vector regs too. */
+ /* Restore CR from R0. No More Branches! */
+ mtcr r0
+
+ /* We should really use VRSAVE to figure out which vector regs
+ we actually need to save and restore. Some other time :-/ */
+
+ li r11,-192
+ stvx v20,r11,r12
+ li r11,-176
+ stvx v21,r11,r12
+ li r11,-160
+ stvx v22,r11,r12
+ li r11,-144
+ stvx v23,r11,r12
+ li r11,-128
+ stvx v24,r11,r12
+ li r11,-112
+ stvx v25,r11,r12
+ li r11,-96
+ stvx v26,r11,r12
+ li r11,-80
+ stvx v27,r11,r12
+ li r11,-64
+ stvx v28,r11,r12
+ li r11,-48
+ stvx v29,r11,r12
+ li r11,-32
+ stvx v30,r11,r12
+ mfspr r0,VRsave
+ li r11,-16
+ stvx v31,r11,r12
+ /* VRsave lives at -224(R1) */
+ stw r0,0(r12)
+ blr
+
+
+/* eh_rest_world_r10 is jumped to, not called, so no need to worry about LR.
+ R10 is the C++ EH stack adjust parameter; we return to the caller's caller.
+
+ USES: R0 R10 R11 R12 and R7 R8
+ RETURNS: C++ EH data registers (R3 - R6).
+
+ We now set up R7/R8 and jump to rest_world_eh_r7r8.
+
+ rest_world doesn't use the R10 stack adjust parameter, nor does it
+ pick up the R3-R6 exception handling stuff. */
+
+.private_extern rest_world
+rest_world:
+ /* Pickup previous SP */
+ lwz r11, 0(r1)
+ li r7, 0
+ lwz r8, 8(r11)
+ li r10, 0
+ b rest_world_eh_r7r8
+
+.private_extern eh_rest_world_r10
+eh_rest_world_r10:
+ /* Pickup previous SP */
+ lwz r11, 0(r1)
+ mr r7,r10
+ lwz r8, 8(r11)
+ /* pickup the C++ EH data regs (R3 - R6.) */
+ lwz r6,-420(r11)
+ lwz r5,-424(r11)
+ lwz r4,-428(r11)
+ lwz r3,-432(r11)
+
+ b rest_world_eh_r7r8
+
+/* rest_world_eh_r7r8 is jumped to -- not called! -- when we're doing
+ the exception-handling epilog. R7 contains the offset to add to
+ the SP, and R8 contains the 'real' return address.
+
+ USES: R0 R11 R12 [R7/R8]
+ RETURNS: C++ EH data registers (R3 - R6). */
+
+rest_world_eh_r7r8:
+ bcl 20,31,Lr7r8$pb
+Lr7r8$pb: mflr r12
+ lwz r11,0(r1)
+ /* R11 := previous SP */
+ addis r12,r12,ha16(L_has_vec$non_lazy_ptr-Lr7r8$pb)
+ lwz r12,lo16(L_has_vec$non_lazy_ptr-Lr7r8$pb)(r12)
+ lwz r0,4(r11)
+ /* R0 := old CR */
+ lwz r12,0(r12)
+ /* R12 := HAS_VEC */
+ mtcr r0
+ cmpwi r12,0
+ lmw r13,-220(r11)
+ beq L.rest_world_fp_eh
+ /* restore VRsave and V20..V31 */
+ lwz r0,-224(r11)
+ li r12,-416
+ mtspr VRsave,r0
+ lvx v20,r11,r12
+ li r12,-400
+ lvx v21,r11,r12
+ li r12,-384
+ lvx v22,r11,r12
+ li r12,-368
+ lvx v23,r11,r12
+ li r12,-352
+ lvx v24,r11,r12
+ li r12,-336
+ lvx v25,r11,r12
+ li r12,-320
+ lvx v26,r11,r12
+ li r12,-304
+ lvx v27,r11,r12
+ li r12,-288
+ lvx v28,r11,r12
+ li r12,-272
+ lvx v29,r11,r12
+ li r12,-256
+ lvx v30,r11,r12
+ li r12,-240
+ lvx v31,r11,r12
+
+L.rest_world_fp_eh:
+ lfd f14,-144(r11)
+ lfd f15,-136(r11)
+ lfd f16,-128(r11)
+ lfd f17,-120(r11)
+ lfd f18,-112(r11)
+ lfd f19,-104(r11)
+ lfd f20,-96(r11)
+ lfd f21,-88(r11)
+ lfd f22,-80(r11)
+ lfd f23,-72(r11)
+ lfd f24,-64(r11)
+ lfd f25,-56(r11)
+ lfd f26,-48(r11)
+ lfd f27,-40(r11)
+ lfd f28,-32(r11)
+ lfd f29,-24(r11)
+ lfd f30,-16(r11)
+ /* R8 is the exception-handler's address */
+ mtctr r8
+ lfd f31,-8(r11)
+ /* set SP to original value + R7 offset */
+ add r1,r11,r7
+ bctr
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin.h b/gcc-4.4.3/gcc/config/rs6000/darwin.h
new file mode 100644
index 000000000..077f74b08
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin.h
@@ -0,0 +1,439 @@
+/* Target definitions for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC)");
+
+/* The "Darwin ABI" is mostly like AIX, but with some key differences. */
+
+#define DEFAULT_ABI ABI_DARWIN
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __powerpc64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
+/* The object file format is Mach-O. */
+
+#define TARGET_OBJECT_FORMAT OBJECT_MACHO
+
+/* Size of the Obj-C jump buffer. */
+#define OBJC_JBLEN ((TARGET_64BIT) ? (26*2 + 18*2 + 129 + 1) : (26 + 18*2 + 129 + 1))
+
+/* We're not ever going to do TOCs. */
+
+#define TARGET_TOC 0
+#define TARGET_NO_TOC 1
+
+/* Override the default rs6000 definition. */
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int")
+
+/* Translate config/rs6000/darwin.opt to config/darwin.h. */
+#define TARGET_DYNAMIC_NO_PIC (TARGET_MACHO_DYNAMIC_NO_PIC)
+
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ if (!TARGET_64BIT) builtin_define ("__ppc__"); \
+ if (TARGET_64BIT) builtin_define ("__ppc64__"); \
+ builtin_define ("__POWERPC__"); \
+ builtin_define ("__NATURAL_ALIGNMENT__"); \
+ darwin_cpp_builtins (pfile); \
+ } \
+ while (0)
+
+#define SUBTARGET_OVERRIDE_OPTIONS darwin_rs6000_override_options ()
+
+#define C_COMMON_OVERRIDE_OPTIONS do { \
+ /* On powerpc, __cxa_get_exception_ptr is available starting in the \
+ 10.4.6 libstdc++.dylib. */ \
+ if (strverscmp (darwin_macosx_version_min, "10.4.6") < 0 \
+ && flag_use_cxa_get_exception_ptr == 2) \
+ flag_use_cxa_get_exception_ptr = 0; \
+ if (flag_mkernel) \
+ flag_no_builtin = 1; \
+ SUBTARGET_C_COMMON_OVERRIDE_OPTIONS; \
+} while (0)
+
+/* Darwin has 128-bit long double support in libc in 10.4 and later.
+ Default to 128-bit long doubles even on earlier platforms for ABI
+ consistency; arithmetic will work even if libc and libm support is
+ not available. */
+
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
+
+
+/* We want -fPIC by default, unless we're using -static to compile for
+ the kernel or some such. */
+
+#define CC1_SPEC "\
+ %(cc1_cpu) \
+ %{g: %{!fno-eliminate-unused-debug-symbols: -feliminate-unused-debug-symbols }} \
+ %{static: %{Zdynamic: %e conflicting code gen style switches are used}}\
+ %{!mmacosx-version-min=*:-mmacosx-version-min=%(darwin_minversion)} \
+ %{!mkernel:%{!static:%{!mdynamic-no-pic:-fPIC}}}"
+
+#define DARWIN_ARCH_SPEC "%{m64:ppc64;:ppc}"
+
+#define DARWIN_SUBARCH_SPEC " \
+ %{m64: ppc64} \
+ %{!m64: \
+ %{mcpu=601:ppc601; \
+ mcpu=603:ppc603; \
+ mcpu=603e:ppc603; \
+ mcpu=604:ppc604; \
+ mcpu=604e:ppc604e; \
+ mcpu=740:ppc750; \
+ mcpu=750:ppc750; \
+ mcpu=G3:ppc750; \
+ mcpu=7400:ppc7400; \
+ mcpu=G4:ppc7400; \
+ mcpu=7450:ppc7450; \
+ mcpu=970:ppc970; \
+ mcpu=power4:ppc970; \
+ mcpu=G5:ppc970; \
+ :ppc}}"
+
+/* crt2.o is at least partially required for 10.3.x and earlier. */
+#define DARWIN_CRT2_SPEC \
+ "%{!m64:%:version-compare(!> 10.4 mmacosx-version-min= crt2.o%s)}"
+
+/* Determine a minimum version based on compiler options. */
+#define DARWIN_MINVERSION_SPEC \
+ "%{m64:%{fgnu-runtime:10.4; \
+ ,objective-c|,objc-cpp-output:10.5; \
+ ,objective-c-header:10.5; \
+ ,objective-c++|,objective-c++-cpp-output:10.5; \
+ ,objective-c++-header|,objc++-cpp-output:10.5; \
+ :10.4}; \
+ shared-libgcc:10.3; \
+ :10.1}"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ DARWIN_EXTRA_SPECS \
+ { "darwin_arch", DARWIN_ARCH_SPEC }, \
+ { "darwin_crt2", DARWIN_CRT2_SPEC }, \
+ { "darwin_subarch", DARWIN_SUBARCH_SPEC },
+
+/* Output a .machine directive. */
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rs6000_darwin_file_start
+
+/* The "-faltivec" option should have been called "-maltivec" all
+ along. -ffix-and-continue and -findirect-data are for compatibility
+ with old compilers. */
+
+#define SUBTARGET_OPTION_TRANSLATE_TABLE \
+ { "-ffix-and-continue", "-mfix-and-continue" }, \
+ { "-findirect-data", "-mfix-and-continue" }, \
+ { "-faltivec", "-maltivec -include altivec.h" }, \
+ { "-fno-altivec", "-mno-altivec" }, \
+ { "-Waltivec-long-deprecated", "-mwarn-altivec-long" }, \
+ { "-Wno-altivec-long-deprecated", "-mno-warn-altivec-long" }
+
+/* Make both r2 and r13 available for allocation. */
+#define FIXED_R2 0
+#define FIXED_R13 0
+
+/* Base register for access to local variables of the function. */
+
+#undef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM 30
+
+#undef RS6000_PIC_OFFSET_TABLE_REGNUM
+#define RS6000_PIC_OFFSET_TABLE_REGNUM 31
+
+/* Pad the outgoing args area to 16 bytes instead of the usual 8. */
+
+#undef STARTING_FRAME_OFFSET
+#define STARTING_FRAME_OFFSET \
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : (RS6000_ALIGN (crtl->outgoing_args_size, 16) \
+ + RS6000_SAVE_AREA))
+
+#undef STACK_DYNAMIC_OFFSET
+#define STACK_DYNAMIC_OFFSET(FUNDECL) \
+ (RS6000_ALIGN (crtl->outgoing_args_size, 16) \
+ + (STACK_POINTER_OFFSET))
+
+/* These are used by -fbranch-probabilities */
+#define HOT_TEXT_SECTION_NAME "__TEXT,__text,regular,pure_instructions"
+#define UNLIKELY_EXECUTED_TEXT_SECTION_NAME \
+ "__TEXT,__unlikely,regular,pure_instructions"
+
+/* Define cutoff for using external functions to save floating point.
+ Currently on Darwin, always use inline stores. */
+
+#undef FP_SAVE_INLINE
+#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64)
+#undef GP_SAVE_INLINE
+#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32)
+
+/* Darwin uses a function call if everything needs to be saved/restored. */
+#undef WORLD_SAVE_P
+#define WORLD_SAVE_P(INFO) ((INFO)->world_save_p)
+
+/* The assembler wants the alternate register names, but without
+ leading percent sign. */
+#undef REGISTER_NAMES
+#define REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
+ "mq", "lr", "ctr", "ap", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "xer", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
+ "vrsave", "vscr", \
+ "spe_acc", "spefscr", \
+ "sfp" \
+}
+
+/* This outputs NAME to FILE. */
+
+#undef RS6000_OUTPUT_BASENAME
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ assemble_name (FILE, NAME)
+
+/* Globalizing directive for a label. */
+#undef GLOBAL_ASM_OP
+#define GLOBAL_ASM_OP "\t.globl "
+#undef TARGET_ASM_GLOBALIZE_LABEL
+
+/* This is how to output an internal label prefix. rs6000.c uses this
+ when generating traceback tables. */
+/* Not really used for Darwin? */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL_PREFIX
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ fprintf (FILE, "%s", PREFIX)
+
+/* This says how to output an assembler line to define a global common
+ symbol. */
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ do { \
+ unsigned HOST_WIDE_INT _new_size = SIZE; \
+ fputs (".comm ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if (_new_size == 0) _new_size = 1; \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", _new_size); \
+ } while (0)
+
+/* Override the standard rs6000 definition. */
+
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START ";"
+
+/* FP save and restore routines. */
+#define SAVE_FP_PREFIX "._savef"
+#define SAVE_FP_SUFFIX ""
+#define RESTORE_FP_PREFIX "._restf"
+#define RESTORE_FP_SUFFIX ""
+
+/* This is how to output an assembler line that says to advance
+ the location counter to a multiple of 2**LOG bytes using the
+ "nop" instruction as padding. */
+
+#define ASM_OUTPUT_ALIGN_WITH_NOP(FILE,LOG) \
+ do \
+ { \
+ if ((LOG) < 3) \
+ { \
+ ASM_OUTPUT_ALIGN (FILE,LOG); \
+ } \
+ else /* nop == ori r0,r0,0 */ \
+ fprintf (FILE, "\t.align32 %d,0x60000000\n", (LOG)); \
+ } while (0)
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* This is supported in cctools 465 and later. The macro test
+ above prevents using it in earlier build environments. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ }
+#endif
+
+/* Generate insns to call the profiler. */
+
+#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
+
+/* Function name to call to do profiling. */
+
+#define RS6000_MCOUNT "*mcount"
+
+/* Default processor: G4, and G5 for 64-bit. */
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC7400
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER4
+
+/* Default target flag settings. Despite the fact that STMW/LMW
+ serializes, it's still a big code size win to use them. Use FSEL by
+ default as well. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_MULTIPLE | MASK_NEW_MNEMONICS \
+ | MASK_PPC_GFXOPT)
+
+/* Darwin only runs on PowerPC, so short-circuit POWER patterns. */
+#undef TARGET_POWER
+#define TARGET_POWER 0
+#undef TARGET_IEEEQUAD
+#define TARGET_IEEEQUAD 0
+
+/* Since Darwin doesn't do TOCs, stub this out. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) ((void)X, (void)MODE, 0)
+
+/* Unlike most other PowerPC targets, chars are signed, for
+ consistency with other Darwin architectures. */
+
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR (1)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the RS/6000, we have to return NO_REGS when we want to reload a
+ floating-point CONST_DOUBLE to force it to be copied to memory.
+
+ Don't allow R0 when loading the address of, or otherwise furtling with,
+ a SYMBOL_REF. */
+
+#undef PREFERRED_RELOAD_CLASS
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CONSTANT_P (X) \
+ && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \
+ ? NO_REGS \
+ : ((GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == HIGH) \
+ && reg_class_subset_p (BASE_REGS, (CLASS))) \
+ ? BASE_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (CLASS))
+
+/* Compute field alignment. This is similar to the version of the
+ macro in the Apple version of GCC, except that version supports
+ 'mac68k' alignment, and that version uses the computed alignment
+ always for the first field of a structure. The first-field
+ behavior is dealt with by
+ darwin_rs6000_special_round_type_align. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ (TARGET_ALIGN_NATURAL ? (COMPUTED) \
+ : (COMPUTED) == 128 ? 128 \
+ : MIN ((COMPUTED), 32))
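+/* (Added example: under power alignment a double field is thus
+   limited to 32-bit alignment -- MIN (64, 32) -- while a 128-bit
+   AltiVec vector field keeps its full 128-bit alignment.) */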
+
+/* Darwin increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TARGET_ALIGN_NATURAL == 0 \
+ ? darwin_rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
+ : (TREE_CODE (STRUCT) == VECTOR_TYPE \
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (STRUCT))) \
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 128) \
+ : MAX ((COMPUTED), (SPECIFIED)))
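+/* (Added example: with power alignment, struct { double d; int i; }
+   is rounded up to doubleword alignment because its first field is
+   an FP double, even though double fields within such aggregates
+   remain word aligned.) */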
+
+/* Specify padding for the last element of a block move between
+ registers and memory. FIRST is nonzero if this is the only
+ element. */
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE))
+
+/* XXX: Darwin supports neither .quad nor .llong, but it also doesn't
+ support 64-bit PowerPC, so this just keeps things happy. */
+#define DOUBLE_INT_ASM_OP "\t.quad\t"
+
+/* For binary compatibility with 2.95; Darwin C APIs use bool from
+ stdbool.h, which was an int-sized enum in 2.95. Users can explicitly
+ choose to have sizeof(bool)==1 with the -mone-byte-bool switch. */
+#define BOOL_TYPE_SIZE (darwin_one_byte_bool ? CHAR_TYPE_SIZE : INT_TYPE_SIZE)
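+/* (Added note: sizeof (bool) is therefore 4 by default, matching the
+   2.95 enum-based bool, and 1 under -mone-byte-bool.) */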
+
+#undef REGISTER_TARGET_PRAGMAS
+#define REGISTER_TARGET_PRAGMAS() \
+ do \
+ { \
+ DARWIN_REGISTER_TARGET_PRAGMAS(); \
+ targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \
+ } \
+ while (0)
+
+#ifdef IN_LIBGCC2
+#include <stdbool.h>
+#endif
+
+#if !defined(__LP64__) && !defined(DARWIN_LIBSYSTEM_HAS_UNWIND)
+#define MD_UNWIND_SUPPORT "config/rs6000/darwin-unwind.h"
+#endif
+
+/* True iff we're generating fast-turnaround debugging code. When
+ true, we arrange for function prologues to start with 5 nops so
+ that gdb may insert code to redirect them, and for data to be
+ accessed indirectly. The runtime uses this indirection to forward
+ references for data to the original instance of that data. */
+
+#define TARGET_FIX_AND_CONTINUE (darwin_fix_and_continue)
+
+/* This is the reserved direct dispatch address for Objective-C. */
+#define OFFS_MSGSEND_FAST 0xFFFEFF00
+
+/* This is the reserved ivar address for Objective-C. */
+#define OFFS_ASSIGNIVAR_FAST 0xFFFEFEC0
+
+/* Old versions of Mac OS/Darwin don't have C99 functions available. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS \
+ (TARGET_64BIT \
+ || strverscmp (darwin_macosx_version_min, "10.3") >= 0)
+
+/* When generating kernel code or kexts, we don't use Altivec by
+ default, as kernel code doesn't save/restore those registers. */
+#define OS_MISSING_ALTIVEC (flag_mkernel || flag_apple_kext)
+
+/* Darwin has to rename some of the long double builtins. */
+#define SUBTARGET_INIT_BUILTINS darwin_patch_builtins ()
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin.md b/gcc-4.4.3/gcc/config/rs6000/darwin.md
new file mode 100644
index 000000000..356b879b2
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin.md
@@ -0,0 +1,438 @@
+/* Machine description patterns for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 2004, 2005, 2007 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GCC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+(define_insn "adddi3_high"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (high:DI (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{cau|addis} %0,%1,ha16(%2)"
+ [(set_attr "length" "4")])
+
+(define_insn "movdf_low_si"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && !TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ {
+ if (TARGET_POWERPC64 && TARGET_32BIT)
+ /* Note, old assemblers didn't support relocation here. */
+ return \"ld %0,lo16(%2)(%1)\";
+ else
+ {
+ output_asm_insn (\"{cal|la} %0,lo16(%2)(%1)\", operands);
+ output_asm_insn (\"{l|lwz} %L0,4(%0)\", operands);
+ return (\"{l|lwz} %0,0(%0)\");
+ }
+ }
+ default:
+ gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,12")])
+
+
+(define_insn "movdf_low_di"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ return \"ld %0,lo16(%2)(%1)\";
+ default:
+ gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,4")])
+
+(define_insn "movdf_low_st_si"
+ [(set (mem:DF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movdf_low_st_di"
+ [(set (mem:DF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_si"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_di"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_st_si"
+ [(set (mem:SF (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && ! TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movsf_low_st_di"
+ [(set (mem:SF (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; 64-bit MachO load/store support
+(define_insn "movdi_low"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{l|ld} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "movsi_low_st"
+ [(set (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:SI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+(define_insn "movdi_low_st"
+ [(set (mem:DI (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{st|std} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; Mach-O PIC trickery.
+(define_expand "macho_high"
+ [(set (match_operand 0 "" "")
+ (high (match_operand 1 "" "")))]
+ "TARGET_MACHO"
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_macho_high_di (operands[0], operands[1]));
+ else
+ emit_insn (gen_macho_high_si (operands[0], operands[1]));
+
+ DONE;
+})
+
+(define_insn "macho_high_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+
+(define_insn "macho_high_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=b*r")
+ (high:DI (match_operand 1 "" "")))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+(define_expand "macho_low"
+ [(set (match_operand 0 "" "")
+ (lo_sum (match_operand 1 "" "")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO"
+{
+ if (TARGET_64BIT)
+ emit_insn (gen_macho_low_di (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_macho_low_si (operands[0], operands[1], operands[2]));
+
+ DONE;
+})
+
+(define_insn "macho_low_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
+
+(define_insn "macho_low_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:DI (match_operand:DI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
+
+(define_split
+ [(set (mem:V4SI (plus:DI (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "short_cint_operand" "")))
+ (match_operand:V4SI 2 "register_operand" ""))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" ""))]
+ "TARGET_MACHO && TARGET_64BIT"
+ [(set (match_dup 3) (plus:DI (match_dup 0) (match_dup 1)))
+ (set (mem:V4SI (match_dup 3))
+ (match_dup 2))]
+ "")
+
+(define_expand "load_macho_picbase"
+ [(set (reg:SI 65)
+ (unspec [(match_operand 0 "" "")]
+ UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_load_macho_picbase_si (operands[0]));
+ else
+ emit_insn (gen_load_macho_picbase_di (operands[0]));
+
+ DONE;
+})
+
+(define_insn "load_macho_picbase_si"
+ [(set (reg:SI 65)
+ (unspec:SI [(match_operand:SI 0 "immediate_operand" "s")
+ (pc)] UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+ "bcl 20,31,%0\\n%0:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_macho_picbase_di"
+ [(set (reg:DI 65)
+ (unspec:DI [(match_operand:DI 0 "immediate_operand" "s")
+ (pc)] UNSPEC_LD_MPIC))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic && TARGET_64BIT"
+ "bcl 20,31,%0\\n%0:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_expand "macho_correct_pic"
+ [(set (match_operand 0 "" "")
+ (plus (match_operand 1 "" "")
+ (unspec [(match_operand 2 "" "")
+ (match_operand 3 "" "")]
+ UNSPEC_MPIC_CORRECT)))]
+ "DEFAULT_ABI == ABI_DARWIN"
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_macho_correct_pic_si (operands[0], operands[1], operands[2],
+ operands[3]));
+ else
+ emit_insn (gen_macho_correct_pic_di (operands[0], operands[1], operands[2],
+ operands[3]));
+
+ DONE;
+})
+
+(define_insn "macho_correct_pic_si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "s")
+ (match_operand:SI 3 "immediate_operand" "s")]
+ UNSPEC_MPIC_CORRECT)))]
+ "DEFAULT_ABI == ABI_DARWIN"
+ "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
+ [(set_attr "length" "8")])
+
+(define_insn "macho_correct_pic_di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (unspec:DI [(match_operand:DI 2 "immediate_operand" "s")
+ (match_operand:DI 3 "immediate_operand" "s")]
+ 16)))]
+ "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT"
+ "addis %0,%1,ha16(%2-%3)\n\taddi %0,%0,lo16(%2-%3)"
+ [(set_attr "length" "8")])
+
+(define_insn "*call_indirect_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (reg:SI 65))]
+ "DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT"
+{
+ return "b%T0l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (reg:SI 65))]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+{
+#if TARGET_MACHO
+ return output_call(insn, operands, 0, 2);
+#else
+ gcc_unreachable ();
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_indirect_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (reg:SI 65))]
+ "DEFAULT_ABI == ABI_DARWIN"
+{
+ return "b%T1l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn "*call_value_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (reg:SI 65))]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+{
+#if TARGET_MACHO
+ return output_call(insn, operands, 1, 3);
+#else
+ gcc_unreachable ();
+#endif
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_nonlocal_darwin64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "immediate_operand" "O,n"))
+ (use (reg:SI 65))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+{
+ return "b %z0";
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_value_nonlocal_darwin64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (reg:SI 65))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ return \"b %z1\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*sibcall_symbolic_64"
+ [(call (mem:SI (match_operand:DI 0 "call_operand" "s,c")) ; 64
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (reg:SI 65))
+ (return)]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z0\";
+ case 1: return \"b%T0\";
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_symbolic_64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "call_operand" "s,c"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "" ""))
+ (use (reg:SI 65))
+ (return)]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"b %z1\";
+ case 1: return \"b%T1\";
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin.opt b/gcc-4.4.3/gcc/config/rs6000/darwin.opt
new file mode 100644
index 000000000..ff9be36ee
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin.opt
@@ -0,0 +1,32 @@
+; Darwin options for PPC port.
+;
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+m64
+Target RejectNegative Negative(m32) Mask(64BIT)
+Generate 64-bit code
+
+m32
+Target RejectNegative Negative(m64) InverseMask(64BIT)
+Generate 32-bit code
+
+mdynamic-no-pic
+Target Report Mask(MACHO_DYNAMIC_NO_PIC)
+Generate code suitable for executables (NOT shared libs)
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin64.h b/gcc-4.4.3/gcc/config/rs6000/darwin64.h
new file mode 100644
index 000000000..4efff8804
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin64.h
@@ -0,0 +1,35 @@
+/* Target definitions for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC64)");
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_POWERPC64 | MASK_64BIT \
+ | MASK_MULTIPLE | MASK_NEW_MNEMONICS | MASK_PPC_GFXOPT)
+
+#undef DARWIN_ARCH_SPEC
+#define DARWIN_ARCH_SPEC "ppc64"
+
+#undef DARWIN_SUBARCH_SPEC
+#define DARWIN_SUBARCH_SPEC DARWIN_ARCH_SPEC
+
+#undef DARWIN_CRT2_SPEC
+#define DARWIN_CRT2_SPEC ""
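+
+/* Editor's note: with MASK_64BIT in TARGET_DEFAULT above, plain "gcc"
+   on this target behaves like "gcc -m64"; -m32 clears the bit again
+   via InverseMask(64BIT) in darwin.opt.  */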
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin7.h b/gcc-4.4.3/gcc/config/rs6000/darwin7.h
new file mode 100644
index 000000000..fdf371666
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin7.h
@@ -0,0 +1,30 @@
+/* Target definitions for Darwin 7.x (Mac OS X) systems.
+ Copyright (C) 2004, 2005, 2007
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Machine dependent libraries. Include libmx when compiling for
+ Darwin 7.0 and above, but before libSystem, since the functions are
+ actually in libSystem but for 7.x compatibility we want them to be
+ looked for in libmx first. Include libmx by default because otherwise
+ libstdc++ isn't usable. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:\
+ %:version-compare(!< 10.3 mmacosx-version-min= -lmx)\
+ -lSystem}"
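+
+/* Editor's illustration, assuming gcc.c's %:version-compare semantics
+   ("!<" is true when the switch value is not less than the argument,
+   or when the switch is absent); hypothetical link lines:
+     gcc hello.o                            ->  ... -lmx -lSystem
+     gcc hello.o -mmacosx-version-min=10.2  ->  ... -lSystem
+     gcc hello.o -mmacosx-version-min=10.3  ->  ... -lmx -lSystem  */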
diff --git a/gcc-4.4.3/gcc/config/rs6000/darwin8.h b/gcc-4.4.3/gcc/config/rs6000/darwin8.h
new file mode 100644
index 000000000..7cdd81db7
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/darwin8.h
@@ -0,0 +1,32 @@
+/* Target definitions for Darwin 8.0 and above (Mac OS X) systems.
+ Copyright (C) 2004, 2005, 2007
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Machine dependent libraries. Include libmx when compiling on
+ Darwin 7.0 and above, but before libSystem, since the functions are
+ actually in libSystem but for 7.x compatibility we want them to be
+ looked for in libmx first---but only do this if 7.x compatibility
+ is a concern, which it's not in 64-bit mode. Include
+ libSystemStubs when compiling on (not necessarily for) 8.0 and
+ above and not 64-bit long double. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!static:\
+ %{!mlong-double-64:%{pg:-lSystemStubs_profile;:-lSystemStubs}} \
+ %{!m64:%:version-compare(>< 10.3 10.4 mmacosx-version-min= -lmx)} -lSystem}"
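+
+/* Editor's illustration, assuming "><" is true when the switch value is
+   in [arg1, arg2) and false when the switch is absent; hypothetical
+   link lines:
+     gcc hello.o -mmacosx-version-min=10.3       -> ... -lSystemStubs -lmx -lSystem
+     gcc hello.o -mmacosx-version-min=10.4       -> ... -lSystemStubs -lSystem
+     gcc hello.o -m64 -mmacosx-version-min=10.3  -> ... -lSystemStubs -lSystem  */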
diff --git a/gcc-4.4.3/gcc/config/rs6000/default64.h b/gcc-4.4.3/gcc/config/rs6000/default64.h
new file mode 100644
index 000000000..0ff49aab9
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/default64.h
@@ -0,0 +1,24 @@
+/* Definitions of target machine for GNU compiler,
+ for 64 bit powerpc linux defaulting to -m64.
+ Copyright (C) 2003, 2005, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_POWERPC | MASK_PPC_GFXOPT | \
+ MASK_POWERPC64 | MASK_64BIT | MASK_NEW_MNEMONICS)
diff --git a/gcc-4.4.3/gcc/config/rs6000/dfp.md b/gcc-4.4.3/gcc/config/rs6000/dfp.md
new file mode 100644
index 000000000..377023b04
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/dfp.md
@@ -0,0 +1,687 @@
+;; Decimal Floating Point (DFP) patterns.
+;; Copyright (C) 2007, 2008
+;; Free Software Foundation, Inc.
+;; Contributed by Ben Elliston (bje@au.ibm.com) and Peter Bergner
+;; (bergner@vnet.ibm.com).
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;
+;; UNSPEC usage
+;;
+
+(define_constants
+ [(UNSPEC_MOVSD_LOAD 400)
+ (UNSPEC_MOVSD_STORE 401)
+ ])
+
+
+(define_expand "movsd"
+ [(set (match_operand:SD 0 "nonimmediate_operand" "")
+ (match_operand:SD 1 "any_operand" ""))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{ rs6000_emit_move (operands[0], operands[1], SDmode); DONE; }")
+
+(define_split
+ [(set (match_operand:SD 0 "gpc_reg_operand" "")
+ (match_operand:SD 1 "const_double_operand" ""))]
+ "reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
+
+ if (! TARGET_POWERPC64)
+ operands[2] = operand_subword (operands[0], 0, 0, SDmode);
+ else
+ operands[2] = gen_lowpart (SImode, operands[0]);
+
+ operands[3] = gen_int_mode (l, SImode);
+}")
+
+(define_insn "movsd_hardfloat"
+ [(set (match_operand:SD 0 "nonimmediate_operand" "=r,r,m,f,*c*l,*q,!r,*h,!r,!r")
+ (match_operand:SD 1 "input_operand" "r,m,r,f,r,r,h,0,G,Fn"))]
+ "(gpc_reg_operand (operands[0], SDmode)
+ || gpc_reg_operand (operands[1], SDmode))
+ && (TARGET_HARD_FLOAT && TARGET_FPRS)"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ fmr %0,%1
+ mt%0 %1
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #"
+ [(set_attr "type" "*,load,store,fp,mtjmpr,*,mfjmpr,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,8")])
+
+(define_insn "movsd_softfloat"
+ [(set (match_operand:SD 0 "nonimmediate_operand" "=r,cl,q,r,r,m,r,r,r,r,r,*h")
+ (match_operand:SD 1 "input_operand" "r,r,r,h,m,r,I,L,R,G,Fn,0"))]
+ "(gpc_reg_operand (operands[0], SDmode)
+ || gpc_reg_operand (operands[1], SDmode))
+ && (TARGET_SOFT_FLOAT || !TARGET_FPRS)"
+ "@
+ mr %0,%1
+ mt%0 %1
+ mt%0 %1
+ mf%1 %0
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ {cal|la} %0,%a1
+ #
+ #
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,mtjmpr,*,mfjmpr,load,store,*,*,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,8,4")])
+
+(define_insn "movsd_store"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "=m")
+ (unspec:DD [(match_operand:SD 1 "input_operand" "f")]
+ UNSPEC_MOVSD_STORE))]
+ "(gpc_reg_operand (operands[0], DDmode)
+ || gpc_reg_operand (operands[1], SDmode))
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "stfd%U0%X0 %1,%0"
+ [(set_attr "type" "fpstore")
+ (set_attr "length" "4")])
+
+(define_insn "movsd_load"
+ [(set (match_operand:SD 0 "nonimmediate_operand" "=f")
+ (unspec:SD [(match_operand:DD 1 "input_operand" "m")]
+ UNSPEC_MOVSD_LOAD))]
+ "(gpc_reg_operand (operands[0], SDmode)
+ || gpc_reg_operand (operands[1], DDmode))
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "lfd%U1%X1 %0,%1"
+ [(set_attr "type" "fpload")
+ (set_attr "length" "4")])
+
+;; Hardware support for decimal floating point operations.
+
+(define_insn "extendsddd2"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (float_extend:DD (match_operand:SD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dctdp %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "extendsdtd2"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (float_extend:TD (match_operand:SD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+{
+ rtx tmp = gen_reg_rtx (DDmode);
+ emit_insn (gen_extendsddd2 (tmp, operands[1]));
+ emit_insn (gen_extendddtd2 (operands[0], tmp));
+ DONE;
+})
+
+(define_insn "truncddsd2"
+ [(set (match_operand:SD 0 "gpc_reg_operand" "=f")
+ (float_truncate:SD (match_operand:DD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "drsp %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "negdd2"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "")
+ (neg:DD (match_operand:DD 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "")
+
+(define_insn "*negdd2_fpr"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (neg:DD (match_operand:DD 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "absdd2"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "")
+ (abs:DD (match_operand:DD 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "")
+
+(define_insn "*absdd2_fpr"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (abs:DD (match_operand:DD 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "*nabsdd2_fpr"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+	(neg:DD (abs:DD (match_operand:DD 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "movdd"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "")
+ (match_operand:DD 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], DDmode); DONE; }")
+
+(define_split
+ [(set (match_operand:DD 0 "gpc_reg_operand" "")
+ (match_operand:DD 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DDmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DDmode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
+#endif
+}")
+
+(define_split
+ [(set (match_operand:DD 0 "gpc_reg_operand" "")
+ (match_operand:DD 1 "const_double_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DDmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DDmode);
+ operands[4] = gen_int_mode (l[endian], SImode);
+ operands[5] = gen_int_mode (l[1 - endian], SImode);
+}")
+
+(define_split
+ [(set (match_operand:DD 0 "gpc_reg_operand" "")
+ (match_operand:DD 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+#if HOST_BITS_PER_WIDE_INT >= 64
+ HOST_WIDE_INT val;
+#endif
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
+
+ operands[2] = gen_lowpart (DImode, operands[0]);
+ /* HIGHPART is lower memory address when WORDS_BIG_ENDIAN. */
+#if HOST_BITS_PER_WIDE_INT >= 64
+ val = ((HOST_WIDE_INT)(unsigned long)l[endian] << 32
+ | ((HOST_WIDE_INT)(unsigned long)l[1 - endian]));
+
+ operands[3] = gen_int_mode (val, DImode);
+#else
+ operands[3] = immed_double_const (l[1 - endian], l[endian], DImode);
+#endif
+}")
+
+;; Don't have reload use general registers to load a constant. First,
+;; it might not work if the output operand is the equivalent of
+;; a non-offsettable memref, but also it is less efficient than loading
+;; the constant into an FP register, since it will probably be used there.
+;; The "??" is a kludge until we can figure out a more reasonable way
+;; of handling these non-offsettable values.
+(define_insn "*movdd_hardfloat32"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "=!r,??r,m,f,f,m,!r,!r,!r")
+ (match_operand:DD 1 "input_operand" "r,m,r,f,m,f,G,H,F"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], DDmode)
+ || gpc_reg_operand (operands[1], DDmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+    /* We normally copy the low-numbered register first.  However, if
+       the first register of operand 0 is the same as the second register
+       of operand 1, we must copy in the opposite order.  */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ if (rs6000_offsettable_memref_p (operands[1])
+ || (GET_CODE (operands[1]) == MEM
+ && (GET_CODE (XEXP (operands[1], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)))
+ {
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[1], 0));
+ if (refers_to_regno_p (REGNO (operands[0]),
+ REGNO (operands[0]) + 1,
+ operands[1], 0))
+ {
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"{lx|lwzx} %0,%1\";
+ }
+ else
+ {
+ output_asm_insn (\"{lx|lwzx} %0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ }
+ case 2:
+ if (rs6000_offsettable_memref_p (operands[0])
+ || (GET_CODE (operands[0]) == MEM
+ && (GET_CODE (XEXP (operands[0], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)))
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[0], 0));
+ output_asm_insn (\"{stx|stwx} %1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{stx|stwx} %L1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ case 3:
+ return \"fmr %0,%1\";
+ case 4:
+ return \"lfd%U1%X1 %0,%1\";
+ case 5:
+ return \"stfd%U0%X0 %1,%0\";
+ case 6:
+ case 7:
+ case 8:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "two,load,store,fp,fpload,fpstore,*,*,*")
+ (set_attr "length" "8,16,16,4,4,4,8,12,16")])
+
+(define_insn "*movdd_softfloat32"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "=r,r,m,r,r,r")
+ (match_operand:DD 1 "input_operand" "r,m,r,G,H,F"))]
+ "! TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ && (gpc_reg_operand (operands[0], DDmode)
+ || gpc_reg_operand (operands[1], DDmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+    /* We normally copy the low-numbered register first.  However, if
+       the first register of operand 0 is the same as the second register
+       of operand 1, we must copy in the opposite order.  */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ case 2:
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ case 3:
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "two,load,store,*,*,*")
+ (set_attr "length" "8,8,8,8,12,16")])
+
+; ld/std require word-aligned displacements -> 'Y' constraint.
+; List Y->r and r->Y before r->r for reload.
+(define_insn "*movdd_hardfloat64_mfpgpr"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r,r,f")
+ (match_operand:DD 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F,f,r"))]
+ "TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], DDmode)
+ || gpc_reg_operand (operands[1], DDmode))"
+ "@
+ std%U0%X0 %1,%0
+ ld%U1%X1 %0,%1
+ mr %0,%1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #
+ #
+ mftgpr %0,%1
+ mffgpr %0,%1"
+ [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*,mftgpr,mffgpr")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16,4,4")])
+
+; ld/std require word-aligned displacements -> 'Y' constraint.
+; List Y->r and r->Y before r->r for reload.
+(define_insn "*movdd_hardfloat64"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r")
+ (match_operand:DD 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F"))]
+ "TARGET_POWERPC64 && !TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], DDmode)
+ || gpc_reg_operand (operands[1], DDmode))"
+ "@
+ std%U0%X0 %1,%0
+ ld%U1%X1 %0,%1
+ mr %0,%1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #
+ #"
+ [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16")])
+
+(define_insn "*movdd_softfloat64"
+ [(set (match_operand:DD 0 "nonimmediate_operand" "=r,Y,r,cl,r,r,r,r,*h")
+ (match_operand:DD 1 "input_operand" "Y,r,r,r,h,G,H,F,0"))]
+ "TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ && (gpc_reg_operand (operands[0], DDmode)
+ || gpc_reg_operand (operands[1], DDmode))"
+ "@
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ mr %0,%1
+ mt%0 %1
+ mf%1 %0
+ #
+ #
+ #
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "load,store,*,mtjmpr,mfjmpr,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,8,12,16,4")])
+
+(define_expand "negtd2"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "")
+ (neg:TD (match_operand:TD 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "")
+
+(define_insn "*negtd2_fpr"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (neg:TD (match_operand:TD 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "abstd2"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "")
+ (abs:TD (match_operand:TD 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "")
+
+(define_insn "*abstd2_fpr"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (abs:TD (match_operand:TD 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "*nabstd2_fpr"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+	(neg:TD (abs:TD (match_operand:TD 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "movtd"
+ [(set (match_operand:TD 0 "general_operand" "")
+ (match_operand:TD 1 "any_operand" ""))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{ rs6000_emit_move (operands[0], operands[1], TDmode); DONE; }")
+
+; It's important to list the o->f and f->o moves before f->f because
+; otherwise reload, given m->f, will try to pick f->f and reload it,
+; which doesn't make progress. Likewise r->Y must be before r->r.
+(define_insn_and_split "*movtd_internal"
+ [(set (match_operand:TD 0 "nonimmediate_operand" "=o,f,f,r,Y,r")
+ (match_operand:TD 1 "input_operand" "f,o,f,YGHF,r,r"))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], TDmode)
+ || gpc_reg_operand (operands[1], TDmode))"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
+ [(set_attr "length" "8,8,8,20,20,16")])
+
+;; Hardware support for decimal floating point operations.
+
+(define_insn "extendddtd2"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (float_extend:TD (match_operand:DD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dctqpq %0,%1"
+ [(set_attr "type" "fp")])
+
+;; The result of drdpq is an even/odd register pair with the converted
+;; value in the even register and zero in the odd register.
+;; FIXME: Avoid the register move by using a reload constraint to ensure
+;; that the result is the first of the pair receiving the result of drdpq.
+
+(define_insn "trunctddd2"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (float_truncate:DD (match_operand:TD 1 "gpc_reg_operand" "f")))
+ (clobber (match_scratch:TD 2 "=f"))]
+ "TARGET_DFP"
+ "drdpq %2,%1\;fmr %0,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "adddd3"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (plus:DD (match_operand:DD 1 "gpc_reg_operand" "%f")
+ (match_operand:DD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dadd %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "addtd3"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (plus:TD (match_operand:TD 1 "gpc_reg_operand" "%f")
+ (match_operand:TD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "daddq %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "subdd3"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (minus:DD (match_operand:DD 1 "gpc_reg_operand" "f")
+ (match_operand:DD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dsub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "subtd3"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (minus:TD (match_operand:TD 1 "gpc_reg_operand" "f")
+ (match_operand:TD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dsubq %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "muldd3"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (mult:DD (match_operand:DD 1 "gpc_reg_operand" "%f")
+ (match_operand:DD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dmul %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "multd3"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (mult:TD (match_operand:TD 1 "gpc_reg_operand" "%f")
+ (match_operand:TD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dmulq %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "divdd3"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (div:DD (match_operand:DD 1 "gpc_reg_operand" "f")
+ (match_operand:DD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "ddiv %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "divtd3"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (div:TD (match_operand:TD 1 "gpc_reg_operand" "f")
+ (match_operand:TD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "ddivq %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*cmpdd_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:DD 1 "gpc_reg_operand" "f")
+ (match_operand:DD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "*cmptd_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TD 1 "gpc_reg_operand" "f")
+ (match_operand:TD 2 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dcmpuq %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "floatditd2"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (float:TD (match_operand:DI 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dcffixq %0,%1"
+ [(set_attr "type" "fp")])
+
+;; Convert a decimal64 to a decimal64 whose value is an integer.
+;; This is the first stage of converting it to an integer type.
+
+(define_insn "ftruncdd2"
+ [(set (match_operand:DD 0 "gpc_reg_operand" "=f")
+ (fix:DD (match_operand:DD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "drintn. 0,%0,%1,1"
+ [(set_attr "type" "fp")])
+
+;; Convert a decimal64 whose value is an integer to an actual integer.
+;; This is the second stage of converting decimal float to integer type.
+
+(define_insn "fixdddi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
+ (fix:DI (match_operand:DD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dctfix %0,%1"
+ [(set_attr "type" "fp")])
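+
+;; Editor's worked example (hypothetical; assumes a DFP-enabled compiler
+;; targeting a -mcpu=power6 machine).  C's truncating conversion maps
+;; onto the two stages above:
+;;   _Decimal64 d = 41.7DD;
+;;   long long  i = (long long) d;  /* drintn. truncates to 41DD, dctfix converts */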
+
+;; Convert a decimal128 to a decimal128 whose value is an integer.
+;; This is the first stage of converting it to an integer type.
+
+(define_insn "ftrunctd2"
+ [(set (match_operand:TD 0 "gpc_reg_operand" "=f")
+ (fix:TD (match_operand:TD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "drintnq. 0,%0,%1,1"
+ [(set_attr "type" "fp")])
+
+;; Convert a decimal128 whose value is an integer to an actual integer.
+;; This is the second stage of converting decimal float to integer type.
+
+(define_insn "fixtddi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
+ (fix:DI (match_operand:TD 1 "gpc_reg_operand" "f")))]
+ "TARGET_DFP"
+ "dctfixq %0,%1"
+ [(set_attr "type" "fp")])
diff --git a/gcc-4.4.3/gcc/config/rs6000/driver-rs6000.c b/gcc-4.4.3/gcc/config/rs6000/driver-rs6000.c
new file mode 100644
index 000000000..3f5524ea0
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/driver-rs6000.c
@@ -0,0 +1,426 @@
+/* Subroutines for the gcc driver.
+ Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include <stdlib.h>
+
+#ifdef _AIX
+# include <sys/systemcfg.h>
+#endif
+
+#ifdef __linux__
+# include <link.h>
+#endif
+
+#if defined (__APPLE__) || defined (__FreeBSD__)
+# include <sys/types.h>
+# include <sys/sysctl.h>
+#endif
+
+const char *host_detect_local_cpu (int argc, const char **argv);
+
+#if GCC_VERSION >= 0
+
+/* Returns --param options describing an L1_ASSOC-way associative L1
+   cache of L1_SIZEKB kilobytes with lines of L1_LINE bytes, and an L2
+   cache of L2_SIZEKB kilobytes.  */
+
+static char *
+describe_cache (unsigned l1_sizekb, unsigned l1_line,
+ unsigned l1_assoc ATTRIBUTE_UNUSED, unsigned l2_sizekb)
+{
+ char l1size[1000], line[1000], l2size[1000];
+
+  /* At the moment, the GCC middle-end does not use the cache
+     associativity information.  */
+
+ sprintf (l1size, "--param l1-cache-size=%u", l1_sizekb);
+ sprintf (line, "--param l1-cache-line-size=%u", l1_line);
+ sprintf (l2size, "--param l2-cache-size=%u", l2_sizekb);
+
+ return concat (l1size, " ", line, " ", l2size, " ", NULL);
+}
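+
+/* Editor's example: a hypothetical call describe_cache (64, 128, 0, 2048)
+   returns "--param l1-cache-size=64 --param l1-cache-line-size=128
+   --param l2-cache-size=2048 " (note the trailing space).  */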
+
+#ifdef __APPLE__
+
+/* Returns the description of caches on Darwin. */
+
+static char *
+detect_caches_darwin (void)
+{
+ unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb;
+ size_t len = 4;
+ static int l1_size_name[2] = { CTL_HW, HW_L1DCACHESIZE };
+ static int l1_line_name[2] = { CTL_HW, HW_CACHELINE };
+ static int l2_size_name[2] = { CTL_HW, HW_L2CACHESIZE };
+
+ sysctl (l1_size_name, 2, &l1_sizekb, &len, NULL, 0);
+ sysctl (l1_line_name, 2, &l1_line, &len, NULL, 0);
+ sysctl (l2_size_name, 2, &l2_sizekb, &len, NULL, 0);
+ l1_assoc = 0;
+
+ return describe_cache (l1_sizekb / 1024, l1_line, l1_assoc,
+ l2_sizekb / 1024);
+}
+
+static const char *
+detect_processor_darwin (void)
+{
+ unsigned int proc;
+ size_t len = 4;
+
+ sysctlbyname ("hw.cpusubtype", &proc, &len, NULL, 0);
+
+ if (len > 0)
+ switch (proc)
+ {
+ case 1:
+ return "601";
+ case 2:
+ return "602";
+ case 3:
+ return "603";
+ case 4:
+ case 5:
+ return "603e";
+ case 6:
+ return "604";
+ case 7:
+ return "604e";
+ case 8:
+ return "620";
+ case 9:
+ return "750";
+ case 10:
+ return "7400";
+ case 11:
+ return "7450";
+ case 100:
+ return "970";
+ default:
+ return "powerpc";
+ }
+
+ return "powerpc";
+}
+
+#endif /* __APPLE__ */
+
+#ifdef __FreeBSD__
+
+/* Returns the description of caches on FreeBSD PPC. */
+
+static char *
+detect_caches_freebsd (void)
+{
+ unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb;
+ size_t len = 4;
+
+  /* As of FreeBSD 7.0, only the cache line size is available via
+     sysctl.  */
+ sysctlbyname ("machdep.cacheline_size", &l1_line, &len, NULL, 0);
+
+ l1_sizekb = 32;
+ l1_assoc = 0;
+ l2_sizekb = 512;
+
+ return describe_cache (l1_sizekb, l1_line, l1_assoc, l2_sizekb);
+}
+
+/* Currently returns default powerpc. */
+static const char *
+detect_processor_freebsd (void)
+{
+ return "powerpc";
+}
+
+#endif /* __FreeBSD__ */
+
+#ifdef __linux__
+
+/* Returns the AT_PLATFORM string from the ELF auxiliary vector if
+   present, otherwise NULL (callers fall back to generic "powerpc").  */
+
+static const char *
+elf_platform (void)
+{
+ int fd;
+
+ fd = open ("/proc/self/auxv", O_RDONLY);
+
+ if (fd != -1)
+ {
+ char buf[1024];
+ ElfW(auxv_t) *av;
+ ssize_t n;
+
+ n = read (fd, buf, sizeof (buf));
+ close (fd);
+
+ if (n > 0)
+ {
+ for (av = (ElfW(auxv_t) *) buf; av->a_type != AT_NULL; ++av)
+ switch (av->a_type)
+ {
+ case AT_PLATFORM:
+ return (const char *) av->a_un.a_val;
+
+ default:
+ break;
+ }
+ }
+ }
+ return NULL;
+}
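+
+/* Editor's aside: on glibc 2.16 and later the same lookup can be done
+   without parsing /proc/self/auxv (not used here, presumably to stay
+   portable to older libcs):
+
+     #include <sys/auxv.h>
+     const char *platform = (const char *) getauxval (AT_PLATFORM);
+*/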
+
+/* Returns AT_DCACHEBSIZE from the ELF auxiliary vector if present,
+   otherwise a generic 32.  */
+
+static int
+elf_dcachebsize (void)
+{
+ int fd;
+
+ fd = open ("/proc/self/auxv", O_RDONLY);
+
+ if (fd != -1)
+ {
+ char buf[1024];
+ ElfW(auxv_t) *av;
+ ssize_t n;
+
+ n = read (fd, buf, sizeof (buf));
+ close (fd);
+
+ if (n > 0)
+ {
+ for (av = (ElfW(auxv_t) *) buf; av->a_type != AT_NULL; ++av)
+ switch (av->a_type)
+ {
+ case AT_DCACHEBSIZE:
+ return av->a_un.a_val;
+
+ default:
+ break;
+ }
+ }
+ }
+ return 32;
+}
+
+/* Returns the description of caches on Linux. */
+
+static char *
+detect_caches_linux (void)
+{
+ unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb;
+ const char *platform;
+
+ platform = elf_platform ();
+
+ if (platform != NULL)
+ {
+ l1_line = 128;
+
+ if (platform[5] == '6')
+ /* POWER6 and POWER6x */
+ l1_sizekb = 64;
+ else
+ l1_sizekb = 32;
+ }
+ else
+ {
+ l1_line = elf_dcachebsize ();
+ l1_sizekb = 32;
+ }
+
+ l1_assoc = 0;
+ l2_sizekb = 512;
+
+ return describe_cache (l1_sizekb, l1_line, l1_assoc, l2_sizekb);
+}
+
+static const char *
+detect_processor_linux (void)
+{
+ const char *platform;
+
+ platform = elf_platform ();
+   libSystemStubs when compiling on (not necessarily for) 8.0 and
+   above, unless 64-bit long double is in use.  */
+ return platform;
+ else
+ return "powerpc";
+}
+
+#endif /* __linux__ */
+
+#ifdef _AIX
+/* Returns the description of caches on AIX. */
+
+static char *
+detect_caches_aix (void)
+{
+ unsigned l1_sizekb, l1_line, l1_assoc, l2_sizekb;
+
+ l1_sizekb = _system_configuration.dcache_size / 1024;
+ l1_line = _system_configuration.dcache_line;
+ l1_assoc = _system_configuration.dcache_asc;
+ l2_sizekb = _system_configuration.L2_cache_size / 1024;
+
+ return describe_cache (l1_sizekb, l1_line, l1_assoc, l2_sizekb);
+}
+
+
+/* Returns the processor implementation on AIX. */
+
+static const char *
+detect_processor_aix (void)
+{
+ switch (_system_configuration.implementation)
+ {
+ case 0x0001:
+ return "rios1";
+
+ case 0x0002:
+ return "rsc";
+
+ case 0x0004:
+ return "rios2";
+
+ case 0x0008:
+ return "601";
+
+ case 0x0020:
+ return "603";
+
+ case 0x0010:
+ return "604";
+
+ case 0x0040:
+ return "620";
+
+ case 0x0080:
+ return "630";
+
+ case 0x0100:
+ case 0x0200:
+ case 0x0400:
+ return "rs64";
+
+ case 0x0800:
+ return "power4";
+
+ case 0x2000:
+ if (_system_configuration.version == 0x0F0000)
+ return "power5";
+ else
+ return "power5+";
+
+ case 0x4000:
+ return "power6";
+
+ default:
+ return "powerpc";
+ }
+}
+#endif /* _AIX */
+
+
+/* This will be called by the spec parser in gcc.c when it sees
+   a %:local_cpu_detect(args) construct.  Currently it will be called
+   with either "cpu" or "tune" as argument, depending on whether
+   -mcpu=native or -mtune=native is to be substituted.
+
+   It returns a string containing new command line parameters to be
+   put in place of the above two options, depending on the CPU of
+   the machine it is executed on.
+
+   ARGC and ARGV are set depending on the actual arguments given
+   in the spec.  */
+const char *
+host_detect_local_cpu (int argc, const char **argv)
+{
+ const char *cpu = NULL;
+ const char *cache = "";
+ const char *options = "";
+ bool arch;
+
+ if (argc < 1)
+ return NULL;
+
+ arch = strcmp (argv[0], "cpu") == 0;
+ if (!arch && strcmp (argv[0], "tune"))
+ return NULL;
+
+#if defined (_AIX)
+ cache = detect_caches_aix ();
+#elif defined (__APPLE__)
+ cache = detect_caches_darwin ();
+#elif defined (__FreeBSD__)
+ cache = detect_caches_freebsd ();
+  /* Cache detection on FreeBSD PPC is not reliable yet; discard it.  */
+ cache = "";
+#elif defined (__linux__)
+ cache = detect_caches_linux ();
+  /* Cache detection on PPC Linux is not hooked up yet; discard it.  */
+ cache = "";
+#else
+ cache = "";
+#endif
+
+#if defined (_AIX)
+ cpu = detect_processor_aix ();
+#elif defined (__APPLE__)
+ cpu = detect_processor_darwin ();
+#elif defined (__FreeBSD__)
+ cpu = detect_processor_freebsd ();
+#elif defined (__linux__)
+ cpu = detect_processor_linux ();
+#else
+ cpu = "powerpc";
+#endif
+
+ return concat (cache, "-m", argv[0], "=", cpu, " ", options, NULL);
+}
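+
+/* Editor's example (values invented): on an AIX POWER6 machine,
+   -mcpu=native would be rewritten to something like
+     --param l1-cache-size=64 --param l1-cache-line-size=128
+     --param l2-cache-size=4096 -mcpu=power6  */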
+
+#else /* GCC_VERSION */
+
+/* If we aren't compiling with GCC we just provide a minimal
+ default value. */
+const char *host_detect_local_cpu (int argc, const char **argv)
+{
+  const char *cpu = "powerpc";	/* also used when argv[0] is "tune" */
+ bool arch;
+
+ if (argc < 1)
+ return NULL;
+
+ arch = strcmp (argv[0], "cpu") == 0;
+ if (!arch && strcmp (argv[0], "tune"))
+ return NULL;
+
+ return concat ("-m", argv[0], "=", cpu, NULL);
+}
+
+#endif /* GCC_VERSION */
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/e300c2c3.md b/gcc-4.4.3/gcc/config/rs6000/e300c2c3.md
new file mode 100644
index 000000000..31bf14ce3
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e300c2c3.md
@@ -0,0 +1,189 @@
+;; Pipeline description for Motorola PowerPC e300c3 core.
+;; Copyright (C) 2008 Free Software Foundation, Inc.
+;; Contributed by Edmar Wienskoski (edmar@freescale.com)
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "ppce300c3_most,ppce300c3_long,ppce300c3_retire")
+(define_cpu_unit "ppce300c3_decode_0,ppce300c3_decode_1" "ppce300c3_most")
+
+;; We do not simulate the general issue queue (GIC).  If we have an SU
+;; insn followed by an SU1 insn, they cannot be issued on the same cycle
+;; (although an SU1 insn followed by an SU insn can be) because the SU
+;; insn would have to reach SU1 from the GIC0 entry.  Fortunately, the
+;; first-cycle multipass insn scheduling will detect this situation and
+;; issue the SU1 insn before the SU insn.
+(define_cpu_unit "ppce300c3_issue_0,ppce300c3_issue_1" "ppce300c3_most")
+
+;; We could describe the completion buffer slots in combination with
+;; the retirement units and the order of completion, but the resulting
+;; automaton would behave the same way, because we cannot describe the
+;; real latency time while taking in-order completion into account.
+;; Actually, we could define the real latency time by querying reserved
+;; automaton units, but the current scheduler uses latency times before
+;; issuing insns and making any reservations.
+;;
+;; So our description aims to achieve an insn schedule in which the
+;; insns do not wait in the completion buffer.
+(define_cpu_unit "ppce300c3_retire_0,ppce300c3_retire_1" "ppce300c3_retire")
+
+;; Branch unit:
+(define_cpu_unit "ppce300c3_bu" "ppce300c3_most")
+
+;; IU:
+(define_cpu_unit "ppce300c3_iu0_stage0,ppce300c3_iu1_stage0" "ppce300c3_most")
+
+;; IU: This unit is used to describe non-pipelined division.
+(define_cpu_unit "ppce300c3_mu_div" "ppce300c3_long")
+
+;; SRU:
+(define_cpu_unit "ppce300c3_sru_stage0" "ppce300c3_most")
+
+;; Here we simplify the LSU unit description by not describing its stages.
+(define_cpu_unit "ppce300c3_lsu" "ppce300c3_most")
+
+;; FPU:
+(define_cpu_unit "ppce300c3_fpu" "ppce300c3_most")
+
+;; The following units are used to make automata deterministic
+(define_cpu_unit "present_ppce300c3_decode_0" "ppce300c3_most")
+(define_cpu_unit "present_ppce300c3_issue_0" "ppce300c3_most")
+(define_cpu_unit "present_ppce300c3_retire_0" "ppce300c3_retire")
+(define_cpu_unit "present_ppce300c3_iu0_stage0" "ppce300c3_most")
+
+;; The following presence sets make the automata deterministic when the
+;; ndfa option is used.
+(presence_set "present_ppce300c3_decode_0" "ppce300c3_decode_0")
+(presence_set "present_ppce300c3_issue_0" "ppce300c3_issue_0")
+(presence_set "present_ppce300c3_retire_0" "ppce300c3_retire_0")
+(presence_set "present_ppce300c3_iu0_stage0" "ppce300c3_iu0_stage0")
+
+;; Some useful abbreviations.
+(define_reservation "ppce300c3_decode"
+ "ppce300c3_decode_0|ppce300c3_decode_1+present_ppce300c3_decode_0")
+(define_reservation "ppce300c3_issue"
+ "ppce300c3_issue_0|ppce300c3_issue_1+present_ppce300c3_issue_0")
+(define_reservation "ppce300c3_retire"
+ "ppce300c3_retire_0|ppce300c3_retire_1+present_ppce300c3_retire_0")
+(define_reservation "ppce300c3_iu_stage0"
+ "ppce300c3_iu0_stage0|ppce300c3_iu1_stage0+present_ppce300c3_iu0_stage0")
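+
+;; Editor's reading aid for the reservation strings below: "," advances
+;; one cycle, "+" reserves units in the same cycle, "|" picks an
+;; alternative, and "unit*n" holds a unit for n consecutive cycles.  So
+;;   "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_retire"
+;; decodes in one cycle, then issues, executes and retires in the next.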
+
+;; Compares can be executed in either one of the IUs or in the SRU.
+(define_insn_reservation "ppce300c3_cmp" 1
+ (and (eq_attr "type" "cmp,compare,delayed_compare,fast_compare")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+(ppce300c3_iu_stage0|ppce300c3_sru_stage0) \
+ +ppce300c3_retire")
+
+;; Other one cycle IU insns
+(define_insn_reservation "ppce300c3_iu" 1
+ (and (eq_attr "type" "integer,insert_word")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_retire")
+
+;; Branch. Actually this latency time is not used by the scheduler.
+(define_insn_reservation "ppce300c3_branch" 1
+ (and (eq_attr "type" "jmpreg,branch")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_bu,ppce300c3_retire")
+
+;; Multiply is non-pipelined but can be executed in any IU
+(define_insn_reservation "ppce300c3_multiply" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0, \
+ ppce300c3_iu_stage0+ppce300c3_retire")
+
+;; Divide.  We use the average latency time here.  We omit reserving a
+;; retire unit because the resulting automata would be huge.
+(define_insn_reservation "ppce300c3_divide" 20
+ (and (eq_attr "type" "idiv")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_iu_stage0+ppce300c3_mu_div,\
+ ppce300c3_mu_div*19")
+
+;; CR logical
+(define_insn_reservation "ppce300c3_cr_logical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire")
+
+;; Mfcr
+(define_insn_reservation "ppce300c3_mfcr" 1
+ (and (eq_attr "type" "mfcr")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire")
+
+;; Mtcrf
+(define_insn_reservation "ppce300c3_mtcrf" 1
+ (and (eq_attr "type" "mtcr")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire")
+
+;; Mtjmpr
+(define_insn_reservation "ppce300c3_mtjmpr" 1
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_sru_stage0+ppce300c3_retire")
+
+;; Floating point instructions
+(define_insn_reservation "ppce300c3_fpcompare" 3
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,nothing,ppce300c3_retire")
+
+(define_insn_reservation "ppce300c3_fp" 3
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,nothing,ppce300c3_retire")
+
+(define_insn_reservation "ppce300c3_dmul" 4
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu,nothing,ppce300c3_retire")
+
+; Divides are not pipelined
+(define_insn_reservation "ppce300c3_sdiv" 18
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu*17")
+
+(define_insn_reservation "ppce300c3_ddiv" 33
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_fpu,ppce300c3_fpu*32")
+
+;; Loads
+(define_insn_reservation "ppce300c3_load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire")
+
+(define_insn_reservation "ppce300c3_fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire")
+
+;; Stores.
+(define_insn_reservation "ppce300c3_store" 2
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (ior (eq_attr "cpu" "ppce300c2") (eq_attr "cpu" "ppce300c3")))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire")
+
+(define_insn_reservation "ppce300c3_fpstore" 2
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppce300c3"))
+ "ppce300c3_decode,ppce300c3_issue+ppce300c3_lsu,ppce300c3_retire")
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500-double.h b/gcc-4.4.3/gcc/config/rs6000/e500-double.h
new file mode 100644
index 000000000..5545a8c93
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500-double.h
@@ -0,0 +1,24 @@
+/* Target definitions for E500 with double precision FP.
+ Copyright (C) 2004, 2006, 2007 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef SUB3TARGET_OVERRIDE_OPTIONS
+#define SUB3TARGET_OVERRIDE_OPTIONS \
+ if (!rs6000_explicit_options.float_gprs) \
+ rs6000_float_gprs = 2;
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500.h b/gcc-4.4.3/gcc/config/rs6000/e500.h
new file mode 100644
index 000000000..81fb472e7
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500.h
@@ -0,0 +1,45 @@
+/* Enable E500 support.
+ Copyright (C) 2003, 2004, 2006, 2007, 2008 Free Software Foundation, Inc.
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_SPE_ABI
+#undef TARGET_SPE
+#undef TARGET_E500
+#undef TARGET_FPRS
+#undef TARGET_E500_SINGLE
+#undef TARGET_E500_DOUBLE
+#undef CHECK_E500_OPTIONS
+
+#define TARGET_SPE_ABI rs6000_spe_abi
+#define TARGET_SPE rs6000_spe
+#define TARGET_E500 (rs6000_cpu == PROCESSOR_PPC8540)
+#define TARGET_FPRS (rs6000_float_gprs == 0)
+#define TARGET_E500_SINGLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 1)
+#define TARGET_E500_DOUBLE (TARGET_HARD_FLOAT && rs6000_float_gprs == 2)
+#define CHECK_E500_OPTIONS \
+ do { \
+ if (TARGET_E500 || TARGET_SPE || TARGET_SPE_ABI \
+ || TARGET_E500_SINGLE || TARGET_E500_DOUBLE) \
+ { \
+ if (TARGET_ALTIVEC) \
+ error ("AltiVec and E500 instructions cannot coexist"); \
+ if (TARGET_64BIT) \
+ error ("64-bit E500 not supported"); \
+ if (TARGET_HARD_FLOAT && TARGET_FPRS) \
+ error ("E500 and FPRs not supported"); \
+ } \
+ } while (0)
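+
+/* Editor's example: assuming rs6000.c invokes CHECK_E500_OPTIONS while
+   overriding options, a command line that selects both worlds, such as
+   "gcc -mcpu=8540 -maltivec", is rejected with
+   "AltiVec and E500 instructions cannot coexist".  */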
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtres32gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtres32gpr.asm
new file mode 100644
index 000000000..6fbff820b
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtres32gpr.asm
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 32-bit integer registers, called by the compiler. */
+/* "Bare" versions that simply return to their caller. */
+
+HIDDEN_FUNC(_rest32gpr_14) lwz 14,-72(11)
+HIDDEN_FUNC(_rest32gpr_15) lwz 15,-68(11)
+HIDDEN_FUNC(_rest32gpr_16) lwz 16,-64(11)
+HIDDEN_FUNC(_rest32gpr_17) lwz 17,-60(11)
+HIDDEN_FUNC(_rest32gpr_18) lwz 18,-56(11)
+HIDDEN_FUNC(_rest32gpr_19) lwz 19,-52(11)
+HIDDEN_FUNC(_rest32gpr_20) lwz 20,-48(11)
+HIDDEN_FUNC(_rest32gpr_21) lwz 21,-44(11)
+HIDDEN_FUNC(_rest32gpr_22) lwz 22,-40(11)
+HIDDEN_FUNC(_rest32gpr_23) lwz 23,-36(11)
+HIDDEN_FUNC(_rest32gpr_24) lwz 24,-32(11)
+HIDDEN_FUNC(_rest32gpr_25) lwz 25,-28(11)
+HIDDEN_FUNC(_rest32gpr_26) lwz 26,-24(11)
+HIDDEN_FUNC(_rest32gpr_27) lwz 27,-20(11)
+HIDDEN_FUNC(_rest32gpr_28) lwz 28,-16(11)
+HIDDEN_FUNC(_rest32gpr_29) lwz 29,-12(11)
+HIDDEN_FUNC(_rest32gpr_30) lwz 30,-8(11)
+HIDDEN_FUNC(_rest32gpr_31) lwz 31,-4(11)
+ blr
+FUNC_END(_rest32gpr_31)
+FUNC_END(_rest32gpr_30)
+FUNC_END(_rest32gpr_29)
+FUNC_END(_rest32gpr_28)
+FUNC_END(_rest32gpr_27)
+FUNC_END(_rest32gpr_26)
+FUNC_END(_rest32gpr_25)
+FUNC_END(_rest32gpr_24)
+FUNC_END(_rest32gpr_23)
+FUNC_END(_rest32gpr_22)
+FUNC_END(_rest32gpr_21)
+FUNC_END(_rest32gpr_20)
+FUNC_END(_rest32gpr_19)
+FUNC_END(_rest32gpr_18)
+FUNC_END(_rest32gpr_17)
+FUNC_END(_rest32gpr_16)
+FUNC_END(_rest32gpr_15)
+FUNC_END(_rest32gpr_14)
+
+#endif
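+
+/* Editor's note: each entry point falls through to the next, so one call
+   restores a whole tail of the register range.  Hypothetical caller
+   epilogue, with r11 pointing just past the GPR save area:
+
+	bl _rest32gpr_29	# restores r29, r30 and r31, then blr
+*/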
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtres64gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtres64gpr.asm
new file mode 100644
index 000000000..5182e5539
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtres64gpr.asm
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 64-bit integer registers, called by the compiler. */
+/* "Bare" versions that return to their caller. */
+
+HIDDEN_FUNC(_rest64gpr_14) evldd 14,0(11)
+HIDDEN_FUNC(_rest64gpr_15) evldd 15,8(11)
+HIDDEN_FUNC(_rest64gpr_16) evldd 16,16(11)
+HIDDEN_FUNC(_rest64gpr_17) evldd 17,24(11)
+HIDDEN_FUNC(_rest64gpr_18) evldd 18,32(11)
+HIDDEN_FUNC(_rest64gpr_19) evldd 19,40(11)
+HIDDEN_FUNC(_rest64gpr_20) evldd 20,48(11)
+HIDDEN_FUNC(_rest64gpr_21) evldd 21,56(11)
+HIDDEN_FUNC(_rest64gpr_22) evldd 22,64(11)
+HIDDEN_FUNC(_rest64gpr_23) evldd 23,72(11)
+HIDDEN_FUNC(_rest64gpr_24) evldd 24,80(11)
+HIDDEN_FUNC(_rest64gpr_25) evldd 25,88(11)
+HIDDEN_FUNC(_rest64gpr_26) evldd 26,96(11)
+HIDDEN_FUNC(_rest64gpr_27) evldd 27,104(11)
+HIDDEN_FUNC(_rest64gpr_28) evldd 28,112(11)
+HIDDEN_FUNC(_rest64gpr_29) evldd 29,120(11)
+HIDDEN_FUNC(_rest64gpr_30) evldd 30,128(11)
+HIDDEN_FUNC(_rest64gpr_31) evldd 31,136(11)
+ blr
+FUNC_END(_rest64gpr_31)
+FUNC_END(_rest64gpr_30)
+FUNC_END(_rest64gpr_29)
+FUNC_END(_rest64gpr_28)
+FUNC_END(_rest64gpr_27)
+FUNC_END(_rest64gpr_26)
+FUNC_END(_rest64gpr_25)
+FUNC_END(_rest64gpr_24)
+FUNC_END(_rest64gpr_23)
+FUNC_END(_rest64gpr_22)
+FUNC_END(_rest64gpr_21)
+FUNC_END(_rest64gpr_20)
+FUNC_END(_rest64gpr_19)
+FUNC_END(_rest64gpr_18)
+FUNC_END(_rest64gpr_17)
+FUNC_END(_rest64gpr_16)
+FUNC_END(_rest64gpr_15)
+FUNC_END(_rest64gpr_14)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtres64gprctr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtres64gprctr.asm
new file mode 100644
index 000000000..e4b016bdb
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtres64gprctr.asm
@@ -0,0 +1,72 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 64-bit integer registers where the number of
+ registers to be restored is passed in CTR, called by the compiler. */
+
+HIDDEN_FUNC(_rest64gpr_ctr_14)	evldd 14,0(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_15)	evldd 15,8(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_16)	evldd 16,16(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_17)	evldd 17,24(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_18)	evldd 18,32(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_19)	evldd 19,40(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_20)	evldd 20,48(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_21)	evldd 21,56(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_22)	evldd 22,64(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_23)	evldd 23,72(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_24)	evldd 24,80(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_25)	evldd 25,88(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_26)	evldd 26,96(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_27)	evldd 27,104(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_28)	evldd 28,112(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_29)	evldd 29,120(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_30)	evldd 30,128(11)
+				bdz _rest64gpr_ctr_done
+HIDDEN_FUNC(_rest64gpr_ctr_31)	evldd 31,136(11)
+_rest64gpr_ctr_done:	blr
+
+#endif
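+
+/* Editor's note (my reading of bdz, which decrements CTR and branches
+   when it reaches zero): load CTR with the number of registers to
+   restore and enter at the first one, e.g.
+
+	li 0,4
+	mtctr 0
+	bl _rest64gpr_ctr_27	# restores r27..r30, then returns
+*/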
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtrest32gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtrest32gpr.asm
new file mode 100644
index 000000000..4e61010dc
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtrest32gpr.asm
@@ -0,0 +1,75 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 32-bit integer registers, called by the compiler. */
+/* "Tail" versions that perform a tail call. */
+
+HIDDEN_FUNC(_rest32gpr_14_t) lwz 14,-72(11)
+HIDDEN_FUNC(_rest32gpr_15_t) lwz 15,-68(11)
+HIDDEN_FUNC(_rest32gpr_16_t) lwz 16,-64(11)
+HIDDEN_FUNC(_rest32gpr_17_t) lwz 17,-60(11)
+HIDDEN_FUNC(_rest32gpr_18_t) lwz 18,-56(11)
+HIDDEN_FUNC(_rest32gpr_19_t) lwz 19,-52(11)
+HIDDEN_FUNC(_rest32gpr_20_t) lwz 20,-48(11)
+HIDDEN_FUNC(_rest32gpr_21_t) lwz 21,-44(11)
+HIDDEN_FUNC(_rest32gpr_22_t) lwz 22,-40(11)
+HIDDEN_FUNC(_rest32gpr_23_t) lwz 23,-36(11)
+HIDDEN_FUNC(_rest32gpr_24_t) lwz 24,-32(11)
+HIDDEN_FUNC(_rest32gpr_25_t) lwz 25,-28(11)
+HIDDEN_FUNC(_rest32gpr_26_t) lwz 26,-24(11)
+HIDDEN_FUNC(_rest32gpr_27_t) lwz 27,-20(11)
+HIDDEN_FUNC(_rest32gpr_28_t) lwz 28,-16(11)
+HIDDEN_FUNC(_rest32gpr_29_t) lwz 29,-12(11)
+HIDDEN_FUNC(_rest32gpr_30_t) lwz 30,-8(11)
+HIDDEN_FUNC(_rest32gpr_31_t) lwz 31,-4(11)
+ lwz 0,4(11)
+ mr 1,11
+ blr
+FUNC_END(_rest32gpr_31_t)
+FUNC_END(_rest32gpr_30_t)
+FUNC_END(_rest32gpr_29_t)
+FUNC_END(_rest32gpr_28_t)
+FUNC_END(_rest32gpr_27_t)
+FUNC_END(_rest32gpr_26_t)
+FUNC_END(_rest32gpr_25_t)
+FUNC_END(_rest32gpr_24_t)
+FUNC_END(_rest32gpr_23_t)
+FUNC_END(_rest32gpr_22_t)
+FUNC_END(_rest32gpr_21_t)
+FUNC_END(_rest32gpr_20_t)
+FUNC_END(_rest32gpr_19_t)
+FUNC_END(_rest32gpr_18_t)
+FUNC_END(_rest32gpr_17_t)
+FUNC_END(_rest32gpr_16_t)
+FUNC_END(_rest32gpr_15_t)
+FUNC_END(_rest32gpr_14_t)
+
+#endif
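For context, a hedged sketch of the epilogue shape the "_t" routines enable: the routine reloads the saved LR word into r0 and pops the frame, leaving the caller free to make a sibling call. Frame size and target name are placeholders:

    addi 11,1,32             /* r11 = old SP; 32-byte frame assumed */
    bl _rest32gpr_29_t       /* restore r29..r31, fetch saved LR into r0, set r1 = r11 */
    mtlr 0                   /* reinstate the return address */
    b other_function         /* tail call; the name is hypothetical */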
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtrest64gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtrest64gpr.asm
new file mode 100644
index 000000000..090786fdc
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtrest64gpr.asm
@@ -0,0 +1,74 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* "Tail" versions that perform a tail call. */
+
+HIDDEN_FUNC(_rest64gpr_14_t) evldd 14,0(11)
+HIDDEN_FUNC(_rest64gpr_15_t) evldd 15,8(11)
+HIDDEN_FUNC(_rest64gpr_16_t) evldd 16,16(11)
+HIDDEN_FUNC(_rest64gpr_17_t) evldd 17,24(11)
+HIDDEN_FUNC(_rest64gpr_18_t) evldd 18,32(11)
+HIDDEN_FUNC(_rest64gpr_19_t) evldd 19,40(11)
+HIDDEN_FUNC(_rest64gpr_20_t) evldd 20,48(11)
+HIDDEN_FUNC(_rest64gpr_21_t) evldd 21,56(11)
+HIDDEN_FUNC(_rest64gpr_22_t) evldd 22,64(11)
+HIDDEN_FUNC(_rest64gpr_23_t) evldd 23,72(11)
+HIDDEN_FUNC(_rest64gpr_24_t) evldd 24,80(11)
+HIDDEN_FUNC(_rest64gpr_25_t) evldd 25,88(11)
+HIDDEN_FUNC(_rest64gpr_26_t) evldd 26,96(11)
+HIDDEN_FUNC(_rest64gpr_27_t) evldd 27,104(11)
+HIDDEN_FUNC(_rest64gpr_28_t) evldd 28,112(11)
+HIDDEN_FUNC(_rest64gpr_29_t) evldd 29,120(11)
+HIDDEN_FUNC(_rest64gpr_30_t) evldd 30,128(11)
+HIDDEN_FUNC(_rest64gpr_31_t) lwz 0,148(11)
+ evldd 31,136(11)
+ addi 1,11,144
+ blr
+FUNC_END(_rest64gpr_31_t)
+FUNC_END(_rest64gpr_30_t)
+FUNC_END(_rest64gpr_29_t)
+FUNC_END(_rest64gpr_28_t)
+FUNC_END(_rest64gpr_27_t)
+FUNC_END(_rest64gpr_26_t)
+FUNC_END(_rest64gpr_25_t)
+FUNC_END(_rest64gpr_24_t)
+FUNC_END(_rest64gpr_23_t)
+FUNC_END(_rest64gpr_22_t)
+FUNC_END(_rest64gpr_21_t)
+FUNC_END(_rest64gpr_20_t)
+FUNC_END(_rest64gpr_19_t)
+FUNC_END(_rest64gpr_18_t)
+FUNC_END(_rest64gpr_17_t)
+FUNC_END(_rest64gpr_16_t)
+FUNC_END(_rest64gpr_15_t)
+FUNC_END(_rest64gpr_14_t)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtresx32gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtresx32gpr.asm
new file mode 100644
index 000000000..0b35245df
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtresx32gpr.asm
@@ -0,0 +1,75 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for restoring 32-bit integer registers, called by the compiler. */
+/* "Exit" versions that return to the caller's caller. */
+
+HIDDEN_FUNC(_rest32gpr_14_x) lwz 14,-72(11)
+HIDDEN_FUNC(_rest32gpr_15_x) lwz 15,-68(11)
+HIDDEN_FUNC(_rest32gpr_16_x) lwz 16,-64(11)
+HIDDEN_FUNC(_rest32gpr_17_x) lwz 17,-60(11)
+HIDDEN_FUNC(_rest32gpr_18_x) lwz 18,-56(11)
+HIDDEN_FUNC(_rest32gpr_19_x) lwz 19,-52(11)
+HIDDEN_FUNC(_rest32gpr_20_x) lwz 20,-48(11)
+HIDDEN_FUNC(_rest32gpr_21_x) lwz 21,-44(11)
+HIDDEN_FUNC(_rest32gpr_22_x) lwz 22,-40(11)
+HIDDEN_FUNC(_rest32gpr_23_x) lwz 23,-36(11)
+HIDDEN_FUNC(_rest32gpr_24_x) lwz 24,-32(11)
+HIDDEN_FUNC(_rest32gpr_25_x) lwz 25,-28(11)
+HIDDEN_FUNC(_rest32gpr_26_x) lwz 26,-24(11)
+HIDDEN_FUNC(_rest32gpr_27_x) lwz 27,-20(11)
+HIDDEN_FUNC(_rest32gpr_28_x) lwz 28,-16(11)
+HIDDEN_FUNC(_rest32gpr_29_x) lwz 29,-12(11)
+HIDDEN_FUNC(_rest32gpr_30_x) lwz 30,-8(11)
+HIDDEN_FUNC(_rest32gpr_31_x) lwz 0,4(11)
+ lwz 31,-4(11)
+ mr 1,11
+ mtlr 0
+ blr
+FUNC_END(_rest32gpr_31_x)
+FUNC_END(_rest32gpr_30_x)
+FUNC_END(_rest32gpr_29_x)
+FUNC_END(_rest32gpr_28_x)
+FUNC_END(_rest32gpr_27_x)
+FUNC_END(_rest32gpr_26_x)
+FUNC_END(_rest32gpr_25_x)
+FUNC_END(_rest32gpr_24_x)
+FUNC_END(_rest32gpr_23_x)
+FUNC_END(_rest32gpr_22_x)
+FUNC_END(_rest32gpr_21_x)
+FUNC_END(_rest32gpr_20_x)
+FUNC_END(_rest32gpr_19_x)
+FUNC_END(_rest32gpr_18_x)
+FUNC_END(_rest32gpr_17_x)
+FUNC_END(_rest32gpr_16_x)
+FUNC_END(_rest32gpr_15_x)
+FUNC_END(_rest32gpr_14_x)
+
+#endif
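By contrast, the "_x" routines fold the whole return sequence into a single branch; a sketch with an assumed 32-byte frame:

    addi 11,1,32             /* r11 = old SP */
    b _rest32gpr_29_x        /* restores r29..r31, reloads LR, pops the frame, returns */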
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtresx64gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtresx64gpr.asm
new file mode 100644
index 000000000..ce2a6cfa2
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtresx64gpr.asm
@@ -0,0 +1,75 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* "Exit" versions that return to their caller's caller. */
+
+HIDDEN_FUNC(_rest64gpr_14_x) evldd 14,0(11)
+HIDDEN_FUNC(_rest64gpr_15_x) evldd 15,8(11)
+HIDDEN_FUNC(_rest64gpr_16_x) evldd 16,16(11)
+HIDDEN_FUNC(_rest64gpr_17_x) evldd 17,24(11)
+HIDDEN_FUNC(_rest64gpr_18_x) evldd 18,32(11)
+HIDDEN_FUNC(_rest64gpr_19_x) evldd 19,40(11)
+HIDDEN_FUNC(_rest64gpr_20_x) evldd 20,48(11)
+HIDDEN_FUNC(_rest64gpr_21_x) evldd 21,56(11)
+HIDDEN_FUNC(_rest64gpr_22_x) evldd 22,64(11)
+HIDDEN_FUNC(_rest64gpr_23_x) evldd 23,72(11)
+HIDDEN_FUNC(_rest64gpr_24_x) evldd 24,80(11)
+HIDDEN_FUNC(_rest64gpr_25_x) evldd 25,88(11)
+HIDDEN_FUNC(_rest64gpr_26_x) evldd 26,96(11)
+HIDDEN_FUNC(_rest64gpr_27_x) evldd 27,104(11)
+HIDDEN_FUNC(_rest64gpr_28_x) evldd 28,112(11)
+HIDDEN_FUNC(_rest64gpr_29_x) evldd 29,120(11)
+HIDDEN_FUNC(_rest64gpr_30_x) evldd 30,128(11)
+HIDDEN_FUNC(_rest64gpr_31_x) lwz 0,148(11)
+ evldd 31,136(11)
+ addi 1,11,144
+ mtlr 0
+ blr
+FUNC_END(_rest64gpr_31_x)
+FUNC_END(_rest64gpr_30_x)
+FUNC_END(_rest64gpr_29_x)
+FUNC_END(_rest64gpr_28_x)
+FUNC_END(_rest64gpr_27_x)
+FUNC_END(_rest64gpr_26_x)
+FUNC_END(_rest64gpr_25_x)
+FUNC_END(_rest64gpr_24_x)
+FUNC_END(_rest64gpr_23_x)
+FUNC_END(_rest64gpr_22_x)
+FUNC_END(_rest64gpr_21_x)
+FUNC_END(_rest64gpr_20_x)
+FUNC_END(_rest64gpr_19_x)
+FUNC_END(_rest64gpr_18_x)
+FUNC_END(_rest64gpr_17_x)
+FUNC_END(_rest64gpr_16_x)
+FUNC_END(_rest64gpr_15_x)
+FUNC_END(_rest64gpr_14_x)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtsav32gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtsav32gpr.asm
new file mode 100644
index 000000000..c89103050
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtsav32gpr.asm
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 32-bit integer registers, called by the compiler. */
+/* "Bare" versions that simply return to their caller. */
+
+HIDDEN_FUNC(_save32gpr_14) stw 14,-72(11)
+HIDDEN_FUNC(_save32gpr_15) stw 15,-68(11)
+HIDDEN_FUNC(_save32gpr_16) stw 16,-64(11)
+HIDDEN_FUNC(_save32gpr_17) stw 17,-60(11)
+HIDDEN_FUNC(_save32gpr_18) stw 18,-56(11)
+HIDDEN_FUNC(_save32gpr_19) stw 19,-52(11)
+HIDDEN_FUNC(_save32gpr_20) stw 20,-48(11)
+HIDDEN_FUNC(_save32gpr_21) stw 21,-44(11)
+HIDDEN_FUNC(_save32gpr_22) stw 22,-40(11)
+HIDDEN_FUNC(_save32gpr_23) stw 23,-36(11)
+HIDDEN_FUNC(_save32gpr_24) stw 24,-32(11)
+HIDDEN_FUNC(_save32gpr_25) stw 25,-28(11)
+HIDDEN_FUNC(_save32gpr_26) stw 26,-24(11)
+HIDDEN_FUNC(_save32gpr_27) stw 27,-20(11)
+HIDDEN_FUNC(_save32gpr_28) stw 28,-16(11)
+HIDDEN_FUNC(_save32gpr_29) stw 29,-12(11)
+HIDDEN_FUNC(_save32gpr_30) stw 30,-8(11)
+HIDDEN_FUNC(_save32gpr_31) stw 31,-4(11)
+ blr
+FUNC_END(_save32gpr_31)
+FUNC_END(_save32gpr_30)
+FUNC_END(_save32gpr_29)
+FUNC_END(_save32gpr_28)
+FUNC_END(_save32gpr_27)
+FUNC_END(_save32gpr_26)
+FUNC_END(_save32gpr_25)
+FUNC_END(_save32gpr_24)
+FUNC_END(_save32gpr_23)
+FUNC_END(_save32gpr_22)
+FUNC_END(_save32gpr_21)
+FUNC_END(_save32gpr_20)
+FUNC_END(_save32gpr_19)
+FUNC_END(_save32gpr_18)
+FUNC_END(_save32gpr_17)
+FUNC_END(_save32gpr_16)
+FUNC_END(_save32gpr_15)
+FUNC_END(_save32gpr_14)
+
+#endif
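A sketch of the matching prologue (frame size and saved range are assumptions). Note LR must be copied aside before the bl, which clobbers it:

    stwu 1,-32(1)            /* allocate the frame first */
    mflr 0                   /* grab LR before the call below overwrites it */
    addi 11,1,32             /* r11 = old SP; the routines store at negative offsets */
    bl _save32gpr_29         /* spill r29..r31 at old SP-12 .. old SP-4 */
    stw 0,36(1)              /* LR save word at old SP+4, per the EABI stack layout */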
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtsav64gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtsav64gpr.asm
new file mode 100644
index 000000000..2a5d3e475
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtsav64gpr.asm
@@ -0,0 +1,72 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers, called by the compiler. */
+/* "Bare" versions that return to their caller. */
+
+HIDDEN_FUNC(_save64gpr_14) evstdd 14,0(11)
+HIDDEN_FUNC(_save64gpr_15) evstdd 15,8(11)
+HIDDEN_FUNC(_save64gpr_16) evstdd 16,16(11)
+HIDDEN_FUNC(_save64gpr_17) evstdd 17,24(11)
+HIDDEN_FUNC(_save64gpr_18) evstdd 18,32(11)
+HIDDEN_FUNC(_save64gpr_19) evstdd 19,40(11)
+HIDDEN_FUNC(_save64gpr_20) evstdd 20,48(11)
+HIDDEN_FUNC(_save64gpr_21) evstdd 21,56(11)
+HIDDEN_FUNC(_save64gpr_22) evstdd 22,64(11)
+HIDDEN_FUNC(_save64gpr_23) evstdd 23,72(11)
+HIDDEN_FUNC(_save64gpr_24) evstdd 24,80(11)
+HIDDEN_FUNC(_save64gpr_25) evstdd 25,88(11)
+HIDDEN_FUNC(_save64gpr_26) evstdd 26,96(11)
+HIDDEN_FUNC(_save64gpr_27) evstdd 27,104(11)
+HIDDEN_FUNC(_save64gpr_28) evstdd 28,112(11)
+HIDDEN_FUNC(_save64gpr_29) evstdd 29,120(11)
+HIDDEN_FUNC(_save64gpr_30) evstdd 30,128(11)
+HIDDEN_FUNC(_save64gpr_31) evstdd 31,136(11)
+ blr
+FUNC_END(_save64gpr_31)
+FUNC_END(_save64gpr_30)
+FUNC_END(_save64gpr_29)
+FUNC_END(_save64gpr_28)
+FUNC_END(_save64gpr_27)
+FUNC_END(_save64gpr_26)
+FUNC_END(_save64gpr_25)
+FUNC_END(_save64gpr_24)
+FUNC_END(_save64gpr_23)
+FUNC_END(_save64gpr_22)
+FUNC_END(_save64gpr_21)
+FUNC_END(_save64gpr_20)
+FUNC_END(_save64gpr_19)
+FUNC_END(_save64gpr_18)
+FUNC_END(_save64gpr_17)
+FUNC_END(_save64gpr_16)
+FUNC_END(_save64gpr_15)
+FUNC_END(_save64gpr_14)
+
+#endif
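The 64-bit variants differ only in using evstdd, since SPE general registers are 64 bits wide, and in growing upward from r11; a sketch under assumed offsets:

    stwu 1,-160(1)           /* frame size assumed */
    mflr 0
    addi 11,1,8              /* r11 = base (r14 slot) of the 64-bit save area */
    bl _save64gpr_27         /* evstdd-spill the full 64-bit r27..r31 */
    stw 0,164(1)             /* LR save word at old SP+4 */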
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtsav64gprctr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtsav64gprctr.asm
new file mode 100644
index 000000000..b555984e1
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtsav64gprctr.asm
@@ -0,0 +1,91 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers where the number of
+ registers to be saved is passed in CTR, called by the compiler. */
+/* "Bare" versions that return to their caller. */
+
+HIDDEN_FUNC(_save64gpr_ctr_14) evstdd 14,0(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_15) evstdd 15,8(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_16) evstdd 16,16(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_17) evstdd 17,24(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_18) evstdd 18,32(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_19) evstdd 19,40(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_20) evstdd 20,48(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_21) evstdd 21,56(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_22) evstdd 22,64(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_23) evstdd 23,72(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_24) evstdd 24,80(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_25) evstdd 25,88(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_26) evstdd 26,96(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_27) evstdd 27,104(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_28) evstdd 28,112(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_29) evstdd 29,120(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_30) evstdd 30,128(11)
+ bdz _save64gpr_ctr_done
+HIDDEN_FUNC(_save64gpr_ctr_31) evstdd 31,136(11)
+_save64gpr_ctr_done: blr
+FUNC_END(_save64gpr_ctr_31)
+FUNC_END(_save64gpr_ctr_30)
+FUNC_END(_save64gpr_ctr_29)
+FUNC_END(_save64gpr_ctr_28)
+FUNC_END(_save64gpr_ctr_27)
+FUNC_END(_save64gpr_ctr_26)
+FUNC_END(_save64gpr_ctr_25)
+FUNC_END(_save64gpr_ctr_24)
+FUNC_END(_save64gpr_ctr_23)
+FUNC_END(_save64gpr_ctr_22)
+FUNC_END(_save64gpr_ctr_21)
+FUNC_END(_save64gpr_ctr_20)
+FUNC_END(_save64gpr_ctr_19)
+FUNC_END(_save64gpr_ctr_18)
+FUNC_END(_save64gpr_ctr_17)
+FUNC_END(_save64gpr_ctr_16)
+FUNC_END(_save64gpr_ctr_15)
+FUNC_END(_save64gpr_ctr_14)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtsavg32gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtsavg32gpr.asm
new file mode 100644
index 000000000..d14088e0d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtsavg32gpr.asm
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 32-bit integer registers, called by the compiler. */
+/* "GOT" versions that load the address of the GOT into lr before returning. */
+
+HIDDEN_FUNC(_save32gpr_14_g) stw 14,-72(11)
+HIDDEN_FUNC(_save32gpr_15_g) stw 15,-68(11)
+HIDDEN_FUNC(_save32gpr_16_g) stw 16,-64(11)
+HIDDEN_FUNC(_save32gpr_17_g) stw 17,-60(11)
+HIDDEN_FUNC(_save32gpr_18_g) stw 18,-56(11)
+HIDDEN_FUNC(_save32gpr_19_g) stw 19,-52(11)
+HIDDEN_FUNC(_save32gpr_20_g) stw 20,-48(11)
+HIDDEN_FUNC(_save32gpr_21_g) stw 21,-44(11)
+HIDDEN_FUNC(_save32gpr_22_g) stw 22,-40(11)
+HIDDEN_FUNC(_save32gpr_23_g) stw 23,-36(11)
+HIDDEN_FUNC(_save32gpr_24_g) stw 24,-32(11)
+HIDDEN_FUNC(_save32gpr_25_g) stw 25,-28(11)
+HIDDEN_FUNC(_save32gpr_26_g) stw 26,-24(11)
+HIDDEN_FUNC(_save32gpr_27_g) stw 27,-20(11)
+HIDDEN_FUNC(_save32gpr_28_g) stw 28,-16(11)
+HIDDEN_FUNC(_save32gpr_29_g) stw 29,-12(11)
+HIDDEN_FUNC(_save32gpr_30_g) stw 30,-8(11)
+HIDDEN_FUNC(_save32gpr_31_g) stw 31,-4(11)
+ b _GLOBAL_OFFSET_TABLE_-4
+FUNC_END(_save32gpr_31_g)
+FUNC_END(_save32gpr_30_g)
+FUNC_END(_save32gpr_29_g)
+FUNC_END(_save32gpr_28_g)
+FUNC_END(_save32gpr_27_g)
+FUNC_END(_save32gpr_26_g)
+FUNC_END(_save32gpr_25_g)
+FUNC_END(_save32gpr_24_g)
+FUNC_END(_save32gpr_23_g)
+FUNC_END(_save32gpr_22_g)
+FUNC_END(_save32gpr_21_g)
+FUNC_END(_save32gpr_20_g)
+FUNC_END(_save32gpr_19_g)
+FUNC_END(_save32gpr_18_g)
+FUNC_END(_save32gpr_17_g)
+FUNC_END(_save32gpr_16_g)
+FUNC_END(_save32gpr_15_g)
+FUNC_END(_save32gpr_14_g)
+
+#endif
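The closing b _GLOBAL_OFFSET_TABLE_-4 leans on the SVR4 convention that the word just below the GOT holds a blrl: executing it returns through the caller's LR while leaving LR pointing at the GOT itself. A hedged sketch of the call site:

    bl _save32gpr_30_g       /* spill r30..r31, then bounce through the blrl below the GOT */
    mflr 30                  /* on return LR = _GLOBAL_OFFSET_TABLE_; capture the PIC base */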
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gpr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gpr.asm
new file mode 100644
index 000000000..cbad75bc0
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gpr.asm
@@ -0,0 +1,73 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers, called by the compiler. */
+/* "GOT" versions that load the address of the GOT into lr before returning. */
+
+HIDDEN_FUNC(_save64gpr_14_g) evstdd 14,0(11)
+HIDDEN_FUNC(_save64gpr_15_g) evstdd 15,8(11)
+HIDDEN_FUNC(_save64gpr_16_g) evstdd 16,16(11)
+HIDDEN_FUNC(_save64gpr_17_g) evstdd 17,24(11)
+HIDDEN_FUNC(_save64gpr_18_g) evstdd 18,32(11)
+HIDDEN_FUNC(_save64gpr_19_g) evstdd 19,40(11)
+HIDDEN_FUNC(_save64gpr_20_g) evstdd 20,48(11)
+HIDDEN_FUNC(_save64gpr_21_g) evstdd 21,56(11)
+HIDDEN_FUNC(_save64gpr_22_g) evstdd 22,64(11)
+HIDDEN_FUNC(_save64gpr_23_g) evstdd 23,72(11)
+HIDDEN_FUNC(_save64gpr_24_g) evstdd 24,80(11)
+HIDDEN_FUNC(_save64gpr_25_g) evstdd 25,88(11)
+HIDDEN_FUNC(_save64gpr_26_g) evstdd 26,96(11)
+HIDDEN_FUNC(_save64gpr_27_g) evstdd 27,104(11)
+HIDDEN_FUNC(_save64gpr_28_g) evstdd 28,112(11)
+HIDDEN_FUNC(_save64gpr_29_g) evstdd 29,120(11)
+HIDDEN_FUNC(_save64gpr_30_g) evstdd 30,128(11)
+HIDDEN_FUNC(_save64gpr_31_g) evstdd 31,136(11)
+ b _GLOBAL_OFFSET_TABLE_-4
+FUNC_END(_save64gpr_31_g)
+FUNC_END(_save64gpr_30_g)
+FUNC_END(_save64gpr_29_g)
+FUNC_END(_save64gpr_28_g)
+FUNC_END(_save64gpr_27_g)
+FUNC_END(_save64gpr_26_g)
+FUNC_END(_save64gpr_25_g)
+FUNC_END(_save64gpr_24_g)
+FUNC_END(_save64gpr_23_g)
+FUNC_END(_save64gpr_22_g)
+FUNC_END(_save64gpr_21_g)
+FUNC_END(_save64gpr_20_g)
+FUNC_END(_save64gpr_19_g)
+FUNC_END(_save64gpr_18_g)
+FUNC_END(_save64gpr_17_g)
+FUNC_END(_save64gpr_16_g)
+FUNC_END(_save64gpr_15_g)
+FUNC_END(_save64gpr_14_g)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gprctr.asm b/gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gprctr.asm
new file mode 100644
index 000000000..b663f642b
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500crtsavg64gprctr.asm
@@ -0,0 +1,90 @@
+/*
+ * Special support for e500 eabi and SVR4
+ *
+ * Copyright (C) 2008, 2009 Free Software Foundation, Inc.
+ * Written by Nathan Froyd
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifdef __SPE__
+
+/* Routines for saving 64-bit integer registers where the number of
+   registers to be saved is passed in CTR, called by the compiler. */
+/* "GOT" versions that load the address of the GOT into lr before returning. */
+
+HIDDEN_FUNC(_save64gpr_ctr_14_g) evstdd 14,0(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_15_g) evstdd 15,8(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_16_g) evstdd 16,16(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_17_g) evstdd 17,24(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_18_g) evstdd 18,32(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_19_g) evstdd 19,40(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_20_g) evstdd 20,48(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_21_g) evstdd 21,56(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_22_g) evstdd 22,64(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_23_g) evstdd 23,72(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_24_g) evstdd 24,80(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_25_g) evstdd 25,88(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_26_g) evstdd 26,96(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_27_g) evstdd 27,104(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_28_g) evstdd 28,112(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_29_g) evstdd 29,120(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_30_g) evstdd 30,128(11)
+ bdz _save64gpr_ctr_g_done
+HIDDEN_FUNC(_save64gpr_ctr_31_g) evstdd 31,136(11)
+_save64gpr_ctr_g_done: b _GLOBAL_OFFSET_TABLE_-4
+FUNC_END(_save64gpr_ctr_31_g)
+FUNC_END(_save64gpr_ctr_30_g)
+FUNC_END(_save64gpr_ctr_29_g)
+FUNC_END(_save64gpr_ctr_28_g)
+FUNC_END(_save64gpr_ctr_27_g)
+FUNC_END(_save64gpr_ctr_26_g)
+FUNC_END(_save64gpr_ctr_25_g)
+FUNC_END(_save64gpr_ctr_24_g)
+FUNC_END(_save64gpr_ctr_23_g)
+FUNC_END(_save64gpr_ctr_22_g)
+FUNC_END(_save64gpr_ctr_21_g)
+FUNC_END(_save64gpr_ctr_20_g)
+FUNC_END(_save64gpr_ctr_19_g)
+FUNC_END(_save64gpr_ctr_18_g)
+FUNC_END(_save64gpr_ctr_17_g)
+FUNC_END(_save64gpr_ctr_16_g)
+FUNC_END(_save64gpr_ctr_15_g)
+FUNC_END(_save64gpr_ctr_14_g)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/e500mc.md b/gcc-4.4.3/gcc/config/rs6000/e500mc.md
new file mode 100644
index 000000000..86434f95f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/e500mc.md
@@ -0,0 +1,200 @@
+;; Pipeline description for Motorola PowerPC e500mc core.
+;; Copyright (C) 2008 Free Software Foundation, Inc.
+;; Contributed by Edmar Wienskoski (edmar@freescale.com)
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; e500mc 32-bit SU(2), LSU, FPU, BPU
+;; Max issue 3 insns/clock cycle (includes 1 branch)
+;; FP is half clocked, timings of other instructions are as in the e500v2.
+
+(define_automaton "e500mc_most,e500mc_long,e500mc_retire")
+(define_cpu_unit "e500mc_decode_0,e500mc_decode_1" "e500mc_most")
+(define_cpu_unit "e500mc_issue_0,e500mc_issue_1" "e500mc_most")
+(define_cpu_unit "e500mc_retire_0,e500mc_retire_1" "e500mc_retire")
+
+;; SU.
+(define_cpu_unit "e500mc_su0_stage0,e500mc_su1_stage0" "e500mc_most")
+
+;; MU.
+(define_cpu_unit "e500mc_mu_stage0,e500mc_mu_stage1" "e500mc_most")
+(define_cpu_unit "e500mc_mu_stage2,e500mc_mu_stage3" "e500mc_most")
+
+;; Non-pipelined division.
+(define_cpu_unit "e500mc_mu_div" "e500mc_long")
+
+;; LSU.
+(define_cpu_unit "e500mc_lsu" "e500mc_most")
+
+;; FPU.
+(define_cpu_unit "e500mc_fpu" "e500mc_most")
+
+;; Branch unit.
+(define_cpu_unit "e500mc_bu" "e500mc_most")
+
+;; The following units are used to make the automata deterministic.
+(define_cpu_unit "present_e500mc_decode_0" "e500mc_most")
+(define_cpu_unit "present_e500mc_issue_0" "e500mc_most")
+(define_cpu_unit "present_e500mc_retire_0" "e500mc_retire")
+(define_cpu_unit "present_e500mc_su0_stage0" "e500mc_most")
+
+;; The following presence sets make the automata deterministic when the ndfa option is used.
+(presence_set "present_e500mc_decode_0" "e500mc_decode_0")
+(presence_set "present_e500mc_issue_0" "e500mc_issue_0")
+(presence_set "present_e500mc_retire_0" "e500mc_retire_0")
+(presence_set "present_e500mc_su0_stage0" "e500mc_su0_stage0")
+
+;; Some useful abbreviations.
+(define_reservation "e500mc_decode"
+ "e500mc_decode_0|e500mc_decode_1+present_e500mc_decode_0")
+(define_reservation "e500mc_issue"
+ "e500mc_issue_0|e500mc_issue_1+present_e500mc_issue_0")
+(define_reservation "e500mc_retire"
+ "e500mc_retire_0|e500mc_retire_1+present_e500mc_retire_0")
+(define_reservation "e500mc_su_stage0"
+ "e500mc_su0_stage0|e500mc_su1_stage0+present_e500mc_su0_stage0")
+
+;; Simple SU insns.
+(define_insn_reservation "e500mc_su" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,cmp,compare,\
+ delayed_compare,var_delayed_compare,fast_compare,\
+ shift,trap,var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire")
+
+(define_insn_reservation "e500mc_two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire,\
+ e500mc_issue+e500mc_su_stage0+e500mc_retire")
+
+(define_insn_reservation "e500mc_three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire,\
+ e500mc_issue+e500mc_su_stage0+e500mc_retire,\
+ e500mc_issue+e500mc_su_stage0+e500mc_retire")
+
+;; Multiply.
+(define_insn_reservation "e500mc_multiply" 4
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_mu_stage0,e500mc_mu_stage1,\
+ e500mc_mu_stage2,e500mc_mu_stage3+e500mc_retire")
+
+;; Divide. We use the average latency time here.
+(define_insn_reservation "e500mc_divide" 14
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_mu_stage0+e500mc_mu_div,\
+ e500mc_mu_div*13")
+
+;; Branch.
+(define_insn_reservation "e500mc_branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_bu,e500mc_retire")
+
+;; CR logical.
+(define_insn_reservation "e500mc_cr_logical" 1
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_bu,e500mc_retire")
+
+;; Mfcr.
+(define_insn_reservation "e500mc_mfcr" 1
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su1_stage0+e500mc_retire")
+
+;; Mtcrf.
+(define_insn_reservation "e500mc_mtcrf" 1
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su1_stage0+e500mc_retire")
+
+;; Mtjmpr.
+(define_insn_reservation "e500mc_mtjmpr" 1
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire")
+
+;; Brinc.
+(define_insn_reservation "e500mc_brinc" 1
+ (and (eq_attr "type" "brinc")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_su_stage0+e500mc_retire")
+
+;; Loads.
+(define_insn_reservation "e500mc_load" 3
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,sync")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire")
+
+(define_insn_reservation "e500mc_fpload" 4
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing*2,e500mc_retire")
+
+;; Stores.
+(define_insn_reservation "e500mc_store" 3
+ (and (eq_attr "type" "store,store_ux,store_u,store_c")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire")
+
+(define_insn_reservation "e500mc_fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_lsu,nothing,e500mc_retire")
+
+;; The following reservations ignore the retire unit to avoid a large automaton.
+
+;; Simple FP.
+(define_insn_reservation "e500mc_simple_float" 8
+ (and (eq_attr "type" "fpsimple")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_fpu")
+; "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire")
+
+;; FP.
+(define_insn_reservation "e500mc_float" 8
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_fpu")
+; "e500mc_decode,e500mc_issue+e500mc_fpu,nothing*6,e500mc_retire")
+
+(define_insn_reservation "e500mc_fpcompare" 8
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_fpu")
+
+(define_insn_reservation "e500mc_dmul" 10
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_fpu")
+
+;; FP divides are not pipelined.
+(define_insn_reservation "e500mc_sdiv" 36
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_fpu,e500mc_fpu*35")
+
+(define_insn_reservation "e500mc_ddiv" 66
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppce500mc"))
+ "e500mc_decode,e500mc_issue+e500mc_fpu,e500mc_fpu*65")
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabi-ci.asm b/gcc-4.4.3/gcc/config/rs6000/eabi-ci.asm
new file mode 100644
index 000000000..696f33d39
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabi-ci.asm
@@ -0,0 +1,113 @@
+/* crti.s for eabi
+ Copyright (C) 1996, 2000, 2008, 2009 Free Software Foundation, Inc.
+ Written By Michael Meissner
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file just supplies labeled starting points for the .got* and other
+ special sections. It is linked in before all other modules. */
+
+ .ident "GNU C crti.s"
+
+#include <ppc-asm.h>
+
+#ifndef __powerpc64__
+ .section ".got","aw"
+ .globl __GOT_START__
+ .type __GOT_START__,@object
+__GOT_START__:
+
+ .section ".got1","aw"
+ .globl __GOT1_START__
+ .type __GOT1_START__,@object
+__GOT1_START__:
+
+ .section ".got2","aw"
+ .globl __GOT2_START__
+ .type __GOT2_START__,@object
+__GOT2_START__:
+
+ .section ".fixup","aw"
+ .globl __FIXUP_START__
+ .type __FIXUP_START__,@object
+__FIXUP_START__:
+
+ .section ".ctors","aw"
+ .globl __CTOR_LIST__
+ .type __CTOR_LIST__,@object
+__CTOR_LIST__:
+
+ .section ".dtors","aw"
+ .globl __DTOR_LIST__
+ .type __DTOR_LIST__,@object
+__DTOR_LIST__:
+
+ .section ".sdata","aw"
+ .globl __SDATA_START__
+ .type __SDATA_START__,@object
+ .weak _SDA_BASE_
+ .type _SDA_BASE_,@object
+__SDATA_START__:
+_SDA_BASE_:
+
+ .section ".sbss","aw",@nobits
+ .globl __SBSS_START__
+ .type __SBSS_START__,@object
+__SBSS_START__:
+
+ .section ".sdata2","a"
+ .weak _SDA2_BASE_
+ .type _SDA2_BASE_,@object
+ .globl __SDATA2_START__
+ .type __SDATA2_START__,@object
+__SDATA2_START__:
+_SDA2_BASE_:
+
+ .section ".sbss2","a"
+ .globl __SBSS2_START__
+ .type __SBSS2_START__,@object
+__SBSS2_START__:
+
+ .section ".gcc_except_table","aw"
+ .globl __EXCEPT_START__
+ .type __EXCEPT_START__,@object
+__EXCEPT_START__:
+
+ .section ".eh_frame","aw"
+ .globl __EH_FRAME_BEGIN__
+ .type __EH_FRAME_BEGIN__,@object
+__EH_FRAME_BEGIN__:
+
+/* Head of __init function used for static constructors. */
+ .section ".init","ax"
+ .align 2
+FUNC_START(__init)
+ stwu 1,-16(1)
+ mflr 0
+ stw 0,20(1)
+
+/* Head of __fini function used for static destructors. */
+ .section ".fini","ax"
+ .align 2
+FUNC_START(__fini)
+ stwu 1,-16(1)
+ mflr 0
+ stw 0,20(1)
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabi-cn.asm b/gcc-4.4.3/gcc/config/rs6000/eabi-cn.asm
new file mode 100644
index 000000000..68774097c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabi-cn.asm
@@ -0,0 +1,104 @@
+/* crtn.s for eabi
+ Copyright (C) 1996, 2000, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Written By Michael Meissner
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* This file just supplies labeled ending points for the .got* and other
+ special sections. It is linked in after all other modules. */
+
+ .ident "GNU C crtn.s"
+
+#ifndef __powerpc64__
+ .section ".got","aw"
+ .globl __GOT_END__
+ .type __GOT_END__,@object
+__GOT_END__:
+
+ .section ".got1","aw"
+ .globl __GOT1_END__
+ .type __GOT1_END__,@object
+__GOT1_END__:
+
+ .section ".got2","aw"
+ .globl __GOT2_END__
+ .type __GOT2_END__,@object
+__GOT2_END__:
+
+ .section ".fixup","aw"
+ .globl __FIXUP_END__
+ .type __FIXUP_END__,@object
+__FIXUP_END__:
+
+ .section ".ctors","aw"
+ .globl __CTOR_END__
+ .type __CTOR_END__,@object
+__CTOR_END__:
+
+ .section ".dtors","aw"
+ .weak __DTOR_END__
+ .type __DTOR_END__,@object
+__DTOR_END__:
+
+ .section ".sdata","aw"
+ .globl __SDATA_END__
+ .type __SDATA_END__,@object
+__SDATA_END__:
+
+ .section ".sbss","aw",@nobits
+ .globl __SBSS_END__
+ .type __SBSS_END__,@object
+__SBSS_END__:
+
+ .section ".sdata2","a"
+ .globl __SDATA2_END__
+ .type __SDATA2_END__,@object
+__SDATA2_END__:
+
+ .section ".sbss2","a"
+ .globl __SBSS2_END__
+ .type __SBSS2_END__,@object
+__SBSS2_END__:
+
+ .section ".gcc_except_table","aw"
+ .globl __EXCEPT_END__
+ .type __EXCEPT_END__,@object
+__EXCEPT_END__:
+
+ .section ".eh_frame","aw"
+ .globl __EH_FRAME_END__
+ .type __EH_FRAME_END__,@object
+__EH_FRAME_END__:
+ .long 0
+
+/* Tail of __init function used for static constructors. */
+ .section ".init","ax"
+ lwz 0,20(1)
+ mtlr 0
+ addi 1,1,16
+ blr
+
+/* Tail of __fini function used for static destructors. */
+ .section ".fini","ax"
+ lwz 0,20(1)
+ mtlr 0
+ addi 1,1,16
+ blr
+#endif
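Conceptually the linker concatenates these .init/.fini fragments with those from eabi-ci.asm, so the two files bracket one function. A sketch of the combined __init (the middle is contributed by intervening objects):

    FUNC_START(__init)
        stwu 1,-16(1)        /* prologue from eabi-ci.asm */
        mflr 0
        stw 0,20(1)
        /* ...constructor calls contributed by other object files... */
        lwz 0,20(1)          /* epilogue from eabi-cn.asm */
        mtlr 0
        addi 1,1,16
        blr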
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabi.asm b/gcc-4.4.3/gcc/config/rs6000/eabi.asm
new file mode 100644
index 000000000..f5581efeb
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabi.asm
@@ -0,0 +1,289 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001, 2008, 2009
+ * Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifndef __powerpc64__
+
+ .section ".got2","aw"
+ .align 2
+.LCTOC1 = . /* +32768 */
+
+/* Table of addresses */
+.Ltable = .-.LCTOC1
+ .long .LCTOC1 /* address we are really at */
+
+.Lsda = .-.LCTOC1
+ .long _SDA_BASE_ /* address of the first small data area */
+
+.Lsdas = .-.LCTOC1
+ .long __SDATA_START__ /* start of .sdata/.sbss section */
+
+.Lsdae = .-.LCTOC1
+ .long __SBSS_END__ /* end of .sdata/.sbss section */
+
+.Lsda2 = .-.LCTOC1
+ .long _SDA2_BASE_ /* address of the second small data area */
+
+.Lsda2s = .-.LCTOC1
+ .long __SDATA2_START__ /* start of .sdata2/.sbss2 section */
+
+.Lsda2e = .-.LCTOC1
+ .long __SBSS2_END__ /* end of .sdata2/.sbss2 section */
+
+#ifdef _RELOCATABLE
+.Lgots = .-.LCTOC1
+ .long __GOT_START__ /* Global offset table start */
+
+.Lgotm1 = .-.LCTOC1
+ .long _GLOBAL_OFFSET_TABLE_-4 /* end of GOT ptrs before blrl + 3 reserved words */
+
+.Lgotm2 = .-.LCTOC1
+ .long _GLOBAL_OFFSET_TABLE_+12 /* start of GOT ptrs after blrl + 3 reserved words */
+
+.Lgote = .-.LCTOC1
+ .long __GOT_END__ /* Global offset table end */
+
+.Lgot2s = .-.LCTOC1
+ .long __GOT2_START__ /* -mrelocatable GOT pointers start */
+
+.Lgot2e = .-.LCTOC1
+ .long __GOT2_END__ /* -mrelocatable GOT pointers end */
+
+.Lfixups = .-.LCTOC1
+ .long __FIXUP_START__ /* start of .fixup section */
+
+.Lfixupe = .-.LCTOC1
+ .long __FIXUP_END__ /* end of .fixup section */
+
+.Lctors = .-.LCTOC1
+ .long __CTOR_LIST__ /* start of .ctor section */
+
+.Lctore = .-.LCTOC1
+ .long __CTOR_END__ /* end of .ctor section */
+
+.Ldtors = .-.LCTOC1
+ .long __DTOR_LIST__ /* start of .dtor section */
+
+.Ldtore = .-.LCTOC1
+ .long __DTOR_END__ /* end of .dtor section */
+
+.Lexcepts = .-.LCTOC1
+ .long __EXCEPT_START__ /* start of .gcc_except_table section */
+
+.Lexcepte = .-.LCTOC1
+ .long __EXCEPT_END__ /* end of .gcc_except_table section */
+
+.Linit = .-.LCTOC1
+ .long .Linit_p /* address of variable to say we've been called */
+
+ .text
+ .align 2
+.Lptr:
+ .long .LCTOC1-.Laddr /* PC relative pointer to .got2 */
+#endif
+
+ .data
+ .align 2
+.Linit_p:
+ .long 0
+
+ .text
+
+FUNC_START(__eabi)
+
+/* Eliminate -mrelocatable code if not -mrelocatable, so that this file can
+ be assembled with other assemblers than GAS. */
+
+#ifndef _RELOCATABLE
+ addis 10,0,.Linit_p@ha /* init flag */
+ addis 11,0,.LCTOC1@ha /* load address of .LCTOC1 */
+ lwz 9,.Linit_p@l(10) /* init flag */
+ addi 11,11,.LCTOC1@l
+ cmplwi 2,9,0 /* init flag != 0? */
+ bnelr 2 /* return now, if we've been called already */
+ stw 1,.Linit_p@l(10) /* store a nonzero value in the done flag */
+
+#else /* -mrelocatable */
+ mflr 0
+ bl .Laddr /* get current address */
+.Laddr:
+ mflr 12 /* real address of .Laddr */
+ lwz 11,(.Lptr-.Laddr)(12) /* linker generated address of .LCTOC1 */
+ add 11,11,12 /* correct to real pointer */
+ lwz 12,.Ltable(11) /* get linker's idea of where .Laddr is */
+ lwz 10,.Linit(11) /* address of init flag */
+ subf. 12,12,11 /* calculate difference */
+ lwzx 9,10,12 /* done flag */
+ cmplwi 2,9,0 /* init flag != 0? */
+ mtlr 0 /* restore in case branch was taken */
+ bnelr 2 /* return now, if we've been called already */
+ stwx 1,10,12 /* store a nonzero value in the done flag */
+ beq+ 0,.Lsdata /* skip if we don't need to relocate */
+
+/* We need to relocate the .got2 pointers. */
+
+ lwz 3,.Lgot2s(11) /* GOT2 pointers start */
+ lwz 4,.Lgot2e(11) /* GOT2 pointers end */
+ add 3,12,3 /* adjust pointers */
+ add 4,12,4
+ bl FUNC_NAME(__eabi_convert) /* convert pointers in .got2 section */
+
+/* Fixup the .ctor section for static constructors */
+
+ lwz 3,.Lctors(11) /* constructors pointers start */
+ lwz 4,.Lctore(11) /* constructors pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert constructors */
+
+/* Fixup the .dtor section for static destructors */
+
+ lwz 3,.Ldtors(11) /* destructors pointers start */
+ lwz 4,.Ldtore(11) /* destructors pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert destructors */
+
+/* Fixup the .gcc_except_table section for G++ exceptions */
+
+ lwz 3,.Lexcepts(11) /* exception table pointers start */
+ lwz 4,.Lexcepte(11) /* exception table pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert exceptions */
+
+/* Fixup the addresses in the GOT below _GLOBAL_OFFSET_TABLE_-4 */
+
+ lwz 3,.Lgots(11) /* GOT table pointers start */
+ lwz 4,.Lgotm1(11) /* GOT table pointers below _GLOBAL_OFFSET_TABLE_-4 */
+ bl FUNC_NAME(__eabi_convert) /* convert lower GOT */
+
+/* Fixup the addresses in the GOT above _GLOBAL_OFFSET_TABLE_+12 */
+
+ lwz 3,.Lgotm2(11) /* GOT table pointers above _GLOBAL_OFFSET_TABLE_+12 */
+ lwz 4,.Lgote(11) /* GOT table pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert upper GOT */
+
+/* Fixup any user initialized pointers now (the compiler drops pointers to */
+/* each of the relocs that it does in the .fixup section). */
+
+.Lfix:
+ lwz 3,.Lfixups(11) /* fixup pointers start */
+ lwz 4,.Lfixupe(11) /* fixup pointers end */
+ bl FUNC_NAME(__eabi_uconvert) /* convert user initialized pointers */
+
+.Lsdata:
+ mtlr 0 /* restore link register */
+#endif /* _RELOCATABLE */
+
+/* Only load up register 13 if there is a .sdata and/or .sbss section */
+ lwz 3,.Lsdas(11) /* start of .sdata/.sbss section */
+ lwz 4,.Lsdae(11) /* end of .sdata/.sbss section */
+ cmpw 1,3,4 /* .sdata/.sbss section non-empty? */
+ beq- 1,.Lsda2l /* skip loading r13 */
+
+ lwz 13,.Lsda(11) /* load r13 with _SDA_BASE_ address */
+
+/* Only load up register 2 if there is a .sdata2 and/or .sbss2 section */
+
+.Lsda2l:
+ lwz 3,.Lsda2s(11) /* start of .sdata2/.sbss2 section */
+ lwz 4,.Lsda2e(11) /* end of .sdata2/.sbss2 section */
+ cmpw 1,3,4 /* .sdata2/.sbss2 section non-empty? */
+ beq+ 1,.Ldone /* skip loading r2 */
+
+ lwz 2,.Lsda2(11) /* load r2 with _SDA2_BASE_ address */
+
+/* Done adjusting pointers, return by way of doing the C++ global constructors. */
+
+.Ldone:
+ b FUNC_NAME(__init) /* do any C++ global constructors (which returns to caller) */
+FUNC_END(__eabi)
+
+/* Special subroutine to convert a bunch of pointers directly.
+ r0 has original link register
+ r3 has low pointer to convert
+ r4 has high pointer to convert
+ r5 .. r10 are scratch registers
+ r11 has the address of .LCTOC1 in it.
+ r12 has the value to add to each pointer
+ r13 .. r31 are unchanged */
+
+FUNC_START(__eabi_convert)
+ cmplw 1,3,4 /* any pointers to convert? */
+ subf 5,3,4 /* calculate number of words to convert */
+ bclr 4,4 /* return if no pointers */
+
+ srawi 5,5,2
+ addi 3,3,-4 /* start-4 for use with lwzu */
+ mtctr 5
+
+.Lcvt:
+ lwzu 6,4(3) /* pointer to convert */
+ cmpwi 0,6,0
+ beq- .Lcvt2 /* if pointer is null, don't convert */
+
+ add 6,6,12 /* convert pointer */
+ stw 6,0(3)
+.Lcvt2:
+ bdnz+ .Lcvt
+ blr
+
+FUNC_END(__eabi_convert)
+
+/* Special subroutine to convert the pointers the user has initialized. The
+ compiler has placed the address of the initialized pointer into the .fixup
+ section.
+
+ r0 has original link register
+ r3 has low pointer to convert
+ r4 has high pointer to convert
+ r5 .. r10 are scratch registers
+ r11 has the address of .LCTOC1 in it.
+ r12 has the value to add to each pointer
+ r13 .. r31 are unchanged */
+
+FUNC_START(__eabi_uconvert)
+ cmplw 1,3,4 /* any pointers to convert? */
+ subf 5,3,4 /* calculate number of words to convert */
+ bclr 4,4 /* return if no pointers */
+
+ srawi 5,5,2
+ addi 3,3,-4 /* start-4 for use with lwzu */
+ mtctr 5
+
+.Lucvt:
+ lwzu 6,4(3) /* next pointer to pointer to convert */
+ add 6,6,12 /* adjust pointer */
+ lwz 7,0(6) /* get the pointer it points to */
+ stw 6,0(3) /* store adjusted pointer */
+ add 7,7,12 /* adjust */
+ stw 7,0(6)
+ bdnz+ .Lucvt
+ blr
+
+FUNC_END(__eabi_uconvert)
+
+#endif
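A hedged sketch of the data __eabi_uconvert walks: under -mrelocatable, each statically initialized pointer gains a companion word in .fixup holding that pointer's own address (all names below are hypothetical):

        .data
    my_ptr: .long some_target    /* user-initialized pointer */
        .section ".fixup","aw"
        .long my_ptr             /* lets __eabi_uconvert locate and relocate the word above */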
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabi.h b/gcc-4.4.3/gcc/config/rs6000/eabi.h
new file mode 100644
index 000000000..3024a7586
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabi.h
@@ -0,0 +1,44 @@
+/* Core target definitions for GNU compiler
+ for IBM RS/6000 PowerPC targeted to embedded ELF systems.
+ Copyright (C) 1995, 1996, 2000, 2003, 2004, 2007 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Add -meabi to target flags. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI)
+
+/* Invoke an initializer function to set up the GOT. */
+#define NAME__MAIN "__eabi"
+#define INVOKE__main
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)");
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define ("__embedded__"); \
+ builtin_assert ("system=embedded"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
+ while (0)
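With NAME__MAIN and INVOKE__main defined as above, the compiler emits a call to __eabi at the top of main; roughly (a sketch, frame details assumed):

    main:
        stwu 1,-16(1)
        mflr 0
        stw 0,20(1)
        bl __eabi            /* set up r13/r2 and, under -mrelocatable, run the fixups */
        /* ...body of main... */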
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabialtivec.h b/gcc-4.4.3/gcc/config/rs6000/eabialtivec.h
new file mode 100644
index 000000000..417be97a4
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabialtivec.h
@@ -0,0 +1,30 @@
+/* Core target definitions for GNU compiler
+ for PowerPC targeted systems with AltiVec support.
+ Copyright (C) 2001, 2003, 2007 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Add -meabi and -maltivec to target flags. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI | MASK_ALTIVEC)
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded with AltiVec)");
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS rs6000_altivec_abi = 1
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabisim.h b/gcc-4.4.3/gcc/config/rs6000/eabisim.h
new file mode 100644
index 000000000..65bc14dff
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabisim.h
@@ -0,0 +1,54 @@
+/* Support for GCC on simulated PowerPC systems targeted to embedded ELF
+ systems.
+ Copyright (C) 1995, 1996, 2000, 2003, 2007 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Simulated)");
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define ("__embedded__"); \
+ builtin_define ("__simulator__"); \
+ builtin_assert ("system=embedded"); \
+ builtin_assert ("system=simulator"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+/* Make the simulator the default */
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_sim)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_sim)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_sim)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_sim)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_sim)"
diff --git a/gcc-4.4.3/gcc/config/rs6000/eabispe.h b/gcc-4.4.3/gcc/config/rs6000/eabispe.h
new file mode 100644
index 000000000..d3fc8a6be
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/eabispe.h
@@ -0,0 +1,54 @@
+/* Core target definitions for GNU compiler
+ for PowerPC embedded targeted systems with SPE support.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI \
+ | MASK_STRICT_ALIGN)
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded SPE)");
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+ if (rs6000_select[1].string == NULL) \
+ rs6000_cpu = PROCESSOR_PPC8540; \
+ if (!rs6000_explicit_options.spe_abi) \
+ rs6000_spe_abi = 1; \
+ if (!rs6000_explicit_options.float_gprs) \
+ rs6000_float_gprs = 1; \
+ if (!rs6000_explicit_options.spe) \
+ rs6000_spe = 1; \
+ if (target_flags & MASK_64BIT) \
+ error ("-m64 not supported in this configuration")
+
+/* The e500 ABI says that long doubles are either 128 bits or, if
+ implemented in any other size, the compiler/linker should error out.
+ We have no emulation libraries for 128-bit long doubles, and I hate
+ the dozens of failures on the regression suite. So I'm breaking the
+ ABI specification until I properly fix the emulation.
+
+ Enable these later.
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE (TARGET_SPE ? 128 : 64)
+*/
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc -mspe -me500"
diff --git a/gcc-4.4.3/gcc/config/rs6000/freebsd.h b/gcc-4.4.3/gcc/config/rs6000/freebsd.h
new file mode 100644
index 000000000..bc2a10bb7
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/freebsd.h
@@ -0,0 +1,74 @@
+/* Definitions for PowerPC running FreeBSD using the ELF format
+ Copyright (C) 2001, 2003, 2007, 2009 Free Software Foundation, Inc.
+ Contributed by David E. O'Brien <obrien@FreeBSD.org> and BSDi.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Override the defaults, which exist to force the proper definition. */
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_freebsd)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_freebsd)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_freebsd)"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_freebsd)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_freebsd)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_freebsd)"
+
+/* XXX: This is wrong for many platforms in sysv4.h.
+ We should work on getting that definition fixed. */
+#undef LINK_SHLIB_SPEC
+#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}"
+
+
+/************************[ Target stuff ]***********************************/
+
+/* Define the actual types of some ANSI-mandated types.
+ Needs to agree with <machine/ansi.h>. GCC defaults come from c-decl.c,
+ c-common.c, and config/<arch>/<arch>.h. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+/* rs6000.h gets this wrong for FreeBSD. We use the GCC defaults instead. */
+#undef WCHAR_TYPE
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (FreeBSD/PowerPC ELF)");
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+/* Define SVR4_ASM_SPEC; we use GAS by default. See svr4.h for details. */
+#define SVR4_ASM_SPEC \
+ "%{v:-V} %{Wa,*:%*}"
diff --git a/gcc-4.4.3/gcc/config/rs6000/gnu.h b/gcc-4.4.3/gcc/config/rs6000/gnu.h
new file mode 100644
index 000000000..0f329e53f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/gnu.h
@@ -0,0 +1,37 @@
+/* Definitions of target machine for GNU compiler,
+ for PowerPC machines running GNU.
+ Copyright (C) 2001, 2003, 2007 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_gnu)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_gnu)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_gnu)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_gnu)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_gnu)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU)");
diff --git a/gcc-4.4.3/gcc/config/rs6000/host-darwin.c b/gcc-4.4.3/gcc/config/rs6000/host-darwin.c
new file mode 100644
index 000000000..333f4884e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/host-darwin.c
@@ -0,0 +1,156 @@
+/* Darwin/powerpc host-specific hook definitions.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include <signal.h>
+#include <sys/ucontext.h>
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "toplev.h"
+#include "diagnostic.h"
+#include "config/host-darwin.h"
+
+static void segv_crash_handler (int);
+static void segv_handler (int, siginfo_t *, void *);
+static void darwin_rs6000_extra_signals (void);
+
+#ifndef HAVE_DECL_SIGALTSTACK
+/* This doesn't have a prototype in signal.h in 10.2.x and earlier;
+ it was fixed in later releases. */
+extern int sigaltstack(const struct sigaltstack *, struct sigaltstack *);
+#endif
+
+/* The fields of the mcontext_t type have acquired underscores in later
+ OS versions. */
+#ifdef HAS_MCONTEXT_T_UNDERSCORES
+#define MC_FLD(x) __ ## x
+#else
+#define MC_FLD(x) x
+#endif
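[Illustration, not part of the patch: one access spelled with MC_FLD compiles against both generations of headers.]

    /* With HAS_MCONTEXT_T_UNDERSCORES:  uc->uc_mcontext->__ss.__srr0
       Without:                          uc->uc_mcontext->ss.srr0  */
    insn = *(unsigned *) uc->uc_mcontext->MC_FLD (ss).MC_FLD (srr0);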
+
+#undef HOST_HOOKS_EXTRA_SIGNALS
+#define HOST_HOOKS_EXTRA_SIGNALS darwin_rs6000_extra_signals
+
+/* On Darwin/powerpc, hitting the stack limit turns into a SIGSEGV.
+ This code detects the difference between hitting the stack limit and
+ a true wild pointer dereference by looking at the instruction that
+ faulted; only a few kinds of instruction are used to access below
+ the previous bottom of the stack. */
+
+static void
+segv_crash_handler (int sig ATTRIBUTE_UNUSED)
+{
+ internal_error ("Segmentation Fault (code)");
+}
+
+static void
+segv_handler (int sig ATTRIBUTE_UNUSED,
+ siginfo_t *sip ATTRIBUTE_UNUSED,
+ void *scp)
+{
+ ucontext_t *uc = (ucontext_t *)scp;
+ sigset_t sigset;
+ unsigned faulting_insn;
+
+ /* The fault might have happened when trying to run some instruction, in
+ which case the next line will segfault _again_. Handle this case. */
+ signal (SIGSEGV, segv_crash_handler);
+ sigemptyset (&sigset);
+ sigaddset (&sigset, SIGSEGV);
+ sigprocmask (SIG_UNBLOCK, &sigset, NULL);
+
+ faulting_insn = *(unsigned *)uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0);
+
+ /* Note that this only has to work for GCC, so we don't have to deal
+ with all the possible cases (GCC has no AltiVec code, for
+ instance). It's complicated because Darwin allows stores to
+ below the stack pointer, and the prologue code takes advantage of
+ this. */
+
+ if ((faulting_insn & 0xFFFF8000) == 0x94218000 /* stwu %r1, -xxx(%r1) */
+ || (faulting_insn & 0xFC1F03FF) == 0x7C01016E /* stwux xxx, %r1, xxx */
+ || (faulting_insn & 0xFC1F8000) == 0x90018000 /* stw xxx, -yyy(%r1) */
+ || (faulting_insn & 0xFC1F8000) == 0xD8018000 /* stfd xxx, -yyy(%r1) */
+ || (faulting_insn & 0xFC1F8000) == 0xBC018000 /* stmw xxx, -yyy(%r1) */)
+ {
+ char *shell_name;
+
+ fnotice (stderr, "Out of stack space.\n");
+ shell_name = getenv ("SHELL");
+ if (shell_name != NULL)
+ shell_name = strrchr (shell_name, '/');
+ if (shell_name != NULL)
+ {
+ static const char * shell_commands[][2] = {
+ { "sh", "ulimit -S -s unlimited" },
+ { "bash", "ulimit -S -s unlimited" },
+ { "tcsh", "limit stacksize unlimited" },
+ { "csh", "limit stacksize unlimited" },
+ /* zsh doesn't have "unlimited", this will work under the
+ default configuration. */
+ { "zsh", "limit stacksize 32m" }
+ };
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE (shell_commands); i++)
+ if (strcmp (shell_commands[i][0], shell_name + 1) == 0)
+ {
+ fnotice (stderr,
+ "Try running '%s' in the shell to raise its limit.\n",
+ shell_commands[i][1]);
+ }
+ }
+
+ if (global_dc->abort_on_error)
+ fancy_abort (__FILE__, __LINE__, __FUNCTION__);
+
+ exit (FATAL_EXIT_CODE);
+ }
+
+ fprintf (stderr, "[address=%08lx pc=%08x]\n",
+ uc->uc_mcontext->MC_FLD(es).MC_FLD(dar),
+ uc->uc_mcontext->MC_FLD(ss).MC_FLD(srr0));
+ internal_error ("Segmentation Fault");
+ exit (FATAL_EXIT_CODE);
+}
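[A decoding sketch for the first mask/match pair above, assuming the standard stwu encoding (primary opcode 37, RS in bits 6-10, RA in bits 11-15, 16-bit displacement); the helper is hypothetical, not part of the patch.]

    /* 37 << 26 = 0x94000000; RS = r1 adds 0x00200000; RA = r1 adds
       0x00010000; a set sign bit (0x8000) in the displacement means
       a negative offset, i.e. a store below the stack pointer.  */
    static int
    is_stack_extend_stwu (unsigned insn)
    {
      return (insn & 0xFFFF8000) == 0x94218000;
    }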
+
+static void
+darwin_rs6000_extra_signals (void)
+{
+ struct sigaction sact;
+ stack_t sigstk;
+
+ sigstk.ss_sp = (char*)xmalloc (SIGSTKSZ);
+ sigstk.ss_size = SIGSTKSZ;
+ sigstk.ss_flags = 0;
+ if (sigaltstack (&sigstk, NULL) < 0)
+ fatal_error ("While setting up signal stack: %m");
+
+ sigemptyset(&sact.sa_mask);
+ sact.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ sact.sa_sigaction = segv_handler;
+ if (sigaction (SIGSEGV, &sact, 0) < 0)
+ fatal_error ("While setting up signal handler: %m");
+}
+
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.4.3/gcc/config/rs6000/host-ppc64-darwin.c b/gcc-4.4.3/gcc/config/rs6000/host-ppc64-darwin.c
new file mode 100644
index 000000000..49a920475
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/host-ppc64-darwin.c
@@ -0,0 +1,30 @@
+/* ppc64-darwin host-specific hook definitions.
+ Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "hosthooks.h"
+#include "hosthooks-def.h"
+#include "config/host-darwin.h"
+
+/* Darwin doesn't do anything special for ppc64 hosts; this file exists just
+ to include config/host-darwin.h. */
+
+const struct host_hooks host_hooks = HOST_HOOKS_INITIALIZER;
diff --git a/gcc-4.4.3/gcc/config/rs6000/libgcc-ppc-glibc.ver b/gcc-4.4.3/gcc/config/rs6000/libgcc-ppc-glibc.ver
new file mode 100644
index 000000000..c74d732e5
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/libgcc-ppc-glibc.ver
@@ -0,0 +1,55 @@
+%ifndef _SOFT_FLOAT
+%ifndef __powerpc64__
+%exclude {
+ __multc3
+ __divtc3
+ __powitf2
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+}
+
+GCC_4.1.0 {
+ # long double support
+ __multc3
+ __divtc3
+ __powitf2
+ __fixtfdi
+ __fixunstfdi
+ __floatditf
+
+%else
+GCC_3.4.4 {
+%endif
+%else
+GCC_4.2.0 {
+%endif
+
+ # long double support
+ __gcc_qadd
+ __gcc_qsub
+ __gcc_qmul
+ __gcc_qdiv
+
+%ifdef _SOFT_DOUBLE
+ __gcc_qneg
+ __gcc_qeq
+ __gcc_qne
+ __gcc_qgt
+ __gcc_qge
+ __gcc_qlt
+ __gcc_qle
+ __gcc_stoq
+ __gcc_dtoq
+ __gcc_qtos
+ __gcc_qtod
+ __gcc_qtoi
+ __gcc_qtou
+ __gcc_itoq
+ __gcc_utoq
+%endif
+
+%ifdef __NO_FPRS__
+ __gcc_qunord
+%endif
+}
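[Editorial note: the %-prefixed lines are conditionals resolved when libgcc's map file is generated, so each multilib ends up with a flat version script. A soft-float multilib, for instance, should reduce to a single GCC_4.2.0 block exporting the __gcc_q* software long-double routines; the seemingly unbalanced braces balance once the conditionals are applied.]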
diff --git a/gcc-4.4.3/gcc/config/rs6000/libgcc-ppc64.ver b/gcc-4.4.3/gcc/config/rs6000/libgcc-ppc64.ver
new file mode 100644
index 000000000..b27b4b492
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/libgcc-ppc64.ver
@@ -0,0 +1,7 @@
+GCC_3.4.4 {
+ # long double support
+ __gcc_qadd
+ __gcc_qsub
+ __gcc_qmul
+ __gcc_qdiv
+}
diff --git a/gcc-4.4.3/gcc/config/rs6000/linux-unwind.h b/gcc-4.4.3/gcc/config/rs6000/linux-unwind.h
new file mode 100644
index 000000000..ba573ae7e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/linux-unwind.h
@@ -0,0 +1,358 @@
+/* DWARF2 EH unwinding support for PowerPC and PowerPC64 Linux.
+ Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#define R_LR 65
+#define R_CR2 70
+#define R_VR0 77
+#define R_VRSAVE 109
+#define R_VSCR 110
+
+struct gcc_vregs
+{
+ __attribute__ ((vector_size (16))) int vr[32];
+#ifdef __powerpc64__
+ unsigned int pad1[3];
+ unsigned int vscr;
+ unsigned int vsave;
+ unsigned int pad2[3];
+#else
+ unsigned int vsave;
+ unsigned int pad[2];
+ unsigned int vscr;
+#endif
+};
+
+struct gcc_regs
+{
+ unsigned long gpr[32];
+ unsigned long nip;
+ unsigned long msr;
+ unsigned long orig_gpr3;
+ unsigned long ctr;
+ unsigned long link;
+ unsigned long xer;
+ unsigned long ccr;
+ unsigned long softe;
+ unsigned long trap;
+ unsigned long dar;
+ unsigned long dsisr;
+ unsigned long result;
+ unsigned long pad1[4];
+ double fpr[32];
+ unsigned int pad2;
+ unsigned int fpscr;
+#ifdef __powerpc64__
+ struct gcc_vregs *vp;
+#else
+ unsigned int pad3[2];
+#endif
+ struct gcc_vregs vregs;
+};
+
+struct gcc_ucontext
+{
+#ifdef __powerpc64__
+ unsigned long pad[28];
+#else
+ unsigned long pad[12];
+#endif
+ struct gcc_regs *regs;
+ struct gcc_regs rsave;
+};
+
+#ifdef __powerpc64__
+
+enum { SIGNAL_FRAMESIZE = 128 };
+
+/* If PC is at a sigreturn trampoline, return a pointer to the
+ regs. Otherwise return NULL. */
+
+static struct gcc_regs *
+get_regs (struct _Unwind_Context *context)
+{
+ const unsigned char *pc = context->ra;
+
+ /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */
+ /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */
+ if (*(unsigned int *) (pc + 0) != 0x38210000 + SIGNAL_FRAMESIZE
+ || *(unsigned int *) (pc + 8) != 0x44000002)
+ return NULL;
+ if (*(unsigned int *) (pc + 4) == 0x38000077)
+ {
+ struct sigframe {
+ char gap[SIGNAL_FRAMESIZE];
+ unsigned long pad[7];
+ struct gcc_regs *regs;
+ } *frame = (struct sigframe *) context->cfa;
+ return frame->regs;
+ }
+ else if (*(unsigned int *) (pc + 4) == 0x380000AC)
+ {
+ /* This works for 2.4 kernels, but not for 2.6 kernels with vdso
+ because pc isn't pointing into the stack. Can be removed when
+ no one is running 2.4.19 or 2.4.20, the first two ppc64
+ kernels released. */
+ struct rt_sigframe_24 {
+ int tramp[6];
+ void *pinfo;
+ struct gcc_ucontext *puc;
+ } *frame24 = (struct rt_sigframe_24 *) pc;
+
+ /* Test for magic value in *puc of vdso. */
+ if ((long) frame24->puc != -21 * 8)
+ return frame24->puc->regs;
+ else
+ {
+ /* This works for 2.4.21 and later kernels. */
+ struct rt_sigframe {
+ char gap[SIGNAL_FRAMESIZE];
+ struct gcc_ucontext uc;
+ unsigned long pad[2];
+ int tramp[6];
+ void *pinfo;
+ struct gcc_ucontext *puc;
+ } *frame = (struct rt_sigframe *) context->cfa;
+ return frame->uc.regs;
+ }
+ }
+ return NULL;
+}
+
+#else /* !__powerpc64__ */
+
+enum { SIGNAL_FRAMESIZE = 64 };
+
+static struct gcc_regs *
+get_regs (struct _Unwind_Context *context)
+{
+ const unsigned char *pc = context->ra;
+
+ /* li r0, 0x7777; sc (sigreturn old) */
+ /* li r0, 0x0077; sc (sigreturn new) */
+ /* li r0, 0x6666; sc (rt_sigreturn old) */
+ /* li r0, 0x00AC; sc (rt_sigreturn new) */
+ if (*(const unsigned int *) (pc + 4) != 0x44000002)
+ return NULL;
+ if (*(const unsigned int *) (pc + 0) == 0x38007777
+ || *(const unsigned int *) (pc + 0) == 0x38000077)
+ {
+ struct sigframe {
+ char gap[SIGNAL_FRAMESIZE];
+ unsigned long pad[7];
+ struct gcc_regs *regs;
+ } *frame = (struct sigframe *) context->cfa;
+ return frame->regs;
+ }
+ else if (*(const unsigned int *) (pc + 0) == 0x38006666
+ || *(const unsigned int *) (pc + 0) == 0x380000AC)
+ {
+ struct rt_sigframe {
+ char gap[SIGNAL_FRAMESIZE + 16];
+ char siginfo[128];
+ struct gcc_ucontext uc;
+ } *frame = (struct rt_sigframe *) context->cfa;
+ return frame->uc.regs;
+ }
+ return NULL;
+}
+#endif
+
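[For reference, not part of the patch: the magic words compared above are the fixed trampoline instructions. addi r1,r1,N is opcode 14 (0x38000000) with RT = RA = r1, giving 0x38210000 + SIGNAL_FRAMESIZE; li r0,K is addi r0,0,K, giving 0x38000000 + K, hence 0x38000077 and 0x380000AC for the sigreturn and rt_sigreturn syscall numbers; and sc is always 0x44000002.]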
+/* Find an entry in the process auxiliary vector. The canonical way to
+ test for VMX is to look at AT_HWCAP. */
+
+static long
+ppc_linux_aux_vector (long which)
+{
+ /* __libc_stack_end holds the original stack passed to a process. */
+ extern long *__libc_stack_end;
+ long argc;
+ char **argv;
+ char **envp;
+ struct auxv
+ {
+ long a_type;
+ long a_val;
+ } *auxp;
+
+ /* The Linux kernel puts argc first on the stack. */
+ argc = __libc_stack_end[0];
+ /* Followed by argv, NULL terminated. */
+ argv = (char **) __libc_stack_end + 1;
+ /* Followed by environment string pointers, NULL terminated. */
+ envp = argv + argc + 1;
+ while (*envp++)
+ continue;
+ /* Followed by the aux vector, zero terminated. */
+ for (auxp = (struct auxv *) envp; auxp->a_type != 0; ++auxp)
+ if (auxp->a_type == which)
+ return auxp->a_val;
+ return 0;
+}
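[Usage sketch, not part of the patch; 16 is AT_HWCAP, and the two masks are assumed to be the kernel's PPC_FEATURE_HAS_FPU and PPC_FEATURE_HAS_ALTIVEC bits, matching how hwcap is tested below.]

    long hwcap = ppc_linux_aux_vector (16);    /* AT_HWCAP */
    int have_fpu = (hwcap & 0x08000000) != 0;  /* FPU regs saved?  */
    int have_vmx = (hwcap & 0x10000000) != 0;  /* AltiVec regs saved?  */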
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#define MD_FALLBACK_FRAME_STATE_FOR ppc_fallback_frame_state
+
+static _Unwind_Reason_Code
+ppc_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ static long hwcap = 0;
+ struct gcc_regs *regs = get_regs (context);
+ long new_cfa;
+ int i;
+
+ if (regs == NULL)
+ return _URC_END_OF_STACK;
+
+ new_cfa = regs->gpr[STACK_POINTER_REGNUM];
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = STACK_POINTER_REGNUM;
+ fs->regs.cfa_offset = new_cfa - (long) context->cfa;
+
+ for (i = 0; i < 32; i++)
+ if (i != STACK_POINTER_REGNUM)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = (long) &regs->gpr[i] - new_cfa;
+ }
+
+ fs->regs.reg[R_CR2].how = REG_SAVED_OFFSET;
+ /* CR? regs are always 32-bit and PPC is big-endian, so in 64-bit
+ libgcc loc.offset needs to point to the low 32 bits of regs->ccr. */
+ fs->regs.reg[R_CR2].loc.offset = (long) &regs->ccr - new_cfa
+ + sizeof (long) - 4;
+
+ fs->regs.reg[R_LR].how = REG_SAVED_OFFSET;
+ fs->regs.reg[R_LR].loc.offset = (long) &regs->link - new_cfa;
+
+ fs->regs.reg[ARG_POINTER_REGNUM].how = REG_SAVED_OFFSET;
+ fs->regs.reg[ARG_POINTER_REGNUM].loc.offset = (long) &regs->nip - new_cfa;
+ fs->retaddr_column = ARG_POINTER_REGNUM;
+ fs->signal_frame = 1;
+
+ if (hwcap == 0)
+ {
+ hwcap = ppc_linux_aux_vector (16);
+ /* These will already be set if we found AT_HWCAP. A nonzero
+ value stops us looking again if for some reason we couldn't
+ find AT_HWCAP. */
+#ifdef __powerpc64__
+ hwcap |= 0xc0000000;
+#else
+ hwcap |= 0x80000000;
+#endif
+ }
+
+ /* If we have a FPU... */
+ if (hwcap & 0x08000000)
+ for (i = 0; i < 32; i++)
+ {
+ fs->regs.reg[i + 32].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i + 32].loc.offset = (long) &regs->fpr[i] - new_cfa;
+ }
+
+ /* If we have a VMX unit... */
+ if (hwcap & 0x10000000)
+ {
+ struct gcc_vregs *vregs;
+#ifdef __powerpc64__
+ vregs = regs->vp;
+#else
+ vregs = &regs->vregs;
+#endif
+ if (regs->msr & (1 << 25))
+ {
+ for (i = 0; i < 32; i++)
+ {
+ fs->regs.reg[i + R_VR0].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i + R_VR0].loc.offset
+ = (long) &vregs->vr[i] - new_cfa;
+ }
+
+ fs->regs.reg[R_VSCR].how = REG_SAVED_OFFSET;
+ fs->regs.reg[R_VSCR].loc.offset = (long) &vregs->vscr - new_cfa;
+ }
+
+ fs->regs.reg[R_VRSAVE].how = REG_SAVED_OFFSET;
+ fs->regs.reg[R_VRSAVE].loc.offset = (long) &vregs->vsave - new_cfa;
+ }
+
+ /* If we have SPE register high parts... we check at compile time to
+ avoid expanding the code for all other PowerPC targets. */
+#ifdef __SPE__
+ for (i = 0; i < 32; i++)
+ {
+ fs->regs.reg[i + FIRST_PSEUDO_REGISTER - 1].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i + FIRST_PSEUDO_REGISTER - 1].loc.offset
+ = (long) &regs->vregs - new_cfa + 4 * i;
+ }
+#endif
+
+ return _URC_NO_REASON;
+}
+
+#define MD_FROB_UPDATE_CONTEXT frob_update_context
+
+static void
+frob_update_context (struct _Unwind_Context *context, _Unwind_FrameState *fs ATTRIBUTE_UNUSED)
+{
+ const unsigned int *pc = (const unsigned int *) context->ra;
+
+ /* Fix up for 2.6.12 - 2.6.16 Linux kernels that have vDSO, but don't
+ have S flag in it. */
+#ifdef __powerpc64__
+ /* addi r1, r1, 128; li r0, 0x0077; sc (sigreturn) */
+ /* addi r1, r1, 128; li r0, 0x00AC; sc (rt_sigreturn) */
+ if (pc[0] == 0x38210000 + SIGNAL_FRAMESIZE
+ && (pc[1] == 0x38000077 || pc[1] == 0x380000AC)
+ && pc[2] == 0x44000002)
+ _Unwind_SetSignalFrame (context, 1);
+#else
+ /* li r0, 0x7777; sc (sigreturn old) */
+ /* li r0, 0x0077; sc (sigreturn new) */
+ /* li r0, 0x6666; sc (rt_sigreturn old) */
+ /* li r0, 0x00AC; sc (rt_sigreturn new) */
+ if ((pc[0] == 0x38007777 || pc[0] == 0x38000077
+ || pc[0] == 0x38006666 || pc[0] == 0x380000AC)
+ && pc[1] == 0x44000002)
+ _Unwind_SetSignalFrame (context, 1);
+#endif
+
+#ifdef __powerpc64__
+ if (fs->regs.reg[2].how == REG_UNSAVED)
+ {
+ /* If the current unwind info (FS) does not contain explicit info
+ saving R2, then we have to do a minor amount of code reading to
+ figure out if it was saved. The big problem here is that the
+ code that does the save/restore is generated by the linker, so
+ we have no good way to determine at compile time what to do. */
+ unsigned int *insn
+ = (unsigned int *) _Unwind_GetGR (context, R_LR);
+ if (insn && *insn == 0xE8410028)
+ _Unwind_SetGRPtr (context, 2, context->cfa + 40);
+ }
+#endif
+}
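[Decoding note, not part of the patch: 0xE8410028 is ld r2,40(r1), i.e. opcode 58 (0xE8000000) with RT = 2 (0x00400000), RA = 1 (0x00010000) and displacement 40 (0x28). That is the TOC restore the linker emits after a call through a stub, which is why the saved R2 is then fetched from CFA + 40.]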
diff --git a/gcc-4.4.3/gcc/config/rs6000/linux.h b/gcc-4.4.3/gcc/config/rs6000/linux.h
new file mode 100644
index 000000000..4831273c5
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/linux.h
@@ -0,0 +1,130 @@
+/* Definitions of target machine for GNU compiler,
+ for PowerPC machines running Linux.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
+ Contributed by Michael Meissner (meissner@cygnus.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Linux doesn't support saving and restoring 64-bit regs in a 32-bit
+ process. */
+#define OS_MISSING_POWERPC64 1
+
+/* We use glibc _mcount for profiling. */
+#define NO_PROFILE_COUNTERS 1
+
+/* glibc has float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS (OPTION_GLIBC)
+
+/* Whether we have sincos that follows the GNU extension. */
+#undef TARGET_HAS_SINCOS
+#define TARGET_HAS_SINCOS (OPTION_GLIBC)
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define_std ("powerpc"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_linux)"
+
+/* The GNU C++ standard library currently requires _GNU_SOURCE being
+ defined on glibc-based systems. This temporary hack accomplishes this,
+ it should go away as soon as libstdc++-v3 has a real fix. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+#undef LINK_SHLIB_SPEC
+#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_linux)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_linux)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_linux)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_linux)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)"
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU/Linux)");
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* For backward compatibility, we must continue to use the AIX
+ structure return convention. */
+#undef DRAFT_V4_STRUCT_RET
+#define DRAFT_V4_STRUCT_RET 1
+
+/* We are 32-bit all the time, so optimize a little. */
+#undef TARGET_64BIT
+#define TARGET_64BIT 0
+
+/* We don't need to generate entries in .fixup, except when
+ -mrelocatable or -mrelocatable-lib is given. */
+#undef RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP \
+ (target_flags & target_flags_explicit & MASK_RELOCATABLE)
+
+#define TARGET_ASM_FILE_END file_end_indicate_exec_stack
+
+#define TARGET_POSIX_IO
+
+#define MD_UNWIND_SUPPORT "config/rs6000/linux-unwind.h"
+
+#ifdef TARGET_LIBC_PROVIDES_SSP
+/* ppc32 glibc provides __stack_chk_guard in -0x7008(2). */
+#define TARGET_THREAD_SSP_OFFSET -0x7008
+#endif
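[Editorial note: with -fstack-protector this should let the guard be loaded with a single lwz from -0x7008 off r2, the 32-bit TLS register, instead of a load through the global __stack_chk_guard symbol.]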
+
+#define POWERPC_LINUX
+
+/* ppc linux has 128-bit long double support in glibc 2.4 and later. */
+#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/linux64.h b/gcc-4.4.3/gcc/config/rs6000/linux64.h
new file mode 100644
index 000000000..a83cc24db
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/linux64.h
@@ -0,0 +1,534 @@
+/* Definitions of target machine for GNU compiler,
+ for 64 bit PowerPC linux.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
+ 2009 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef RS6000_BI_ARCH
+
+#undef DEFAULT_ABI
+#define DEFAULT_ABI ABI_AIX
+
+#undef TARGET_64BIT
+#define TARGET_64BIT 1
+
+#define DEFAULT_ARCH64_P 1
+#define RS6000_BI_ARCH_P 0
+
+#else
+
+#define DEFAULT_ARCH64_P (TARGET_DEFAULT & MASK_64BIT)
+#define RS6000_BI_ARCH_P 1
+
+#endif
+
+#ifdef IN_LIBGCC2
+#undef TARGET_64BIT
+#ifdef __powerpc64__
+#define TARGET_64BIT 1
+#else
+#define TARGET_64BIT 0
+#endif
+#endif
+
+#undef TARGET_AIX
+#define TARGET_AIX TARGET_64BIT
+
+#ifdef HAVE_LD_NO_DOT_SYMS
+/* New ABI uses a local sym for the function entry point. */
+extern int dot_symbols;
+#undef DOT_SYMBOLS
+#define DOT_SYMBOLS dot_symbols
+#endif
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_POWER6
+#undef PROCESSOR_DEFAULT64
+#define PROCESSOR_DEFAULT64 PROCESSOR_POWER6
+
+/* We don't need to generate entries in .fixup, except when
+ -mrelocatable or -mrelocatable-lib is given. */
+#undef RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP \
+ (target_flags & target_flags_explicit & MASK_RELOCATABLE)
+
+#undef RS6000_ABI_NAME
+#define RS6000_ABI_NAME "linux"
+
+#define INVALID_64BIT "-m%s not supported in this configuration"
+#define INVALID_32BIT INVALID_64BIT
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+ do \
+ { \
+ if (!rs6000_explicit_options.alignment) \
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL; \
+ if (TARGET_64BIT) \
+ { \
+ if (DEFAULT_ABI != ABI_AIX) \
+ { \
+ rs6000_current_abi = ABI_AIX; \
+ error (INVALID_64BIT, "call"); \
+ } \
+ dot_symbols = !strcmp (rs6000_abi_name, "aixdesc"); \
+ if (target_flags & MASK_RELOCATABLE) \
+ { \
+ target_flags &= ~MASK_RELOCATABLE; \
+ error (INVALID_64BIT, "relocatable"); \
+ } \
+ if (target_flags & MASK_EABI) \
+ { \
+ target_flags &= ~MASK_EABI; \
+ error (INVALID_64BIT, "eabi"); \
+ } \
+ if (TARGET_PROTOTYPE) \
+ { \
+ TARGET_PROTOTYPE = 0; \
+ error (INVALID_64BIT, "prototype"); \
+ } \
+ if ((target_flags & MASK_POWERPC64) == 0) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ error ("-m64 requires a PowerPC64 cpu"); \
+ } \
+ } \
+ else \
+ { \
+ if (!RS6000_BI_ARCH_P) \
+ error (INVALID_32BIT, "32"); \
+ if (TARGET_PROFILE_KERNEL) \
+ { \
+ target_flags &= ~MASK_PROFILE_KERNEL; \
+ error (INVALID_32BIT, "profile-kernel"); \
+ } \
+ } \
+ } \
+ while (0)
+
+#ifdef RS6000_BI_ARCH
+
+#undef OVERRIDE_OPTIONS
+#define OVERRIDE_OPTIONS \
+ rs6000_override_options (((TARGET_DEFAULT ^ target_flags) & MASK_64BIT) \
+ ? (char *) 0 : TARGET_CPU_DEFAULT)
+
+#endif
+
+#undef ASM_DEFAULT_SPEC
+#undef ASM_SPEC
+#undef LINK_OS_LINUX_SPEC
+
+#ifndef RS6000_BI_ARCH
+#define ASM_DEFAULT_SPEC "-mppc64"
+#define ASM_SPEC "%(asm_spec64) %(asm_spec_common)"
+#define LINK_OS_LINUX_SPEC "%(link_os_linux_spec64)"
+#else
+#if DEFAULT_ARCH64_P
+#define ASM_DEFAULT_SPEC "-mppc%{!m32:64}"
+#define ASM_SPEC "%{m32:%(asm_spec32)}%{!m32:%(asm_spec64)} %(asm_spec_common)"
+#define LINK_OS_LINUX_SPEC "%{m32:%(link_os_linux_spec32)}%{!m32:%(link_os_linux_spec64)}"
+#else
+#define ASM_DEFAULT_SPEC "-mppc%{m64:64}"
+#define ASM_SPEC "%{!m64:%(asm_spec32)}%{m64:%(asm_spec64)} %(asm_spec_common)"
+#define LINK_OS_LINUX_SPEC "%{!m64:%(link_os_linux_spec32)}%{m64:%(link_os_linux_spec64)}"
+#endif
+#endif
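[To read these specs: %{m32:X} substitutes X when -m32 was given, %{!m32:X} when it was not, and %(name) splices in another named spec. On a biarch compiler defaulting to 64-bit, ASM_DEFAULT_SPEC therefore comes out as -mppc under -m32 and as -mppc64 otherwise.]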
+
+#define ASM_SPEC32 "-a32 %{n} %{T} %{Ym,*} %{Yd,*} \
+%{mrelocatable} %{mrelocatable-lib} %{fpic:-K PIC} %{fPIC:-K PIC} \
+%{memb} %{!memb: %{msdata: -memb} %{msdata=eabi: -memb}} \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-freebsd: -mbig} \
+ %{mcall-i960-old: -mlittle} \
+ %{mcall-linux: -mbig} \
+ %{mcall-gnu: -mbig} \
+ %{mcall-netbsd: -mbig} \
+}}}}"
+
+#define ASM_SPEC64 "-a64"
+
+#define ASM_SPEC_COMMON "%(asm_cpu) \
+%{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}} \
+%{v:-V} %{Qy:} %{!Qn:-Qy} %{Wa,*:%*} \
+%{mlittle} %{mlittle-endian} %{mbig} %{mbig-endian}"
+
+#undef SUBSUBTARGET_EXTRA_SPECS
+#define SUBSUBTARGET_EXTRA_SPECS \
+ { "asm_spec_common", ASM_SPEC_COMMON }, \
+ { "asm_spec32", ASM_SPEC32 }, \
+ { "asm_spec64", ASM_SPEC64 }, \
+ { "link_os_linux_spec32", LINK_OS_LINUX_SPEC32 }, \
+ { "link_os_linux_spec64", LINK_OS_LINUX_SPEC64 },
+
+#undef MULTILIB_DEFAULTS
+#if DEFAULT_ARCH64_P
+#define MULTILIB_DEFAULTS { "m64" }
+#else
+#define MULTILIB_DEFAULTS { "m32" }
+#endif
+
+#ifndef RS6000_BI_ARCH
+
+/* 64-bit PowerPC Linux is always big-endian. */
+#undef TARGET_LITTLE_ENDIAN
+#define TARGET_LITTLE_ENDIAN 0
+
+/* 64-bit PowerPC Linux always has a TOC. */
+#undef TARGET_TOC
+#define TARGET_TOC 1
+
+/* Some things from sysv4.h we don't do when 64 bit. */
+#undef TARGET_RELOCATABLE
+#define TARGET_RELOCATABLE 0
+#undef TARGET_EABI
+#define TARGET_EABI 0
+#undef TARGET_PROTOTYPE
+#define TARGET_PROTOTYPE 0
+#undef RELOCATABLE_NEEDS_FIXUP
+#define RELOCATABLE_NEEDS_FIXUP 0
+
+#endif
+
+/* We use glibc _mcount for profiling. */
+#define NO_PROFILE_COUNTERS 1
+#define PROFILE_HOOK(LABEL) \
+ do { if (TARGET_64BIT) output_profile_hook (LABEL); } while (0)
+
+/* PowerPC64 Linux word-aligns FP doubles when -malign-power is given. */
+#undef ADJUST_FIELD_ALIGN
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ ((TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
+ ? 128 \
+ : (TARGET_64BIT \
+ && TARGET_ALIGN_NATURAL == 0 \
+ && TYPE_MODE (strip_array_types (TREE_TYPE (FIELD))) == DFmode) \
+ ? MIN ((COMPUTED), 32) \
+ : (COMPUTED))
+
+/* PowerPC64 Linux increases natural record alignment to doubleword if
+ the first field is an FP double, but only in power alignment mode. */
+#undef ROUND_TYPE_ALIGN
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TARGET_64BIT \
+ && (TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TARGET_ALIGN_NATURAL == 0) \
+ ? rs6000_special_round_type_align (STRUCT, COMPUTED, SPECIFIED) \
+ : MAX ((COMPUTED), (SPECIFIED)))
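[A sketch of the intended layout in power alignment mode (assumed behavior, not part of the patch):]

    struct a { int i; double d; };  /* d word-aligned: offset 4, size 12.  */
    struct b { double d; int i; };  /* leading double: align 8, size 16.  */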
+
+/* Use the default for compiling target libs. */
+#ifdef IN_TARGET_LIBS
+#undef TARGET_ALIGN_NATURAL
+#define TARGET_ALIGN_NATURAL 1
+#endif
+
+/* Indicate that jump tables go in the text section. */
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION TARGET_64BIT
+
+/* The linux ppc64 ABI isn't explicit on whether aggregates smaller
+ than a doubleword should be padded upward or downward. You could
+ reasonably assume that they follow the normal rules for structure
+ layout treating the parameter area as any other block of memory,
+ then map the reg param area to registers, i.e. pad upward.
+ Setting both of the following defines results in this behavior.
+ Setting just the first one will result in aggregates that fit in a
+ doubleword being padded downward, and others being padded upward.
+ Not a bad idea as this results in struct { int x; } being passed
+ the same way as an int. */
+#define AGGREGATE_PADDING_FIXED TARGET_64BIT
+#define AGGREGATES_PAD_UPWARD_ALWAYS 0
+
+/* Specify padding for the last element of a block move between
+ registers and memory. FIRST is nonzero if this is the only
+ element. */
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (!(FIRST) ? upward : FUNCTION_ARG_PADDING (MODE, TYPE))
+
+/* __throw will restore its own return address to be the same as the
+ return address of the function that the throw is being made to.
+ This is unfortunate, because we want to check the original
+ return address to see if we need to restore the TOC.
+ So we have to squirrel it away with this. */
+#define SETUP_FRAME_ADDRESSES() \
+ do { if (TARGET_64BIT) rs6000_aix_emit_builtin_unwind_init (); } while (0)
+
+/* Override svr4.h */
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Linux doesn't support saving and restoring 64-bit regs in a 32-bit
+ process. */
+#define OS_MISSING_POWERPC64 !TARGET_64BIT
+
+/* glibc has float and long double forms of math functions. */
+#undef TARGET_C99_FUNCTIONS
+#define TARGET_C99_FUNCTIONS (OPTION_GLIBC)
+
+/* Whether we have sincos that follows the GNU extension. */
+#undef TARGET_HAS_SINCOS
+#define TARGET_HAS_SINCOS (OPTION_GLIBC)
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ if (TARGET_64BIT) \
+ { \
+ builtin_define ("__PPC__"); \
+ builtin_define ("__PPC64__"); \
+ builtin_define ("__powerpc__"); \
+ builtin_define ("__powerpc64__"); \
+ builtin_assert ("cpu=powerpc64"); \
+ builtin_assert ("machine=powerpc64"); \
+ } \
+ else \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define_std ("powerpc"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
+ } \
+ while (0)
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_linux)"
+
+/* The GNU C++ standard library currently requires _GNU_SOURCE being
+ defined on glibc-based systems. This temporary hack accomplishes this,
+ it should go away as soon as libstdc++-v3 has a real fix. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+#undef LINK_SHLIB_SPEC
+#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_linux)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_linux)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_linux)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_linux)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)"
+
+#define GLIBC_DYNAMIC_LINKER32 "/lib/ld.so.1"
+#define GLIBC_DYNAMIC_LINKER64 "/lib64/ld64.so.1"
+#define UCLIBC_DYNAMIC_LINKER32 "/lib/ld-uClibc.so.0"
+#define UCLIBC_DYNAMIC_LINKER64 "/lib/ld64-uClibc.so.0"
+#if UCLIBC_DEFAULT
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:%{muclibc:%e-mglibc and -muclibc used together}" G ";:" U "}"
+#else
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:%{mglibc:%e-mglibc and -muclibc used together}" U ";:" G "}"
+#endif
+#define LINUX_DYNAMIC_LINKER32 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER32, UCLIBC_DYNAMIC_LINKER32)
+#define LINUX_DYNAMIC_LINKER64 \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER64, UCLIBC_DYNAMIC_LINKER64)
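[Worked through: in the default glibc configuration the spec reads "if -muclibc was given (erroring out if -mglibc was given as well) use the uClibc loader, otherwise the glibc one", so a plain 64-bit link gets -dynamic-linker /lib64/ld64.so.1.]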
+
+
+#define LINK_OS_LINUX_SPEC32 "-m elf32ppclinux %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER32 "}}}"
+
+#define LINK_OS_LINUX_SPEC64 "-m elf64ppc %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER64 "}}}"
+
+#undef TOC_SECTION_ASM_OP
+#define TOC_SECTION_ASM_OP \
+ (TARGET_64BIT \
+ ? "\t.section\t\".toc\",\"aw\"" \
+ : "\t.section\t\".got\",\"aw\"")
+
+#undef MINIMAL_TOC_SECTION_ASM_OP
+#define MINIMAL_TOC_SECTION_ASM_OP \
+ (TARGET_64BIT \
+ ? "\t.section\t\".toc1\",\"aw\"" \
+ : ((TARGET_RELOCATABLE || flag_pic) \
+ ? "\t.section\t\".got2\",\"aw\"" \
+ : "\t.section\t\".got1\",\"aw\""))
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC64 GNU/Linux)");
+
+/* Must be at least as big as our pointer type. */
+#undef SIZE_TYPE
+#define SIZE_TYPE (TARGET_64BIT ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_64BIT ? "long int" : "int")
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE (TARGET_64BIT ? "int" : "long int")
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* PowerPC no-op instruction. */
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE (TARGET_64BIT ? "nop" : "cror 31,31,31")
+
+#undef RS6000_MCOUNT
+#define RS6000_MCOUNT "_mcount"
+
+#ifdef __powerpc64__
+/* _init and _fini functions are built from bits spread across many
+ object files, each potentially with a different TOC pointer. For
+ that reason, place a nop after the call so that the linker can
+ restore the TOC pointer if a TOC adjusting call stub is needed. */
+#if DOT_SYMBOLS
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n" \
+" bl ." #FUNC "\n" \
+" nop\n" \
+" .previous");
+#else
+#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \
+ asm (SECTION_OP "\n" \
+" bl " #FUNC "\n" \
+" nop\n" \
+" .previous");
+#endif
+#endif
+
+/* FP save and restore routines. */
+#undef SAVE_FP_PREFIX
+#define SAVE_FP_PREFIX (TARGET_64BIT ? "._savef" : "_savefpr_")
+#undef SAVE_FP_SUFFIX
+#define SAVE_FP_SUFFIX (TARGET_64BIT ? "" : "_l")
+#undef RESTORE_FP_PREFIX
+#define RESTORE_FP_PREFIX (TARGET_64BIT ? "._restf" : "_restfpr_")
+#undef RESTORE_FP_SUFFIX
+#define RESTORE_FP_SUFFIX (TARGET_64BIT ? "" : "_l")
+
+/* Dwarf2 debugging. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* This is how to declare the size of a function. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do \
+ { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ fputs ("\t.size\t", (FILE)); \
+ if (TARGET_64BIT && DOT_SYMBOLS) \
+ putc ('.', (FILE)); \
+ assemble_name ((FILE), (FNAME)); \
+ fputs (",.-", (FILE)); \
+ rs6000_output_function_entry (FILE, FNAME); \
+ putc ('\n', (FILE)); \
+ } \
+ } \
+ while (0)
+
+/* Return nonzero if this entry is to be written into the constant
+ pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF
+ or a CONST containing one of them. If -mfp-in-toc (the default),
+ we also do this for floating-point constants. We actually can only
+ do this if the FP formats of the target and host machines are the
+ same, but we can't check that since not every file that uses
+ GO_IF_LEGITIMATE_ADDRESS_P includes real.h. We also do this when
+ we can write the entry into the TOC and the entry is not larger
+ than a TOC entry. */
+
+#undef ASM_OUTPUT_SPECIAL_POOL_ENTRY_P
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \
+ (TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF \
+ || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST_INT \
+ && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
+ || (GET_CODE (X) == CONST_DOUBLE \
+ && ((TARGET_64BIT \
+ && (TARGET_MINIMAL_TOC \
+ || (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
+ && ! TARGET_NO_FP_IN_TOC))) \
+ || (!TARGET_64BIT \
+ && !TARGET_NO_FP_IN_TOC \
+ && !TARGET_RELOCATABLE \
+ && SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
+ && BITS_PER_WORD == HOST_BITS_PER_INT)))))
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations. */
+#undef ASM_PREFERRED_EH_DATA_FORMAT
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+ ((TARGET_64BIT || flag_pic || TARGET_RELOCATABLE) \
+ ? (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel \
+ | (TARGET_64BIT ? DW_EH_PE_udata8 : DW_EH_PE_sdata4)) \
+ : DW_EH_PE_absptr)
+
+/* For backward compatibility, we must continue to use the AIX
+ structure return convention. */
+#undef DRAFT_V4_STRUCT_RET
+#define DRAFT_V4_STRUCT_RET (!TARGET_64BIT)
+
+#define TARGET_ASM_FILE_END rs6000_elf_end_indicate_exec_stack
+
+#define TARGET_POSIX_IO
+
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "%{static:--start-group} %G %L %{static:--end-group}%{!static:%G}"
+
+/* Use --as-needed -lgcc_s for eh support. */
+#ifdef HAVE_LD_AS_NEEDED
+#define USE_LD_AS_NEEDED 1
+#endif
+
+#define MD_UNWIND_SUPPORT "config/rs6000/linux-unwind.h"
+
+#ifdef TARGET_LIBC_PROVIDES_SSP
+/* ppc32 glibc provides __stack_chk_guard in -0x7008(2),
+ ppc64 glibc provides it at -0x7010(13). */
+#define TARGET_THREAD_SSP_OFFSET (TARGET_64BIT ? -0x7010 : -0x7008)
+#endif
+
+#define POWERPC_LINUX
+
+/* ppc{32,64} linux has 128-bit long double support in glibc 2.4 and later. */
+#ifdef TARGET_DEFAULT_LONG_DOUBLE_128
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 128
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/linux64.opt b/gcc-4.4.3/gcc/config/rs6000/linux64.opt
new file mode 100644
index 000000000..f408eb88d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/linux64.opt
@@ -0,0 +1,24 @@
+; Options for 64-bit PowerPC Linux.
+;
+; Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mprofile-kernel
+Target Report Mask(PROFILE_KERNEL)
+Call mcount for profiling before a function prologue
diff --git a/gcc-4.4.3/gcc/config/rs6000/linuxaltivec.h b/gcc-4.4.3/gcc/config/rs6000/linuxaltivec.h
new file mode 100644
index 000000000..a6e1523ea
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/linuxaltivec.h
@@ -0,0 +1,30 @@
+/* Definitions of target machine for GNU compiler,
+ for AltiVec enhanced PowerPC machines running GNU/Linux.
+ Copyright (C) 2001, 2003, 2007 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC AltiVec GNU/Linux)");
+
+/* Override rs6000.h and sysv4.h definition. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_ALTIVEC)
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS rs6000_altivec_abi = 1
diff --git a/gcc-4.4.3/gcc/config/rs6000/linuxspe.h b/gcc-4.4.3/gcc/config/rs6000/linuxspe.h
new file mode 100644
index 000000000..3cef9d9dc
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/linuxspe.h
@@ -0,0 +1,44 @@
+/* Definitions of target machine for GNU compiler,
+ for PowerPC e500 machines running GNU/Linux.
+ Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldy@quesejoda.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC E500 GNU/Linux)");
+
+/* Override rs6000.h and sysv4.h definition. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_STRICT_ALIGN)
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+ if (rs6000_select[1].string == NULL) \
+ rs6000_cpu = PROCESSOR_PPC8540; \
+ if (!rs6000_explicit_options.spe_abi) \
+ rs6000_spe_abi = 1; \
+ if (!rs6000_explicit_options.float_gprs) \
+ rs6000_float_gprs = 1; \
+ if (!rs6000_explicit_options.spe) \
+ rs6000_spe = 1; \
+ if (target_flags & MASK_64BIT) \
+ error ("-m64 not supported in this configuration")
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc -mspe -me500"
diff --git a/gcc-4.4.3/gcc/config/rs6000/lynx.h b/gcc-4.4.3/gcc/config/rs6000/lynx.h
new file mode 100644
index 000000000..30dd2999c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/lynx.h
@@ -0,0 +1,123 @@
+/* Definitions for Rs6000 running LynxOS.
+ Copyright (C) 1995, 1996, 2000, 2002, 2003, 2004, 2005, 2007
+ Free Software Foundation, Inc.
+ Contributed by David Henkel-Wallace, Cygnus Support (gumby@cygnus.com)
+ Rewritten by Adam Nemet, LynuxWorks Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Override the definition in sysv4.h. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fputs (" (PowerPC/LynxOS)", stderr);
+
+/* Undefine the definition to enable the LynxOS default from the
+ top-level lynx.h. */
+
+#undef SUBTARGET_EXTRA_SPECS
+
+/* Get rid of the spec definitions from rs6000/sysv4.h. */
+
+#undef CPP_SPEC
+#define CPP_SPEC \
+"%{msoft-float: -D_SOFT_FLOAT} \
+ %(cpp_cpu) \
+ %(cpp_os_lynx)"
+
+/* LynxOS only supports big-endian on PPC, so we override the
+ definition from sysv4.h. Since the LynxOS 4.0 compiler was set to
+ return every structure in memory regardless of its size, we have
+ to emulate the same behavior here by disabling the SVR4 structure
+ return. */
+
+#undef CC1_SPEC
+#define CC1_SPEC \
+"%{G*} %{mno-sdata:-msdata=none} \
+ %{maltivec:-mabi=altivec} \
+ -maix-struct-return"
+
+#undef ASM_SPEC
+#define ASM_SPEC \
+"%(asm_cpu) \
+ %{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}}"
+
+#undef STARTFILE_SPEC
+#undef ENDFILE_SPEC
+#undef LIB_SPEC
+#undef LINK_SPEC
+#define LINK_SPEC \
+"%{!msdata=none:%{G*}} %{msdata=none:-G0} \
+ %(link_os_lynx)"
+
+/* Override the definition from sysv4.h. */
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__BIG_ENDIAN__"); \
+ builtin_define ("__powerpc__"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ builtin_define ("__PPC__"); \
+ } \
+ while (0)
+
+/* Override the rs6000.h definition. */
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override the rs6000.h definition. */
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* LynxOS does not do anything with .fixup, and we do not want to
+ create writable sections for linkonce.r and linkonce.t. */
+
+#undef RELOCATABLE_NEEDS_FIXUP
+
+/* Override these from rs6000.h with the generic definition. */
+
+#undef SIZE_TYPE
+#undef ASM_OUTPUT_ALIGN
+#undef PREFERRED_DEBUGGING_TYPE
+
+/* The file rs6000.c defines TARGET_HAVE_TLS unconditionally to the
+ value of HAVE_AS_TLS. HAVE_AS_TLS is true because gas support for
+ TLS is detected by configure. Override the definition to false. */
+
+#undef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+
+#ifdef CRT_BEGIN
+/* This function is part of crtbegin*.o, which is at the beginning of
+ the link, and is called from .fini, which is usually toward the end
+ of the executable. Make it longcall so that we don't limit the
+ text size of the executables to 32M. */
+
+static void __do_global_dtors_aux (void) __attribute__ ((longcall));
+#endif /* CRT_BEGIN */
+
+#ifdef CRT_END
+/* Similarly here. This function resides in crtend*.o, which is toward
+ the end of the link, and is called from .init, which is at the
+ beginning. */
+
+static void __do_global_ctors_aux (void) __attribute__ ((longcall));
+#endif /* CRT_END */
diff --git a/gcc-4.4.3/gcc/config/rs6000/milli.exp b/gcc-4.4.3/gcc/config/rs6000/milli.exp
new file mode 100644
index 000000000..ea3a2b757
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/milli.exp
@@ -0,0 +1,7 @@
+#!
+__mulh 0x3100
+__mull 0x3180
+__divss 0x3200
+__divus 0x3280
+__quoss 0x3300
+__quous 0x3380
diff --git a/gcc-4.4.3/gcc/config/rs6000/mpc.md b/gcc-4.4.3/gcc/config/rs6000/mpc.md
new file mode 100644
index 000000000..a839f9366
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/mpc.md
@@ -0,0 +1,111 @@
+;; Scheduling description for Motorola PowerPC processor cores.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "mpc,mpcfp")
+(define_cpu_unit "iu_mpc,mciu_mpc" "mpc")
+(define_cpu_unit "fpu_mpc" "mpcfp")
+(define_cpu_unit "lsu_mpc,bpu_mpc" "mpc")
+
+;; MPCCORE 32-bit SCIU, MCIU, LSU, FPU, BPU
+;; 505/801/821/823
+
+(define_insn_reservation "mpccore-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "mpccore"))
+ "lsu_mpc")
+
+(define_insn_reservation "mpccore-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "mpccore"))
+ "lsu_mpc")
+
+(define_insn_reservation "mpccore-fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "mpccore"))
+ "lsu_mpc")
+
+(define_insn_reservation "mpccore-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc")
+
+(define_insn_reservation "mpccore-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,iu_mpc")
+
+(define_insn_reservation "mpccore-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,iu_mpc,iu_mpc")
+
+(define_insn_reservation "mpccore-imul" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "mpccore"))
+ "mciu_mpc")
+
+; Divide latency varies greatly, from 2 to 11 cycles; use 6 as the average.
+(define_insn_reservation "mpccore-idiv" 6
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "mpccore"))
+ "mciu_mpc*6")
+
+(define_insn_reservation "mpccore-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "mpccore"))
+ "iu_mpc,nothing,bpu_mpc")
+
+(define_insn_reservation "mpccore-fpcompare" 2
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc,bpu_mpc")
+
+(define_insn_reservation "mpccore-fp" 4
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*2")
+
+(define_insn_reservation "mpccore-dmul" 5
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*5")
+
+(define_insn_reservation "mpccore-sdiv" 10
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*10")
+
+(define_insn_reservation "mpccore-ddiv" 17
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "mpccore"))
+ "fpu_mpc*17")
+
+(define_insn_reservation "mpccore-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "mpccore"))
+ "bpu_mpc")
+
+(define_insn_reservation "mpccore-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr,mfcr,mtcr,isync")
+ (eq_attr "cpu" "mpccore"))
+ "bpu_mpc")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/netbsd.h b/gcc-4.4.3/gcc/config/rs6000/netbsd.h
new file mode 100644
index 000000000..637060e63
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/netbsd.h
@@ -0,0 +1,91 @@
+/* Definitions of target machine for GNU compiler,
+ for PowerPC NetBSD systems.
+ Copyright 2002, 2003, 2007, 2008 Free Software Foundation, Inc.
+ Contributed by Wasabi Systems, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_OS_CPP_BUILTINS /* FIXME: sysv4.h should not define this! */
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ NETBSD_OS_CPP_BUILTINS_ELF(); \
+ builtin_define ("__powerpc__"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ } \
+ while (0)
+
+/* Override the default from rs6000.h to avoid conflicts with macros
+ defined in NetBSD header files. */
+
+#undef RS6000_CPU_CPP_ENDIAN_BUILTINS
+#define RS6000_CPU_CPP_ENDIAN_BUILTINS() \
+ do \
+ { \
+ if (BYTES_BIG_ENDIAN) \
+ { \
+ builtin_define ("__BIG_ENDIAN__"); \
+ builtin_assert ("machine=bigendian"); \
+ } \
+ else \
+ { \
+ builtin_define ("__LITTLE_ENDIAN__"); \
+ builtin_assert ("machine=littleendian"); \
+ } \
+ } \
+ while (0)
+
+/* Make GCC agree with <machine/ansi.h>. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+/* Undo the spec mess from sysv4.h, and just define the specs
+ the way NetBSD systems actually expect. */
+
+#undef CPP_SPEC
+#define CPP_SPEC NETBSD_CPP_SPEC
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{!msdata=none:%{G*}} %{msdata=none:-G0} \
+ %(netbsd_link_spec)"
+
+#define NETBSD_ENTRY_POINT "_start"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC NETBSD_STARTFILE_SPEC
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "%(netbsd_endfile_spec)"
+
+#undef LIB_SPEC
+#define LIB_SPEC NETBSD_LIB_SPEC
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "netbsd_link_spec", NETBSD_LINK_SPEC_ELF }, \
+ { "netbsd_entry_point", NETBSD_ENTRY_POINT }, \
+ { "netbsd_endfile_spec", NETBSD_ENDFILE_SPEC },
+
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (NetBSD/powerpc ELF)");
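
As a rough illustration of what the definitions above mean for user
code: NetBSD/powerpc translation units see __powerpc__ and exactly one
of the endianness macros predefined, and size_t/ptrdiff_t follow the
int-sized choices made to agree with <machine/ansi.h>. A compile-time
sketch (an assumption-laden example, not part of the patch):

    #include <stddef.h>

    #ifndef __powerpc__
    #error "TARGET_OS_CPP_BUILTINS should define __powerpc__"
    #endif

    #if !defined (__BIG_ENDIAN__) && !defined (__LITTLE_ENDIAN__)
    #error "RS6000_CPU_CPP_ENDIAN_BUILTINS defines one of these"
    #endif

    /* SIZE_TYPE and PTRDIFF_TYPE above pin these to int-sized types.  */
    typedef char size_t_check[sizeof (size_t) == sizeof (unsigned int) ? 1 : -1];
    typedef char ptrdiff_check[sizeof (ptrdiff_t) == sizeof (int) ? 1 : -1];
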
diff --git a/gcc-4.4.3/gcc/config/rs6000/paired.h b/gcc-4.4.3/gcc/config/rs6000/paired.h
new file mode 100644
index 000000000..57c6ca4fd
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/paired.h
@@ -0,0 +1,75 @@
+/* PowerPC 750CL user include file.
+ Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+ Contributed by Revital Eres (eres@il.ibm.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _PAIRED_H
+#define _PAIRED_H
+
+#define vector __attribute__((vector_size(8)))
+
+#define paired_msub __builtin_paired_msub
+#define paired_madd __builtin_paired_madd
+#define paired_nmsub __builtin_paired_nmsub
+#define paired_nmadd __builtin_paired_nmadd
+#define paired_sum0 __builtin_paired_sum0
+#define paired_sum1 __builtin_paired_sum1
+#define paired_div __builtin_paired_divv2sf3
+#define paired_add __builtin_paired_addv2sf3
+#define paired_sub __builtin_paired_subv2sf3
+#define paired_mul __builtin_paired_mulv2sf3
+#define paired_muls0 __builtin_paired_muls0
+#define paired_muls1 __builtin_paired_muls1
+#define paired_madds0 __builtin_paired_madds0
+#define paired_madds1 __builtin_paired_madds1
+#define paired_merge00 __builtin_paired_merge00
+#define paired_merge01 __builtin_paired_merge01
+#define paired_merge10 __builtin_paired_merge10
+#define paired_merge11 __builtin_paired_merge11
+#define paired_abs __builtin_paired_absv2sf2
+#define paired_nabs __builtin_paired_nabsv2sf2
+#define paired_neg __builtin_paired_negv2sf2
+#define paired_sqrt __builtin_paired_sqrtv2sf2
+#define paired_res __builtin_paired_resv2sf2
+#define paired_stx __builtin_paired_stx
+#define paired_lx __builtin_paired_lx
+#define paired_cmpu0 __builtin_paired_cmpu0
+#define paired_cmpu1 __builtin_paired_cmpu1
+#define paired_sel __builtin_paired_selv2sf4
+
+/* Condition register codes for Paired predicates. */
+#define LT 0
+#define GT 1
+#define EQ 2
+#define UN 3
+
+#define paired_cmpu0_un(a,b) __builtin_paired_cmpu0 (UN, (a), (b))
+#define paired_cmpu0_eq(a,b) __builtin_paired_cmpu0 (EQ, (a), (b))
+#define paired_cmpu0_lt(a,b) __builtin_paired_cmpu0 (LT, (a), (b))
+#define paired_cmpu0_gt(a,b) __builtin_paired_cmpu0 (GT, (a), (b))
+#define paired_cmpu1_un(a,b) __builtin_paired_cmpu1 (UN, (a), (b))
+#define paired_cmpu1_eq(a,b) __builtin_paired_cmpu1 (EQ, (a), (b))
+#define paired_cmpu1_lt(a,b) __builtin_paired_cmpu1 (LT, (a), (b))
+#define paired_cmpu1_gt(a,b) __builtin_paired_cmpu1 (GT, (a), (b))
+
+#endif /* _PAIRED_H */
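
A short usage sketch for this header, assuming a compiler configured
with 750CL paired-single support enabled (e.g. the -mpaired option);
each intrinsic operates on both lanes of the two-float vector at once:

    #include <paired.h>

    /* With the `vector' macro above, `vector float' is a pair of
       floats (vector_size (8)).  */
    vector float
    scale_and_add (vector float a, vector float b, vector float c)
    {
      /* ps_madd: a * b + c in each lane, as one fused instruction.  */
      return paired_madd (a, b, c);
    }
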
diff --git a/gcc-4.4.3/gcc/config/rs6000/paired.md b/gcc-4.4.3/gcc/config/rs6000/paired.md
new file mode 100644
index 000000000..ed4233830
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/paired.md
@@ -0,0 +1,519 @@
+;; PowerPC paired single and double hummer description
+;; Copyright (C) 2007, 2009
+;; Free Software Foundation, Inc.
+;; Contributed by David Edelsohn <edelsohn@gnu.org> and Revital Eres
+;; <eres@il.ibm.com>
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with this program; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constants
+[(UNSPEC_INTERHI_V2SF 330)
+ (UNSPEC_INTERLO_V2SF 331)
+ (UNSPEC_EXTEVEN_V2SF 332)
+ (UNSPEC_EXTODD_V2SF 333)
+])
+
+(define_insn "negv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_neg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "sqrtv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (sqrt:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_rsqrte %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "absv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_abs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "nabsv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (neg:V2SF (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_nabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "addv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_add %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "subv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_sub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "mulv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_mul %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "resv2sf2"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_PAIRED_FLOAT && flag_finite_math_only"
+ "ps_res %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "divv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_div %0,%1,%2"
+ [(set_attr "type" "sdiv")])
+
+(define_insn "paired_madds0"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (plus:SF (mult:SF (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)])))
+ (vec_select:SF (match_operand:V2SF 3 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)])))
+ (plus:SF (mult:SF (vec_select:SF (match_dup 1)
+ (parallel [(const_int 1)]))
+ (vec_select:SF (match_dup 2)
+ (parallel [(const_int 0)])))
+ (vec_select:SF (match_dup 3)
+ (parallel [(const_int 1)])))))]
+ "TARGET_PAIRED_FLOAT && TARGET_FUSED_MADD"
+ "ps_madds0 %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_madds1"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (plus:SF (mult:SF (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)])))
+ (vec_select:SF (match_operand:V2SF 3 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)])))
+ (plus:SF (mult:SF (vec_select:SF (match_dup 1)
+ (parallel [(const_int 1)]))
+ (vec_select:SF (match_dup 2)
+ (parallel [(const_int 1)])))
+ (vec_select:SF (match_dup 3)
+ (parallel [(const_int 1)])))))]
+ "TARGET_PAIRED_FLOAT && TARGET_FUSED_MADD"
+ "ps_madds1 %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_madd"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (plus:V2SF (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f"))
+ (match_operand:V2SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT && TARGET_FUSED_MADD"
+ "ps_madd %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_msub"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (minus:V2SF (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f"))
+ (match_operand:V2SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT && TARGET_FUSED_MADD"
+ "ps_msub %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_nmadd"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (neg:V2SF (plus:V2SF (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f"))
+ (match_operand:V2SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_PAIRED_FLOAT && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (SFmode)"
+ "ps_nmadd %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_nmsub"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (neg:V2SF (minus:V2SF (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "%f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f"))
+ (match_operand:V2SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_PAIRED_FLOAT && TARGET_FUSED_MADD
+ && HONOR_SIGNED_ZEROS (DFmode)"
+ "ps_nmsub %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn "selv2sf4"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (if_then_else:SF (ge (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_operand:V2SF 3 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)])))
+ (if_then_else:SF (ge (vec_select:SF (match_dup 1)
+ (parallel [(const_int 1)]))
+ (match_dup 4))
+ (vec_select:SF (match_dup 2)
+ (parallel [(const_int 1)]))
+ (vec_select:SF (match_dup 3)
+ (parallel [(const_int 1)])))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_sel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "*movv2sf_paired"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "=Z,f,f,o,r,r,f")
+ (match_operand:V2SF 1 "input_operand" "f,Z,f,r,o,r,W"))]
+ "TARGET_PAIRED_FLOAT
+ && (register_operand (operands[0], V2SFmode)
+ || register_operand (operands[1], V2SFmode))"
+{
+ switch (which_alternative)
+ {
+ case 0: return "psq_stx %1,%y0,0,0";
+ case 1: return "psq_lx %0,%y1,0,0";
+ case 2: return "ps_mr %0,%1";
+ case 3: return "#";
+ case 4: return "#";
+ case 5: return "#";
+ case 6: return "#";
+ default: gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "fpstore,fpload,fp,*,*,*,*")])
+
+(define_insn "paired_stx"
+ [(set (match_operand:V2SF 0 "memory_operand" "=Z")
+ (match_operand:V2SF 1 "gpc_reg_operand" "f"))]
+ "TARGET_PAIRED_FLOAT"
+ "psq_stx %1,%y0,0,0"
+ [(set_attr "type" "fpstore")])
+
+(define_insn "paired_lx"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2SF 1 "memory_operand" "Z"))]
+ "TARGET_PAIRED_FLOAT"
+ "psq_lx %0,%y1,0,0"
+ [(set_attr "type" "fpload")])
+
+
+(define_split
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "")
+ (match_operand:V2SF 1 "input_operand" ""))]
+ "TARGET_PAIRED_FLOAT && reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+ {
+ rs6000_split_multireg_move (operands[0], operands[1]); DONE;
+ })
+
+(define_insn "paired_cmpu0"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (vec_select:SF
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_cmpu0 %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "paired_cmpu1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (vec_select:SF
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))
+ (vec_select:SF
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_cmpu1 %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "paired_merge00"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_merge00 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_merge01"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_merge01 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_merge10"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_merge10 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_merge11"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))
+ (vec_select:SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_merge11 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_sum0"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF (plus:SF (vec_select:SF
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)])))
+ (vec_select:SF
+ (match_operand:V2SF 3 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_sum0 %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_sum1"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF (vec_select:SF
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)]))
+ (plus:SF (vec_select:SF
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)]))
+ (vec_select:SF
+ (match_operand:V2SF 3 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)])))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_sum1 %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "paired_muls0"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (mult:V2SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (vec_duplicate:V2SF
+ (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 0)])))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_muls0 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+
+(define_insn "paired_muls1"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (mult:V2SF (match_operand:V2SF 2 "gpc_reg_operand" "f")
+ (vec_duplicate:V2SF
+ (vec_select:SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (parallel [(const_int 1)])))))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_muls1 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+(define_expand "vec_initv2sf"
+ [(match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (match_operand 1 "" "")]
+ "TARGET_PAIRED_FLOAT"
+{
+ paired_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "*vconcatsf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (vec_concat:V2SF
+ (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+ "ps_merge00 %0, %1, %2"
+ [(set_attr "type" "fp")])
+
+(define_expand "sminv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (smin:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+
+ emit_insn (gen_subv2sf3 (tmp, operands[1], operands[2]));
+ emit_insn (gen_selv2sf4 (operands[0], tmp, operands[2], operands[1], CONST0_RTX (SFmode)));
+ DONE;
+})
+
+(define_expand "smaxv2sf3"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (smax:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT"
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+
+ emit_insn (gen_subv2sf3 (tmp, operands[1], operands[2]));
+ emit_insn (gen_selv2sf4 (operands[0], tmp, operands[1], operands[2], CONST0_RTX (SFmode)));
+ DONE;
+})
+
+(define_expand "reduc_smax_v2sf"
+ [(match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")]
+ "TARGET_PAIRED_FLOAT"
+{
+ rtx tmp_swap = gen_reg_rtx (V2SFmode);
+ rtx tmp = gen_reg_rtx (V2SFmode);
+
+ emit_insn (gen_paired_merge10 (tmp_swap, operands[1], operands[1]));
+ emit_insn (gen_subv2sf3 (tmp, operands[1], tmp_swap));
+ emit_insn (gen_selv2sf4 (operands[0], tmp, operands[1], tmp_swap, CONST0_RTX (SFmode)));
+
+ DONE;
+})
+
+(define_expand "reduc_smin_v2sf"
+ [(match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")]
+ "TARGET_PAIRED_FLOAT"
+{
+ rtx tmp_swap = gen_reg_rtx (V2SFmode);
+ rtx tmp = gen_reg_rtx (V2SFmode);
+
+ emit_insn (gen_paired_merge10 (tmp_swap, operands[1], operands[1]));
+ emit_insn (gen_subv2sf3 (tmp, operands[1], tmp_swap));
+ emit_insn (gen_selv2sf4 (operands[0], tmp, tmp_swap, operands[1], CONST0_RTX (SFmode)));
+
+ DONE;
+})
+
+(define_expand "vec_interleave_highv2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")]
+ UNSPEC_INTERHI_V2SF))]
+ "TARGET_PAIRED_FLOAT"
+ "
+{
+ emit_insn (gen_paired_merge00 (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_interleave_lowv2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")]
+ UNSPEC_INTERLO_V2SF))]
+ "TARGET_PAIRED_FLOAT"
+ "
+{
+ emit_insn (gen_paired_merge11 (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_extract_evenv2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")]
+ UNSPEC_EXTEVEN_V2SF))]
+ "TARGET_PAIRED_FLOAT"
+ "
+{
+ emit_insn (gen_paired_merge00 (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_expand "vec_extract_oddv2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")]
+ UNSPEC_EXTODD_V2SF))]
+ "TARGET_PAIRED_FLOAT"
+ "
+{
+ emit_insn (gen_paired_merge11 (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+
+(define_expand "reduc_splus_v2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2SF 1 "gpc_reg_operand" "f"))]
+ "TARGET_PAIRED_FLOAT"
+ "
+{
+ emit_insn (gen_paired_sum1 (operands[0], operands[1], operands[1], operands[1]));
+ DONE;
+}")
+
+(define_expand "movmisalignv2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2SF 1 "gpc_reg_operand" "f"))]
+ "TARGET_PAIRED_FLOAT"
+{
+ paired_expand_vector_move (operands);
+ DONE;
+})
+
+(define_expand "vcondv2sf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:V2SF
+ (match_operator 3 "gpc_reg_operand"
+ [(match_operand:V2SF 4 "gpc_reg_operand" "f")
+ (match_operand:V2SF 5 "gpc_reg_operand" "f")])
+ (match_operand:V2SF 1 "gpc_reg_operand" "f")
+ (match_operand:V2SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_PAIRED_FLOAT && flag_unsafe_math_optimizations"
+ "
+{
+ if (paired_emit_vector_cond_expr (operands[0], operands[1], operands[2],
+ operands[3], operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+}")
+
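
The smin/smax expanders above never branch: they form a - b once and
let ps_sel pick an operand in each lane based on the sign of the
difference. A scalar C analogue of the same idea (illustrative only;
like the patterns themselves, the subtraction trick assumes finite
operands):

    static inline float
    smin_like (float a, float b)
    {
      float diff = a - b;
      return diff >= 0.0f ? b : a;  /* selv2sf4 (diff, b, a, 0.0) */
    }

    static inline float
    smax_like (float a, float b)
    {
      float diff = a - b;
      return diff >= 0.0f ? a : b;  /* selv2sf4 (diff, a, b, 0.0) */
    }
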
diff --git a/gcc-4.4.3/gcc/config/rs6000/power4.md b/gcc-4.4.3/gcc/config/rs6000/power4.md
new file mode 100644
index 000000000..0afa17e87
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/power4.md
@@ -0,0 +1,410 @@
+;; Scheduling description for IBM Power4 and PowerPC 970 processors.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Sources: IBM Red Book and White Paper on POWER4
+
+;; The POWER4 has 2 iu, 2 fpu, 2 lsu per engine (2 engines per chip).
+;; Instructions that update more than one register get broken into two
+;; (split) or more internal ops. The chip can issue up to 5
+;; internal ops per cycle.
+
+(define_automaton "power4iu,power4fpu,power4vec,power4misc")
+
+(define_cpu_unit "iu1_power4,iu2_power4" "power4iu")
+(define_cpu_unit "lsu1_power4,lsu2_power4" "power4misc")
+(define_cpu_unit "fpu1_power4,fpu2_power4" "power4fpu")
+(define_cpu_unit "bpu_power4,cru_power4" "power4misc")
+(define_cpu_unit "vec_power4,vecperm_power4" "power4vec")
+(define_cpu_unit "du1_power4,du2_power4,du3_power4,du4_power4,du5_power4"
+ "power4misc")
+
+(define_reservation "lsq_power4"
+ "(du1_power4,lsu1_power4)\
+ |(du2_power4,lsu2_power4)\
+ |(du3_power4,lsu2_power4)\
+ |(du4_power4,lsu1_power4)")
+
+(define_reservation "lsuq_power4"
+ "(du1_power4+du2_power4,lsu1_power4+iu2_power4)\
+ |(du2_power4+du3_power4,lsu2_power4+iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4+iu1_power4)")
+
+(define_reservation "iq_power4"
+ "(du1_power4,iu1_power4)\
+ |(du2_power4,iu2_power4)\
+ |(du3_power4,iu2_power4)\
+ |(du4_power4,iu1_power4)")
+
+(define_reservation "fpq_power4"
+ "(du1_power4,fpu1_power4)\
+ |(du2_power4,fpu2_power4)\
+ |(du3_power4,fpu2_power4)\
+ |(du4_power4,fpu1_power4)")
+
+(define_reservation "vq_power4"
+ "(du1_power4,vec_power4)\
+ |(du2_power4,vec_power4)\
+ |(du3_power4,vec_power4)\
+ |(du4_power4,vec_power4)")
+
+(define_reservation "vpq_power4"
+ "(du1_power4,vecperm_power4)\
+ |(du2_power4,vecperm_power4)\
+ |(du3_power4,vecperm_power4)\
+ |(du4_power4,vecperm_power4)")
+
+
+; Dispatch slots are allocated in program order.
+(absence_set "du1_power4" "du2_power4,du3_power4,du4_power4,du5_power4")
+(absence_set "du2_power4" "du3_power4,du4_power4,du5_power4")
+(absence_set "du3_power4" "du4_power4,du5_power4")
+(absence_set "du4_power4" "du5_power4")
+
+
+; Load/store
+(define_insn_reservation "power4-load" 4 ; 3
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "power4"))
+ "lsq_power4")
+
+(define_insn_reservation "power4-load-ext" 5
+ (and (eq_attr "type" "load_ext")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,lsu1_power4,nothing,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,lsu2_power4,nothing,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4,nothing,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-load-ext-update" 5
+ (and (eq_attr "type" "load_ext_u")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4+iu2_power4,nothing,nothing,iu2_power4")
+
+(define_insn_reservation "power4-load-ext-update-indexed" 5
+ (and (eq_attr "type" "load_ext_ux")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ iu1_power4,lsu2_power4+iu1_power4,nothing,nothing,iu2_power4")
+
+(define_insn_reservation "power4-load-update-indexed" 3
+ (and (eq_attr "type" "load_ux")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ iu1_power4,lsu2_power4+iu2_power4")
+
+(define_insn_reservation "power4-load-update" 4 ; 3
+ (and (eq_attr "type" "load_u")
+ (eq_attr "cpu" "power4"))
+ "lsuq_power4")
+
+(define_insn_reservation "power4-fpload" 6 ; 5
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "power4"))
+ "lsq_power4")
+
+(define_insn_reservation "power4-fpload-update" 6 ; 5
+ (and (eq_attr "type" "fpload_u,fpload_ux")
+ (eq_attr "cpu" "power4"))
+ "lsuq_power4")
+
+(define_insn_reservation "power4-vecload" 6 ; 5
+ (and (eq_attr "type" "vecload")
+ (eq_attr "cpu" "power4"))
+ "lsq_power4")
+
+(define_insn_reservation "power4-store" 12
+ (and (eq_attr "type" "store")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,lsu1_power4,iu1_power4)\
+ |(du2_power4,lsu2_power4,iu2_power4)\
+ |(du3_power4,lsu2_power4,iu2_power4)\
+ |(du4_power4,lsu1_power4,iu1_power4)")
+
+(define_insn_reservation "power4-store-update" 12
+ (and (eq_attr "type" "store_u")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,lsu1_power4+iu2_power4,iu1_power4)\
+ |(du2_power4+du3_power4,lsu2_power4+iu2_power4,iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4+iu1_power4,iu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4,iu1_power4,iu2_power4)")
+
+(define_insn_reservation "power4-store-update-indexed" 12
+ (and (eq_attr "type" "store_ux")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ iu1_power4,lsu2_power4+iu2_power4,iu2_power4")
+
+(define_insn_reservation "power4-fpstore" 12
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,lsu1_power4,fpu1_power4)\
+ |(du2_power4,lsu2_power4,fpu2_power4)\
+ |(du3_power4,lsu2_power4,fpu2_power4)\
+ |(du4_power4,lsu1_power4,fpu1_power4)")
+
+(define_insn_reservation "power4-fpstore-update" 12
+ (and (eq_attr "type" "fpstore_u,fpstore_ux")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,lsu1_power4+iu2_power4,fpu1_power4)\
+ |(du2_power4+du3_power4,lsu2_power4+iu2_power4,fpu2_power4)\
+ |(du3_power4+du4_power4,lsu2_power4+iu1_power4,fpu2_power4)")
+
+(define_insn_reservation "power4-vecstore" 12
+ (and (eq_attr "type" "vecstore")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,lsu1_power4,vec_power4)\
+ |(du2_power4,lsu2_power4,vec_power4)\
+ |(du3_power4,lsu2_power4,vec_power4)\
+ |(du4_power4,lsu1_power4,vec_power4)")
+
+(define_insn_reservation "power4-llsc" 11
+ (and (eq_attr "type" "load_l,store_c,sync")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4")
+
+
+; Integer latency is 2 cycles
+(define_insn_reservation "power4-integer" 2
+ (and (eq_attr "type" "integer,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "power4"))
+ "iq_power4")
+
+(define_insn_reservation "power4-two" 2
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,nothing,iu1_power4)\
+ |(du4_power4+du1_power4,iu1_power4,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-three" 2
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4+du3_power4,\
+ iu1_power4,nothing,iu2_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4+du4_power4,\
+ iu2_power4,nothing,iu2_power4,nothing,iu1_power4)\
+ |(du3_power4+du4_power4+du1_power4,\
+ iu2_power4,nothing,iu1_power4,nothing,iu1_power4)\
+ |(du4_power4+du1_power4+du2_power4,\
+ iu1_power4,nothing,iu2_power4,nothing,iu2_power4)")
+
+(define_insn_reservation "power4-insert" 4
+ (and (eq_attr "type" "insert_word")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,nothing,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,nothing,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,nothing,iu1_power4)")
+
+(define_insn_reservation "power4-cmp" 3
+ (and (eq_attr "type" "cmp,fast_compare")
+ (eq_attr "cpu" "power4"))
+ "iq_power4")
+
+(define_insn_reservation "power4-compare" 2
+ (and (eq_attr "type" "compare,delayed_compare,var_delayed_compare")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4,iu1_power4)")
+
+(define_bypass 4 "power4-compare" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_insn_reservation "power4-lmul-cmp" 7
+ (and (eq_attr "type" "lmul_compare")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4*6,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4*6,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4*6,iu1_power4)")
+
+(define_bypass 10 "power4-lmul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_insn_reservation "power4-imul-cmp" 5
+ (and (eq_attr "type" "imul_compare")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4+du2_power4,iu1_power4*4,iu2_power4)\
+ |(du2_power4+du3_power4,iu2_power4*4,iu2_power4)\
+ |(du3_power4+du4_power4,iu2_power4*4,iu1_power4)")
+
+(define_bypass 8 "power4-imul-cmp" "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_insn_reservation "power4-lmul" 7
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,iu1_power4*6)\
+ |(du2_power4,iu2_power4*6)\
+ |(du3_power4,iu2_power4*6)\
+ |(du4_power4,iu1_power4*6)")
+
+(define_insn_reservation "power4-imul" 5
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,iu1_power4*4)\
+ |(du2_power4,iu2_power4*4)\
+ |(du3_power4,iu2_power4*4)\
+ |(du4_power4,iu1_power4*4)")
+
+(define_insn_reservation "power4-imul3" 4
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,iu1_power4*3)\
+ |(du2_power4,iu2_power4*3)\
+ |(du3_power4,iu2_power4*3)\
+ |(du4_power4,iu1_power4*3)")
+
+
+; SPR move only executes in first IU.
+; Integer division only executes in second IU.
+(define_insn_reservation "power4-idiv" 36
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4,iu2_power4*35")
+
+(define_insn_reservation "power4-ldiv" 68
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4,iu2_power4*67")
+
+
+(define_insn_reservation "power4-mtjmpr" 3
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,bpu_power4")
+
+
+; Branches take dispatch slot 4. The presence_sets prevent other insns
+; from grabbing previous dispatch slots once this one is assigned.
+(define_insn_reservation "power4-branch" 2
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "power4"))
+ "(du5_power4\
+ |du4_power4+du5_power4\
+ |du3_power4+du4_power4+du5_power4\
+ |du2_power4+du3_power4+du4_power4+du5_power4\
+ |du1_power4+du2_power4+du3_power4+du4_power4+du5_power4),bpu_power4")
+
+
+; Condition Register logical ops are split if non-destructive (RT != RB)
+(define_insn_reservation "power4-crlogical" 2
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,cru_power4")
+
+(define_insn_reservation "power4-delayedcr" 4
+ (and (eq_attr "type" "delayed_cr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4,cru_power4,cru_power4")
+
+; 4 mfcrf (each 3 cyc, 1/cyc) + 3 fxu
+(define_insn_reservation "power4-mfcr" 6
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ du1_power4+du2_power4+du3_power4+du4_power4+cru_power4,\
+ cru_power4,cru_power4,cru_power4")
+
+; mfcrf (1 field)
+(define_insn_reservation "power4-mfcrf" 3
+ (and (eq_attr "type" "mfcrf")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,cru_power4")
+
+; mtcrf (1 field)
+(define_insn_reservation "power4-mtcr" 4
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "power4"))
+ "du1_power4,iu1_power4")
+
+; Basic FP latency is 6 cycles
+(define_insn_reservation "power4-fp" 6
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "power4"))
+ "fpq_power4")
+
+(define_insn_reservation "power4-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "power4"))
+ "fpq_power4")
+
+(define_insn_reservation "power4-sdiv" 33
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,fpu1_power4*28)\
+ |(du2_power4,fpu2_power4*28)\
+ |(du3_power4,fpu2_power4*28)\
+ |(du4_power4,fpu1_power4*28)")
+
+(define_insn_reservation "power4-sqrt" 40
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "power4"))
+ "(du1_power4,fpu1_power4*35)\
+ |(du2_power4,fpu2_power4*35)\
+ |(du3_power4,fpu2_power4*35)\
+ |(du4_power4,fpu2_power4*35)")
+
+(define_insn_reservation "power4-isync" 2
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power4"))
+ "du1_power4+du2_power4+du3_power4+du4_power4,\
+ lsu1_power4")
+
+
+; VMX
+(define_insn_reservation "power4-vecsimple" 2
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+(define_insn_reservation "power4-veccomplex" 5
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+; vecfp compare
+(define_insn_reservation "power4-veccmp" 8
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+(define_insn_reservation "power4-vecfloat" 8
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "power4"))
+ "vq_power4")
+
+(define_insn_reservation "power4-vecperm" 2
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "power4"))
+ "vpq_power4")
+
+(define_bypass 4 "power4-vecload" "power4-vecperm")
+
+(define_bypass 3 "power4-vecsimple" "power4-vecperm")
+(define_bypass 6 "power4-veccomplex" "power4-vecperm")
+(define_bypass 3 "power4-vecperm"
+ "power4-vecsimple,power4-veccomplex,power4-vecfloat")
+(define_bypass 9 "power4-vecfloat" "power4-vecperm")
+
+(define_bypass 5 "power4-vecsimple,power4-veccomplex"
+ "power4-branch,power4-crlogical,power4-delayedcr,power4-mfcr,power4-mfcrf")
+
+(define_bypass 4 "power4-vecsimple,power4-vecperm" "power4-vecstore")
+(define_bypass 7 "power4-veccomplex" "power4-vecstore")
+(define_bypass 10 "power4-vecfloat" "power4-vecstore")
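
One practical consequence of the latencies above, as a sketch rather
than anything taken from the patch: power4-fp gives floating-point ops
a 6-cycle result latency on fully pipelined units, so a reduction
through a single accumulator stalls on its own result every iteration.
Splitting the chain across independent accumulators gives the scheduler
ready instructions with which to cover that latency:

    double
    dot (const double *a, const double *b, long n)
    {
      double s0 = 0.0, s1 = 0.0;
      long i;
      for (i = 0; i + 1 < n; i += 2)
        {
          s0 += a[i] * b[i];          /* two independent chains: their */
          s1 += a[i + 1] * b[i + 1];  /* 6-cycle latencies can overlap */
        }
      for (; i < n; i++)
        s0 += a[i] * b[i];
      return s0 + s1;
    }
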
diff --git a/gcc-4.4.3/gcc/config/rs6000/power5.md b/gcc-4.4.3/gcc/config/rs6000/power5.md
new file mode 100644
index 000000000..1b73e093e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/power5.md
@@ -0,0 +1,321 @@
+;; Scheduling description for IBM POWER5 processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Sources: IBM Red Book and White Paper on POWER5
+
+;; The POWER5 has 2 iu, 2 fpu, 2 lsu per engine (2 engines per chip).
+;; Instructions that update more than one register get broken into two
+;; (split) or more internal ops. The chip can issue up to 5
+;; internal ops per cycle.
+
+(define_automaton "power5iu,power5fpu,power5misc")
+
+(define_cpu_unit "iu1_power5,iu2_power5" "power5iu")
+(define_cpu_unit "lsu1_power5,lsu2_power5" "power5misc")
+(define_cpu_unit "fpu1_power5,fpu2_power5" "power5fpu")
+(define_cpu_unit "bpu_power5,cru_power5" "power5misc")
+(define_cpu_unit "du1_power5,du2_power5,du3_power5,du4_power5,du5_power5"
+ "power5misc")
+
+(define_reservation "lsq_power5"
+ "(du1_power5,lsu1_power5)\
+ |(du2_power5,lsu2_power5)\
+ |(du3_power5,lsu2_power5)\
+ |(du4_power5,lsu1_power5)")
+
+(define_reservation "iq_power5"
+ "(du1_power5,iu1_power5)\
+ |(du2_power5,iu2_power5)\
+ |(du3_power5,iu2_power5)\
+ |(du4_power5,iu1_power5)")
+
+(define_reservation "fpq_power5"
+ "(du1_power5,fpu1_power5)\
+ |(du2_power5,fpu2_power5)\
+ |(du3_power5,fpu2_power5)\
+ |(du4_power5,fpu1_power5)")
+
+; Dispatch slots are allocated in program order.
+(absence_set "du1_power5" "du2_power5,du3_power5,du4_power5,du5_power5")
+(absence_set "du2_power5" "du3_power5,du4_power5,du5_power5")
+(absence_set "du3_power5" "du4_power5,du5_power5")
+(absence_set "du4_power5" "du5_power5")
+
+
+; Load/store
+(define_insn_reservation "power5-load" 4 ; 3
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "power5"))
+ "lsq_power5")
+
+(define_insn_reservation "power5-load-ext" 5
+ (and (eq_attr "type" "load_ext")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5,nothing,nothing,iu2_power5")
+
+(define_insn_reservation "power5-load-ext-update" 5
+ (and (eq_attr "type" "load_ext_u")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5+iu2_power5,nothing,nothing,iu2_power5")
+
+(define_insn_reservation "power5-load-ext-update-indexed" 5
+ (and (eq_attr "type" "load_ext_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ iu1_power5,lsu2_power5+iu1_power5,nothing,nothing,iu2_power5")
+
+(define_insn_reservation "power5-load-update-indexed" 3
+ (and (eq_attr "type" "load_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ iu1_power5,lsu2_power5+iu2_power5")
+
+(define_insn_reservation "power5-load-update" 4 ; 3
+ (and (eq_attr "type" "load_u")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5")
+
+(define_insn_reservation "power5-fpload" 6 ; 5
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "power5"))
+ "lsq_power5")
+
+(define_insn_reservation "power5-fpload-update" 6 ; 5
+ (and (eq_attr "type" "fpload_u,fpload_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5")
+
+(define_insn_reservation "power5-store" 12
+ (and (eq_attr "type" "store")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,lsu1_power5,iu1_power5)\
+ |(du2_power5,lsu2_power5,iu2_power5)\
+ |(du3_power5,lsu2_power5,iu2_power5)\
+ |(du4_power5,lsu1_power5,iu1_power5)")
+
+(define_insn_reservation "power5-store-update" 12
+ (and (eq_attr "type" "store_u")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5,iu1_power5")
+
+(define_insn_reservation "power5-store-update-indexed" 12
+ (and (eq_attr "type" "store_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ iu1_power5,lsu2_power5+iu2_power5,iu2_power5")
+
+(define_insn_reservation "power5-fpstore" 12
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,lsu1_power5,fpu1_power5)\
+ |(du2_power5,lsu2_power5,fpu2_power5)\
+ |(du3_power5,lsu2_power5,fpu2_power5)\
+ |(du4_power5,lsu1_power5,fpu1_power5)")
+
+(define_insn_reservation "power5-fpstore-update" 12
+ (and (eq_attr "type" "fpstore_u,fpstore_ux")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,lsu1_power5+iu2_power5,fpu1_power5")
+
+(define_insn_reservation "power5-llsc" 11
+ (and (eq_attr "type" "load_l,store_c,sync")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5")
+
+
+; Integer latency is 2 cycles
+(define_insn_reservation "power5-integer" 2
+ (and (eq_attr "type" "integer,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "power5"))
+ "iq_power5")
+
+(define_insn_reservation "power5-two" 2
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5+du2_power5,iu1_power5,nothing,iu2_power5)\
+ |(du2_power5+du3_power5,iu2_power5,nothing,iu2_power5)\
+ |(du3_power5+du4_power5,iu2_power5,nothing,iu1_power5)\
+ |(du4_power5+du1_power5,iu1_power5,nothing,iu1_power5)")
+
+(define_insn_reservation "power5-three" 2
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5+du2_power5+du3_power5,\
+ iu1_power5,nothing,iu2_power5,nothing,iu2_power5)\
+ |(du2_power5+du3_power5+du4_power5,\
+ iu2_power5,nothing,iu2_power5,nothing,iu1_power5)\
+ |(du3_power5+du4_power5+du1_power5,\
+ iu2_power5,nothing,iu1_power5,nothing,iu1_power5)\
+ |(du4_power5+du1_power5+du2_power5,\
+ iu1_power5,nothing,iu2_power5,nothing,iu2_power5)")
+
+(define_insn_reservation "power5-insert" 4
+ (and (eq_attr "type" "insert_word")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5,nothing,iu2_power5")
+
+(define_insn_reservation "power5-cmp" 3
+ (and (eq_attr "type" "cmp,fast_compare")
+ (eq_attr "cpu" "power5"))
+ "iq_power5")
+
+(define_insn_reservation "power5-compare" 2
+ (and (eq_attr "type" "compare,delayed_compare,var_delayed_compare")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5,iu2_power5")
+
+(define_bypass 4 "power5-compare" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf")
+
+(define_insn_reservation "power5-lmul-cmp" 7
+ (and (eq_attr "type" "lmul_compare")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5*6,iu2_power5")
+
+(define_bypass 10 "power5-lmul-cmp" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf")
+
+(define_insn_reservation "power5-imul-cmp" 5
+ (and (eq_attr "type" "imul_compare")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu1_power5*4,iu2_power5")
+
+(define_bypass 8 "power5-imul-cmp" "power5-branch,power5-crlogical,power5-delayedcr,power5-mfcr,power5-mfcrf")
+
+(define_insn_reservation "power5-lmul" 7
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,iu1_power5*6)\
+ |(du2_power5,iu2_power5*6)\
+ |(du3_power5,iu2_power5*6)\
+ |(du4_power5,iu1_power5*6)")
+
+(define_insn_reservation "power5-imul" 5
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,iu1_power5*4)\
+ |(du2_power5,iu2_power5*4)\
+ |(du3_power5,iu2_power5*4)\
+ |(du4_power5,iu1_power5*4)")
+
+(define_insn_reservation "power5-imul3" 4
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,iu1_power5*3)\
+ |(du2_power5,iu2_power5*3)\
+ |(du3_power5,iu2_power5*3)\
+ |(du4_power5,iu1_power5*3)")
+
+
+; SPR move only executes in first IU.
+; Integer division only executes in second IU.
+(define_insn_reservation "power5-idiv" 36
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu2_power5*35")
+
+(define_insn_reservation "power5-ldiv" 68
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,iu2_power5*67")
+
+
+(define_insn_reservation "power5-mtjmpr" 3
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,bpu_power5")
+
+
+; Branches take dispatch slot 4. The presence_sets prevent other insns
+; from grabbing previous dispatch slots once this one is assigned.
+(define_insn_reservation "power5-branch" 2
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "power5"))
+ "(du5_power5\
+ |du4_power5+du5_power5\
+ |du3_power5+du4_power5+du5_power5\
+ |du2_power5+du3_power5+du4_power5+du5_power5\
+ |du1_power5+du2_power5+du3_power5+du4_power5+du5_power5),bpu_power5")
+
+
+; Condition Register logical ops are split if non-destructive (RT != RB)
+(define_insn_reservation "power5-crlogical" 2
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,cru_power5")
+
+(define_insn_reservation "power5-delayedcr" 4
+ (and (eq_attr "type" "delayed_cr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5,cru_power5,cru_power5")
+
+; 4 mfcrf (each 3 cyc, 1/cyc) + 3 fxu
+(define_insn_reservation "power5-mfcr" 6
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ du1_power5+du2_power5+du3_power5+du4_power5+cru_power5,\
+ cru_power5,cru_power5,cru_power5")
+
+; mfcrf (1 field)
+(define_insn_reservation "power5-mfcrf" 3
+ (and (eq_attr "type" "mfcrf")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,cru_power5")
+
+; mtcrf (1 field)
+(define_insn_reservation "power5-mtcr" 4
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "power5"))
+ "du1_power5,iu1_power5")
+
+; Basic FP latency is 6 cycles
+(define_insn_reservation "power5-fp" 6
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "power5"))
+ "fpq_power5")
+
+(define_insn_reservation "power5-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "power5"))
+ "fpq_power5")
+
+(define_insn_reservation "power5-sdiv" 33
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,fpu1_power5*28)\
+ |(du2_power5,fpu2_power5*28)\
+ |(du3_power5,fpu2_power5*28)\
+ |(du4_power5,fpu1_power5*28)")
+
+(define_insn_reservation "power5-sqrt" 40
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "power5"))
+ "(du1_power5,fpu1_power5*35)\
+ |(du2_power5,fpu2_power5*35)\
+ |(du3_power5,fpu2_power5*35)\
+ |(du4_power5,fpu2_power5*35)")
+
+(define_insn_reservation "power5-isync" 2
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power5"))
+ "du1_power5+du2_power5+du3_power5+du4_power5,\
+ lsu1_power5")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/power6.md b/gcc-4.4.3/gcc/config/rs6000/power6.md
new file mode 100644
index 000000000..ba6524cfa
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/power6.md
@@ -0,0 +1,568 @@
+;; Scheduling description for IBM POWER6 processor.
+;; Copyright (C) 2006, 2007 Free Software Foundation, Inc.
+;; Contributed by Peter Steinmetz (steinmtz@us.ibm.com)
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Sources:
+
+;; The POWER6 has 2 iu, 2 fpu, 2 lsu, and 1 bu/cru unit per engine
+;; (2 engines per chip). The chip can issue up to 5 internal ops
+;; per cycle.
+
+(define_automaton "power6iu,power6lsu,power6fpu,power6bu")
+
+(define_cpu_unit "iu1_power6,iu2_power6" "power6iu")
+(define_cpu_unit "lsu1_power6,lsu2_power6" "power6lsu")
+(define_cpu_unit "bpu_power6" "power6bu")
+(define_cpu_unit "fpu1_power6,fpu2_power6" "power6fpu")
+
+(define_reservation "LS2_power6"
+ "lsu1_power6+lsu2_power6")
+
+(define_reservation "FPU_power6"
+ "fpu1_power6|fpu2_power6")
+
+(define_reservation "BRU_power6"
+ "bpu_power6")
+
+(define_reservation "LSU_power6"
+ "lsu1_power6|lsu2_power6")
+
+(define_reservation "LSF_power6"
+ "(lsu1_power6+fpu1_power6)\
+ |(lsu1_power6+fpu2_power6)\
+ |(lsu2_power6+fpu1_power6)\
+ |(lsu2_power6+fpu2_power6)")
+
+(define_reservation "LX2_power6"
+ "(iu1_power6+iu2_power6+lsu1_power6)\
+ |(iu1_power6+iu2_power6+lsu2_power6)")
+
+(define_reservation "FX2_power6"
+ "iu1_power6+iu2_power6")
+
+(define_reservation "X2F_power6"
+ "(iu1_power6+iu2_power6+fpu1_power6)\
+ |(iu1_power6+iu2_power6+fpu2_power6)")
+
+(define_reservation "BX2_power6"
+ "iu1_power6+iu2_power6+bpu_power6")
+
+(define_reservation "LSX_power6"
+ "(iu1_power6+lsu1_power6)\
+ |(iu1_power6+lsu2_power6)\
+ |(iu2_power6+lsu1_power6)\
+ |(iu2_power6+lsu2_power6)")
+
+(define_reservation "FXU_power6"
+ "iu1_power6|iu2_power6")
+
+(define_reservation "XLF_power6"
+ "(iu1_power6+lsu1_power6+fpu1_power6)\
+ |(iu1_power6+lsu1_power6+fpu2_power6)\
+ |(iu1_power6+lsu2_power6+fpu1_power6)\
+ |(iu1_power6+lsu2_power6+fpu2_power6)\
+ |(iu2_power6+lsu1_power6+fpu1_power6)\
+ |(iu2_power6+lsu1_power6+fpu2_power6)\
+ |(iu2_power6+lsu2_power6+fpu1_power6)\
+ |(iu2_power6+lsu2_power6+fpu2_power6)")
+
+(define_reservation "BRX_power6"
+ "(bpu_power6+iu1_power6)\
+ |(bpu_power6+iu2_power6)")
+
+; Load/store
+
+; The default latency for a value written by a fixed point load
+; and then read or written by a subsequent fixed point op.
+(define_insn_reservation "power6-load" 2 ; fx
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "power6"))
+ "LSU_power6")
+
+; define the bypass for the case where the value written
+; by a fixed point load is used as the source value on
+; a store.
+(define_bypass 1 "power6-load,\
+ power6-load-update,\
+ power6-load-update-indexed"
+ "power6-store,\
+ power6-store-update,\
+ power6-store-update-indexed,\
+ power6-fpstore,\
+ power6-fpstore-update"
+ "store_data_bypass_p")
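
Roughly what this bypass expresses, in an illustrative C sketch: when
the value produced by a load is consumed only as the data operand of a
store, store_data_bypass_p holds and the pair can be scheduled one
cycle apart instead of at the load's full latency.

    void
    copy_word (int *dst, const int *src)
    {
      *dst = *src;      /* loaded value is only store data: bypass */
    }

    void
    bump_word (int *dst, const int *src)
    {
      *dst = *src + 1;  /* loaded value feeds an add first: full latency */
    }
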
+
+(define_insn_reservation "power6-load-ext" 4 ; fx
+ (and (eq_attr "type" "load_ext")
+ (eq_attr "cpu" "power6"))
+ "LSU_power6")
+
+; define the bypass for the case where the value written
+; by a fixed point load ext is used as the source value on
+; a store.
+(define_bypass 1 "power6-load-ext,\
+ power6-load-ext-update,\
+ power6-load-ext-update-indexed"
+ "power6-store,\
+ power6-store-update,\
+ power6-store-update-indexed,\
+ power6-fpstore,\
+ power6-fpstore-update"
+ "store_data_bypass_p")
+
+(define_insn_reservation "power6-load-update" 2 ; fx
+ (and (eq_attr "type" "load_u")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-load-update-indexed" 2 ; fx
+ (and (eq_attr "type" "load_ux")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-load-ext-update" 4 ; fx
+ (and (eq_attr "type" "load_ext_u")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-load-ext-update-indexed" 4 ; fx
+ (and (eq_attr "type" "load_ext_ux")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-fpload" 1
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "power6"))
+ "LSU_power6")
+
+(define_insn_reservation "power6-fpload-update" 1
+ (and (eq_attr "type" "fpload_u,fpload_ux")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-store" 14
+ (and (eq_attr "type" "store")
+ (eq_attr "cpu" "power6"))
+ "LSU_power6")
+
+(define_insn_reservation "power6-store-update" 14
+ (and (eq_attr "type" "store_u")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-store-update-indexed" 14
+ (and (eq_attr "type" "store_ux")
+ (eq_attr "cpu" "power6"))
+ "LX2_power6")
+
+(define_insn_reservation "power6-fpstore" 14
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "power6"))
+ "LSF_power6")
+
+(define_insn_reservation "power6-fpstore-update" 14
+ (and (eq_attr "type" "fpstore_u,fpstore_ux")
+ (eq_attr "cpu" "power6"))
+ "XLF_power6")
+
+(define_insn_reservation "power6-larx" 3
+ (and (eq_attr "type" "load_l")
+ (eq_attr "cpu" "power6"))
+ "LS2_power6")
+
+(define_insn_reservation "power6-stcx" 10 ; best case
+ (and (eq_attr "type" "store_c")
+ (eq_attr "cpu" "power6"))
+ "LSX_power6")
+
+(define_insn_reservation "power6-sync" 11 ; N/A
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "power6"))
+ "LSU_power6")
+
+(define_insn_reservation "power6-integer" 1
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-exts" 1
+ (and (eq_attr "type" "exts")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-shift" 1
+ (and (eq_attr "type" "shift")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-insert" 1
+ (and (eq_attr "type" "insert_word")
+ (eq_attr "cpu" "power6"))
+ "FX2_power6")
+
+(define_insn_reservation "power6-insert-dword" 1
+ (and (eq_attr "type" "insert_dword")
+ (eq_attr "cpu" "power6"))
+ "FX2_power6")
+
+; define the bypass for the case where the value written
+; by a fixed point op is used as the source value on a
+; store.
+(define_bypass 1 "power6-integer,\
+ power6-exts,\
+ power6-shift,\
+ power6-insert,\
+ power6-insert-dword"
+ "power6-store,\
+ power6-store-update,\
+ power6-store-update-indexed,\
+ power6-fpstore,\
+ power6-fpstore-update"
+ "store_data_bypass_p")
+
+(define_insn_reservation "power6-cntlz" 2
+ (and (eq_attr "type" "cntlz")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_bypass 1 "power6-cntlz"
+ "power6-store,\
+ power6-store-update,\
+ power6-store-update-indexed,\
+ power6-fpstore,\
+ power6-fpstore-update"
+ "store_data_bypass_p")
+
+(define_insn_reservation "power6-var-rotate" 4
+ (and (eq_attr "type" "var_shift_rotate")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-trap" 1 ; N/A
+ (and (eq_attr "type" "trap")
+ (eq_attr "cpu" "power6"))
+ "BRX_power6")
+
+(define_insn_reservation "power6-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6,iu1_power6)\
+ |(iu1_power6+iu2_power6,nothing)\
+ |(iu1_power6,iu2_power6)\
+ |(iu2_power6,iu1_power6)\
+ |(iu2_power6,iu2_power6)")
+
+(define_insn_reservation "power6-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6,iu1_power6,iu1_power6)\
+ |(iu1_power6,iu1_power6,iu2_power6)\
+ |(iu1_power6,iu2_power6,iu1_power6)\
+ |(iu1_power6,iu2_power6,iu2_power6)\
+ |(iu2_power6,iu1_power6,iu1_power6)\
+ |(iu2_power6,iu1_power6,iu2_power6)\
+ |(iu2_power6,iu2_power6,iu1_power6)\
+ |(iu2_power6,iu2_power6,iu2_power6)\
+ |(iu1_power6+iu2_power6,iu1_power6)\
+ |(iu1_power6+iu2_power6,iu2_power6)\
+ |(iu1_power6,iu1_power6+iu2_power6)\
+ |(iu2_power6,iu1_power6+iu2_power6)")
+
+(define_insn_reservation "power6-cmp" 1
+ (and (eq_attr "type" "cmp")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-compare" 1
+ (and (eq_attr "type" "compare")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-fast-compare" 1
+ (and (eq_attr "type" "fast_compare")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+; define the bypass for the case where the value written
+; by a fixed point rec form op is used as the source value
+; on a store.
+(define_bypass 1 "power6-compare,\
+ power6-fast-compare"
+ "power6-store,\
+ power6-store-update,\
+ power6-store-update-indexed,\
+ power6-fpstore,\
+ power6-fpstore-update"
+ "store_data_bypass_p")
+
+(define_insn_reservation "power6-delayed-compare" 2 ; N/A
+ (and (eq_attr "type" "delayed_compare")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-var-delayed-compare" 4
+ (and (eq_attr "type" "var_delayed_compare")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-lmul-cmp" 16
+ (and (eq_attr "type" "lmul_compare")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\
+ |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)");
+
+(define_insn_reservation "power6-imul-cmp" 16
+ (and (eq_attr "type" "imul_compare")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\
+ |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)");
+
+(define_insn_reservation "power6-lmul" 16
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\
+ |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)");
+
+(define_insn_reservation "power6-imul" 16
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\
+ |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)");
+
+(define_insn_reservation "power6-imul3" 16
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*16+iu2_power6*16+fpu1_power6*16)\
+ |(iu1_power6*16+iu2_power6*16+fpu2_power6*16)");
+
+(define_bypass 9 "power6-imul,\
+ power6-lmul,\
+ power6-imul-cmp,\
+ power6-lmul-cmp,\
+ power6-imul3"
+ "power6-store,\
+ power6-store-update,\
+ power6-store-update-indexed,\
+ power6-fpstore,\
+ power6-fpstore-update"
+ "store_data_bypass_p")
+
+(define_insn_reservation "power6-idiv" 44
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*44+iu2_power6*44+fpu1_power6*44)\
+ |(iu1_power6*44+iu2_power6*44+fpu2_power6*44)");
+
+; The latency for this bypass is yet to be defined
+;(define_bypass ? "power6-idiv"
+; "power6-store,\
+; power6-store-update,\
+; power6-store-update-indexed,\
+; power6-fpstore,\
+; power6-fpstore-update"
+; "store_data_bypass_p")
+
+(define_insn_reservation "power6-ldiv" 56
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "power6"))
+ "(iu1_power6*56+iu2_power6*56+fpu1_power6*56)\
+ |(iu1_power6*56+iu2_power6*56+fpu2_power6*56)");
+
+; The latency for this bypass is yet to be defined
+;(define_bypass ? "power6-ldiv"
+; "power6-store,\
+; power6-store-update,\
+; power6-store-update-indexed,\
+; power6-fpstore,\
+; power6-fpstore-update"
+; "store_data_bypass_p")
+
+(define_insn_reservation "power6-mtjmpr" 2
+ (and (eq_attr "type" "mtjmpr,mfjmpr")
+ (eq_attr "cpu" "power6"))
+ "BX2_power6")
+
+(define_bypass 5 "power6-mtjmpr" "power6-branch")
+
+(define_insn_reservation "power6-branch" 2
+ (and (eq_attr "type" "jmpreg,branch")
+ (eq_attr "cpu" "power6"))
+ "BRU_power6")
+
+(define_bypass 5 "power6-branch" "power6-mtjmpr")
+
+(define_insn_reservation "power6-crlogical" 3
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "power6"))
+ "BRU_power6")
+
+(define_bypass 3 "power6-crlogical" "power6-branch")
+
+(define_insn_reservation "power6-delayedcr" 3
+ (and (eq_attr "type" "delayed_cr")
+ (eq_attr "cpu" "power6"))
+ "BRU_power6")
+
+(define_insn_reservation "power6-mfcr" 6 ; N/A
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "power6"))
+ "BX2_power6")
+
+; mfcrf (1 field)
+(define_insn_reservation "power6-mfcrf" 3 ; N/A
+ (and (eq_attr "type" "mfcrf")
+ (eq_attr "cpu" "power6"))
+ "BX2_power6")
+
+; mtcrf (1 field)
+(define_insn_reservation "power6-mtcr" 4 ; N/A
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "power6"))
+ "BX2_power6")
+
+(define_bypass 9 "power6-mtcr" "power6-branch")
+
+(define_insn_reservation "power6-fp" 6
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+; Any fp instruction that updates a CR has a latency
+; of 6 to a dependent branch
+(define_bypass 6 "power6-fp" "power6-branch")
+
+(define_bypass 1 "power6-fp"
+ "power6-fpstore,power6-fpstore-update"
+ "store_data_bypass_p")
+
+(define_insn_reservation "power6-fpcompare" 8
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_bypass 12 "power6-fpcompare"
+ "power6-branch,power6-crlogical")
+
+(define_insn_reservation "power6-sdiv" 26
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_insn_reservation "power6-ddiv" 32
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_insn_reservation "power6-sqrt" 30
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_insn_reservation "power6-dsqrt" 42
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_insn_reservation "power6-isync" 2 ; N/A
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "power6"))
+ "FXU_power6")
+
+(define_insn_reservation "power6-vecload" 1
+ (and (eq_attr "type" "vecload")
+ (eq_attr "cpu" "power6"))
+ "LSU_power6")
+
+(define_insn_reservation "power6-vecstore" 1
+ (and (eq_attr "type" "vecstore")
+ (eq_attr "cpu" "power6"))
+ "LSF_power6")
+
+(define_insn_reservation "power6-vecsimple" 3
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_bypass 6 "power6-vecsimple" "power6-veccomplex,\
+ power6-vecperm")
+
+(define_bypass 5 "power6-vecsimple" "power6-vecfloat")
+
+(define_bypass 4 "power6-vecsimple" "power6-vecstore" )
+
+(define_insn_reservation "power6-veccmp" 1
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_bypass 10 "power6-veccmp" "power6-branch")
+
+(define_insn_reservation "power6-vecfloat" 7
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_bypass 10 "power6-vecfloat" "power6-vecsimple")
+
+(define_bypass 11 "power6-vecfloat" "power6-veccomplex,\
+ power6-vecperm")
+
+(define_bypass 9 "power6-vecfloat" "power6-vecstore" )
+
+(define_insn_reservation "power6-veccomplex" 7
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_bypass 10 "power6-veccomplex" "power6-vecsimple,\
+ power6-vecfloat" )
+
+(define_bypass 9 "power6-veccomplex" "power6-vecperm" )
+
+(define_bypass 8 "power6-veccomplex" "power6-vecstore" )
+
+(define_insn_reservation "power6-vecperm" 4
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "power6"))
+ "FPU_power6")
+
+(define_bypass 7 "power6-vecperm" "power6-vecsimple,\
+ power6-vecfloat" )
+
+(define_bypass 6 "power6-vecperm" "power6-veccomplex" )
+
+(define_bypass 5 "power6-vecperm" "power6-vecstore" )
+
+(define_insn_reservation "power6-mftgpr" 8
+ (and (eq_attr "type" "mftgpr")
+ (eq_attr "cpu" "power6"))
+ "X2F_power6")
+
+(define_insn_reservation "power6-mffgpr" 14
+ (and (eq_attr "type" "mffgpr")
+ (eq_attr "cpu" "power6"))
+ "LX2_power6")
+
+(define_bypass 4 "power6-mftgpr" "power6-imul,\
+ power6-lmul,\
+ power6-imul-cmp,\
+ power6-lmul-cmp,\
+ power6-imul3,\
+ power6-idiv,\
+ power6-ldiv" )
diff --git a/gcc-4.4.3/gcc/config/rs6000/ppc-asm.h b/gcc-4.4.3/gcc/config/rs6000/ppc-asm.h
new file mode 100644
index 000000000..7f39a2a43
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/ppc-asm.h
@@ -0,0 +1,178 @@
+/* PowerPC asm definitions for GNU C. */
+/* Under winnt, (1) gas supports the following as register names, and (2)
+ defining "toc" in particular breaks the FUNC_START macro, since ".toc"
+ becomes ".2". */
+
+#define r0 0
+#define sp 1
+#define toc 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+
+/*
+ * Macros to glue together two tokens.
+ */
+
+#ifdef __STDC__
+#define XGLUE(a,b) a##b
+#else
+#define XGLUE(a,b) a/**/b
+#endif
+
+#define GLUE(a,b) XGLUE(a,b)
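+
+/* For example, GLUE (., name) pastes its arguments into the single
+ token `.name'. */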
+
+/*
+ * Macros to begin and end a function written in assembler. If -mcall-aixdesc
+ * or -mcall-nt, create a function descriptor with the given name, and create
+ * the real function with one or two leading periods respectively.
+ */
+
+#if defined (__powerpc64__)
+#define FUNC_NAME(name) GLUE(.,name)
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#define FUNC_START(name) \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+ .globl name; \
+ .globl GLUE(.,name); \
+GLUE(.,name):
+
+#define HIDDEN_FUNC(name) \
+ FUNC_START(name) \
+ .hidden name; \
+ .hidden GLUE(.,name);
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name)
+
+#elif defined(_CALL_AIXDESC)
+
+#ifdef _RELOCATABLE
+#define DESC_SECTION ".got2"
+#else
+#define DESC_SECTION ".got1"
+#endif
+
+#define FUNC_NAME(name) GLUE(.,name)
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#define FUNC_START(name) \
+ .section DESC_SECTION,"aw"; \
+name: \
+ .long GLUE(.,name); \
+ .long _GLOBAL_OFFSET_TABLE_; \
+ .long 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+ .globl name; \
+ .globl GLUE(.,name); \
+GLUE(.,name):
+
+#define HIDDEN_FUNC(name) \
+ FUNC_START(name) \
+ .hidden name; \
+ .hidden GLUE(.,name);
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name)
+
+#else
+
+#define FUNC_NAME(name) GLUE(__USER_LABEL_PREFIX__,name)
+#if defined __PIC__ || defined __pic__
+#define JUMP_TARGET(name) FUNC_NAME(name@plt)
+#else
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#endif
+#define FUNC_START(name) \
+ .type FUNC_NAME(name),@function; \
+ .globl FUNC_NAME(name); \
+FUNC_NAME(name):
+
+#define HIDDEN_FUNC(name) \
+ FUNC_START(name) \
+ .hidden FUNC_NAME(name);
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size FUNC_NAME(name),GLUE(.L,name)-FUNC_NAME(name)
+#endif
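+
+/* A minimal usage sketch (illustrative; the function name is
+ hypothetical):
+
+ FUNC_START(__example_zero)
+ li r3,0
+ blr
+ FUNC_END(__example_zero)
+
+ On powerpc64 this also emits the ".opd" function descriptor, and
+ other translation units branch to it via JUMP_TARGET(__example_zero). */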
+
+#if defined __linux__ && !defined __powerpc64__
+ .section .note.GNU-stack
+ .previous
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/ppc64-fp.c b/gcc-4.4.3/gcc/config/rs6000/ppc64-fp.c
new file mode 100644
index 000000000..62861ee16
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/ppc64-fp.c
@@ -0,0 +1,239 @@
+/* Functions needed for soft-float on powerpc64-linux, copied from
+ libgcc2.c with macros expanded to force the use of specific types.
+
+ Copyright (C) 1989, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2006, 2009 Free Software Foundation,
+ Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#if defined(__powerpc64__) || defined (__64BIT__) || defined(__ppc64__)
+#define TMODES
+#include "config/fp-bit.h"
+
+extern DItype __fixtfdi (TFtype);
+extern DItype __fixdfdi (DFtype);
+extern DItype __fixsfdi (SFtype);
+extern USItype __fixunsdfsi (DFtype);
+extern USItype __fixunssfsi (SFtype);
+extern TFtype __floatditf (DItype);
+extern TFtype __floatunditf (UDItype);
+extern DFtype __floatdidf (DItype);
+extern DFtype __floatundidf (UDItype);
+extern SFtype __floatdisf (DItype);
+extern SFtype __floatundisf (UDItype);
+extern DItype __fixunstfdi (TFtype);
+
+static DItype local_fixunssfdi (SFtype);
+static DItype local_fixunsdfdi (DFtype);
+
+DItype
+__fixtfdi (TFtype a)
+{
+ if (a < 0)
+ return - __fixunstfdi (-a);
+ return __fixunstfdi (a);
+}
+
+DItype
+__fixdfdi (DFtype a)
+{
+ if (a < 0)
+ return - local_fixunsdfdi (-a);
+ return local_fixunsdfdi (a);
+}
+
+DItype
+__fixsfdi (SFtype a)
+{
+ if (a < 0)
+ return - local_fixunssfdi (-a);
+ return local_fixunssfdi (a);
+}
+
+USItype
+__fixunsdfsi (DFtype a)
+{
+ if (a >= - (DFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
+ return (SItype) a;
+}
+
+USItype
+__fixunssfsi (SFtype a)
+{
+ if (a >= - (SFtype) (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ return (SItype) (a + (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1))
+ - (- ((SItype)(((USItype)1 << ((4 * 8) - 1)) - 1)) - 1);
+ return (SItype) a;
+}
+
+TFtype
+__floatditf (DItype u)
+{
+ DFtype dh, dl;
+
+ dh = (SItype) (u >> (sizeof (SItype) * 8));
+ dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (TFtype) dh + (TFtype) dl;
+}
+
+TFtype
+__floatunditf (UDItype u)
+{
+ DFtype dh, dl;
+
+ dh = (USItype) (u >> (sizeof (SItype) * 8));
+ dh *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ dl = (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (TFtype) dh + (TFtype) dl;
+}
+
+DFtype
+__floatdidf (DItype u)
+{
+ DFtype d;
+
+ d = (SItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
+DFtype
+__floatundidf (UDItype u)
+{
+ DFtype d;
+
+ d = (USItype) (u >> (sizeof (SItype) * 8));
+ d *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ d += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return d;
+}
+
+SFtype
+__floatdisf (DItype u)
+{
+ DFtype f;
+
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (! (- ((DItype) 1 << 53) < u
+ && u < ((DItype) 1 << 53)))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (SItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
+SFtype
+__floatundisf (UDItype u)
+{
+ DFtype f;
+
+ if (53 < (sizeof (DItype) * 8)
+ && 53 > ((sizeof (DItype) * 8) - 53 + 24))
+ {
+ if (u >= ((UDItype) 1 << 53))
+ {
+ if ((UDItype) u & (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1))
+ {
+ u &= ~ (((UDItype) 1 << ((sizeof (DItype) * 8) - 53)) - 1);
+ u |= ((UDItype) 1 << ((sizeof (DItype) * 8) - 53));
+ }
+ }
+ }
+ f = (USItype) (u >> (sizeof (SItype) * 8));
+ f *= 2.0 * (((UDItype) 1) << ((sizeof (SItype) * 8) - 1));
+ f += (USItype) (u & ((((UDItype) 1) << (sizeof (SItype) * 8)) - 1));
+
+ return (SFtype) f;
+}
+
+DItype
+__fixunstfdi (TFtype a)
+{
+ if (a < 0)
+ return 0;
+
+ /* Compute high word of result, as a flonum. */
+ const TFtype b = (a / (((UDItype) 1) << (sizeof (SItype) * 8)));
+ /* Convert that to fixed (but not to DItype!),
+ and shift it into the high word. */
+ UDItype v = (USItype) b;
+ v <<= (sizeof (SItype) * 8);
+ /* Remove high part from the TFtype, leaving the low part as flonum. */
+ a -= (TFtype) v;
+ /* Convert that to fixed (but not to DItype!) and add it in.
+ Sometimes A comes out negative. This is significant, since
+ A has more bits than a long int does. */
+ if (a < 0)
+ v -= (USItype) (-a);
+ else
+ v += (USItype) a;
+ return v;
+}
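+
+/* Worked example (illustrative): for a == 2^32 + 5.0, b is just over
+ 1.0, so v becomes ((UDItype) 1) << 32; subtracting v leaves a == 5.0,
+ and the final addition yields 0x100000005. */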
+
+/* This version is needed to prevent recursion; fixunsdfdi in libgcc
+ calls fixdfdi, which in turn calls fixunsdfdi. */
+
+static DItype
+local_fixunsdfdi (DFtype a)
+{
+ USItype hi, lo;
+
+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
+}
+
+/* This version is needed to prevent recursion; fixunssfdi in libgcc
+ calls fixsfdi, which in turn calls fixunssfdi. */
+
+static DItype
+local_fixunssfdi (SFtype original_a)
+{
+ DFtype a = original_a;
+ USItype hi, lo;
+
+ hi = a / (((UDItype) 1) << (sizeof (SItype) * 8));
+ lo = (a - ((DFtype) hi) * (((UDItype) 1) << (sizeof (SItype) * 8)));
+ return ((UDItype) hi << (sizeof (SItype) * 8)) | lo;
+}
+
+#endif /* __powerpc64__ */
diff --git a/gcc-4.4.3/gcc/config/rs6000/ppu_intrinsics.h b/gcc-4.4.3/gcc/config/rs6000/ppu_intrinsics.h
new file mode 100644
index 000000000..2acb32db9
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/ppu_intrinsics.h
@@ -0,0 +1,727 @@
+/* PPU intrinsics as defined by the C/C++ Language extension for Cell BEA.
+ Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* TODO:
+ misc ops (traps)
+ supervisor/hypervisor mode ops. */
+
+#ifndef _PPU_INTRINSICS_H
+#define _PPU_INTRINSICS_H
+
+#if !defined(__PPU__) && !defined(__ppc__) && !defined(__ppc64__) \
+ && !defined(__GNUC__)
+ #error ppu_intrinsics.h included on wrong platform/compiler
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * unsigned int __cntlzw(unsigned int)
+ * unsigned int __cntlzd(unsigned long long)
+ * int __mulhw(int, int)
+ * unsigned int __mulhwu(unsigned int, unsigned int)
+ * long long __mulhd(long long, long long)
+ * unsigned long long __mulhdu(unsigned long long, unsigned long long)
+ *
+ * void __sync(void)
+ * void __isync(void)
+ * void __lwsync(void)
+ * void __eieio(void)
+ *
+ * void __nop(void)
+ * void __cctpl(void)
+ * void __cctpm(void)
+ * void __cctph(void)
+ * void __db8cyc(void)
+ * void __db10cyc(void)
+ * void __db12cyc(void)
+ * void __db16cyc(void)
+ *
+ * void __mtspr(unsigned int spr, unsigned long long value)
+ * unsigned long long __mfspr(unsigned int spr)
+ * unsigned long long __mftb(void)
+ *
+ * void __icbi(void *base)
+ * void __dcbi(void *base)
+ *
+ * void __dcbf(void *base)
+ * void __dcbz(void *base)
+ * void __dcbst(void *base)
+ * void __dcbtst(void *base)
+ * void __dcbt(void *base)
+ * void __dcbt_TH1000(void *EATRUNC, bool D, bool UG, int ID)
+ * void __dcbt_TH1010(bool GO, int S, int UNITCNT, bool T, bool U, int ID)
+ *
+ * unsigned __lwarx(void *base)
+ * unsigned long long __ldarx(void *base)
+ * bool __stwcx(void *base, unsigned value)
+ * bool __stdcx(void *base, unsigned long long value)
+ *
+ * unsigned short __lhbrx(void *base)
+ * unsigned int __lwbrx(void *base)
+ * unsigned long long __ldbrx(void *base)
+ * void __sthbrx(void *base, unsigned short value)
+ * void __stwbrx(void *base, unsigned int value)
+ * void __stdbrx(void *base, unsigned long long value)
+ *
+ * double __fabs(double x)
+ * float __fabsf(float x)
+ * double __fnabs(double x)
+ * float __fnabsf(float x)
+ * double __fmadd(double x, double y, double z)
+ * double __fmsub(double x, double y, double z)
+ * double __fnmadd(double x, double y, double z)
+ * double __fnmsub(double x, double y, double z)
+ * float __fmadds(float x, float y, float z)
+ * float __fmsubs(float x, float y, float z)
+ * float __fnmadds(float x, float y, float z)
+ * float __fnmsubs(float x, float y, float z)
+ * double __fsel(double x, double y, double z)
+ * float __fsels(float x, float y, float z)
+ * double __frsqrte(double x)
+ * float __fres(float x)
+ * double __fsqrt(double x)
+ * float __fsqrts(float x)
+ * long long __fctid(double x)
+ * long long __fctiw(double x)
+ * double __fcfid(long long x)
+ * double __mffs(void)
+ * void __mtfsf(int mask, double value)
+ * void __mtfsfi(int bits, int field)
+ * void __mtfsb0(int)
+ * void __mtfsb1(int)
+ * double __setflm(double)
+ *
+ * dcbt intrinsics
+ * void __protected_unlimited_stream_set (unsigned int direction, const void *add, unsigned int ID)
+ * void __protected_stream_set (unsigned int direction, const void *add, unsigned int ID)
+ * void __protected_stream_stop_all (void)
+ * void __protected_stream_stop (unsigned int ID)
+ * void __protected_stream_count (unsigned int unit_cnt, unsigned int ID)
+ * void __protected_stream_go (void)
+ */
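+
+/* For example (illustrative only), the high half of a 32 x 32 -> 64 bit
+ unsigned multiply can be taken directly:
+
+ unsigned int hi = __mulhwu (0x80000000u, 4u); // hi == 2
+
+ since 0x80000000 * 4 == 0x200000000, whose upper 32 bits are 2. */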
+
+typedef int __V4SI __attribute__((vector_size(16)));
+
+#define __cntlzw(v) __builtin_clz(v)
+#define __cntlzd(v) __builtin_clzll(v)
+
+#define __mulhw(a,b) __extension__ \
+ ({int result; \
+ __asm__ ("mulhw %0,%1,%2" \
+ : "=r" (result) \
+ : "r" ((int) (a)), \
+ "r" ((int) (b))); \
+ result; })
+
+#define __mulhwu(a,b) __extension__ \
+ ({unsigned int result; \
+ __asm__ ("mulhwu %0,%1,%2" \
+ : "=r" (result) \
+ : "r" ((unsigned int) (a)), \
+ "r" ((unsigned int) (b))); \
+ result; })
+
+#ifdef __powerpc64__
+#define __mulhd(a,b) __extension__ \
+ ({ long long result; \
+ __asm__ ("mulhd %0,%1,%2" \
+ : "=r" (result) \
+ : "r" ((long long) (a)), \
+ "r" ((long long) (b))); \
+ result; })
+
+#define __mulhdu(a,b) __extension__ \
+ ({unsigned long long result; \
+ __asm__ ("mulhdu %0,%1,%2" \
+ : "=r" (result) \
+ : "r" ((unsigned long long) (a)), \
+ "r" ((unsigned long long) (b))); \
+ result; })
+#endif /* __powerpc64__ */
+
+#define __sync() __asm__ volatile ("sync" : : : "memory")
+#define __isync() __asm__ volatile ("isync" : : : "memory")
+#define __lwsync() __asm__ volatile ("lwsync" : : : "memory")
+#define __eieio() __asm__ volatile ("eieio" : : : "memory")
+
+#define __nop() __asm__ volatile ("ori 0,0,0" : : : "memory")
+#define __cctpl() __asm__ volatile ("or 1,1,1" : : : "memory")
+#define __cctpm() __asm__ volatile ("or 2,2,2" : : : "memory")
+#define __cctph() __asm__ volatile ("or 3,3,3" : : : "memory")
+#define __db8cyc() __asm__ volatile ("or 28,28,28" : : : "memory")
+#define __db10cyc() __asm__ volatile ("or 29,29,29" : : : "memory")
+#define __db12cyc() __asm__ volatile ("or 30,30,30" : : : "memory")
+#define __db16cyc() __asm__ volatile ("or 31,31,31" : : : "memory")
+
+#ifdef __powerpc64__
+#define __mtspr(spr, value) \
+ __asm__ volatile ("mtspr %0,%1" : : "n" (spr), "r" (value))
+
+#define __mfspr(spr) __extension__ \
+ ({ unsigned long long result; \
+ __asm__ volatile ("mfspr %0,%1" : "=r" (result) : "n" (spr)); \
+ result; })
+#endif /* __powerpc64__ */
+
+#ifdef __powerpc64__
+/* Work around the hardware bug in the current Cell implementation. */
+#define __mftb() __extension__ \
+ ({ unsigned long long result; \
+ __asm__ volatile ("1: mftb %[current_tb]\n" \
+ "\tcmpwi 7, %[current_tb], 0\n" \
+ "\tbeq- 7, 1b" \
+ : [current_tb] "=r" (result): \
+ :"cr7"); \
+ result; })
+#else
+#define __mftb() __extension__ \
+ ({ unsigned long long result; \
+ unsigned long t; \
+ __asm__ volatile ("1:\n" \
+ "\tmftbu %0\n" \
+ "\tmftb %L0\n" \
+ "\tmftbu %1\n" \
+ "\tcmpw %0,%1\n" \
+ "\tbne 1b" \
+ : "=r" (result), "=r" (t)); \
+ result; })
+#endif /* __powerpc64__ */
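+
+/* The 32-bit variant rereads the upper half until it is stable, so a
+ carry out of the low word between the two reads cannot be observed.
+ Illustrative use:
+
+ unsigned long long t0 = __mftb ();
+ // ... timed work ...
+ unsigned long long dt = __mftb () - t0;
+*/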
+
+#define __dcbf(base) \
+ __asm__ volatile ("dcbf %y0" : "=Z" (*(__V4SI*) (base)) : : "memory")
+
+#define __dcbz(base) \
+ __asm__ volatile ("dcbz %y0" : "=Z" (*(__V4SI*) (base)) : : "memory")
+
+#define __dcbst(base) \
+ __asm__ volatile ("dcbst %y0" : "=Z" (*(__V4SI*) (base)) : : "memory")
+
+#define __dcbtst(base) \
+ __asm__ volatile ("dcbtst %y0" : "=Z" (*(__V4SI*) (base)) : : "memory")
+
+#define __dcbt(base) \
+ __asm__ volatile ("dcbt %y0" : "=Z" (*(__V4SI*) (base)) : : "memory")
+
+#define __icbi(base) \
+ __asm__ volatile ("icbi %y0" : "=Z" (*(__V4SI*) (base)) : : "memory")
+
+#define __dcbt_TH1000(EATRUNC, D, UG, ID) \
+ __asm__ volatile ("dcbt %y0,8" \
+ : "=Z" (*(__V4SI*) (__SIZE_TYPE__)((((__SIZE_TYPE__) (EATRUNC)) & ~0x7F) \
+ | ((((D) & 1) << 6) \
+ | (((UG) & 1) << 5) \
+ | ((ID) & 0xF)))) : : "memory")
+
+#define __dcbt_TH1010(GO, S, UNITCNT, T, U, ID) \
+ __asm__ volatile ("dcbt %y0,10" \
+ : "=Z" (*(__V4SI*) (__SIZE_TYPE__)((((__SIZE_TYPE__) (GO) & 1) << 31) \
+ | (((S) & 0x3) << 29) \
+ | (((UNITCNT) & 0x3FF) << 7) \
+ | (((T) & 1) << 6) \
+ | (((U) & 1) << 5) \
+ | ((ID) & 0xF))) : : "memory")
+
+#define __protected_unlimited_stream_set(DIRECTION, ADDR, ID) \
+ __dcbt_TH1000 ((ADDR), (DIRECTION)>>1, 1, (ID))
+
+#define __protected_stream_set(DIRECTION, ADDR, ID) \
+ __dcbt_TH1000 ((ADDR), (DIRECTION)>>1, 0, (ID))
+
+#define __protected_stream_stop_all() \
+ __dcbt_TH1010 (0, 3, 0, 0, 0, 0)
+
+#define __protected_stream_stop(ID) \
+ __dcbt_TH1010 (0, 2, 0, 0, 0, (ID))
+
+#define __protected_stream_count(COUNT, ID) \
+ __dcbt_TH1010 (0, 0, (COUNT), 0, 0, (ID))
+
+#define __protected_stream_go() \
+ __dcbt_TH1010 (1, 0, 0, 0, 0, 0)
+
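+/* Illustrative sequence (the stream id and buffer are hypothetical;
+ direction semantics follow the Cell/B.E. language extension):
+
+ __protected_stream_set (1, buf, 1); // describe stream id 1
+ __protected_stream_count (4, 1); // prefetch 4 units on id 1
+ __protected_stream_go (); // start pending streams
+ // ...
+ __protected_stream_stop (1); // stop stream id 1
+*/
+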
+#define __lhbrx(base) __extension__ \
+ ({unsigned short result; \
+ typedef struct {char a[2];} halfwordsize; \
+ halfwordsize *ptrp = (halfwordsize*)(void*)(base); \
+ __asm__ ("lhbrx %0,%y1" \
+ : "=r" (result) \
+ : "Z" (*ptrp)); \
+ result; })
+
+#define __lwbrx(base) __extension__ \
+ ({unsigned int result; \
+ typedef struct {char a[4];} wordsize; \
+ wordsize *ptrp = (wordsize*)(void*)(base); \
+ __asm__ ("lwbrx %0,%y1" \
+ : "=r" (result) \
+ : "Z" (*ptrp)); \
+ result; })
+
+
+#ifdef __powerpc64__
+#define __ldbrx(base) __extension__ \
+ ({unsigned long long result; \
+ typedef struct {char a[8];} doublewordsize; \
+ doublewordsize *ptrp = (doublewordsize*)(void*)(base); \
+ __asm__ ("ldbrx %0,%y1" \
+ : "=r" (result) \
+ : "Z" (*ptrp)); \
+ result; })
+#else
+#define __ldbrx(base) __extension__ \
+ ({unsigned long long result; \
+ typedef struct {char a[8];} doublewordsize; \
+ doublewordsize *ptrp = (doublewordsize*)(void*)(base); \
+ __asm__ ("lwbrx %L0,%y1\n" \
+ "\tlwbrx %0,%y2" \
+ : "=&r" (result) \
+ : "Z" (*ptrp), "Z" (*((char *) ptrp + 4))); \
+ result; })
+#endif /* __powerpc64__ */
+
+
+#define __sthbrx(base, value) do { \
+ typedef struct {char a[2];} halfwordsize; \
+ halfwordsize *ptrp = (halfwordsize*)(void*)(base); \
+ __asm__ ("sthbrx %1,%y0" \
+ : "=Z" (*ptrp) \
+ : "r" (value)); \
+ } while (0)
+
+#define __stwbrx(base, value) do { \
+ typedef struct {char a[4];} wordsize; \
+ wordsize *ptrp = (wordsize*)(void*)(base); \
+ __asm__ ("stwbrx %1,%y0" \
+ : "=Z" (*ptrp) \
+ : "r" (value)); \
+ } while (0)
+
+#ifdef __powerpc64__
+#define __stdbrx(base, value) do { \
+ typedef struct {char a[8];} doublewordsize; \
+ doublewordsize *ptrp = (doublewordsize*)(void*)(base); \
+ __asm__ ("stdbrx %1,%y0" \
+ : "=Z" (*ptrp) \
+ : "r" (value)); \
+ } while (0)
+#else
+#define __stdbrx(base, value) do { \
+ typedef struct {char a[8];} doublewordsize; \
+ doublewordsize *ptrp = (doublewordsize*)(void*)(base); \
+ __asm__ ("stwbrx %L2,%y0\n" \
+ "\tstwbrx %2,%y1" \
+ : "=Z" (*ptrp), "=Z" (*((char *) ptrp + 4)) \
+ : "r" (value)); \
+ } while (0)
+#endif /* __powerpc64__ */
+
+
+#define __lwarx(base) __extension__ \
+ ({unsigned int result; \
+ typedef struct {char a[4];} wordsize; \
+ wordsize *ptrp = (wordsize*)(void*)(base); \
+ __asm__ volatile ("lwarx %0,%y1" \
+ : "=r" (result) \
+ : "Z" (*ptrp)); \
+ result; })
+
+#ifdef __powerpc64__
+#define __ldarx(base) __extension__ \
+ ({unsigned long long result; \
+ typedef struct {char a[8];} doublewordsize; \
+ doublewordsize *ptrp = (doublewordsize*)(void*)(base); \
+ __asm__ volatile ("ldarx %0,%y1" \
+ : "=r" (result) \
+ : "Z" (*ptrp)); \
+ result; })
+#endif /* __powerpc64__ */
+
+#define __stwcx(base, value) __extension__ \
+ ({unsigned int result; \
+ typedef struct {char a[4];} wordsize; \
+ wordsize *ptrp = (wordsize*)(void*)(base); \
+ __asm__ volatile ("stwcx. %2,%y1\n" \
+ "\tmfocrf %0,0x80" \
+ : "=r" (result), \
+ "=Z" (*ptrp) \
+ : "r" (value) : "cr0"); \
+ ((result & 0x20000000) >> 29); })
+
+
+#ifdef __powerpc64__
+#define __stdcx(base, value) __extension__ \
+ ({unsigned long long result; \
+ typedef struct {char a[8];} doublewordsize; \
+ doublewordsize *ptrp = (doublewordsize*)(void*)(base); \
+ __asm__ volatile ("stdcx. %2,%y1\n" \
+ "\tmfocrf %0,0x80" \
+ : "=r" (result), \
+ "=Z" (*ptrp) \
+ : "r" (value) : "cr0"); \
+ ((result & 0x20000000) >> 29); })
+#endif /* __powerpc64__ */
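+
+/* The reservation pair above composes into lock-free updates. A
+ minimal sketch (an assumption, not part of the original header):
+
+ static __inline__ unsigned int
+ __atomic_add_w (unsigned int *p, unsigned int v)
+ {
+ unsigned int old;
+ do
+ old = __lwarx (p);
+ while (!__stwcx (p, old + v));
+ return old;
+ }
+*/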
+
+#define __mffs() __extension__ \
+ ({double result; \
+ __asm__ volatile ("mffs %0" : "=f" (result)); \
+ result; })
+
+#define __mtfsf(mask,value) \
+ __asm__ volatile ("mtfsf %0,%1" : : "n" (mask), "f" ((double) (value)))
+
+#define __mtfsfi(bits,field) \
+ __asm__ volatile ("mtfsfi %0,%1" : : "n" (bits), "n" (field))
+
+#define __mtfsb0(bit) __asm__ volatile ("mtfsb0 %0" : : "n" (bit))
+#define __mtfsb1(bit) __asm__ volatile ("mtfsb1 %0" : : "n" (bit))
+
+#define __setflm(v) __extension__ \
+ ({double result; \
+ __asm__ volatile ("mffs %0\n\tmtfsf 255,%1" \
+ : "=&f" (result) \
+ : "f" ((double) (v))); \
+ result; })
+
+/* __builtin_fabs may perform unnecessary rounding. */
+
+/* Rename __fabs and __fabsf to work around internal prototypes defined
+ in bits/mathcalls.h with some glibc versions. */
+#define __fabs __ppu_fabs
+#define __fabsf __ppu_fabsf
+
+static __inline__ double __fabs(double x) __attribute__((always_inline));
+static __inline__ double
+__fabs(double x)
+{
+ double r;
+ __asm__("fabs %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ float __fabsf(float x) __attribute__((always_inline));
+static __inline__ float
+__fabsf(float x)
+{
+ float r;
+ __asm__("fabs %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ double __fnabs(double x) __attribute__((always_inline));
+static __inline__ double
+__fnabs(double x)
+{
+ double r;
+ __asm__("fnabs %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ float __fnabsf(float x) __attribute__((always_inline));
+static __inline__ float
+__fnabsf(float x)
+{
+ float r;
+ __asm__("fnabs %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ double __fmadd(double x, double y, double z)
+ __attribute__((always_inline));
+static __inline__ double
+__fmadd(double x, double y, double z)
+{
+ double r;
+ __asm__("fmadd %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ double __fmsub(double x, double y, double z)
+ __attribute__((always_inline));
+static __inline__ double
+__fmsub(double x, double y, double z)
+{
+ double r;
+ __asm__("fmsub %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ double __fnmadd(double x, double y, double z)
+ __attribute__((always_inline));
+static __inline__ double
+__fnmadd(double x, double y, double z)
+{
+ double r;
+ __asm__("fnmadd %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ double __fnmsub(double x, double y, double z)
+ __attribute__((always_inline));
+static __inline__ double
+__fnmsub(double x, double y, double z)
+{
+ double r;
+ __asm__("fnmsub %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ float __fmadds(float x, float y, float z)
+ __attribute__((always_inline));
+static __inline__ float
+__fmadds(float x, float y, float z)
+{
+ float r;
+ __asm__("fmadds %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ float __fmsubs(float x, float y, float z)
+ __attribute__((always_inline));
+static __inline__ float
+__fmsubs(float x, float y, float z)
+{
+ float r;
+ __asm__("fmsubs %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ float __fnmadds(float x, float y, float z)
+ __attribute__((always_inline));
+static __inline__ float
+__fnmadds(float x, float y, float z)
+{
+ float r;
+ __asm__("fnmadds %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ float __fnmsubs(float x, float y, float z)
+ __attribute__((always_inline));
+static __inline__ float
+__fnmsubs(float x, float y, float z)
+{
+ float r;
+ __asm__("fnmsubs %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ double __fsel(double x, double y, double z)
+ __attribute__((always_inline));
+static __inline__ double
+__fsel(double x, double y, double z)
+{
+ double r;
+ __asm__("fsel %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ float __fsels(float x, float y, float z)
+ __attribute__((always_inline));
+static __inline__ float
+__fsels(float x, float y, float z)
+{
+ float r;
+ __asm__("fsel %0,%1,%2,%3" : "=f"(r) : "f"(x),"f"(y),"f"(z));
+ return r;
+}
+
+static __inline__ double __frsqrte(double x) __attribute__((always_inline));
+static __inline__ double
+__frsqrte(double x)
+{
+ double r;
+ __asm__("frsqrte %0,%1" : "=f" (r) : "f" (x));
+ return r;
+}
+
+static __inline__ float __fres(float x) __attribute__((always_inline));
+static __inline__ float
+__fres(float x)
+{
+ float r;
+ __asm__("fres %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ double __fsqrt(double x) __attribute__((always_inline));
+static __inline__ double
+__fsqrt(double x)
+{
+ double r;
+ __asm__("fsqrt %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ float __fsqrts(float x) __attribute__((always_inline));
+static __inline__ float
+__fsqrts(float x)
+{
+ float r;
+ __asm__("fsqrts %0,%1" : "=f"(r) : "f"(x));
+ return r;
+}
+
+static __inline__ double __fmul (double a, double b) __attribute__ ((always_inline));
+static __inline__ double
+__fmul(double a, double b)
+{
+ double d;
+ __asm__ ("fmul %0,%1,%2" : "=f" (d) : "f" (a), "f" (b));
+ return d;
+}
+
+static __inline__ float __fmuls (float a, float b) __attribute__ ((always_inline));
+static __inline__ float
+__fmuls (float a, float b)
+{
+ float d;
+ __asm__ ("fmuls %0,%1,%2" : "=f" (d) : "f" (a), "f" (b));
+ return d;
+}
+
+static __inline__ float __frsp (float a) __attribute__ ((always_inline));
+static __inline__ float
+__frsp (float a)
+{
+ float d;
+ __asm__ ("frsp %0,%1" : "=f" (d) : "f" (a));
+ return d;
+}
+
+static __inline__ double __fcfid (long long a) __attribute__((always_inline));
+static __inline__ double
+__fcfid (long long a)
+{
+ double d;
+ __asm__ ("fcfid %0,%1" : "=f" (d) : "f" (a));
+ return d;
+}
+
+static __inline__ long long __fctid (double a) __attribute__ ((always_inline));
+static __inline__ long long
+__fctid (double a)
+{
+ long long d;
+ __asm__ ("fctid %0,%1" : "=f" (d) : "f" (a));
+ return d;
+}
+
+static __inline__ long long __fctidz (double a) __attribute__ ((always_inline));
+static __inline__ long long
+__fctidz (double a)
+{
+ long long d;
+ __asm__ ("fctidz %0,%1" : "=f" (d) : "f" (a));
+ return d;
+}
+
+static __inline__ int __fctiw (double a) __attribute__ ((always_inline));
+static __inline__ int
+__fctiw (double a)
+{
+ unsigned long long d;
+ __asm__ ("fctiw %0,%1" : "=f" (d) : "f" (a));
+ return (int) d;
+}
+
+static __inline__ int __fctiwz (double a) __attribute__ ((always_inline));
+static __inline__ int
+__fctiwz (double a)
+{
+ long long d;
+ __asm__ ("fctiwz %0,%1" : "=f" (d) : "f" (a));
+ return (int) d;
+}
+
+#ifdef __powerpc64__
+#define __rldcl(a,b,mb) __extension__ \
+ ({ \
+ unsigned long long d; \
+ __asm__ ("rldcl %0,%1,%2,%3" : "=r" (d) : "r" (a), "r" (b), "i" (mb)); \
+ d; \
+ })
+
+#define __rldcr(a,b,me) __extension__ \
+ ({ \
+ unsigned long long d; \
+ __asm__ ("rldcr %0,%1,%2,%3" : "=r" (d) : "r" (a), "r" (b), "i" (me)); \
+ d; \
+ })
+
+#define __rldic(a,sh,mb) __extension__ \
+ ({ \
+ unsigned long long d; \
+ __asm__ ("rldic %0,%1,%2,%3" : "=r" (d) : "r" (a), "i" (sh), "i" (mb)); \
+ d; \
+ })
+
+#define __rldicl(a,sh,mb) __extension__ \
+ ({ \
+ unsigned long long d; \
+ __asm__ ("rldicl %0,%1,%2,%3" : "=r" (d) : "r" (a), "i" (sh), "i" (mb)); \
+ d; \
+ })
+
+#define __rldicr(a,sh,me) __extension__ \
+ ({ \
+ unsigned long long d; \
+ __asm__ ("rldicr %0,%1,%2,%3" : "=r" (d) : "r" (a), "i" (sh), "i" (me)); \
+ d; \
+ })
+
+#define __rldimi(a,b,sh,mb) __extension__ \
+ ({ \
+ unsigned long long d; \
+ __asm__ ("rldimi %0,%1,%2,%3" : "=r" (d) : "r" (b), "i" (sh), "i" (mb), "0" (a)); \
+ d; \
+ })
+#endif /* __powerpc64__ */
+
+#define __rlwimi(a,b,sh,mb,me) __extension__ \
+ ({ \
+ unsigned int d; \
+ __asm__ ("rlwimi %0,%1,%2,%3,%4" : "=r" (d) : "r" (b), "i" (sh), "i" (mb), "i" (me), "0" (a)); \
+ d; \
+ })
+
+#define __rlwinm(a,sh,mb,me) __extension__ \
+ ({ \
+ unsigned int d; \
+ __asm__ ("rlwinm %0,%1,%2,%3,%4" : "=r" (d) : "r" (a), "i" (sh), "i" (mb), "i" (me)); \
+ d; \
+ })
+
+#define __rlwnm(a,b,mb,me) __extension__ \
+ ({ \
+ unsigned int d; \
+ __asm__ ("rlwnm %0,%1,%2,%3,%4" : "=r" (d) : "r" (a), "r" (b), "i" (mb), "i" (me)); \
+ d; \
+ })
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _PPU_INTRINSICS_H */
diff --git a/gcc-4.4.3/gcc/config/rs6000/predicates.md b/gcc-4.4.3/gcc/config/rs6000/predicates.md
new file mode 100644
index 000000000..af80ef46b
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/predicates.md
@@ -0,0 +1,1338 @@
+;; Predicate definitions for POWER and PowerPC.
+;; Copyright (C) 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Return 1 for anything except PARALLEL.
+(define_predicate "any_operand"
+ (match_code "const_int,const_double,const,symbol_ref,label_ref,subreg,reg,mem"))
+
+;; Return 1 for any PARALLEL.
+(define_predicate "any_parallel_operand"
+ (match_code "parallel"))
+
+;; Return 1 if op is the COUNT register.
+(define_predicate "count_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == CTR_REGNO
+ || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+
+;; Return 1 if op is an Altivec register.
+(define_predicate "altivec_register_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || ALTIVEC_REGNO_P (REGNO (op))
+ || REGNO (op) > LAST_VIRTUAL_REGISTER")))
+
+;; Return 1 if op is the XER register.
+(define_predicate "xer_operand"
+ (and (match_code "reg")
+ (match_test "XER_REGNO_P (REGNO (op))")))
+
+;; Return 1 if op is a signed 5-bit constant integer.
+(define_predicate "s5bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -16 && INTVAL (op) <= 15")))
+
+;; Return 1 if op is an unsigned 5-bit constant integer.
+(define_predicate "u5bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= 0 && INTVAL (op) <= 31")))
+
+;; Return 1 if op is a signed 8-bit constant integer.
+;; Integer multiplications complete more quickly with such an operand.
+(define_predicate "s8bit_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) >= -128 && INTVAL (op) <= 127")))
+
+;; Return 1 if op is a constant integer that can fit in a D field.
+(define_predicate "short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_I (op)")))
+
+;; Return 1 if op is a constant integer that can fit in an unsigned D field.
+(define_predicate "u_short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_K (op)")))
+
+;; Return 1 if op is a constant integer that cannot fit in a signed D field.
+(define_predicate "non_short_cint_operand"
+ (and (match_code "const_int")
+ (match_test "(unsigned HOST_WIDE_INT)
+ (INTVAL (op) + 0x8000) >= 0x10000")))
+
+;; Return 1 if op is a positive constant integer that is an exact power of 2.
+(define_predicate "exact_log2_cint_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) > 0 && exact_log2 (INTVAL (op)) >= 0")))
+
+;; Return 1 if op is a register that is not special.
+(define_predicate "gpc_reg_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "(GET_CODE (op) != REG
+ || (REGNO (op) >= ARG_POINTER_REGNUM
+ && !XER_REGNO_P (REGNO (op)))
+ || REGNO (op) < MQ_REGNO)
+ && !((TARGET_E500_DOUBLE || TARGET_SPE)
+ && invalid_e500_subreg (op, mode))")))
+
+;; Return 1 if op is a register that is a condition register field.
+(define_predicate "cc_reg_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || CR_REGNO_P (REGNO (op))")))
+
+;; Return 1 if op is a register that is a condition register field not cr0.
+(define_predicate "cc_reg_not_cr0_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || CR_REGNO_NOT_CR0_P (REGNO (op))")))
+
+;; Return 1 if op is a register that is a condition register field and,
+;; if generating microcode, not cr0.
+(define_predicate "cc_reg_not_micro_cr0_operand"
+ (and (match_operand 0 "register_operand")
+ (match_test "GET_CODE (op) != REG
+ || REGNO (op) > LAST_VIRTUAL_REGISTER
+ || (rs6000_gen_cell_microcode && CR_REGNO_NOT_CR0_P (REGNO (op)))
+ || (!rs6000_gen_cell_microcode && CR_REGNO_P (REGNO (op)))")))
+
+;; Return 1 if op is a constant integer valid for D field
+;; or non-special register.
+(define_predicate "reg_or_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_operand 0 "short_cint_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer whose negation is valid for
+;; D field or non-special register.
+;; Do not allow a constant zero because all patterns that call this
+;; predicate use "addic r1,r2,-const" to set carry when r2 is greater than
+;; or equal to const, which does not work for zero.
+(define_predicate "reg_or_neg_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "satisfies_constraint_P (op)
+ && INTVAL (op) != 0")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for DS field
+;; or non-special register.
+(define_predicate "reg_or_aligned_short_operand"
+ (if_then_else (match_code "const_int")
+ (and (match_operand 0 "short_cint_operand")
+ (match_test "!(INTVAL (op) & 3)"))
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer whose high-order 16 bits are zero
+;; or non-special register.
+(define_predicate "reg_or_u_short_operand"
+ (if_then_else (match_code "const_int")
+ (match_operand 0 "u_short_cint_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is any constant integer
+;; or non-special register.
+(define_predicate "reg_or_cint_operand"
+ (ior (match_code "const_int")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for addition
+;; or non-special register.
+(define_predicate "reg_or_add_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(HOST_BITS_PER_WIDE_INT == 32
+ && (mode == SImode || INTVAL (op) < 0x7fff8000))
+ || ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80008000)
+ < (unsigned HOST_WIDE_INT) 0x100000000ll)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is a constant integer valid for subtraction
+;; or non-special register.
+(define_predicate "reg_or_sub_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(HOST_BITS_PER_WIDE_INT == 32
+ && (mode == SImode || - INTVAL (op) < 0x7fff8000))
+ || ((unsigned HOST_WIDE_INT) (- INTVAL (op)
+ + (mode == SImode
+ ? 0x80000000 : 0x80008000))
+ < (unsigned HOST_WIDE_INT) 0x100000000ll)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if op is any 32-bit unsigned constant integer
+;; or non-special register.
+(define_predicate "reg_or_logical_cint_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "(GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ && INTVAL (op) >= 0)
+ || ((INTVAL (op) & GET_MODE_MASK (mode)
+ & (~ (unsigned HOST_WIDE_INT) 0xffffffff)) == 0)")
+ (if_then_else (match_code "const_double")
+ (match_test "GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT
+ && mode == DImode
+ && CONST_DOUBLE_HIGH (op) == 0")
+ (match_operand 0 "gpc_reg_operand"))))
+
+;; Return 1 if operand is a CONST_DOUBLE that can be set in a register
+;; with no more than one instruction per word.
+(define_predicate "easy_fp_constant"
+ (match_code "const_double")
+{
+ long k[4];
+ REAL_VALUE_TYPE rv;
+
+ if (GET_MODE (op) != mode
+ || (!SCALAR_FLOAT_MODE_P (mode) && mode != DImode))
+ return 0;
+
+ /* Consider all constants with -msoft-float to be easy. */
+ if ((TARGET_SOFT_FLOAT || TARGET_E500_SINGLE
+ || (TARGET_HARD_FLOAT && (TARGET_SINGLE_FLOAT && ! TARGET_DOUBLE_FLOAT)))
+ && mode != DImode)
+ return 1;
+
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return 0;
+
+ /* If we are using V.4 style PIC, consider all constants to be hard. */
+ if (flag_pic && DEFAULT_ABI == ABI_V4)
+ return 0;
+
+#ifdef TARGET_RELOCATABLE
+ /* Similarly if we are using -mrelocatable, consider all constants
+ to be hard. */
+ if (TARGET_RELOCATABLE)
+ return 0;
+#endif
+
+ switch (mode)
+ {
+ case TFmode:
+ if (TARGET_E500_DOUBLE)
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+
+ return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[2]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[3]) == 1);
+
+ case DFmode:
+ /* Force constants to memory before reload to utilize
+ compress_float_constant.
+ Avoid this when flag_unsafe_math_optimizations is enabled
+ because RDIV division to reciprocal optimization is not able
+ to regenerate the division. */
+ if (TARGET_E500_DOUBLE
+ || (!reload_in_progress && !reload_completed
+ && !flag_unsafe_math_optimizations))
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ return (num_insns_constant_wide ((HOST_WIDE_INT) k[0]) == 1
+ && num_insns_constant_wide ((HOST_WIDE_INT) k[1]) == 1);
+
+ case SFmode:
+ /* The constant 0.f is easy. */
+ if (op == CONST0_RTX (SFmode))
+ return 1;
+
+ /* Force constants to memory before reload to utilize
+ compress_float_constant.
+ Avoid this when flag_unsafe_math_optimizations is enabled
+ because RDIV division to reciprocal optimization is not able
+ to regenerate the division. */
+ if (!reload_in_progress && !reload_completed
+ && !flag_unsafe_math_optimizations)
+ return 0;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, k[0]);
+
+ return num_insns_constant_wide (k[0]) == 1;
+
+ case DImode:
+ return ((TARGET_POWERPC64
+ && GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_LOW (op) == 0)
+ || (num_insns_constant (op, DImode) <= 2));
+
+ case SImode:
+ return 1;
+
+ default:
+ gcc_unreachable ();
+ }
+})
+
+;; Return 1 if the operand is a CONST_VECTOR and can be loaded into a
+;; vector register without using memory.
+(define_predicate "easy_vector_constant"
+ (match_code "const_vector")
+{
+ /* As the paired vectors are actually FPRs it seems that there is
+ no easy way to load a CONST_VECTOR without using memory. */
+ if (TARGET_PAIRED_FLOAT)
+ return false;
+
+ if (ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (zero_constant (op, mode))
+ return true;
+ return easy_altivec_constant (op, mode);
+ }
+
+ if (SPE_VECTOR_MODE (mode))
+ {
+ int cst, cst2;
+ if (zero_constant (op, mode))
+ return true;
+ if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT)
+ return false;
+
+ /* Limit SPE vectors to 15 bits signed. These we can generate with:
+ li r0, CONSTANT1
+ evmergelo r0, r0, r0
+ li r0, CONSTANT2
+
+ I don't know how efficient it would be to allow bigger constants,
+ considering we'll have an extra 'ori' for every 'li'. I doubt 5
+ instructions is better than a 64-bit memory load, but I don't
+ have the e500 timing specs. */
+ if (mode == V2SImode)
+ {
+ cst = INTVAL (CONST_VECTOR_ELT (op, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (op, 1));
+ return cst >= -0x7fff && cst <= 0x7fff
+ && cst2 >= -0x7fff && cst2 <= 0x7fff;
+ }
+ }
+
+ return false;
+})
+
+;; Same as easy_vector_constant but only for EASY_VECTOR_15_ADD_SELF.
+(define_predicate "easy_vector_constant_add_self"
+ (and (match_code "const_vector")
+ (and (match_test "TARGET_ALTIVEC")
+ (match_test "easy_altivec_constant (op, mode)")))
+{
+ HOST_WIDE_INT val = const_vector_elt_as_int (op, GET_MODE_NUNITS (mode) - 1);
+ val = ((val & 0xff) ^ 0x80) - 0x80;
+ return EASY_VECTOR_15_ADD_SELF (val);
+})
+
+;; Return 1 if operand is constant zero (scalars and vectors).
+(define_predicate "zero_constant"
+ (and (match_code "const_int,const_double,const_vector")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Return 1 if operand is 0.0.
+(define_predicate "zero_fp_constant"
+ (and (match_code "const_double")
+ (match_test "SCALAR_FLOAT_MODE_P (mode)
+ && op == CONST0_RTX (mode)")))
+
+;; Return 1 if the operand is in volatile memory. Note that during the
+;; RTL generation phase, memory_operand does not return TRUE for volatile
+;; memory references. So this function allows us to recognize volatile
+;; references where it's safe.
+(define_predicate "volatile_mem_operand"
+ (and (and (match_code "mem")
+ (match_test "MEM_VOLATILE_P (op)"))
+ (if_then_else (match_test "reload_completed")
+ (match_operand 0 "memory_operand")
+ (if_then_else (match_test "reload_in_progress")
+ (match_test "strict_memory_address_p (mode, XEXP (op, 0))")
+ (match_test "memory_address_p (mode, XEXP (op, 0))")))))
+
+;; Return 1 if the operand is an offsettable memory operand.
+(define_predicate "offsettable_mem_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_test "GET_CODE (XEXP (op, 0)) != PRE_INC
+ && GET_CODE (XEXP (op, 0)) != PRE_DEC
+ && GET_CODE (XEXP (op, 0)) != PRE_MODIFY")))
+
+;; Return 1 if the operand is a memory operand with an address divisible by 4.
+(define_predicate "word_offset_memref_operand"
+ (match_operand 0 "memory_operand")
+{
+ /* Address inside MEM. */
+ op = XEXP (op, 0);
+
+ /* Extract address from auto-inc/dec. */
+ if (GET_CODE (op) == PRE_INC
+ || GET_CODE (op) == PRE_DEC)
+ op = XEXP (op, 0);
+ else if (GET_CODE (op) == PRE_MODIFY)
+ op = XEXP (op, 1);
+
+ return (GET_CODE (op) != PLUS
+ || ! REG_P (XEXP (op, 0))
+ || GET_CODE (XEXP (op, 1)) != CONST_INT
+ || INTVAL (XEXP (op, 1)) % 4 == 0);
+})
+
+;; Return 1 if the operand is an indexed or indirect memory operand.
+(define_predicate "indexed_or_indirect_operand"
+ (match_code "mem")
+{
+ op = XEXP (op, 0);
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (op) == AND
+ && GET_CODE (XEXP (op, 1)) == CONST_INT
+ && INTVAL (XEXP (op, 1)) == -16)
+ op = XEXP (op, 0);
+
+ return indexed_or_indirect_address (op, mode);
+})
+
+;; Return 1 if the operand is an indexed or indirect address.
+(define_special_predicate "indexed_or_indirect_address"
+ (and (match_test "REG_P (op)
+ || (GET_CODE (op) == PLUS
+ /* Omit testing REG_P (XEXP (op, 0)). */
+ && REG_P (XEXP (op, 1)))")
+ (match_operand 0 "address_operand")))
+
+;; Used for the destination of the fix_truncdfsi2 expander.
+;; If stfiwx will be used, the result goes to memory; otherwise,
+;; we're going to emit a store and a load of a subreg, so the dest is a
+;; register.
+(define_predicate "fix_trunc_dest_operand"
+ (if_then_else (match_test "! TARGET_E500_DOUBLE && TARGET_PPC_GFXOPT")
+ (match_operand 0 "memory_operand")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is either a non-special register or can be used
+;; as the operand of a `mode' add insn.
+(define_predicate "add_operand"
+ (if_then_else (match_code "const_int")
+ (match_test "satisfies_constraint_I (op)
+ || satisfies_constraint_L (op)")
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if OP is a constant but not a valid add_operand.
+(define_predicate "non_add_cint_operand"
+ (and (match_code "const_int")
+ (match_test "!satisfies_constraint_I (op)
+ && !satisfies_constraint_L (op)")))
+
+;; Return 1 if the operand is a constant that can be used as the operand
+;; of an OR or XOR.
+(define_predicate "logical_const_operand"
+ (match_code "const_int,const_double")
+{
+ HOST_WIDE_INT opl, oph;
+
+ if (GET_CODE (op) == CONST_INT)
+ {
+ opl = INTVAL (op) & GET_MODE_MASK (mode);
+
+ if (HOST_BITS_PER_WIDE_INT <= 32
+ && GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && opl < 0)
+ return 0;
+ }
+ else if (GET_CODE (op) == CONST_DOUBLE)
+ {
+ gcc_assert (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT);
+
+ opl = CONST_DOUBLE_LOW (op);
+ oph = CONST_DOUBLE_HIGH (op);
+ if (oph != 0)
+ return 0;
+ }
+ else
+ return 0;
+
+ return ((opl & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0
+ || (opl & ~ (unsigned HOST_WIDE_INT) 0xffff0000) == 0);
+})
+
+;; Return 1 if the operand is a non-special register or a constant that
+;; can be used as the operand of an OR or XOR.
+(define_predicate "logical_operand"
+ (ior (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_const_operand")))
+
+;; Return 1 if op is a constant that is not a logical operand, but could
+;; be split into one.
+(define_predicate "non_logical_cint_operand"
+ (and (match_code "const_int,const_double")
+ (and (not (match_operand 0 "logical_operand"))
+ (match_operand 0 "reg_or_logical_cint_operand"))))
+
+;; Return 1 if op is a constant that can be encoded in a 32-bit mask,
+;; suitable for use with rlwinm (no more than two 1->0 or 0->1
+;; transitions). Reject all ones and all zeros, since these should have
+;; been optimized away and confuse the making of MB and ME.
+(define_predicate "mask_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ if (TARGET_POWERPC64)
+ {
+ /* Fail if the mask is not 32-bit. */
+ if (mode == DImode && (c & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0)
+ return 0;
+
+ /* Fail if the mask wraps around because the upper 32-bits of the
+ mask will all be 1s, contrary to GCC's internal view. */
+ if ((c & 0x80000001) == 0x80000001)
+ return 0;
+ }
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Reject all zeros or all ones. */
+ if (c == 0)
+ return 0;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a second transition. */
+ c = ~c;
+
+ /* Erase first transition. */
+ c &= -lsb;
+
+ /* Find the second transition (if any). */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
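+;; (Illustrative walk-through, using 8 bits for brevity: c = 0b00111100.
+;; The low bit is clear, so no inversion.  lsb = c & -c = 0b00000100
+;; marks the first transition; ~c = 0b11000011, and ~c & -lsb =
+;; 0b11000000 erases everything below that transition.  Its lowest set
+;; bit is 0b01000000, and 0b11000000 == -0b01000000, so all remaining
+;; bits are contiguous ones and the mask matches.)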
+
+;; Return 1 for the PowerPC64 rlwinm corner case.
+(define_predicate "mask_operand_wrap"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ if ((c & 0x80000001) != 0x80000001)
+ return 0;
+
+ c = ~c;
+ if (c == 0)
+ return 0;
+
+ lsb = c & -c;
+ c = ~c;
+ c &= -lsb;
+ lsb = c & -c;
+ return c == -lsb;
+})
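+;; (Illustrative note: 0xe0000003, with the top three and bottom two
+;; bits set, is such a wrapping mask; rlwinm encodes it with MB > ME,
+;; the case mask_operand above rejects when TARGET_POWERPC64.)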
+
+;; Return 1 if the operand is a constant that is a PowerPC64 mask
+;; suitable for use with rldicl or rldicr (no more than one 1->0 or 0->1
+;; transition). Reject all zeros, since zero should have been
+;; optimized away and would confuse the computation of MB and ME.
+(define_predicate "mask64_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ /* Reject all zeros. */
+ if (c == 0)
+ return 0;
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
+
+;; Like mask64_operand, but allow up to three transitions. This
+;; predicate is used by insn patterns that generate two rldicl or
+;; rldicr machine insns.
+(define_predicate "mask64_2_operand"
+ (match_code "const_int")
+{
+ HOST_WIDE_INT c, lsb;
+
+ c = INTVAL (op);
+
+ /* Disallow all zeros. */
+ if (c == 0)
+ return 0;
+
+ /* We don't change the number of transitions by inverting,
+ so make sure we start with the LS bit zero. */
+ if (c & 1)
+ c = ~c;
+
+ /* Find the first transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a second transition. */
+ c = ~c;
+
+ /* Erase first transition. */
+ c &= -lsb;
+
+ /* Find the second transition. */
+ lsb = c & -c;
+
+ /* Invert to look for a third transition. */
+ c = ~c;
+
+ /* Erase second transition. */
+ c &= -lsb;
+
+ /* Find the third transition (if any). */
+ lsb = c & -c;
+
+ /* Match if all the bits above are 1's (or c is zero). */
+ return c == -lsb;
+})
+
+;; Like and_operand, but also match constants that can be implemented
+;; with two rldicl or rldicr insns.
+(define_predicate "and64_2_operand"
+ (ior (match_operand 0 "mask64_2_operand")
+ (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+ (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_operand"))))
+
+;; Return 1 if the operand is either a non-special register or a
+;; constant that can be used as the operand of a logical AND.
+(define_predicate "and_operand"
+ (ior (match_operand 0 "mask_operand")
+ (ior (and (match_test "TARGET_POWERPC64 && mode == DImode")
+ (match_operand 0 "mask64_operand"))
+ (if_then_else (match_test "fixed_regs[CR0_REGNO]")
+ (match_operand 0 "gpc_reg_operand")
+ (match_operand 0 "logical_operand")))))
+
+;; Return 1 if the operand is either a logical operand or a short cint operand.
+(define_predicate "scc_eq_operand"
+ (ior (match_operand 0 "logical_operand")
+ (match_operand 0 "short_cint_operand")))
+
+;; Return 1 if the operand is a general non-special register or memory operand.
+(define_predicate "reg_or_mem_operand"
+ (ior (match_operand 0 "memory_operand")
+ (ior (and (match_code "mem")
+ (match_test "macho_lo_sum_memory_operand (op, mode)"))
+ (ior (match_operand 0 "volatile_mem_operand")
+ (match_operand 0 "gpc_reg_operand")))))
+
+;; Return 1 if the operand is either an easy FP constant or memory or reg.
+(define_predicate "reg_or_none500mem_operand"
+ (if_then_else (match_code "mem")
+ (and (match_test "!TARGET_E500_DOUBLE")
+ (ior (match_operand 0 "memory_operand")
+ (ior (match_test "macho_lo_sum_memory_operand (op, mode)")
+ (match_operand 0 "volatile_mem_operand"))))
+ (match_operand 0 "gpc_reg_operand")))
+
+;; Return 1 if the operand is CONST_DOUBLE 0, register or memory operand.
+(define_predicate "zero_reg_mem_operand"
+ (ior (match_operand 0 "zero_fp_constant")
+ (match_operand 0 "reg_or_mem_operand")))
+
+;; Return 1 if the operand is a general register or memory operand without
+;; pre_inc, pre_dec, or pre_modify, any of which would produce an
+;; invalid form of the PowerPC lwa instruction.
+(define_predicate "lwa_operand"
+ (match_code "reg,subreg,mem")
+{
+ rtx inner = op;
+
+ if (reload_completed && GET_CODE (inner) == SUBREG)
+ inner = SUBREG_REG (inner);
+
+ return gpc_reg_operand (inner, mode)
+ || (memory_operand (inner, mode)
+ && GET_CODE (XEXP (inner, 0)) != PRE_INC
+ && GET_CODE (XEXP (inner, 0)) != PRE_DEC
+ && (GET_CODE (XEXP (inner, 0)) != PRE_MODIFY
+ || legitimate_indexed_address_p (XEXP (XEXP (inner, 0), 1), 0))
+ && (GET_CODE (XEXP (inner, 0)) != PLUS
+ || GET_CODE (XEXP (XEXP (inner, 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (inner, 0), 1)) % 4 == 0));
+})
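+;; (Illustrative sketch: (mem:SI (plus:DI (reg:DI 3) (const_int 8))) is
+;; accepted, but the same address with offset 6 is not, because the DS
+;; instruction form used by lwa requires a displacement that is a
+;; multiple of 4.)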
+
+;; Return 1 if the operand, used inside a MEM, is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand"
+ (and (match_code "symbol_ref")
+ (match_test "(mode == VOIDmode || GET_MODE (op) == mode)
+ && (DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))")))
+
+;; Return 1 if op is an operand that can be loaded via the GOT.
+(define_predicate "got_operand"
+ (match_code "symbol_ref,const,label_ref"))
+
+;; Return 1 if op is a simple reference that can be loaded via the GOT,
+;; excluding labels involving addition.
+(define_predicate "got_no_const_operand"
+ (match_code "symbol_ref,label_ref"))
+
+;; Return 1 if op is a SYMBOL_REF for a TLS symbol.
+(define_predicate "rs6000_tls_symbol_ref"
+ (and (match_code "symbol_ref")
+ (match_test "RS6000_SYMBOL_REF_TLS_P (op)")))
+
+;; Return 1 if the operand, used inside a MEM, is a valid first argument
+;; to CALL. This is a SYMBOL_REF, a pseudo-register, LR or CTR.
+(define_predicate "call_operand"
+ (if_then_else (match_code "reg")
+ (match_test "REGNO (op) == LR_REGNO
+ || REGNO (op) == CTR_REGNO
+ || REGNO (op) >= FIRST_PSEUDO_REGISTER")
+ (match_code "symbol_ref")))
+
+;; Return 1 if the operand is a SYMBOL_REF for a function known to be in
+;; this file.
+(define_predicate "current_file_function_operand"
+ (and (match_code "symbol_ref")
+ (match_test "(DEFAULT_ABI != ABI_AIX || SYMBOL_REF_FUNCTION_P (op))
+ && ((SYMBOL_REF_LOCAL_P (op)
+ && (DEFAULT_ABI != ABI_AIX
+ || !SYMBOL_REF_EXTERNAL_P (op)))
+ || (op == XEXP (DECL_RTL (current_function_decl),
+ 0)))")))
+
+;; Return 1 if this operand is a valid input for a move insn.
+(define_predicate "input_operand"
+ (match_code "label_ref,symbol_ref,const,high,reg,subreg,mem,
+ const_double,const_vector,const_int,plus")
+{
+ /* Memory is always valid. */
+ if (memory_operand (op, mode))
+ return 1;
+
+ /* For floating-point, easy constants are valid. */
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && CONSTANT_P (op)
+ && easy_fp_constant (op, mode))
+ return 1;
+
+ /* Allow any integer constant. */
+ if (GET_MODE_CLASS (mode) == MODE_INT
+ && (GET_CODE (op) == CONST_INT
+ || GET_CODE (op) == CONST_DOUBLE))
+ return 1;
+
+ /* Allow easy vector constants. */
+ if (GET_CODE (op) == CONST_VECTOR
+ && easy_vector_constant (op, mode))
+ return 1;
+
+ /* Do not allow invalid E500 subregs. */
+ if ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && GET_CODE (op) == SUBREG
+ && invalid_e500_subreg (op, mode))
+ return 0;
+
+ /* For floating-point or multi-word mode, the only remaining valid type
+ is a register. */
+ if (SCALAR_FLOAT_MODE_P (mode)
+ || GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return register_operand (op, mode);
+
+ /* The only cases left are integral modes one word or smaller (we
+ do not get called for MODE_CC values). These can be in any
+ register. */
+ if (register_operand (op, mode))
+ return 1;
+
+ /* A SYMBOL_REF referring to the TOC is valid. */
+ if (legitimate_constant_pool_address_p (op))
+ return 1;
+
+  /* A constant pool expression (relative to the TOC) is valid.  */
+ if (toc_relative_expr_p (op))
+ return 1;
+
+  /* Under the V.4 ABI, SYMBOL_REFs and CONSTs in the small data region
+     are valid.  */
+ if (DEFAULT_ABI == ABI_V4
+ && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST)
+ && small_data_operand (op, Pmode))
+ return 1;
+
+ return 0;
+})
+
+;; Return true if OP is a nonimmediate operand, rejecting SUBREGs that
+;; are invalid on the e500.
+(define_predicate "rs6000_nonimmediate_operand"
+ (match_code "reg,subreg,mem")
+{
+ if ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && GET_CODE (op) == SUBREG
+ && invalid_e500_subreg (op, mode))
+ return 0;
+
+ return nonimmediate_operand (op, mode);
+})
+
+;; Return true if operand is boolean operator.
+(define_predicate "boolean_operator"
+ (match_code "and,ior,xor"))
+
+;; Return true if operand is OR-form of boolean operator.
+(define_predicate "boolean_or_operator"
+ (match_code "ior,xor"))
+
+;; Return true if operand is an equality operator.
+(define_special_predicate "equality_operator"
+ (match_code "eq,ne"))
+
+;; Return true if operand is MIN or MAX operator.
+(define_predicate "min_max_operator"
+ (match_code "smin,smax,umin,umax"))
+
+;; Return 1 if OP is a comparison operation that is valid for a branch
+;; instruction. We check the opcode against the mode of the CC value.
+;; validate_condition_mode is an assertion.
+(define_predicate "branch_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (and (match_test "GET_MODE_CLASS (GET_MODE (XEXP (op, 0))) == MODE_CC")
+ (match_test "validate_condition_mode (GET_CODE (op),
+ GET_MODE (XEXP (op, 0))),
+ 1"))))
+
+;; Return 1 if OP is a comparison operation that is valid for an SCC insn --
+;; it must be a positive comparison.
+(define_predicate "scc_comparison_operator"
+ (and (match_operand 0 "branch_comparison_operator")
+ (match_code "eq,lt,gt,ltu,gtu,unordered")))
+
+;; Return 1 if OP is a comparison operation that is valid for a branch
+;; insn, which is true if the corresponding bit in the CC register is set.
+(define_predicate "branch_positive_comparison_operator"
+ (and (match_operand 0 "branch_comparison_operator")
+ (match_code "eq,lt,gt,ltu,gtu,unordered")))
+
+;; Return 1 if OP is a comparison operation that is valid for a trap insn.
+(define_predicate "trap_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (match_code "eq,ne,le,lt,ge,gt,leu,ltu,geu,gtu")))
+
+;; Return 1 if OP is a load multiple operation, known to be a PARALLEL.
+(define_predicate "load_multiple_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+})
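+;; (Illustrative sketch of the accepted shape, loading two consecutive
+;; words starting at the address in r9:
+;;   (parallel [(set (reg:SI 30) (mem:SI (reg:SI 9)))
+;;              (set (reg:SI 31)
+;;                   (mem:SI (plus:SI (reg:SI 9) (const_int 4))))]).)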
+
+;; Return 1 if OP is a store multiple operation, known to be a PARALLEL.
+;; The second vector element is a CLOBBER.
+(define_predicate "store_multiple_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0) - 1;
+ unsigned int src_regno;
+ rtx dest_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i + 1);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+})
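+;; (Illustrative sketch of the accepted shape, with the CLOBBER in
+;; element 1 skipped by the loop above:
+;;   (parallel [(set (mem:SI (reg:SI 9)) (reg:SI 30))
+;;              (clobber (scratch:SI))
+;;              (set (mem:SI (plus:SI (reg:SI 9) (const_int 4)))
+;;                   (reg:SI 31))]).)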
+
+;; Return 1 if OP is valid for a save_world call in prologue, known to be
+;; a PARALLEL.
+(define_predicate "save_world_operation"
+ (match_code "parallel")
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 54)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+
+  for (i = 1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), DFmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != DFmode)
+ return 0;
+ }
+
+  for (i = 1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != V4SImode)
+ return 0;
+ }
+
+  for (i = 1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+ }
+
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || ! memory_operand (SET_DEST (elt), Pmode)
+ || GET_CODE (SET_SRC (elt)) != REG
+ || REGNO (SET_SRC (elt)) != CR2_REGNO
+ || GET_MODE (SET_SRC (elt)) != Pmode)
+ return 0;
+
+ if (GET_CODE (XVECEXP (op, 0, index++)) != SET
+ || GET_CODE (XVECEXP (op, 0, index++)) != SET)
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is valid for a restore_world call in epilogue, known to be
+;; a PARALLEL.
+(define_predicate "restore_world_operation"
+ (match_code "parallel")
+{
+ int index;
+ int i;
+ rtx elt;
+ int count = XVECLEN (op, 0);
+
+ if (count != 59)
+ return 0;
+
+ index = 0;
+ if (GET_CODE (XVECEXP (op, 0, index++)) != RETURN
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER)
+ return 0;
+
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || REGNO (SET_DEST (elt)) != CR2_REGNO
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+
+  for (i = 1; i <= 19; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), Pmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != Pmode)
+ return 0;
+ }
+
+  for (i = 1; i <= 12; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != V4SImode)
+ return 0;
+ }
+
+  for (i = 1; i <= 18; i++)
+ {
+ elt = XVECEXP (op, 0, index++);
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || ! memory_operand (SET_SRC (elt), DFmode)
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != DFmode)
+ return 0;
+ }
+
+ if (GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != CLOBBER
+ || GET_CODE (XVECEXP (op, 0, index++)) != USE)
+ return 0;
+ return 1;
+})
+
+;; Return 1 if OP is valid for a vrsave call, known to be a PARALLEL.
+(define_predicate "vrsave_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno, src_regno;
+ int i;
+
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC_VOLATILE
+ || XINT (SET_SRC (XVECEXP (op, 0, 0)), 1) != UNSPECV_SET_VRSAVE)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_regno = REGNO (XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 1));
+
+ if (dest_regno != VRSAVE_REGNO || src_regno != VRSAVE_REGNO)
+ return 0;
+
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != CLOBBER
+ && GET_CODE (elt) != SET)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for mfcr insn, known to be a PARALLEL.
+(define_predicate "mfcr_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+ rtx src_reg;
+
+ src_reg = XVECEXP (SET_SRC (exp), 0, 0);
+
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != CCmode
+ || ! CR_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != SImode
+ || ! INT_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ maskval = 1 << (MAX_CR_REGNO - REGNO (src_reg));
+
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_MOVESI_FROM_CR
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+})
+
+;; Return 1 if OP is valid for mtcrf insn, known to be a PARALLEL.
+(define_predicate "mtcrf_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ int i;
+ rtx src_reg;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+ src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
+
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != SImode
+ || ! INT_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != CCmode
+ || ! CR_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
+
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != UNSPEC_MOVESI_TO_CR
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+})
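+;; (Illustrative sketch of a single-entry mtcrf PARALLEL, assuming cr7
+;; is hard register number MAX_CR_REGNO, here 75, so its mask bit is 1:
+;;   (parallel [(set (reg:CC 75)
+;;                   (unspec:CC [(reg:SI 5) (const_int 1)]
+;;                              UNSPEC_MOVESI_TO_CR))]).)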
+
+;; Return 1 if OP is valid for lmw insn, known to be a PARALLEL.
+(define_predicate "lmw_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ if (dest_regno > 31
+ || count != 32 - (int) dest_regno)
+ return 0;
+
+ if (legitimate_indirect_address_p (src_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (src_addr);
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, src_addr, 0))
+ {
+ offset = INTVAL (XEXP (src_addr, 1));
+ base_regno = REGNO (XEXP (src_addr, 0));
+ }
+ else
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_SRC (elt), 0);
+ if (legitimate_indirect_address_p (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+})
+
+;; Return 1 if OP is valid for stmw insn, known to be a PARALLEL.
+(define_predicate "stmw_operation"
+ (match_code "parallel")
+{
+ int count = XVECLEN (op, 0);
+ unsigned int src_regno;
+ rtx dest_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ if (src_regno > 31
+ || count != 32 - (int) src_regno)
+ return 0;
+
+ if (legitimate_indirect_address_p (dest_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (dest_addr);
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, dest_addr, 0))
+ {
+ offset = INTVAL (XEXP (dest_addr, 1));
+ base_regno = REGNO (XEXP (dest_addr, 0));
+ }
+ else
+ return 0;
+
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_DEST (elt), 0);
+ if (legitimate_indirect_address_p (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (rs6000_legitimate_offset_address_p (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+})
diff --git a/gcc-4.4.3/gcc/config/rs6000/rios1.md b/gcc-4.4.3/gcc/config/rs6000/rios1.md
new file mode 100644
index 000000000..be2262d12
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rios1.md
@@ -0,0 +1,191 @@
+;; Scheduling description for IBM POWER processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "rios1,rios1fp")
+(define_cpu_unit "iu_rios1" "rios1")
+(define_cpu_unit "fpu_rios1" "rios1fp")
+(define_cpu_unit "bpu_rios1" "rios1")
+
+;; RIOS1 32-bit IU, FPU, BPU
+
+(define_insn_reservation "rios1-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-fpload" 2
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1")
+
+(define_insn_reservation "ppc601-fpload" 3
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-fpstore" 3
+ (and (eq_attr "type" "fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1+fpu_rios1")
+
+(define_insn_reservation "rios1-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,\
+ trap,var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1")
+
+(define_insn_reservation "rios1-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,iu_rios1")
+
+(define_insn_reservation "rios1-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,iu_rios1,iu_rios1")
+
+(define_insn_reservation "rios1-imul" 5
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*5")
+
+(define_insn_reservation "rios1-imul2" 4
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*4")
+
+(define_insn_reservation "rios1-imul3" 3
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*3")
+
+(define_insn_reservation "ppc601-imul" 5
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1*5")
+
+(define_insn_reservation "rios1-idiv" 19
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1*19")
+
+(define_insn_reservation "ppc601-idiv" 36
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1*36")
+
+; compare executes on integer unit, but feeds insns which
+; execute on the branch unit.
+(define_insn_reservation "rios1-compare" 4
+ (and (eq_attr "type" "cmp,fast_compare,compare")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1,nothing*2,bpu_rios1")
+
+(define_insn_reservation "rios1-delayed_compare" 5
+ (and (eq_attr "type" "delayed_compare,var_delayed_compare")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1,nothing*3,bpu_rios1")
+
+(define_insn_reservation "ppc601-compare" 3
+ (and (eq_attr "type" "cmp,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1,nothing,bpu_rios1")
+
+(define_insn_reservation "rios1-fpcompare" 9
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rios1"))
+ "fpu_rios1,nothing*3,bpu_rios1")
+
+(define_insn_reservation "ppc601-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc601"))
+ "(fpu_rios1+iu_rios1*2),nothing*2,bpu_rios1")
+
+(define_insn_reservation "rios1-fp" 2
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "rios1"))
+ "fpu_rios1")
+
+(define_insn_reservation "ppc601-fp" 4
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1")
+
+(define_insn_reservation "rios1-dmul" 5
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1*2")
+
+(define_insn_reservation "rios1-sdiv" 19
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rios1"))
+ "fpu_rios1*19")
+
+(define_insn_reservation "ppc601-sdiv" 17
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1*17")
+
+(define_insn_reservation "ppc601-ddiv" 31
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc601"))
+ "fpu_rios1*31")
+
+(define_insn_reservation "rios1-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-mtcr" 4
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-crlogical" 4
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "bpu_rios1")
+
+(define_insn_reservation "rios1-mtjmpr" 5
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rios1"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "ppc601-mtjmpr" 4
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "iu_rios1,bpu_rios1")
+
+(define_insn_reservation "rios1-branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "rios1,ppc601"))
+ "bpu_rios1")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/rios2.md b/gcc-4.4.3/gcc/config/rs6000/rios2.md
new file mode 100644
index 000000000..24fbc15b9
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rios2.md
@@ -0,0 +1,129 @@
+;; Scheduling description for IBM Power2 processor.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "rios2,rios2fp")
+(define_cpu_unit "iu1_rios2,iu2_rios2" "rios2")
+(define_cpu_unit "fpu1_rios2,fpu2_rios2" "rios2fp")
+(define_cpu_unit "bpu_rios2" "rios2")
+
+;; RIOS2 32-bit 2xIU, 2xFPU, BPU
+;; IU1 can perform all integer operations
+;; IU2 can perform all integer operations except imul and idiv
+
+(define_insn_reservation "rios2-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,\
+ load_ux,load_u,fpload,fpload_ux,fpload_u,\
+ load_l,store_c,sync")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2,iu1_rios2|iu2_rios2")
+
+(define_insn_reservation "rios2-imul" 2
+ (and (eq_attr "type" "imul,imul2,imul3,imul_compare")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2*2")
+
+(define_insn_reservation "rios2-idiv" 13
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2*13")
+
+; compare executes on integer unit, but feeds insns which
+; execute on the branch unit.
+(define_insn_reservation "rios2-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,delayed_compare,\
+ var_delayed_compare")
+ (eq_attr "cpu" "rios2"))
+ "(iu1_rios2|iu2_rios2),nothing,bpu_rios2")
+
+(define_insn_reservation "rios2-fp" 2
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "rios2"))
+ "fpu1_rios2|fpu2_rios2")
+
+(define_insn_reservation "rios2-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rios2"))
+ "(fpu1_rios2|fpu2_rios2),nothing*3,bpu_rios2")
+
+(define_insn_reservation "rios2-dmul" 2
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "rios2"))
+ "fpu1_rios2|fpu2_rios2")
+
+(define_insn_reservation "rios2-sdiv" 17
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rios2"))
+ "(fpu1_rios2*17)|(fpu2_rios2*17)")
+
+(define_insn_reservation "rios2-ssqrt" 26
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "rios2"))
+ "(fpu1_rios2*26)|(fpu2_rios2*26)")
+
+(define_insn_reservation "rios2-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-crlogical" 3
+ (and (eq_attr "type" "cr_logical,delayed_cr")
+ (eq_attr "cpu" "rios2"))
+ "bpu_rios2")
+
+(define_insn_reservation "rios2-mtjmpr" 5
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "rios2"))
+ "iu1_rios2,bpu_rios2")
+
+(define_insn_reservation "rios2-branch" 1
+ (and (eq_attr "type" "jmpreg,branch,isync")
+ (eq_attr "cpu" "rios2"))
+ "bpu_rios2")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000-c.c b/gcc-4.4.3/gcc/config/rs6000/rs6000-c.c
new file mode 100644
index 000000000..3cae1c695
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000-c.c
@@ -0,0 +1,3272 @@
+/* Subroutines for the C front end on the POWER and PowerPC architectures.
+ Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+ Free Software Foundation, Inc.
+
+ Contributed by Zack Weinberg <zack@codesourcery.com>
+ and Paolo Bonzini <bonzini@gnu.org>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "cpplib.h"
+#include "tree.h"
+#include "c-common.h"
+#include "c-pragma.h"
+#include "c-tree.h"
+#include "toplev.h"
+#include "tm_p.h"
+#include "target.h"
+#include "langhooks.h"
+
+
+
+/* Handle the machine-specific pragma longcall.  Its syntax is
+
+ # pragma longcall ( TOGGLE )
+
+ where TOGGLE is either 0 or 1.
+
+ rs6000_default_long_calls is set to the value of TOGGLE, changing
+ whether or not new function declarations receive a longcall
+ attribute by default. */
+
+#define SYNTAX_ERROR(gmsgid) do { \
+ warning (OPT_Wpragmas, gmsgid); \
+ warning (OPT_Wpragmas, "ignoring malformed #pragma longcall"); \
+ return; \
+} while (0)
+
+void
+rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
+{
+ tree x, n;
+
+ /* If we get here, generic code has already scanned the directive
+ leader and the word "longcall". */
+
+ if (pragma_lex (&x) != CPP_OPEN_PAREN)
+ SYNTAX_ERROR ("missing open paren");
+ if (pragma_lex (&n) != CPP_NUMBER)
+ SYNTAX_ERROR ("missing number");
+ if (pragma_lex (&x) != CPP_CLOSE_PAREN)
+ SYNTAX_ERROR ("missing close paren");
+
+ if (n != integer_zero_node && n != integer_one_node)
+ SYNTAX_ERROR ("number must be 0 or 1");
+
+ if (pragma_lex (&x) != CPP_EOF)
+ warning (OPT_Wpragmas, "junk at end of #pragma longcall");
+
+ rs6000_default_long_calls = (n == integer_one_node);
+}
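+
+/* Illustrative usage (not part of the upstream source):
+
+       #pragma longcall (1)
+       extern void far_func (void);    -- receives the longcall attribute
+       #pragma longcall (0)
+       extern void near_func (void);   -- back to ordinary near calls
+*/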
+
+/* Handle defining many CPP flags based on TARGET_xxx. As a general
+ policy, rather than trying to guess what flags a user might want a
+ #define for, it's better to define a flag for everything. */
+
+#define builtin_define(TXT) cpp_define (pfile, TXT)
+#define builtin_assert(TXT) cpp_assert (pfile, TXT)
+
+/* Keep the AltiVec keywords handy for fast comparisons. */
+static GTY(()) tree __vector_keyword;
+static GTY(()) tree vector_keyword;
+static GTY(()) tree __pixel_keyword;
+static GTY(()) tree pixel_keyword;
+static GTY(()) tree __bool_keyword;
+static GTY(()) tree bool_keyword;
+static GTY(()) tree _Bool_keyword;
+
+/* Preserved across calls. */
+static tree expand_bool_pixel;
+
+static cpp_hashnode *
+altivec_categorize_keyword (const cpp_token *tok)
+{
+ if (tok->type == CPP_NAME)
+ {
+ cpp_hashnode *ident = tok->val.node;
+
+ if (ident == C_CPP_HASHNODE (vector_keyword))
+ return C_CPP_HASHNODE (__vector_keyword);
+
+ if (ident == C_CPP_HASHNODE (pixel_keyword))
+ return C_CPP_HASHNODE (__pixel_keyword);
+
+ if (ident == C_CPP_HASHNODE (bool_keyword))
+ return C_CPP_HASHNODE (__bool_keyword);
+
+ if (ident == C_CPP_HASHNODE (_Bool_keyword))
+ return C_CPP_HASHNODE (__bool_keyword);
+
+ return ident;
+ }
+
+ return 0;
+}
+
+static void
+init_vector_keywords (void)
+{
+ /* Keywords without two leading underscores are context-sensitive,
+ and hence implemented as conditional macros, controlled by the
+ rs6000_macro_to_expand() function below. */
+
+ __vector_keyword = get_identifier ("__vector");
+ C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;
+
+ __pixel_keyword = get_identifier ("__pixel");
+ C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;
+
+ __bool_keyword = get_identifier ("__bool");
+ C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;
+
+ vector_keyword = get_identifier ("vector");
+ C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;
+
+ pixel_keyword = get_identifier ("pixel");
+ C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;
+
+ bool_keyword = get_identifier ("bool");
+ C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;
+
+ _Bool_keyword = get_identifier ("_Bool");
+ C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;
+}
+
+/* Called to decide whether a conditional macro should be expanded.
+   Since we have exactly one such macro (i.e., 'vector'), we do not
+ need to examine the 'tok' parameter. */
+
+static cpp_hashnode *
+rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
+{
+ cpp_hashnode *expand_this = tok->val.node;
+ cpp_hashnode *ident;
+
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident != expand_this)
+ expand_this = NULL;
+
+ if (ident == C_CPP_HASHNODE (__vector_keyword))
+ {
+ int idx = 0;
+ do
+ tok = cpp_peek_token (pfile, idx++);
+ while (tok->type == CPP_PADDING);
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == C_CPP_HASHNODE (__pixel_keyword))
+ {
+ expand_this = C_CPP_HASHNODE (__vector_keyword);
+ expand_bool_pixel = __pixel_keyword;
+ }
+ else if (ident == C_CPP_HASHNODE (__bool_keyword))
+ {
+ expand_this = C_CPP_HASHNODE (__vector_keyword);
+ expand_bool_pixel = __bool_keyword;
+ }
+ else if (ident)
+ {
+ enum rid rid_code = (enum rid)(ident->rid_code);
+ if (ident->type == NT_MACRO)
+ {
+ do
+ (void) cpp_get_token (pfile);
+ while (--idx > 0);
+ do
+ tok = cpp_peek_token (pfile, idx++);
+ while (tok->type == CPP_PADDING);
+ ident = altivec_categorize_keyword (tok);
+ if (ident == C_CPP_HASHNODE (__pixel_keyword))
+ {
+ expand_this = C_CPP_HASHNODE (__vector_keyword);
+ expand_bool_pixel = __pixel_keyword;
+ rid_code = RID_MAX;
+ }
+ else if (ident == C_CPP_HASHNODE (__bool_keyword))
+ {
+ expand_this = C_CPP_HASHNODE (__vector_keyword);
+ expand_bool_pixel = __bool_keyword;
+ rid_code = RID_MAX;
+ }
+ else if (ident)
+ rid_code = (enum rid)(ident->rid_code);
+ }
+
+ if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
+ || rid_code == RID_SHORT || rid_code == RID_SIGNED
+ || rid_code == RID_INT || rid_code == RID_CHAR
+ || rid_code == RID_FLOAT)
+ {
+ expand_this = C_CPP_HASHNODE (__vector_keyword);
+ /* If the next keyword is bool or pixel, it
+ will need to be expanded as well. */
+ do
+ tok = cpp_peek_token (pfile, idx++);
+ while (tok->type == CPP_PADDING);
+ ident = altivec_categorize_keyword (tok);
+
+ if (ident == C_CPP_HASHNODE (__pixel_keyword))
+ expand_bool_pixel = __pixel_keyword;
+ else if (ident == C_CPP_HASHNODE (__bool_keyword))
+ expand_bool_pixel = __bool_keyword;
+ else
+ {
+ /* Try two tokens down, too. */
+ do
+ tok = cpp_peek_token (pfile, idx++);
+ while (tok->type == CPP_PADDING);
+ ident = altivec_categorize_keyword (tok);
+ if (ident == C_CPP_HASHNODE (__pixel_keyword))
+ expand_bool_pixel = __pixel_keyword;
+ else if (ident == C_CPP_HASHNODE (__bool_keyword))
+ expand_bool_pixel = __bool_keyword;
+ }
+ }
+ }
+ }
+ else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
+ {
+ expand_this = C_CPP_HASHNODE (__pixel_keyword);
+ expand_bool_pixel = 0;
+ }
+ else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
+ {
+ expand_this = C_CPP_HASHNODE (__bool_keyword);
+ expand_bool_pixel = 0;
+ }
+
+ return expand_this;
+}
+
+void
+rs6000_cpu_cpp_builtins (cpp_reader *pfile)
+{
+ if (TARGET_POWER2)
+ builtin_define ("_ARCH_PWR2");
+ else if (TARGET_POWER)
+ builtin_define ("_ARCH_PWR");
+ if (TARGET_POWERPC)
+ builtin_define ("_ARCH_PPC");
+ if (TARGET_PPC_GPOPT)
+ builtin_define ("_ARCH_PPCSQ");
+ if (TARGET_PPC_GFXOPT)
+ builtin_define ("_ARCH_PPCGR");
+ if (TARGET_POWERPC64)
+ builtin_define ("_ARCH_PPC64");
+ if (TARGET_MFCRF)
+ builtin_define ("_ARCH_PWR4");
+ if (TARGET_POPCNTB)
+ builtin_define ("_ARCH_PWR5");
+ if (TARGET_FPRND)
+ builtin_define ("_ARCH_PWR5X");
+ if (TARGET_CMPB)
+ builtin_define ("_ARCH_PWR6");
+ if (TARGET_MFPGPR)
+ builtin_define ("_ARCH_PWR6X");
+ if (! TARGET_POWER && ! TARGET_POWER2 && ! TARGET_POWERPC)
+ builtin_define ("_ARCH_COM");
+ if (TARGET_ALTIVEC)
+ {
+ builtin_define ("__ALTIVEC__");
+ builtin_define ("__VEC__=10206");
+
+ /* Define the AltiVec syntactic elements. */
+ builtin_define ("__vector=__attribute__((altivec(vector__)))");
+ builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
+ builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");
+
+ if (!flag_iso)
+ {
+ /* Define this when supporting context-sensitive keywords. */
+ builtin_define ("__APPLE_ALTIVEC__");
+
+ builtin_define ("vector=vector");
+ builtin_define ("pixel=pixel");
+ builtin_define ("bool=bool");
+ builtin_define ("_Bool=_Bool");
+ init_vector_keywords ();
+
+ /* Enable context-sensitive macros. */
+ cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
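+
+      /* Illustrative note: with the conditional macros above, a
+	 declaration such as "vector float v;" is rewritten through
+	 __vector into "__attribute__((altivec(vector__))) float v;",
+	 while an unrelated identifier such as "vector_map" is left
+	 untouched.  */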
+ }
+ }
+ if (rs6000_cpu == PROCESSOR_CELL)
+ builtin_define ("__PPU__");
+ if (TARGET_SPE)
+ builtin_define ("__SPE__");
+ if (TARGET_PAIRED_FLOAT)
+ builtin_define ("__PAIRED__");
+ if (TARGET_SOFT_FLOAT)
+ builtin_define ("_SOFT_FLOAT");
+ if ((!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
+      || (TARGET_HARD_FLOAT && TARGET_FPRS && !TARGET_DOUBLE_FLOAT))
+ builtin_define ("_SOFT_DOUBLE");
+ /* Used by lwarx/stwcx. errata work-around. */
+ if (rs6000_cpu == PROCESSOR_PPC405)
+ builtin_define ("__PPC405__");
+ /* Used by libstdc++. */
+ if (TARGET_NO_LWSYNC)
+ builtin_define ("__NO_LWSYNC__");
+
+ /* May be overridden by target configuration. */
+ RS6000_CPU_CPP_ENDIAN_BUILTINS();
+
+ if (TARGET_LONG_DOUBLE_128)
+ {
+ builtin_define ("__LONG_DOUBLE_128__");
+ builtin_define ("__LONGDOUBLE128");
+ }
+
+ switch (rs6000_current_abi)
+ {
+ case ABI_V4:
+ builtin_define ("_CALL_SYSV");
+ break;
+ case ABI_AIX:
+ builtin_define ("_CALL_AIXDESC");
+ builtin_define ("_CALL_AIX");
+ break;
+ case ABI_DARWIN:
+ builtin_define ("_CALL_DARWIN");
+ break;
+ default:
+ break;
+ }
+
+ /* Let the compiled code know if 'f' class registers will not be available. */
+ if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ builtin_define ("__NO_FPRS__");
+}
+
+
+struct altivec_builtin_types
+{
+ enum rs6000_builtins code;
+ enum rs6000_builtins overloaded_code;
+ signed char ret_type;
+ signed char op1;
+ signed char op2;
+ signed char op3;
+};
+
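+/* Reading the table (illustrative note): the first entry says that the
+   overloaded vec_abs, applied to a V16QI argument, resolves to the
+   type-specific ABS_V16QI builtin and yields V16QI; trailing zeros
+   mark unused operand slots.  */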
+const struct altivec_builtin_types altivec_overloaded_builtins[] = {
+ /* Unary AltiVec builtins. */
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABS, ALTIVEC_BUILTIN_ABS_V4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ABSS, ALTIVEC_BUILTIN_ABSS_V4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_CEIL, ALTIVEC_BUILTIN_VRFIP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_EXPTE, ALTIVEC_BUILTIN_VEXPTEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_FLOOR, ALTIVEC_BUILTIN_VRFIM,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_LOGE, ALTIVEC_BUILTIN_VLOGEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_MTVSCR, ALTIVEC_BUILTIN_MTVSCR,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_RE, ALTIVEC_BUILTIN_VREFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_ROUND, ALTIVEC_BUILTIN_VRFIN,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_RSQRTE, ALTIVEC_BUILTIN_VRSQRTEFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_TRUNC, ALTIVEC_BUILTIN_VRFIZ,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKH, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSH, ALTIVEC_BUILTIN_VUPKHSH,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHPX, ALTIVEC_BUILTIN_VUPKHPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKHSB, ALTIVEC_BUILTIN_VUPKHSB,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_UNPACKL, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLPX, ALTIVEC_BUILTIN_VUPKLPX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_pixel_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSH, ALTIVEC_BUILTIN_VUPKLSH,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V8HI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0, 0 },
+ { ALTIVEC_BUILTIN_VEC_VUPKLSB, ALTIVEC_BUILTIN_VUPKLSB,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V16QI, 0, 0 },
+
+ /* Binary AltiVec builtins. */
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDFP, ALTIVEC_BUILTIN_VADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWM, ALTIVEC_BUILTIN_VADDUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHM, ALTIVEC_BUILTIN_VADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBM, ALTIVEC_BUILTIN_VADDUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDC, ALTIVEC_BUILTIN_VADDCUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ADDS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
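+ /* The instruction-named forms (vec_vaddsws and friends) are also
+    overloaded below, so code may call the specific builtin directly;
+    the vector-bool operand rows appear to mirror the generic vec_adds
+    entries above.  */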
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSWS, ALTIVEC_BUILTIN_VADDSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUWS, ALTIVEC_BUILTIN_VADDUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSHS, ALTIVEC_BUILTIN_VADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUHS, ALTIVEC_BUILTIN_VADDUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDSBS, ALTIVEC_BUILTIN_VADDSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VADDUBS, ALTIVEC_BUILTIN_VADDUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
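+ /* The VEC_AND rows below illustrate why the bitwise ops need so many
+    entries: vec_and accepts any like-sized mix of vector, vector bool,
+    and vector float operands.  Sketch (illustrative only):
+      vector float f;  vector bool int m;
+      vector float g = vec_and (f, m);          /* maps to vand */  */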
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AND, ALTIVEC_BUILTIN_VAND,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_ANDC, ALTIVEC_BUILTIN_VANDC,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
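+ /* vec_avg is the rounded average, roughly (a + b + 1) >> 1 per
+    element; the rows below select vavgub/vavgsb/... by element width
+    and signedness, e.g. vec_avg on unsigned chars 1 and 2 gives 2.  */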
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_AVG, ALTIVEC_BUILTIN_VAVGSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSW, ALTIVEC_BUILTIN_VAVGSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUW, ALTIVEC_BUILTIN_VAVGUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSH, ALTIVEC_BUILTIN_VAVGSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUH, ALTIVEC_BUILTIN_VAVGUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGSB, ALTIVEC_BUILTIN_VAVGSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VAVGUB, ALTIVEC_BUILTIN_VAVGUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPB, ALTIVEC_BUILTIN_VCMPBFP,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
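+ /* The comparison rows return a vector bool of the operands' element
+    width.  Sketch (illustrative only):
+      vector float x, y;
+      vector bool int eq = vec_cmpeq (x, y);    /* maps to vcmpeqfp */  */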
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPEQ, ALTIVEC_BUILTIN_VCMPEQFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQFP, ALTIVEC_BUILTIN_VCMPEQFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUW, ALTIVEC_BUILTIN_VCMPEQUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUH, ALTIVEC_BUILTIN_VCMPEQUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPEQUB, ALTIVEC_BUILTIN_VCMPEQUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGE, ALTIVEC_BUILTIN_VCMPGEFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPGT, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTFP, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSW, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUW, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSH, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUH, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTSB, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCMPGTUB, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLE, ALTIVEC_BUILTIN_VCMPGEFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTUW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTSW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CMPLT, ALTIVEC_BUILTIN_VCMPGTFP,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
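+ /* Note that vec_cmple and vec_cmplt map onto the greater-than (or
+    greater-or-equal) builtins above; the operand swap that makes this
+    correct is presumably applied when the call is expanded, not in
+    this table.  */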
+ { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFUX,
+ RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTF, ALTIVEC_BUILTIN_VCFSX,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCFSX, ALTIVEC_BUILTIN_VCFSX,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VCFUX, ALTIVEC_BUILTIN_VCFUX,
+ RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTS, ALTIVEC_BUILTIN_VCTSXS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_CTU, ALTIVEC_BUILTIN_VCTUXS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
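+ /* The conversion rows just above take a literal scale argument:
+    vec_ctf (v, b) converts fixed-point integers to float as v * 2^-b,
+    and vec_cts/vec_ctu convert back with saturation.  Sketch:
+      vector unsigned int u;
+      vector float f = vec_ctf (u, 4);          /* maps to vcfux */  */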
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LD, ALTIVEC_BUILTIN_LVX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
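+ /* In the load rows a '~'-negated type code apparently denotes the
+    pointed-to type of an address argument, so
+      vector float v = vec_ld (0, (const float *) p);   /* maps to lvx */
+    matches the (V4SF, INTSI, ~float) row above.  vec_lde below loads
+    a single element via lvebx/lvehx/lvewx.  */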
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDE, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEWX, ALTIVEC_BUILTIN_LVEWX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEHX, ALTIVEC_BUILTIN_LVEHX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVEBX, ALTIVEC_BUILTIN_LVEBX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LDL, ALTIVEC_BUILTIN_LVXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
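+ /* vec_lvsl/vec_lvsr below produce the permute control vector used in
+    the classic AltiVec unaligned-load idiom.  Sketch (illustrative):
+      vector unsigned char sel = vec_lvsl (0, p);
+      v = vec_perm (vec_ld (0, p), vec_ld (16, p), sel);  */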
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSL, ALTIVEC_BUILTIN_LVSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_long, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVSR, ALTIVEC_BUILTIN_LVSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
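+ /* vec_lvlx/vec_lvrx and their LRU variants below load the left and
+    right parts of an unaligned quadword; they are only available on
+    targets that provide lvlx/lvrx (e.g. the Cell PPU).  */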
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLX, ALTIVEC_BUILTIN_LVLX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVLXL, ALTIVEC_BUILTIN_LVLXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRX, ALTIVEC_BUILTIN_LVRX,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_LVRXL, ALTIVEC_BUILTIN_LVRXL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI, 0 },
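+ /* Sketch for the vec_max rows that follow (illustrative only):
+      vector unsigned short a, b;
+      vector unsigned short m = vec_max (a, b); /* maps to vmaxuh */
+    giving the element-wise maximum; vector bool operands are again
+    accepted on either side.  */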
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MAX, ALTIVEC_BUILTIN_VMAXFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXFP, ALTIVEC_BUILTIN_VMAXFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSW, ALTIVEC_BUILTIN_VMAXSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUW, ALTIVEC_BUILTIN_VMAXUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSH, ALTIVEC_BUILTIN_VMAXSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUH, ALTIVEC_BUILTIN_VMAXUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXSB, ALTIVEC_BUILTIN_VMAXSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMAXUB, ALTIVEC_BUILTIN_VMAXUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
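+ /* vec_mergeh/vec_mergel below interleave the high or low halves of
+    the two operands, e.g. mergeh of bytes {0..15} and {16..31} gives
+    {0,16,1,17,...,7,23} (illustrative).  */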
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEH, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHW, ALTIVEC_BUILTIN_VMRGHW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHH, ALTIVEC_BUILTIN_VMRGHH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGHB, ALTIVEC_BUILTIN_VMRGHB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MERGEL, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLW, ALTIVEC_BUILTIN_VMRGLW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLH, ALTIVEC_BUILTIN_VMRGLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMRGLB, ALTIVEC_BUILTIN_VMRGLB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MIN, ALTIVEC_BUILTIN_VMINFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINFP, ALTIVEC_BUILTIN_VMINFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSW, ALTIVEC_BUILTIN_VMINSW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUW, ALTIVEC_BUILTIN_VMINUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSH, ALTIVEC_BUILTIN_VMINSH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINSB, ALTIVEC_BUILTIN_VMINSB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUH, ALTIVEC_BUILTIN_VMINUH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMINUB, ALTIVEC_BUILTIN_VMINUB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
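+ /* vec_mule/vec_mulo below are widening multiplies of the even or odd
+    elements, so multiplying two V16QI vectors yields a V8HI result, as
+    the (V8HI, V16QI, V16QI) rows record.  */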
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULEUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULE, ALTIVEC_BUILTIN_VMULESH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULEUB, ALTIVEC_BUILTIN_VMULEUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULESB, ALTIVEC_BUILTIN_VMULESB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULEUH, ALTIVEC_BUILTIN_VMULEUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULESH, ALTIVEC_BUILTIN_VMULESH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_MULO, ALTIVEC_BUILTIN_VMULOSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOSH, ALTIVEC_BUILTIN_VMULOSH,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOUH, ALTIVEC_BUILTIN_VMULOUH,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOSB, ALTIVEC_BUILTIN_VMULOSB,
+ RS6000_BTI_V8HI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VMULOUB, ALTIVEC_BUILTIN_VMULOUB,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_NOR, ALTIVEC_BUILTIN_VNOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_OR, ALTIVEC_BUILTIN_VOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
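+ /* A minimal example of how these rows are used (assuming the usual
+    <altivec.h> spellings): given
+      vector unsigned int a, b;
+      vector unsigned int c = vec_or (a, b);
+    resolution matches the { unsigned_V4SI, unsigned_V4SI,
+    unsigned_V4SI } row above and dispatches to ALTIVEC_BUILTIN_VOR.  */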
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACK, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUM, ALTIVEC_BUILTIN_VPKUWUM,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUM, ALTIVEC_BUILTIN_VPKUHUM,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKPX, ALTIVEC_BUILTIN_VPKPX,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSHSS,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKS, ALTIVEC_BUILTIN_VPKSWSS,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSWSS, ALTIVEC_BUILTIN_VPKSWSS,
+ RS6000_BTI_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUWUS, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSHSS, ALTIVEC_BUILTIN_VPKSHSS,
+ RS6000_BTI_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKUHUS, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKUWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_PACKSU, ALTIVEC_BUILTIN_VPKSWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSWUS, ALTIVEC_BUILTIN_VPKSWUS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VPKSHUS, ALTIVEC_BUILTIN_VPKSHUS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
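+ /* vec_pack narrows modulo (vpku*um, dropping high-order bits),
+    while vec_packs and vec_packsu saturate; the rows above pick the
+    signed or unsigned saturating instance from the operand type.  */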
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_RL, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLW, ALTIVEC_BUILTIN_VRLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLH, ALTIVEC_BUILTIN_VRLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VRLB, ALTIVEC_BUILTIN_VRLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SL, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLW, ALTIVEC_BUILTIN_VSLW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLH, ALTIVEC_BUILTIN_VSLH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSLB, ALTIVEC_BUILTIN_VSLB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
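+ /* For the per-element rotates (vec_rl) and shifts (vec_sl) above,
+    the second operand is always an unsigned vector of shift counts,
+    whatever the signedness of the data being shifted.  */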
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLL, ALTIVEC_BUILTIN_VSL,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
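+ /* vec_sll shifts the whole 128-bit register left by a bit count
+    taken from the second operand, so any unsigned element width is
+    accepted there; hence the three rows per data type.  */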
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SLO, ALTIVEC_BUILTIN_VSLO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
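+ /* vec_slo (shift left by octet) takes its byte count from a char
+    vector, so the shift operand is always one of the V16QI types.  */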
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SPLAT, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTW, ALTIVEC_BUILTIN_VSPLTW,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTH, ALTIVEC_BUILTIN_VSPLTH,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSPLTB, ALTIVEC_BUILTIN_VSPLTB,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, 0 },
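+ /* The second operand of vec_splat is the index of the element to
+    replicate, which is why every row types it as plain INTSI.  */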
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SR, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRW, ALTIVEC_BUILTIN_VSRW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRH, ALTIVEC_BUILTIN_VSRH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRB, ALTIVEC_BUILTIN_VSRB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRA, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAW, ALTIVEC_BUILTIN_VSRAW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAH, ALTIVEC_BUILTIN_VSRAH,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSRAB, ALTIVEC_BUILTIN_VSRAB,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
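+ /* vec_sr is the logical (zero-filling) per-element shift; vec_sra
+    maps to the algebraic forms vsrab/vsrah/vsraw, which replicate
+    the sign bit instead.  */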
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRL, ALTIVEC_BUILTIN_VSR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SRO, ALTIVEC_BUILTIN_VSRO,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
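+ /* vec_srl and vec_sro mirror vec_sll and vec_slo above, shifting
+    the full register right by bits and by octets respectively.  */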
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUB, ALTIVEC_BUILTIN_VSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBFP, ALTIVEC_BUILTIN_VSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWM, ALTIVEC_BUILTIN_VSUBUWM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHM, ALTIVEC_BUILTIN_VSUBUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBM, ALTIVEC_BUILTIN_VSUBUBM,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBC, ALTIVEC_BUILTIN_VSUBCUW,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUBS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSWS, ALTIVEC_BUILTIN_VSUBSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUWS, ALTIVEC_BUILTIN_VSUBUWS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSHS, ALTIVEC_BUILTIN_VSUBSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUHS, ALTIVEC_BUILTIN_VSUBUHS,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBSBS, ALTIVEC_BUILTIN_VSUBSBS,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUBUBS, ALTIVEC_BUILTIN_VSUBUBS,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4UBS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SBS,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM4S, ALTIVEC_BUILTIN_VSUM4SHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4SHS, ALTIVEC_BUILTIN_VSUM4SHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4SBS, ALTIVEC_BUILTIN_VSUM4SBS,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_VSUM4UBS, ALTIVEC_BUILTIN_VSUM4UBS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUM2S, ALTIVEC_BUILTIN_VSUM2SWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_SUMS, ALTIVEC_BUILTIN_VSUMSWS,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
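+ /* The saturating sum builtins reduce across elements: vec_sum4s
+    adds groups of bytes (or halfword pairs) within each word to the
+    matching word of the accumulator, vec_sum2s works on adjacent
+    word pairs, and vec_sums accumulates all four words into the
+    last element.  */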
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SF, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI, 0 },
+ { ALTIVEC_BUILTIN_VEC_XOR, ALTIVEC_BUILTIN_VXOR,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, 0 },
+
+ /* Ternary AltiVec builtins. */
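+ /* In these rows a complemented type code (~T) marks an operand
+    passed as a pointer to T.  The data-stream touch builtins
+    vec_dst through vec_dstt take an address, a control word
+    encoding block size/count/stride, and a 2-bit literal tag
+    selecting one of the four streams.  */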
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DST, ALTIVEC_BUILTIN_DST,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTST, ALTIVEC_BUILTIN_DSTST,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTSTT, ALTIVEC_BUILTIN_DSTSTT,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_V4SF, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTQI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTHI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_UINTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_unsigned_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_long, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_DSTT, ALTIVEC_BUILTIN_DSTT,
+ RS6000_BTI_void, ~RS6000_BTI_float, RS6000_BTI_INTSI, RS6000_BTI_INTSI },
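+ /* The arithmetic rows below use all three operand slots.  For
+    example vec_madd (a, b, c) computes a*b+c per float element via
+    vmaddfp, and vec_mladd/vec_mradds are the halfword
+    multiply-add forms.  */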
+ { ALTIVEC_BUILTIN_VEC_MADD, ALTIVEC_BUILTIN_VMADDFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_MADDS, ALTIVEC_BUILTIN_VMHADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MLADD, ALTIVEC_BUILTIN_VMLADDUHM,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MRADDS, ALTIVEC_BUILTIN_VMHRADDSHS,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUBM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMMBM,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMUHM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUM, ALTIVEC_BUILTIN_VMSUMSHM,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMSHM, ALTIVEC_BUILTIN_VMSUMSHM,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUHM, ALTIVEC_BUILTIN_VMSUMUHM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMMBM, ALTIVEC_BUILTIN_VMSUMMBM,
+ RS6000_BTI_V4SI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUBM, ALTIVEC_BUILTIN_VMSUMUBM,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMUHS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_MSUMS, ALTIVEC_BUILTIN_VMSUMSHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMSHS, ALTIVEC_BUILTIN_VMSUMSHS,
+ RS6000_BTI_V4SI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_VMSUMUHS, ALTIVEC_BUILTIN_VMSUMUHS,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_NMSUB, ALTIVEC_BUILTIN_VNMSUBFP,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
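+ /* vec_nmsub (a, b, c) computes the negative multiply-subtract
+    -(a*b - c) via vnmsubfp.  */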
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_8HI,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_PERM, ALTIVEC_BUILTIN_VPERM_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SEL, ALTIVEC_BUILTIN_VSEL_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SF,
+ RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_V4SF, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_4SI,
+ RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_8HI,
+ RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_SLD, ALTIVEC_BUILTIN_VSLDOI_16QI,
+ RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI, RS6000_BTI_NOT_OPAQUE },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_ST, ALTIVEC_BUILTIN_STVX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STE, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEWX, ALTIVEC_BUILTIN_STVEWX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEHX, ALTIVEC_BUILTIN_STVEHX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STVEBX, ALTIVEC_BUILTIN_STVEBX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_void },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STL, ALTIVEC_BUILTIN_STVXL,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVLX, ALTIVEC_BUILTIN_STVLX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVLXL, ALTIVEC_BUILTIN_STVLXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRX, ALTIVEC_BUILTIN_STVRX,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_V4SF },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V4SF, RS6000_BTI_INTSI, ~RS6000_BTI_float },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_INTSI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V4SI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTSI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_pixel_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_INTHI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V8HI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTHI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_bool_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_INTQI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VEC_STVRXL, ALTIVEC_BUILTIN_STVRXL,
+ RS6000_BTI_void, RS6000_BTI_unsigned_V16QI, RS6000_BTI_INTSI, ~RS6000_BTI_UINTQI },
+
+ /* Predicates. */
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGT_P, ALTIVEC_BUILTIN_VCMPGTFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_pixel_V8HI, RS6000_BTI_pixel_V8HI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPEQ_P, ALTIVEC_BUILTIN_VCMPEQFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+
+  /* cmpge resolves to the same table entries as cmpgt for all cases
+     except floating point.  Further code in
+     altivec_build_resolved_builtin deals with this special case by
+     swapping the operands and reversing the predicate condition.  */
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V16QI, RS6000_BTI_unsigned_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_bool_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSB_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V16QI, RS6000_BTI_V16QI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V8HI, RS6000_BTI_unsigned_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V8HI, RS6000_BTI_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSH_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V8HI, RS6000_BTI_bool_V8HI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTUW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_unsigned_V4SI, RS6000_BTI_unsigned_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_bool_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_bool_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGTSW_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SI, RS6000_BTI_V4SI },
+ { ALTIVEC_BUILTIN_VCMPGE_P, ALTIVEC_BUILTIN_VCMPGEFP_P,
+ RS6000_BTI_INTSI, RS6000_BTI_INTSI, RS6000_BTI_V4SF, RS6000_BTI_V4SF },
+
+ { 0, 0, 0, 0, 0, 0 }
+};
+
+
+/* Convert a type stored in a struct altivec_builtin_types as ID
+   into a tree.  The types are in rs6000_builtin_types: a negative
+   value creates a pointer type for the type associated with ~ID.
+   Note that this is a logical NOT, rather than a negation; otherwise
+   a pointer type for ID 0 could not be represented.  */
+
+static inline tree
+rs6000_builtin_type (int id)
+{
+ tree t;
+ t = rs6000_builtin_types[id < 0 ? ~id : id];
+ return id < 0 ? build_pointer_type (t) : t;
+}
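+
+/* A sketch, for illustration only: under this encoding a table entry
+   such as ~RS6000_BTI_V4SF stands for "pointer to vector float", so
+
+     rs6000_builtin_type (RS6000_BTI_V4SF)    yields the V4SF type node,
+     rs6000_builtin_type (~RS6000_BTI_V4SF)   a pointer type to it.  */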
+
+/* Check whether the type of an argument, T, is compatible with a
+ type ID stored into a struct altivec_builtin_types. Integer
+ types are considered compatible; otherwise, the language hook
+ lang_hooks.types_compatible_p makes the decision. */
+
+static inline bool
+rs6000_builtin_type_compatible (tree t, int id)
+{
+ tree builtin_type;
+ builtin_type = rs6000_builtin_type (id);
+ if (t == error_mark_node)
+ return false;
+ if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
+ return true;
+ else
+ return lang_hooks.types_compatible_p (t, builtin_type);
+}
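+
+/* For illustration: because any two integral types are accepted as
+   compatible here, a plain `int' argument satisfies a table entry
+   expecting RS6000_BTI_INTSI; for instance
+
+     rs6000_builtin_type_compatible (integer_type_node, RS6000_BTI_INTSI)
+
+   evaluates to true, whereas vector arguments are only accepted when
+   the language hook agrees.  */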
+
+
+/* Build a tree for a function call to an Altivec non-overloaded builtin.
+   The overloaded builtin that matched the types and args is described
+   by DESC.  The N arguments are given in ARGS.
+
+   In essence all this does is call fold_convert on ARGS, with a small
+   exception for the vec_{all,any}_{ge,le} predicates. */
+
+static tree
+altivec_build_resolved_builtin (tree *args, int n,
+ const struct altivec_builtin_types *desc)
+{
+ tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
+ tree ret_type = rs6000_builtin_type (desc->ret_type);
+ tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
+ tree arg_type[3];
+ tree call;
+
+ int i;
+ for (i = 0; i < n; i++)
+ arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);
+
+ /* The AltiVec overloading implementation is overall gross, but this
+ is particularly disgusting. The vec_{all,any}_{ge,le} builtins
+ are completely different for floating-point vs. integer vector
+ types, because the former has vcmpgefp, but the latter should use
+ vcmpgtXX.
+
+ In practice, the second and third arguments are swapped, and the
+ condition (LT vs. EQ, which is recognizable by bit 1 of the first
+ argument) is reversed. Patch the arguments here before building
+ the resolved CALL_EXPR. */
+ if (desc->code == ALTIVEC_BUILTIN_VCMPGE_P
+ && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P)
+ {
+ tree t;
+ t = args[2], args[2] = args[1], args[1] = t;
+ t = arg_type[2], arg_type[2] = arg_type[1], arg_type[1] = t;
+
+ args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
+ build_int_cst (NULL_TREE, 2));
+ }
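+
+  /* A worked example of the rewrite above (assuming altivec.h's
+     __CR6_LT == 2 encoding): for signed word vectors, an all-ge
+     predicate that matched the VCMPGTSW_P entry, roughly
+
+       vcmpge_p (__CR6_LT, a, b)
+
+     is rebuilt here as
+
+       vcmpgtsw_p (__CR6_LT ^ 2, b, a)
+
+     i.e. "b > a holds for no element", which is a >= b everywhere.  */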
+
+ switch (n)
+ {
+ case 0:
+ call = build_call_expr (impl_fndecl, 0);
+ break;
+ case 1:
+ call = build_call_expr (impl_fndecl, 1,
+ fold_convert (arg_type[0], args[0]));
+ break;
+ case 2:
+ call = build_call_expr (impl_fndecl, 2,
+ fold_convert (arg_type[0], args[0]),
+ fold_convert (arg_type[1], args[1]));
+ break;
+ case 3:
+ call = build_call_expr (impl_fndecl, 3,
+ fold_convert (arg_type[0], args[0]),
+ fold_convert (arg_type[1], args[1]),
+ fold_convert (arg_type[2], args[2]));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ return fold_convert (ret_type, call);
+}
+
+/* Implementation of the resolve_overloaded_builtin target hook, to
+ support Altivec's overloaded builtins. */
+
+tree
+altivec_resolve_overloaded_builtin (tree fndecl, tree arglist)
+{
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
+ tree types[3], args[3];
+ const struct altivec_builtin_types *desc;
+ int n;
+
+ if (fcode < ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ || fcode > ALTIVEC_BUILTIN_OVERLOADED_LAST)
+ return NULL_TREE;
+
+ /* For now treat vec_splats and vec_promote as the same. */
+ if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS
+ || fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)
+ {
+ tree type, arg;
+ int size;
+ int i;
+ bool unsigned_p;
+ VEC(constructor_elt,gc) *vec;
+      const char *name
+	= fcode == ALTIVEC_BUILTIN_VEC_SPLATS ? "vec_splats" : "vec_promote";
+
+ if (!arglist)
+ {
+	  error ("%s only accepts %d arguments", name,
+		 (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE) + 1);
+ return error_mark_node;
+ }
+ if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS && TREE_CHAIN (arglist))
+ {
+ error ("%s only accepts 1 argument", name);
+ return error_mark_node;
+ }
+ if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE && !TREE_CHAIN (arglist))
+ {
+ error ("%s only accepts 2 arguments", name);
+ return error_mark_node;
+ }
+      /* vec_promote's element-position argument is only type-checked
+	 below; its value is ignored.  Reject any extra arguments.  */
+ if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
+ && TREE_CHAIN (TREE_CHAIN (arglist)))
+ {
+ error ("%s only accepts 2 arguments", name);
+ return error_mark_node;
+ }
+ if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
+ && !INTEGRAL_TYPE_P (TREE_TYPE (TREE_VALUE (TREE_CHAIN (arglist)))))
+ goto bad;
+
+ arg = TREE_VALUE (arglist);
+ type = TREE_TYPE (arg);
+ if (!SCALAR_FLOAT_TYPE_P (type)
+ && !INTEGRAL_TYPE_P (type))
+ goto bad;
+ unsigned_p = TYPE_UNSIGNED (type);
+ if (type == long_long_unsigned_type_node
+ || type == long_long_integer_type_node)
+ goto bad;
+ switch (TYPE_MODE (type))
+ {
+ case SImode:
+ type = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
+ size = 4;
+ break;
+ case HImode:
+ type = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
+ size = 8;
+ break;
+ case QImode:
+ type = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
+ size = 16;
+ break;
+ case SFmode: type = V4SF_type_node; size = 4; break;
+ default:
+ goto bad;
+ }
+ arg = save_expr (fold_convert (TREE_TYPE (type), arg));
+ vec = VEC_alloc (constructor_elt, gc, size);
+      for (i = 0; i < size; i++)
+ {
+ constructor_elt *elt;
+
+ elt = VEC_quick_push (constructor_elt, vec, NULL);
+ elt->index = NULL_TREE;
+ elt->value = arg;
+ }
+ return build_constructor (type, vec);
+ }
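+
+  /* A sketch of the effect: vec_splats ((short) 7) resolves here to the
+     constructor (vector signed short){7, 7, 7, 7, 7, 7, 7, 7}, and
+     vec_promote builds the same replicated constructor, with its
+     element-position argument ignored.  */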
+
+  /* For now use pointer tricks to do the extraction. */
+ if (fcode == ALTIVEC_BUILTIN_VEC_EXTRACT)
+ {
+ tree arg1;
+ tree arg1_type;
+ tree arg2;
+ tree arg1_inner_type;
+ tree decl, stmt;
+ tree innerptrtype;
+
+      /* Require exactly two arguments.  */
+ if (!arglist || !TREE_CHAIN (arglist)
+ || TREE_CHAIN (TREE_CHAIN (arglist)))
+ {
+ error ("vec_extract only accepts 2 arguments");
+ return error_mark_node;
+ }
+
+ arg2 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg1 = TREE_VALUE (arglist);
+ arg1_type = TREE_TYPE (arg1);
+
+ if (TREE_CODE (arg1_type) != VECTOR_TYPE)
+ goto bad;
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
+ goto bad;
+ /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2). */
+ arg1_inner_type = TREE_TYPE (arg1_type);
+ arg2 = build_binary_op (input_location, BIT_AND_EXPR, arg2,
+ build_int_cst (TREE_TYPE (arg2),
+ TYPE_VECTOR_SUBPARTS (arg1_type)
+ - 1), 0);
+ decl = build_decl (VAR_DECL, NULL_TREE, arg1_type);
+ DECL_EXTERNAL (decl) = 0;
+ TREE_PUBLIC (decl) = 0;
+ DECL_CONTEXT (decl) = current_function_decl;
+ TREE_USED (decl) = 1;
+ TREE_TYPE (decl) = arg1_type;
+ TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
+ DECL_INITIAL (decl) = arg1;
+ stmt = build1 (DECL_EXPR, arg1_type, decl);
+ TREE_ADDRESSABLE (decl) = 1;
+ SET_EXPR_LOCATION (stmt, input_location);
+ stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
+
+ innerptrtype = build_pointer_type (arg1_inner_type);
+
+ stmt = build_unary_op (input_location, ADDR_EXPR, stmt, 0);
+ stmt = convert (innerptrtype, stmt);
+ stmt = build_binary_op (input_location, PLUS_EXPR, stmt, arg2, 1);
+ stmt = build_indirect_ref (input_location, stmt, NULL);
+
+ return stmt;
+ }
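+
+  /* Illustration of the pointer trick above, for a vector signed int v:
+     vec_extract (v, 5) expands to roughly
+
+       *(((int *) &(vector signed int){v}) + (5 & 3))
+
+     so the position is masked by the element count minus one and
+     out-of-range positions wrap around (5 selects element 1).  */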
+
+  /* For now use pointer tricks to do the insertion. */
+ if (fcode == ALTIVEC_BUILTIN_VEC_INSERT)
+ {
+ tree arg0;
+ tree arg1;
+ tree arg2;
+ tree arg1_type;
+ tree arg1_inner_type;
+ tree decl, stmt;
+ tree innerptrtype;
+
+      /* Require exactly three arguments.  */
+ if (!arglist || !TREE_CHAIN (arglist)
+ || !TREE_CHAIN (TREE_CHAIN (arglist))
+ || TREE_CHAIN (TREE_CHAIN (TREE_CHAIN (arglist))))
+ {
+ error ("vec_insert only accepts 3 arguments");
+ return error_mark_node;
+ }
+
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg1_type = TREE_TYPE (arg1);
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+
+ if (TREE_CODE (arg1_type) != VECTOR_TYPE)
+ goto bad;
+ if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
+ goto bad;
+ /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0. */
+ arg1_inner_type = TREE_TYPE (arg1_type);
+ arg2 = build_binary_op (input_location, BIT_AND_EXPR, arg2,
+ build_int_cst (TREE_TYPE (arg2),
+ TYPE_VECTOR_SUBPARTS (arg1_type)
+ - 1), 0);
+ decl = build_decl (VAR_DECL, NULL_TREE, arg1_type);
+ DECL_EXTERNAL (decl) = 0;
+ TREE_PUBLIC (decl) = 0;
+ DECL_CONTEXT (decl) = current_function_decl;
+ TREE_USED (decl) = 1;
+ TREE_TYPE (decl) = arg1_type;
+ TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
+ DECL_INITIAL (decl) = arg1;
+ stmt = build1 (DECL_EXPR, arg1_type, decl);
+ TREE_ADDRESSABLE (decl) = 1;
+ SET_EXPR_LOCATION (stmt, input_location);
+ stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
+
+ innerptrtype = build_pointer_type (arg1_inner_type);
+
+ stmt = build_unary_op (input_location, ADDR_EXPR, stmt, 0);
+ stmt = convert (innerptrtype, stmt);
+ stmt = build_binary_op (input_location, PLUS_EXPR, stmt, arg2, 1);
+ stmt = build_indirect_ref (input_location, stmt, NULL);
+ stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
+ convert (TREE_TYPE (stmt), arg0));
+ stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
+ return stmt;
+ }
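+
+  /* Likewise for insertion, sketched as a GNU statement expression:
+     vec_insert (x, v, 5) with vector signed int v behaves roughly like
+
+       ({ vector signed int tmp = v;
+	  *(((int *) &tmp) + (5 & 3)) = x;
+	  tmp; })
+
+     returning the modified copy while leaving v itself untouched.  */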
+
+ for (n = 0;
+ !VOID_TYPE_P (TREE_VALUE (fnargs)) && arglist;
+ fnargs = TREE_CHAIN (fnargs), arglist = TREE_CHAIN (arglist), n++)
+ {
+ tree decl_type = TREE_VALUE (fnargs);
+ tree arg = TREE_VALUE (arglist);
+ tree type;
+
+ if (arg == error_mark_node)
+ return error_mark_node;
+
+ if (n >= 3)
+ abort ();
+
+ arg = default_conversion (arg);
+
+ /* The C++ front-end converts float * to const void * using
+ NOP_EXPR<const void *> (NOP_EXPR<void *> (x)). */
+ type = TREE_TYPE (arg);
+ if (POINTER_TYPE_P (type)
+ && TREE_CODE (arg) == NOP_EXPR
+ && lang_hooks.types_compatible_p (TREE_TYPE (arg),
+ const_ptr_type_node)
+ && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
+ ptr_type_node))
+ {
+ arg = TREE_OPERAND (arg, 0);
+ type = TREE_TYPE (arg);
+ }
+
+ /* Remove the const from the pointers to simplify the overload
+ matching further down. */
+ if (POINTER_TYPE_P (decl_type)
+ && POINTER_TYPE_P (type)
+ && TYPE_QUALS (TREE_TYPE (type)) != 0)
+ {
+ if (TYPE_READONLY (TREE_TYPE (type))
+ && !TYPE_READONLY (TREE_TYPE (decl_type)))
+	      warning (0, "passing arg %d of %qE discards qualifiers from "
+		       "pointer target type", n + 1, fndecl);
+ type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
+ 0));
+ arg = fold_convert (type, arg);
+ }
+
+ args[n] = arg;
+ types[n] = type;
+ }
+
+ /* If the number of arguments did not match the prototype, return NULL
+ and the generic code will issue the appropriate error message. */
+ if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || arglist)
+ return NULL;
+
+ if (n == 0)
+ abort ();
+
+ if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
+ {
+ if (TREE_CODE (types[0]) != VECTOR_TYPE)
+ goto bad;
+
+ return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
+ }
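+
+  /* For example, vec_step on a vector signed short value folds to the
+     integer constant 8, and to 4 for vector float, since the result is
+     simply the element count of the vector type.  */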
+
+ for (desc = altivec_overloaded_builtins;
+ desc->code && desc->code != fcode; desc++)
+ continue;
+
+ /* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in
+ the opX fields. */
+ for (; desc->code == fcode; desc++)
+ if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[0], desc->op1))
+ && (desc->op2 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[1], desc->op2))
+ && (desc->op3 == RS6000_BTI_NOT_OPAQUE
+ || rs6000_builtin_type_compatible (types[2], desc->op3)))
+ return altivec_build_resolved_builtin (args, n, desc);
+
+ bad:
+ error ("invalid parameter combination for AltiVec intrinsic");
+ return error_mark_node;
+}
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000-modes.def b/gcc-4.4.3/gcc/config/rs6000/rs6000-modes.def
new file mode 100644
index 000000000..0fa7b3d7a
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000-modes.def
@@ -0,0 +1,46 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 2002, 2003, 2004, 2007 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* 128-bit floating point. ABI_V4 uses IEEE quad, AIX/Darwin
+ adjust this in rs6000_override_options. */
+FLOAT_MODE (TF, 16, ieee_quad_format);
+
+/* PSImode is used for the XER register. The XER register
+ is not used for anything; perhaps it should be deleted,
+ except that that would change register numbers. */
+PARTIAL_INT_MODE (SI);
+
+/* Add any extra modes needed to represent the condition code.
+
+ For the RS/6000, we need separate modes when unsigned (logical) comparisons
+ are being done and we need a separate mode for floating-point. We also
+ use a mode for the case when we are comparing the results of two
+ comparisons, as then only the EQ bit is valid in the register. */
+
+CC_MODE (CCUNS);
+CC_MODE (CCFP);
+CC_MODE (CCEQ);
+
+/* Vector modes. */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+VECTOR_MODES (INT, 16); /* V16QI V8HI V4SI V2DI */
+VECTOR_MODE (INT, DI, 1);
+VECTOR_MODES (FLOAT, 8); /* V4HF V2SF */
+VECTOR_MODES (FLOAT, 16); /* V8HF V4SF V2DF */
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000-protos.h b/gcc-4.4.3/gcc/config/rs6000/rs6000-protos.h
new file mode 100644
index 000000000..9fa0e3458
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000-protos.h
@@ -0,0 +1,192 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_RS6000_PROTOS_H
+#define GCC_RS6000_PROTOS_H
+
+/* Declare functions in rs6000.c */
+
+#ifdef RTX_CODE
+
+#ifdef TREE_CODE
+extern void init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, int, int, int);
+#endif /* TREE_CODE */
+
+extern bool easy_altivec_constant (rtx, enum machine_mode);
+extern HOST_WIDE_INT const_vector_elt_as_int (rtx, unsigned int);
+extern bool macho_lo_sum_memory_operand (rtx, enum machine_mode);
+extern int num_insns_constant (rtx, enum machine_mode);
+extern int num_insns_constant_wide (HOST_WIDE_INT);
+extern int small_data_operand (rtx, enum machine_mode);
+extern bool toc_relative_expr_p (rtx);
+extern bool invalid_e500_subreg (rtx, enum machine_mode);
+extern void validate_condition_mode (enum rtx_code, enum machine_mode);
+extern bool legitimate_constant_pool_address_p (rtx);
+extern bool legitimate_indirect_address_p (rtx, int);
+extern bool legitimate_indexed_address_p (rtx, int);
+extern bool avoiding_indexed_address_p (enum machine_mode);
+
+extern rtx rs6000_got_register (rtx);
+extern rtx find_addr_reg (rtx);
+extern rtx gen_easy_altivec_constant (rtx);
+extern const char *output_vec_const_move (rtx *);
+extern void rs6000_expand_vector_init (rtx, rtx);
+extern void paired_expand_vector_init (rtx, rtx);
+extern void rs6000_expand_vector_set (rtx, rtx, int);
+extern void rs6000_expand_vector_extract (rtx, rtx, int);
+extern void build_mask64_2_operands (rtx, rtx *);
+extern int expand_block_clear (rtx[]);
+extern int expand_block_move (rtx[]);
+extern const char * rs6000_output_load_multiple (rtx[]);
+extern int includes_lshift_p (rtx, rtx);
+extern int includes_rshift_p (rtx, rtx);
+extern int includes_rldic_lshift_p (rtx, rtx);
+extern int includes_rldicr_lshift_p (rtx, rtx);
+extern int insvdi_rshift_rlwimi_p (rtx, rtx, rtx);
+extern int registers_ok_for_quad_peep (rtx, rtx);
+extern int mems_ok_for_quad_peep (rtx, rtx);
+extern bool gpr_or_gpr_p (rtx, rtx);
+extern enum reg_class rs6000_secondary_reload_class (enum reg_class,
+ enum machine_mode, rtx);
+
+extern int paired_emit_vector_cond_expr (rtx, rtx, rtx,
+ rtx, rtx, rtx);
+extern void paired_expand_vector_move (rtx operands[]);
+
+
+extern int ccr_bit (rtx, int);
+extern int extract_MB (rtx);
+extern int extract_ME (rtx);
+extern void rs6000_output_function_entry (FILE *, const char *);
+extern void print_operand (FILE *, rtx, int);
+extern void print_operand_address (FILE *, rtx);
+extern bool rs6000_output_addr_const_extra (FILE *, rtx);
+extern enum rtx_code rs6000_reverse_condition (enum machine_mode,
+ enum rtx_code);
+extern void rs6000_emit_sCOND (enum rtx_code, rtx);
+extern void rs6000_emit_cbranch (enum rtx_code, rtx);
+extern char * output_cbranch (rtx, const char *, int, rtx);
+extern char * output_e500_flip_gt_bit (rtx, rtx);
+extern rtx rs6000_emit_set_const (rtx, enum machine_mode, rtx, int);
+extern int rs6000_emit_cmove (rtx, rtx, rtx, rtx);
+extern int rs6000_emit_vector_cond_expr (rtx, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_emit_minmax (rtx, enum rtx_code, rtx, rtx);
+extern void rs6000_emit_sync (enum rtx_code, enum machine_mode,
+ rtx, rtx, rtx, rtx, bool);
+extern void rs6000_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_split_compare_and_swap (rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_expand_compare_and_swapqhi (rtx, rtx, rtx, rtx);
+extern void rs6000_split_compare_and_swapqhi (rtx, rtx, rtx, rtx, rtx, rtx);
+extern void rs6000_split_lock_test_and_set (rtx, rtx, rtx, rtx);
+extern void rs6000_emit_swdivsf (rtx, rtx, rtx);
+extern void rs6000_emit_swdivdf (rtx, rtx, rtx);
+extern void rs6000_emit_swrsqrtsf (rtx, rtx);
+extern void output_toc (FILE *, rtx, int, enum machine_mode);
+extern void rs6000_initialize_trampoline (rtx, rtx, rtx);
+extern rtx rs6000_longcall_ref (rtx);
+extern void rs6000_fatal_bad_address (rtx);
+extern rtx create_TOC_reference (rtx);
+extern void rs6000_split_multireg_move (rtx, rtx);
+extern void rs6000_emit_move (rtx, rtx, enum machine_mode);
+extern rtx rs6000_secondary_memory_needed_rtx (enum machine_mode);
+extern rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
+extern rtx rs6000_legitimize_reload_address (rtx, enum machine_mode,
+ int, int, int, int *);
+extern int rs6000_legitimate_address (enum machine_mode, rtx, int);
+extern bool rs6000_legitimate_offset_address_p (enum machine_mode, rtx, int);
+extern bool rs6000_mode_dependent_address (rtx);
+extern rtx rs6000_find_base_term (rtx);
+extern bool rs6000_offsettable_memref_p (rtx);
+extern rtx rs6000_return_addr (int, rtx);
+extern void rs6000_output_symbol_ref (FILE*, rtx);
+extern HOST_WIDE_INT rs6000_initial_elimination_offset (int, int);
+extern void rs6000_emit_popcount (rtx, rtx);
+extern void rs6000_emit_parity (rtx, rtx);
+
+extern rtx rs6000_machopic_legitimize_pic_address (rtx, enum machine_mode,
+ rtx);
+#endif /* RTX_CODE */
+
+#ifdef TREE_CODE
+extern unsigned int rs6000_special_round_type_align (tree, unsigned int,
+ unsigned int);
+extern unsigned int darwin_rs6000_special_round_type_align (tree, unsigned int,
+ unsigned int);
+extern void function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int, int);
+extern int function_arg_boundary (enum machine_mode, tree);
+extern rtx function_arg (CUMULATIVE_ARGS *, enum machine_mode, tree, int);
+extern tree altivec_resolve_overloaded_builtin (tree, tree);
+extern rtx rs6000_function_value (const_tree, const_tree);
+extern rtx rs6000_libcall_value (enum machine_mode);
+extern rtx rs6000_va_arg (tree, tree);
+extern int function_ok_for_sibcall (tree);
+extern void rs6000_elf_declare_function_name (FILE *, const char *, tree);
+extern bool rs6000_elf_in_small_data_p (const_tree);
+#ifdef ARGS_SIZE_RTX
+/* expr.h defines ARGS_SIZE_RTX and `enum direction' */
+extern enum direction function_arg_padding (enum machine_mode, const_tree);
+#endif /* ARGS_SIZE_RTX */
+
+#endif /* TREE_CODE */
+
+extern void optimization_options (int, int);
+extern void rs6000_override_options (const char *);
+extern int direct_return (void);
+extern int first_reg_to_save (void);
+extern int first_fp_reg_to_save (void);
+extern void output_ascii (FILE *, const char *, int);
+extern void rs6000_gen_section_name (char **, const char *, const char *);
+extern void output_function_profiler (FILE *, int);
+extern void output_profile_hook (int);
+extern int rs6000_trampoline_size (void);
+extern alias_set_type get_TOC_alias_set (void);
+extern void rs6000_emit_prologue (void);
+extern void rs6000_emit_load_toc_table (int);
+extern void rs6000_aix_emit_builtin_unwind_init (void);
+extern unsigned int rs6000_dbx_register_number (unsigned int);
+extern void rs6000_emit_epilogue (int);
+extern void rs6000_emit_eh_reg_restore (rtx, rtx);
+extern const char * output_isel (rtx *);
+extern int rs6000_register_move_cost (enum machine_mode,
+ enum reg_class, enum reg_class);
+extern int rs6000_memory_move_cost (enum machine_mode, enum reg_class, int);
+extern bool rs6000_tls_referenced_p (rtx);
+extern int rs6000_hard_regno_nregs (int, enum machine_mode);
+extern void rs6000_conditional_register_usage (void);
+
+/* Declare functions in rs6000-c.c */
+
+extern void rs6000_pragma_longcall (struct cpp_reader *);
+extern void rs6000_cpu_cpp_builtins (struct cpp_reader *);
+
+#if TARGET_MACHO
+char *output_call (rtx, rtx *, int, int);
+#endif
+
+#ifdef NO_DOLLAR_IN_LABEL
+const char * rs6000_xcoff_strip_dollar (const char *);
+#endif
+
+void rs6000_final_prescan_insn (rtx, rtx *operand, int num_operands);
+
+extern bool rs6000_hard_regno_mode_ok_p[][FIRST_PSEUDO_REGISTER];
+#endif /* rs6000-protos.h */
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000.c b/gcc-4.4.3/gcc/config/rs6000/rs6000.c
new file mode 100644
index 000000000..bac3ef385
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000.c
@@ -0,0 +1,22885 @@
+/* Subroutines used for code generation on IBM RS/6000.
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "obstack.h"
+#include "tree.h"
+#include "expr.h"
+#include "optabs.h"
+#include "except.h"
+#include "function.h"
+#include "output.h"
+#include "basic-block.h"
+#include "integrate.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "hashtab.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "reload.h"
+#include "cfglayout.h"
+#include "sched-int.h"
+#include "gimple.h"
+#include "tree-flow.h"
+#include "intl.h"
+#include "params.h"
+#include "tm-constrs.h"
+#if TARGET_XCOFF
+#include "xcoffout.h" /* get declarations of xcoff_*_section_name */
+#endif
+#if TARGET_MACHO
+#include "gstab.h" /* for N_SLINE */
+#endif
+
+#ifndef TARGET_NO_PROTOTYPE
+#define TARGET_NO_PROTOTYPE 0
+#endif
+
+#define min(A,B) ((A) < (B) ? (A) : (B))
+#define max(A,B) ((A) > (B) ? (A) : (B))
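+/* NOTE: like all function-like macros, min and max evaluate their
+   arguments more than once, so an argument with side effects
+   (e.g. min (i++, j)) would misbehave; they must only be used with
+   side-effect-free operands.  */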
+
+/* Structure used to define the rs6000 stack */
+typedef struct rs6000_stack {
+ int first_gp_reg_save; /* first callee saved GP register used */
+ int first_fp_reg_save; /* first callee saved FP register used */
+ int first_altivec_reg_save; /* first callee saved AltiVec register used */
+ int lr_save_p; /* true if the link reg needs to be saved */
+ int cr_save_p; /* true if the CR reg needs to be saved */
+ unsigned int vrsave_mask; /* mask of vec registers to save */
+ int push_p; /* true if we need to allocate stack space */
+ int calls_p; /* true if the function makes any calls */
+ int world_save_p; /* true if we're saving *everything*:
+ r13-r31, cr, f14-f31, vrsave, v20-v31 */
+ enum rs6000_abi abi; /* which ABI to use */
+ int gp_save_offset; /* offset to save GP regs from initial SP */
+ int fp_save_offset; /* offset to save FP regs from initial SP */
+ int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
+ int lr_save_offset; /* offset to save LR from initial SP */
+ int cr_save_offset; /* offset to save CR from initial SP */
+ int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
+ int spe_gp_save_offset; /* offset to save spe 64-bit gprs */
+ int varargs_save_offset; /* offset to save the varargs registers */
+ int ehrd_offset; /* offset to EH return data */
+ int reg_size; /* register size (4 or 8) */
+ HOST_WIDE_INT vars_size; /* variable save area size */
+ int parm_size; /* outgoing parameter size */
+ int save_size; /* save area size */
+ int fixed_size; /* fixed size of stack frame */
+ int gp_size; /* size of saved GP registers */
+ int fp_size; /* size of saved FP registers */
+ int altivec_size; /* size of saved AltiVec registers */
+ int cr_size; /* size to hold CR if not in save_size */
+ int vrsave_size; /* size to hold VRSAVE if not in save_size */
+ int altivec_padding_size; /* size of altivec alignment padding if
+ not in save_size */
+ int spe_gp_size; /* size of 64-bit GPR save size for SPE */
+ int spe_padding_size;
+ HOST_WIDE_INT total_size; /* total bytes allocated for stack */
+ int spe_64bit_regs_used;
+} rs6000_stack_t;
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+typedef struct machine_function GTY(())
+{
+ /* Flags if __builtin_return_address (n) with n >= 1 was used. */
+ int ra_needs_full_frame;
+ /* Some local-dynamic symbol. */
+ const char *some_ld_name;
+ /* Whether the instruction chain has been scanned already. */
+ int insn_chain_scanned_p;
+ /* Flags if __builtin_return_address (0) was used. */
+ int ra_need_lr;
+ /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
+ varargs save area. */
+ HOST_WIDE_INT varargs_save_offset;
+ /* Temporary stack slot to use for SDmode copies. This slot is
+ 64-bits wide and is allocated early enough so that the offset
+ does not overflow the 16-bit load/store offset field. */
+ rtx sdmode_stack_slot;
+} machine_function;
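+/* A note on lifetime (an assumption based on the usual GCC idiom, not
+   spelled out here): one machine_function is presumably allocated per
+   function through the init_machine_status hook, implemented by
+   rs6000_init_machine_status below; the GTY(()) marker lets the
+   garbage collector scan the sdmode_stack_slot rtx it holds.  */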
+
+/* Target cpu type */
+
+enum processor_type rs6000_cpu;
+struct rs6000_cpu_select rs6000_select[3] =
+{
+ /* string, switch name, set tune, set arch */
+ { (const char *)0, "--with-cpu=", 1, 1 },
+ { (const char *)0, "-mcpu=", 1, 1 },
+ { (const char *)0, "-mtune=", 1, 0 },
+};
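+/* rs6000_override_options below walks this table in order, so later
+   entries win: -mcpu= overrides the configure-time --with-cpu=
+   default, and -mtune= (whose set_arch_p is 0) then overrides only
+   the tuning choice, leaving the architecture flags alone.  */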
+
+/* Always emit branch hint bits. */
+static GTY(()) bool rs6000_always_hint;
+
+/* Schedule instructions for group formation. */
+static GTY(()) bool rs6000_sched_groups;
+
+/* Align branch targets. */
+static GTY(()) bool rs6000_align_branch_targets;
+
+/* Support for -msched-costly-dep option. */
+const char *rs6000_sched_costly_dep_str;
+enum rs6000_dependence_cost rs6000_sched_costly_dep;
+
+/* Support for -minsert-sched-nops option. */
+const char *rs6000_sched_insert_nops_str;
+enum rs6000_nop_insertion rs6000_sched_insert_nops;
+
+/* Support targetm.vectorize.builtin_mask_for_load. */
+static GTY(()) tree altivec_builtin_mask_for_load;
+
+/* Size of long double. */
+int rs6000_long_double_type_size;
+
+/* IEEE quad extended precision long double. */
+int rs6000_ieeequad;
+
+/* Nonzero to use AltiVec ABI. */
+int rs6000_altivec_abi;
+
+/* Nonzero if we want SPE SIMD instructions. */
+int rs6000_spe;
+
+/* Nonzero if we want SPE ABI extensions. */
+int rs6000_spe_abi;
+
+/* Nonzero to use isel instructions. */
+int rs6000_isel;
+
+/* Nonzero if floating point operations are done in the GPRs. */
+int rs6000_float_gprs = 0;
+
+/* Nonzero if we want Darwin's struct-by-value-in-regs ABI. */
+int rs6000_darwin64_abi;
+
+/* Set to nonzero once AIX common-mode calls have been defined. */
+static GTY(()) int common_mode_defined;
+
+/* Save information from a "cmpxx" operation until the branch or scc is
+ emitted. */
+rtx rs6000_compare_op0, rs6000_compare_op1;
+int rs6000_compare_fp_p;
+
+/* Label number of the label created for -mrelocatable, which we call
+   so we can get the address of the GOT section.  */
+int rs6000_pic_labelno;
+
+#ifdef USING_ELFOS_H
+/* Which ABI to adhere to.  */
+const char *rs6000_abi_name;
+
+/* Semantics of the small data area */
+enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;
+
+/* Which small data model to use */
+const char *rs6000_sdata_name = (char *)0;
+
+/* Counter for labels which are to be placed in .fixup. */
+int fixuplabelno = 0;
+#endif
+
+/* Bit size of immediate TLS offsets and string from which it is decoded. */
+int rs6000_tls_size = 32;
+const char *rs6000_tls_size_string;
+
+/* ABI enumeration available for subtarget to use. */
+enum rs6000_abi rs6000_current_abi;
+
+/* Whether to use variant of AIX ABI for PowerPC64 Linux. */
+int dot_symbols;
+
+/* Debug flags */
+const char *rs6000_debug_name;
+int rs6000_debug_stack; /* debug stack applications */
+int rs6000_debug_arg; /* debug argument handling */
+
+/* Value is TRUE if register/mode pair is acceptable. */
+bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
+
+/* Built in types. */
+
+tree rs6000_builtin_types[RS6000_BTI_MAX];
+tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
+
+const char *rs6000_traceback_name;
+static enum {
+ traceback_default = 0,
+ traceback_none,
+ traceback_part,
+ traceback_full
+} rs6000_traceback;
+
+/* Flag to say the TOC is initialized */
+int toc_initialized;
+char toc_label_name[10];
+
+/* Cached value of rs6000_variable_issue. This is cached in
+ rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */
+static short cached_can_issue_more;
+
+static GTY(()) section *read_only_data_section;
+static GTY(()) section *private_data_section;
+static GTY(()) section *read_only_private_data_section;
+static GTY(()) section *sdata2_section;
+static GTY(()) section *toc_section;
+
+/* Control alignment for fields within structures, as set by
+   -malign-XXXXX.  */
+int rs6000_alignment_flags;
+
+/* True for any options that were explicitly set. */
+struct {
+ bool aix_struct_ret; /* True if -maix-struct-ret was used. */
+ bool alignment; /* True if -malign- was used. */
+ bool spe_abi; /* True if -mabi=spe/no-spe was used. */
+ bool altivec_abi; /* True if -mabi=altivec/no-altivec used. */
+ bool spe; /* True if -mspe= was used. */
+ bool float_gprs; /* True if -mfloat-gprs= was used. */
+ bool isel; /* True if -misel was used. */
+ bool long_double; /* True if -mlong-double- was used. */
+ bool ieee; /* True if -mabi=ieee/ibmlongdouble used. */
+ bool vrsave; /* True if -mvrsave was used. */
+} rs6000_explicit_options;
+
+struct builtin_description
+{
+ /* mask is not const because we're going to alter it below. This
+ nonsense will go away when we rewrite the -march infrastructure
+ to give us more target flag bits. */
+ unsigned int mask;
+ const enum insn_code icode;
+ const char *const name;
+ const enum rs6000_builtins code;
+};
+
+/* Target cpu costs. */
+
+struct processor_costs {
+ const int mulsi; /* cost of SImode multiplication. */
+ const int mulsi_const; /* cost of SImode multiplication by constant. */
+ const int mulsi_const9; /* cost of SImode mult by short constant. */
+ const int muldi; /* cost of DImode multiplication. */
+ const int divsi; /* cost of SImode division. */
+ const int divdi; /* cost of DImode division. */
+ const int fp; /* cost of simple SFmode and DFmode insns. */
+ const int dmul; /* cost of DFmode multiplication (and fmadd). */
+ const int sdiv; /* cost of SFmode division (fdivs). */
+ const int ddiv; /* cost of DFmode division (fdiv). */
+ const int cache_line_size; /* cache line size in bytes. */
+ const int l1_cache_size; /* size of l1 cache, in kilobytes. */
+ const int l2_cache_size; /* size of l2 cache, in kilobytes. */
+ const int simultaneous_prefetches; /* number of parallel prefetch
+ operations. */
+};
+
+const struct processor_costs *rs6000_cost;
+
+/* Processor costs (relative to an add) */
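+/* COSTS_N_INSNS (N) expands to (N) * 4 (see rtl.h), so every entry
+   below is in units of a single add; e.g. COSTS_N_INSNS (19) for
+   divsi says an SImode divide costs roughly as much as 19 adds.  */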
+
+/* Instruction size costs on 32-bit processors.  */
+static const
+struct processor_costs size32_cost = {
+ COSTS_N_INSNS (1), /* mulsi */
+ COSTS_N_INSNS (1), /* mulsi_const */
+ COSTS_N_INSNS (1), /* mulsi_const9 */
+ COSTS_N_INSNS (1), /* muldi */
+ COSTS_N_INSNS (1), /* divsi */
+ COSTS_N_INSNS (1), /* divdi */
+ COSTS_N_INSNS (1), /* fp */
+ COSTS_N_INSNS (1), /* dmul */
+ COSTS_N_INSNS (1), /* sdiv */
+ COSTS_N_INSNS (1), /* ddiv */
+ 32, /* cache line size */
+ 0, /* l1 cache */
+ 0, /* l2 cache */
+ 0, /* streams */
+};
+
+/* Instruction size costs on 64-bit processors.  */
+static const
+struct processor_costs size64_cost = {
+ COSTS_N_INSNS (1), /* mulsi */
+ COSTS_N_INSNS (1), /* mulsi_const */
+ COSTS_N_INSNS (1), /* mulsi_const9 */
+ COSTS_N_INSNS (1), /* muldi */
+ COSTS_N_INSNS (1), /* divsi */
+ COSTS_N_INSNS (1), /* divdi */
+ COSTS_N_INSNS (1), /* fp */
+ COSTS_N_INSNS (1), /* dmul */
+ COSTS_N_INSNS (1), /* sdiv */
+ COSTS_N_INSNS (1), /* ddiv */
+ 128, /* cache line size */
+ 0, /* l1 cache */
+ 0, /* l2 cache */
+ 0, /* streams */
+};
+
+/* Instruction costs on RIOS1 processors. */
+static const
+struct processor_costs rios1_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (19), /* sdiv */
+ COSTS_N_INSNS (19), /* ddiv */
+ 128, /* cache line size */
+ 64, /* l1 cache */
+ 512, /* l2 cache */
+ 0, /* streams */
+};
+
+/* Instruction costs on RIOS2 processors. */
+static const
+struct processor_costs rios2_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (13), /* divsi */
+ COSTS_N_INSNS (13), /* divdi */
+ COSTS_N_INSNS (2), /* fp */
+ COSTS_N_INSNS (2), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+ 256, /* cache line size */
+ 256, /* l1 cache */
+ 1024, /* l2 cache */
+ 0, /* streams */
+};
+
+/* Instruction costs on RS64A processors. */
+static const
+struct processor_costs rs64a_cost = {
+ COSTS_N_INSNS (20), /* mulsi */
+ COSTS_N_INSNS (12), /* mulsi_const */
+ COSTS_N_INSNS (8), /* mulsi_const9 */
+ COSTS_N_INSNS (34), /* muldi */
+ COSTS_N_INSNS (65), /* divsi */
+ COSTS_N_INSNS (67), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (31), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+ 128, /* cache line size */
+ 128, /* l1 cache */
+ 2048, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on MPCCORE processors. */
+static const
+struct processor_costs mpccore_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (6), /* divsi */
+ COSTS_N_INSNS (6), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (10), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+ 32, /* cache line size */
+ 4, /* l1 cache */
+ 16, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC403 processors. */
+static const
+struct processor_costs ppc403_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (33), /* divsi */
+ COSTS_N_INSNS (33), /* divdi */
+ COSTS_N_INSNS (11), /* fp */
+ COSTS_N_INSNS (11), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (11), /* ddiv */
+ 32, /* cache line size */
+ 4, /* l1 cache */
+ 16, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC405 processors. */
+static const
+struct processor_costs ppc405_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (35), /* divsi */
+ COSTS_N_INSNS (35), /* divdi */
+ COSTS_N_INSNS (11), /* fp */
+ COSTS_N_INSNS (11), /* dmul */
+ COSTS_N_INSNS (11), /* sdiv */
+ COSTS_N_INSNS (11), /* ddiv */
+ 32, /* cache line size */
+ 16, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC440 processors. */
+static const
+struct processor_costs ppc440_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (3), /* muldi */
+ COSTS_N_INSNS (34), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (19), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 256, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC601 processors. */
+static const
+struct processor_costs ppc601_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (5), /* mulsi_const */
+ COSTS_N_INSNS (5), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (36), /* divsi */
+ COSTS_N_INSNS (36), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 256, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC603 processors. */
+static const
+struct processor_costs ppc603_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (37), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+ 32, /* cache line size */
+ 8, /* l1 cache */
+ 64, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC604 processors. */
+static const
+struct processor_costs ppc604_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (20), /* divsi */
+ COSTS_N_INSNS (20), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+ 32, /* cache line size */
+ 16, /* l1 cache */
+ 512, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC604e processors. */
+static const
+struct processor_costs ppc604e_cost = {
+ COSTS_N_INSNS (2), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (2), /* muldi */
+ COSTS_N_INSNS (20), /* divsi */
+ COSTS_N_INSNS (20), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC620 processors. */
+static const
+struct processor_costs ppc620_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (32), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC630 processors. */
+static const
+struct processor_costs ppc630_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (7), /* muldi */
+ COSTS_N_INSNS (21), /* divsi */
+ COSTS_N_INSNS (37), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (21), /* ddiv */
+ 128, /* cache line size */
+ 64, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on Cell processor. */
+/* COSTS_N_INSNS (1) ~ one add. */
+static const
+struct processor_costs ppccell_cost = {
+ COSTS_N_INSNS (9/2)+2, /* mulsi */
+ COSTS_N_INSNS (6/2), /* mulsi_const */
+ COSTS_N_INSNS (6/2), /* mulsi_const9 */
+ COSTS_N_INSNS (15/2)+2, /* muldi */
+ COSTS_N_INSNS (38/2), /* divsi */
+ COSTS_N_INSNS (70/2), /* divdi */
+ COSTS_N_INSNS (10/2), /* fp */
+ COSTS_N_INSNS (10/2), /* dmul */
+ COSTS_N_INSNS (74/2), /* sdiv */
+ COSTS_N_INSNS (74/2), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 6, /* streams */
+};
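+/* The halved operands above are raw Cell cycle counts scaled so that
+   COSTS_N_INSNS (1) still corresponds to one add (presumably because
+   a simple add takes two cycles on the Cell PPU).  Note that / is
+   integer division here, so COSTS_N_INSNS (9/2) + 2 is
+   COSTS_N_INSNS (4) + 2, i.e. 18 with the (N) * 4 definition.  */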
+
+/* Instruction costs on PPC750 and PPC7400 processors. */
+static const
+struct processor_costs ppc750_cost = {
+ COSTS_N_INSNS (5), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (5), /* muldi */
+ COSTS_N_INSNS (17), /* divsi */
+ COSTS_N_INSNS (17), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (31), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 512, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC7450 processors. */
+static const
+struct processor_costs ppc7450_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (3), /* mulsi_const */
+ COSTS_N_INSNS (3), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (23), /* divsi */
+ COSTS_N_INSNS (23), /* divdi */
+ COSTS_N_INSNS (5), /* fp */
+ COSTS_N_INSNS (5), /* dmul */
+ COSTS_N_INSNS (21), /* sdiv */
+ COSTS_N_INSNS (35), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 1, /* streams */
+};
+
+/* Instruction costs on PPC8540 processors. */
+static const
+struct processor_costs ppc8540_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (4), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (29), /* sdiv */
+ COSTS_N_INSNS (29), /* ddiv */
+ 32, /* cache line size */
+ 32, /* l1 cache */
+ 256, /* l2 cache */
+ 1, /* prefetch streams */
+};
+
+/* Instruction costs on E300C2 and E300C3 cores. */
+static const
+struct processor_costs ppce300c2c3_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (19), /* divsi */
+ COSTS_N_INSNS (19), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (4), /* dmul */
+ COSTS_N_INSNS (18), /* sdiv */
+ COSTS_N_INSNS (33), /* ddiv */
+ 32, /* cache line size */
+ 16, /* l1 cache */
+ 16, /* l2 cache */
+ 1, /* prefetch streams */
+};
+
+/* Instruction costs on PPCE500MC processors. */
+static const
+struct processor_costs ppce500mc_cost = {
+ COSTS_N_INSNS (4), /* mulsi */
+ COSTS_N_INSNS (4), /* mulsi_const */
+ COSTS_N_INSNS (4), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (14), /* divsi */
+ COSTS_N_INSNS (14), /* divdi */
+ COSTS_N_INSNS (8), /* fp */
+ COSTS_N_INSNS (10), /* dmul */
+ COSTS_N_INSNS (36), /* sdiv */
+ COSTS_N_INSNS (66), /* ddiv */
+ 64, /* cache line size */
+ 32, /* l1 cache */
+ 128, /* l2 cache */
+ 1, /* prefetch streams */
+};
+
+/* Instruction costs on POWER4 and POWER5 processors. */
+static const
+struct processor_costs power4_cost = {
+ COSTS_N_INSNS (3), /* mulsi */
+ COSTS_N_INSNS (2), /* mulsi_const */
+ COSTS_N_INSNS (2), /* mulsi_const9 */
+ COSTS_N_INSNS (4), /* muldi */
+ COSTS_N_INSNS (18), /* divsi */
+ COSTS_N_INSNS (34), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (17), /* sdiv */
+ COSTS_N_INSNS (17), /* ddiv */
+ 128, /* cache line size */
+ 32, /* l1 cache */
+ 1024, /* l2 cache */
+ 8, /* prefetch streams */
+};
+
+/* Instruction costs on POWER6 processors. */
+static const
+struct processor_costs power6_cost = {
+ COSTS_N_INSNS (8), /* mulsi */
+ COSTS_N_INSNS (8), /* mulsi_const */
+ COSTS_N_INSNS (8), /* mulsi_const9 */
+ COSTS_N_INSNS (8), /* muldi */
+ COSTS_N_INSNS (22), /* divsi */
+ COSTS_N_INSNS (28), /* divdi */
+ COSTS_N_INSNS (3), /* fp */
+ COSTS_N_INSNS (3), /* dmul */
+ COSTS_N_INSNS (13), /* sdiv */
+ COSTS_N_INSNS (16), /* ddiv */
+ 128, /* cache line size */
+ 64, /* l1 cache */
+ 2048, /* l2 cache */
+ 16, /* prefetch streams */
+};
+
+
+static bool rs6000_function_ok_for_sibcall (tree, tree);
+static const char *rs6000_invalid_within_doloop (const_rtx);
+static rtx rs6000_generate_compare (enum rtx_code);
+static void rs6000_emit_stack_tie (void);
+static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
+static bool spe_func_has_64bit_regs_p (void);
+static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
+ int, HOST_WIDE_INT);
+static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
+static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int, int);
+static unsigned rs6000_hash_constant (rtx);
+static unsigned toc_hash_function (const void *);
+static int toc_hash_eq (const void *, const void *);
+static bool constant_pool_expr_p (rtx);
+static bool legitimate_small_data_p (enum machine_mode, rtx);
+static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
+static struct machine_function * rs6000_init_machine_status (void);
+static bool rs6000_assemble_integer (rtx, unsigned int, int);
+static bool no_global_regs_above (int, bool);
+#ifdef HAVE_GAS_HIDDEN
+static void rs6000_assemble_visibility (tree, int);
+#endif
+static int rs6000_ra_ever_killed (void);
+static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
+static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
+static bool rs6000_ms_bitfield_layout_p (const_tree);
+static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
+static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
+static const char *rs6000_mangle_type (const_tree);
+extern const struct attribute_spec rs6000_attribute_table[];
+static void rs6000_set_default_type_attributes (tree);
+static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool);
+static void rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool);
+static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int,
+ enum machine_mode, bool, bool, bool);
+static bool rs6000_reg_live_or_pic_offset_p (int);
+static int rs6000_savres_strategy (rs6000_stack_t *, bool, int, int);
+static void rs6000_restore_saved_cr (rtx, int);
+static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
+static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
+static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
+ tree);
+static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+static bool rs6000_return_in_memory (const_tree, const_tree);
+static void rs6000_file_start (void);
+#if TARGET_ELF
+static int rs6000_elf_reloc_rw_mask (void);
+static void rs6000_elf_asm_out_constructor (rtx, int);
+static void rs6000_elf_asm_out_destructor (rtx, int);
+static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
+static void rs6000_elf_asm_init_sections (void);
+static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
+ unsigned HOST_WIDE_INT);
+static void rs6000_elf_encode_section_info (tree, rtx, int)
+ ATTRIBUTE_UNUSED;
+#endif
+static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
+static void rs6000_alloc_sdmode_stack_slot (void);
+static void rs6000_instantiate_decls (void);
+#if TARGET_XCOFF
+static void rs6000_xcoff_asm_output_anchor (rtx);
+static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
+static void rs6000_xcoff_asm_init_sections (void);
+static int rs6000_xcoff_reloc_rw_mask (void);
+static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
+static section *rs6000_xcoff_select_section (tree, int,
+ unsigned HOST_WIDE_INT);
+static void rs6000_xcoff_unique_section (tree, int);
+static section *rs6000_xcoff_select_rtx_section
+ (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
+static const char * rs6000_xcoff_strip_name_encoding (const char *);
+static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
+static void rs6000_xcoff_file_start (void);
+static void rs6000_xcoff_file_end (void);
+#endif
+static int rs6000_variable_issue (FILE *, int, rtx, int);
+static bool rs6000_rtx_costs (rtx, int, int, int *, bool);
+static int rs6000_adjust_cost (rtx, rtx, rtx, int);
+static void rs6000_sched_init (FILE *, int, int);
+static bool is_microcoded_insn (rtx);
+static bool is_nonpipeline_insn (rtx);
+static bool is_cracked_insn (rtx);
+static bool is_branch_slot_insn (rtx);
+static bool is_load_insn (rtx);
+static rtx get_store_dest (rtx pat);
+static bool is_store_insn (rtx);
+static bool set_to_load_agen (rtx, rtx);
+static bool adjacent_mem_locations (rtx, rtx);
+static int rs6000_adjust_priority (rtx, int);
+static int rs6000_issue_rate (void);
+static bool rs6000_is_costly_dependence (dep_t, int, int);
+static rtx get_next_active_insn (rtx, rtx);
+static bool insn_terminates_group_p (rtx , enum group_termination);
+static bool insn_must_be_first_in_group (rtx);
+static bool insn_must_be_last_in_group (rtx);
+static bool is_costly_group (rtx *, rtx);
+static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
+static int redefine_groups (FILE *, int, rtx, rtx);
+static int pad_groups (FILE *, int, rtx, rtx);
+static void rs6000_sched_finish (FILE *, int);
+static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
+static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
+static int rs6000_use_sched_lookahead (void);
+static int rs6000_use_sched_lookahead_guard (rtx);
+static void * rs6000_alloc_sched_context (void);
+static void rs6000_init_sched_context (void *, bool);
+static void rs6000_set_sched_context (void *);
+static void rs6000_free_sched_context (void *);
+static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
+static tree rs6000_builtin_mask_for_load (void);
+static tree rs6000_builtin_mul_widen_even (tree);
+static tree rs6000_builtin_mul_widen_odd (tree);
+static tree rs6000_builtin_conversion (enum tree_code, tree);
+static tree rs6000_builtin_vec_perm (tree, tree *);
+
+static void def_builtin (int, const char *, tree, int);
+static bool rs6000_vector_alignment_reachable (const_tree, bool);
+static void rs6000_init_builtins (void);
+static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
+static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
+static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
+static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
+static void altivec_init_builtins (void);
+static void rs6000_common_init_builtins (void);
+static void rs6000_init_libfuncs (void);
+
+static void paired_init_builtins (void);
+static rtx paired_expand_builtin (tree, rtx, bool *);
+static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
+static rtx paired_expand_stv_builtin (enum insn_code, tree);
+static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
+
+static void enable_mask_for_builtins (struct builtin_description *, int,
+ enum rs6000_builtins,
+ enum rs6000_builtins);
+static tree build_opaque_vector_type (tree, int);
+static void spe_init_builtins (void);
+static rtx spe_expand_builtin (tree, rtx, bool *);
+static rtx spe_expand_stv_builtin (enum insn_code, tree);
+static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
+static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
+static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
+static rs6000_stack_t *rs6000_stack_info (void);
+static void debug_stack_info (rs6000_stack_t *);
+
+static rtx altivec_expand_builtin (tree, rtx, bool *);
+static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
+static rtx altivec_expand_st_builtin (tree, rtx, bool *);
+static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
+static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
+static rtx altivec_expand_predicate_builtin (enum insn_code,
+ const char *, tree, rtx);
+static rtx altivec_expand_stv_builtin (enum insn_code, tree);
+static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
+static rtx altivec_expand_vec_set_builtin (tree);
+static rtx altivec_expand_vec_ext_builtin (tree, rtx);
+static int get_element_number (tree, tree);
+static bool rs6000_handle_option (size_t, const char *, int);
+static void rs6000_parse_tls_size_option (void);
+static void rs6000_parse_yes_no_option (const char *, const char *, int *);
+static int first_altivec_reg_to_save (void);
+static unsigned int compute_vrsave_mask (void);
+static void compute_save_world_info (rs6000_stack_t *info_ptr);
+static void is_altivec_return_reg (rtx, void *);
+static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
+int easy_vector_constant (rtx, enum machine_mode);
+static bool rs6000_is_opaque_type (const_tree);
+static rtx rs6000_dwarf_register_span (rtx);
+static void rs6000_init_dwarf_reg_sizes_extra (tree);
+static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
+static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
+static rtx rs6000_tls_get_addr (void);
+static rtx rs6000_got_sym (void);
+static int rs6000_tls_symbol_ref_1 (rtx *, void *);
+static const char *rs6000_get_some_local_dynamic_name (void);
+static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
+static rtx rs6000_complex_function_value (enum machine_mode);
+static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
+ enum machine_mode, tree);
+static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
+ tree, HOST_WIDE_INT);
+static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
+ HOST_WIDE_INT,
+ rtx[], int *);
+static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
+ const_tree, HOST_WIDE_INT,
+ rtx[], int *);
+static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, int, bool);
+static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
+static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
+static void setup_incoming_varargs (CUMULATIVE_ARGS *,
+ enum machine_mode, tree,
+ int *, int);
+static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
+ const_tree, bool);
+static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
+ tree, bool);
+static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
+#if TARGET_MACHO
+static void macho_branch_islands (void);
+static int no_previous_def (tree function_name);
+static tree get_prev_label (tree function_name);
+static void rs6000_darwin_file_start (void);
+#endif
+
+static tree rs6000_build_builtin_va_list (void);
+static void rs6000_va_start (tree, rtx);
+static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
+static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
+static bool rs6000_scalar_mode_supported_p (enum machine_mode);
+static bool rs6000_vector_mode_supported_p (enum machine_mode);
+static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
+ enum machine_mode);
+static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
+ enum machine_mode);
+static int get_vsel_insn (enum machine_mode);
+static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
+static tree rs6000_stack_protect_fail (void);
+
+const int INSN_NOT_AVAILABLE = -1;
+static enum machine_mode rs6000_eh_return_filter_mode (void);
+
+/* Hash table stuff for keeping track of TOC entries. */
+
+struct toc_hash_struct GTY(())
+{
+ /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
+ ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
+ rtx key;
+ enum machine_mode key_mode;
+ int labelno;
+};
+
+static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
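+/* A sketch of how these pieces fit together, assuming the usual
+   hashtab.h idiom (the creation call itself appears later in this
+   file):
+
+     toc_hash_table = htab_create_ggc (1021, toc_hash_function,
+                                       toc_hash_eq, NULL);
+
+   toc_hash_function hashes a toc_hash_struct and toc_hash_eq compares
+   two of them, letting the TOC output code reuse an existing entry
+   instead of emitting a duplicate.  */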
+
+/* Default register names. */
+char rs6000_reg_names[][8] =
+{
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "mq", "lr", "ctr","ap",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "xer",
+ /* AltiVec registers. */
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "vrsave", "vscr",
+ /* SPE registers. */
+ "spe_acc", "spefscr",
+ /* Soft frame pointer. */
+ "sfp"
+};
+
+#ifdef TARGET_REGNAMES
+static const char alt_reg_names[][8] =
+{
+ "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+ "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
+ "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
+ "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
+ "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
+ "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
+ "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
+ "mq", "lr", "ctr", "ap",
+ "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
+ "xer",
+ /* AltiVec registers. */
+ "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
+ "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
+ "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
+ "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
+ "vrsave", "vscr",
+ /* SPE registers. */
+ "spe_acc", "spefscr",
+ /* Soft frame pointer. */
+ "sfp"
+};
+#endif
+
+#ifndef MASK_STRICT_ALIGN
+#define MASK_STRICT_ALIGN 0
+#endif
+#ifndef TARGET_PROFILE_KERNEL
+#define TARGET_PROFILE_KERNEL 0
+#endif
+
+/* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
+#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
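+/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000,
+   the %v0 bit, and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is
+   0x00000001, the %v31 bit.  */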
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
+#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
+#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
+
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
+
+/* Default unaligned ops are only provided for ELF. Find the ops needed
+ for non-ELF systems. */
+#ifndef OBJECT_FORMAT_ELF
+#if TARGET_XCOFF
+/* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
+ 64-bit targets. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
+#else
+/* For Darwin. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
+#endif
+#endif
+
+/* This hook deals with fixups for relocatable code and DI-mode objects
+ in 64-bit code. */
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER rs6000_assemble_integer
+
+#ifdef HAVE_GAS_HIDDEN
+#undef TARGET_ASM_ASSEMBLE_VISIBILITY
+#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
+#endif
+
+#undef TARGET_HAVE_TLS
+#define TARGET_HAVE_TLS HAVE_AS_TLS
+
+#undef TARGET_CANNOT_FORCE_CONST_MEM
+#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
+
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
+#undef TARGET_SCHED_ADJUST_PRIORITY
+#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
+#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
+#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT rs6000_sched_init
+#undef TARGET_SCHED_FINISH
+#define TARGET_SCHED_FINISH rs6000_sched_finish
+#undef TARGET_SCHED_REORDER
+#define TARGET_SCHED_REORDER rs6000_sched_reorder
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
+
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
+
+#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
+#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
+
+#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
+#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
+#undef TARGET_SCHED_INIT_SCHED_CONTEXT
+#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
+#undef TARGET_SCHED_SET_SCHED_CONTEXT
+#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
+#undef TARGET_SCHED_FREE_SCHED_CONTEXT
+#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
+
+#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
+#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
+#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
+#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
+#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
+#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
+#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
+#define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion
+#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
+#define TARGET_VECTORIZE_BUILTIN_VEC_PERM rs6000_builtin_vec_perm
+
+#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
+#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS rs6000_init_builtins
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
+
+#undef TARGET_MANGLE_TYPE
+#define TARGET_MANGLE_TYPE rs6000_mangle_type
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
+
+#if TARGET_MACHO
+#undef TARGET_BINDS_LOCAL_P
+#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
+#endif
+
+#undef TARGET_MS_BITFIELD_LAYOUT_P
+#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
+
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
+
+#undef TARGET_INVALID_WITHIN_DOLOOP
+#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS rs6000_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST hook_int_rtx_bool_0
+
+#undef TARGET_VECTOR_OPAQUE_P
+#define TARGET_VECTOR_OPAQUE_P rs6000_is_opaque_type
+
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
+
+#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
+#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
+
+/* On rs6000, function arguments are promoted, as are function return
+ values. */
+#undef TARGET_PROMOTE_FUNCTION_ARGS
+#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
+#undef TARGET_PROMOTE_FUNCTION_RETURN
+#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
+
+/* Always strict argument naming on rs6000. */
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
+#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
+#undef TARGET_SPLIT_COMPLEX_ARG
+#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
+
+#undef TARGET_EXPAND_BUILTIN_VA_START
+#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
+
+#undef TARGET_GIMPLIFY_VA_ARG_EXPR
+#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
+
+#undef TARGET_EH_RETURN_FILTER_MODE
+#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
+
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
+
+#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
+#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
+
+#undef TARGET_HANDLE_OPTION
+#define TARGET_HANDLE_OPTION rs6000_handle_option
+
+#undef TARGET_DEFAULT_TARGET_FLAGS
+#define TARGET_DEFAULT_TARGET_FLAGS \
+ (TARGET_DEFAULT)
+
+#undef TARGET_STACK_PROTECT_FAIL
+#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
+
+/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
+ The PowerPC architecture requires only weak consistency among
+ processors--that is, memory accesses between processors need not be
+ sequentially consistent and memory accesses among processors can occur
+ in any order. The ability to order memory accesses weakly provides
+ opportunities for more efficient use of the system bus. Unless a
+ dependency exists, the 604e allows read operations to precede store
+ operations. */
+#undef TARGET_RELAXED_ORDERING
+#define TARGET_RELAXED_ORDERING true
+
+#ifdef HAVE_AS_TLS
+#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
+#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
+#endif
+
+/* Use a 32-bit anchor range. This leads to sequences like:
+
+ addis tmp,anchor,high
+ add dest,tmp,low
+
+ where tmp itself acts as an anchor, and can be shared between
+ accesses to the same 64k page. */
+#undef TARGET_MIN_ANCHOR_OFFSET
+#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
+#undef TARGET_MAX_ANCHOR_OFFSET
+#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
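+/* The minimum is written as -0x7fffffff - 1 because the literal
+   0x80000000 does not fit in a signed int: -0x80000000 would be unary
+   minus applied to an unsigned value, not the intended most-negative
+   32-bit offset.  */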
+#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
+#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
+
+#undef TARGET_BUILTIN_RECIPROCAL
+#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
+
+#undef TARGET_EXPAND_TO_RTL_HOOK
+#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
+
+#undef TARGET_INSTANTIATE_DECLS
+#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
+
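+/* Every hook #undef'd and redefined above is collected here:
+   TARGET_INITIALIZER, generated in target-def.h, expands to an
+   aggregate initializer that picks up whichever definition of each
+   TARGET_* macro is in scope at this point.  */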
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode
+ MODE. */
+static int
+rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ /* The GPRs can hold any mode, but values bigger than one register
+ cannot go past R31. */
+ if (INT_REGNO_P (regno))
+ return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);
+
+ /* The float registers can only hold floating modes and DImode.
+ This excludes the 32-bit decimal float mode for now. */
+ if (FP_REGNO_P (regno))
+ return
+ ((SCALAR_FLOAT_MODE_P (mode)
+ && (mode != TDmode || (regno % 2) == 0)
+ && FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
+ || (GET_MODE_CLASS (mode) == MODE_INT
+ && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
+ || (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
+ && PAIRED_VECTOR_MODE (mode)));
+
+ /* The CR register can only hold CC modes. */
+ if (CR_REGNO_P (regno))
+ return GET_MODE_CLASS (mode) == MODE_CC;
+
+ if (XER_REGNO_P (regno))
+ return mode == PSImode;
+
+ /* AltiVec modes go only in AltiVec registers.  */
+ if (ALTIVEC_REGNO_P (regno))
+ return ALTIVEC_VECTOR_MODE (mode);
+
+ /* ...but GPRs can hold SIMD data on the SPE in one register. */
+ if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return 1;
+
+ /* We cannot put TImode anywhere except general register and it must be
+ able to fit within the register set. */
+
+ return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
+}
+
+/* Initialize rs6000_hard_regno_mode_ok_p table. */
+static void
+rs6000_init_hard_regno_mode_ok (void)
+{
+ int r, m;
+
+ for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
+ for (m = 0; m < NUM_MACHINE_MODES; ++m)
+ if (rs6000_hard_regno_mode_ok (r, m))
+ rs6000_hard_regno_mode_ok_p[m][r] = true;
+}
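+/* The table is indexed as rs6000_hard_regno_mode_ok_p[mode][regno];
+   the HARD_REGNO_MODE_OK macro in rs6000.h presumably reduces to a
+   lookup in this precomputed table, so the predicate above runs only
+   once per register/mode pair, at initialization time.  */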
+
+#if TARGET_MACHO
+/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
+
+static void
+darwin_rs6000_override_options (void)
+{
+ /* The Darwin ABI always includes AltiVec; it can't be (validly)
+ turned off.  */
+ rs6000_altivec_abi = 1;
+ TARGET_ALTIVEC_VRSAVE = 1;
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ {
+ if (flag_pic)
+ warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
+ flag_pic = 0;
+ }
+ else if (flag_pic == 1)
+ {
+ flag_pic = 2;
+ }
+ }
+ if (TARGET_64BIT && ! TARGET_POWERPC64)
+ {
+ target_flags |= MASK_POWERPC64;
+ warning (0, "-m64 requires PowerPC64 architecture, enabling");
+ }
+ if (flag_mkernel)
+ {
+ rs6000_default_long_calls = 1;
+ target_flags |= MASK_SOFT_FLOAT;
+ }
+
+ /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
+ Altivec. */
+ if (!flag_mkernel && !flag_apple_kext
+ && TARGET_64BIT
+ && ! (target_flags_explicit & MASK_ALTIVEC))
+ target_flags |= MASK_ALTIVEC;
+
+ /* Unless the user (not the configurer) has explicitly overridden
+ it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4
+ unless targeting the kernel.  */
+ if (!flag_mkernel
+ && !flag_apple_kext
+ && strverscmp (darwin_macosx_version_min, "10.5") >= 0
+ && ! (target_flags_explicit & MASK_ALTIVEC)
+ && ! rs6000_select[1].string)
+ {
+ target_flags |= MASK_ALTIVEC;
+ }
+}
+#endif
+
+/* If not otherwise specified by a target, make 'long double' equivalent to
+ 'double'. */
+
+#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
+#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
+#endif
+
+/* Override command line options. Mostly we process the processor
+ type and sometimes adjust other TARGET_ options. */
+
+void
+rs6000_override_options (const char *default_cpu)
+{
+ size_t i, j;
+ struct rs6000_cpu_select *ptr;
+ int set_masks;
+
+ /* Simplifications for entries below. */
+
+ enum {
+ POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
+ POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
+ };
+
+ /* This table occasionally claims that a processor does not support
+ a particular feature even though it does, but the feature is slower
+ than the alternative. Thus, it shouldn't be relied on as a
+ complete description of the processor's support.
+
+ Please keep this list in order, and don't forget to update the
+ documentation in invoke.texi when adding a new processor or
+ flag. */
+ static struct ptt
+ {
+ const char *const name; /* Canonical processor name. */
+ const enum processor_type processor; /* Processor type enum value. */
+ const int target_enable; /* Target flags to enable. */
+ } const processor_target_table[]
+ = {{"401", PROCESSOR_PPC403, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"403", PROCESSOR_PPC403,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_STRICT_ALIGN},
+ {"405", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"405fp", PROCESSOR_PPC405,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"440", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"440fp", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"464", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_SOFT_FLOAT | MASK_MULHW | MASK_DLMZB},
+ {"464fp", PROCESSOR_PPC440,
+ POWERPC_BASE_MASK | MASK_MULHW | MASK_DLMZB},
+ {"505", PROCESSOR_MPCCORE, POWERPC_BASE_MASK},
+ {"601", PROCESSOR_PPC601,
+ MASK_POWER | POWERPC_BASE_MASK | MASK_MULTIPLE | MASK_STRING},
+ {"602", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"603", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"604", PROCESSOR_PPC604, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"604e", PROCESSOR_PPC604e, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"620", PROCESSOR_PPC620,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"630", PROCESSOR_PPC630,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"740", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"7400", PROCESSOR_PPC7400, POWERPC_7400_MASK},
+ {"7450", PROCESSOR_PPC7450, POWERPC_7400_MASK},
+ {"750", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"801", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"821", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"823", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"8540", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
+ /* 8548 has a dummy entry for now. */
+ {"8548", PROCESSOR_PPC8540, POWERPC_BASE_MASK | MASK_STRICT_ALIGN},
+ {"e300c2", PROCESSOR_PPCE300C2, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"e300c3", PROCESSOR_PPCE300C3, POWERPC_BASE_MASK},
+ {"e500mc", PROCESSOR_PPCE500MC, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"860", PROCESSOR_MPCCORE, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"970", PROCESSOR_POWER4,
+ POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"cell", PROCESSOR_CELL,
+ POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS},
+ {"ec603e", PROCESSOR_PPC603, POWERPC_BASE_MASK | MASK_SOFT_FLOAT},
+ {"G3", PROCESSOR_PPC750, POWERPC_BASE_MASK | MASK_PPC_GFXOPT},
+ {"G4", PROCESSOR_PPC7450, POWERPC_7400_MASK},
+ {"G5", PROCESSOR_POWER4,
+ POWERPC_7400_MASK | MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64},
+ {"power", PROCESSOR_POWER, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"power2", PROCESSOR_POWER,
+ MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
+ {"power3", PROCESSOR_PPC630,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"power4", PROCESSOR_POWER4,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
+ | MASK_MFCRF},
+ {"power5", PROCESSOR_POWER5,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB},
+ {"power5+", PROCESSOR_POWER5,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND},
+ {"power6", PROCESSOR_POWER6,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
+ {"power6x", PROCESSOR_POWER6,
+ POWERPC_BASE_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_PPC_GFXOPT
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP
+ | MASK_MFPGPR},
+ {"power7", PROCESSOR_POWER5,
+ POWERPC_7400_MASK | MASK_POWERPC64 | MASK_PPC_GPOPT | MASK_MFCRF
+ | MASK_POPCNTB | MASK_FPRND | MASK_CMPB | MASK_DFP},
+ {"powerpc", PROCESSOR_POWERPC, POWERPC_BASE_MASK},
+ {"powerpc64", PROCESSOR_POWERPC64,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64},
+ {"rios", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rios1", PROCESSOR_RIOS1, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rios2", PROCESSOR_RIOS2,
+ MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING},
+ {"rsc", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rsc1", PROCESSOR_PPC601, MASK_POWER | MASK_MULTIPLE | MASK_STRING},
+ {"rs64", PROCESSOR_RS64A,
+ POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_POWERPC64}
+ };
+
+ const size_t ptt_size = ARRAY_SIZE (processor_target_table);
+
+ /* Some OSs don't support saving the high part of 64-bit registers on
+ context switch. Other OSs don't support saving Altivec registers.
+ On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
+ settings; if the user wants either, the user must explicitly specify
+ them and we won't interfere with the user's specification. */
+
+ enum {
+ POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
+ POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
+ | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
+ | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
+ | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP)
+ };
+
+ set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
+#ifdef OS_MISSING_POWERPC64
+ if (OS_MISSING_POWERPC64)
+ set_masks &= ~MASK_POWERPC64;
+#endif
+#ifdef OS_MISSING_ALTIVEC
+ if (OS_MISSING_ALTIVEC)
+ set_masks &= ~MASK_ALTIVEC;
+#endif
+
+ /* Don't override by the processor default if given explicitly. */
+ set_masks &= ~target_flags_explicit;
+
+ /* Identify the processor type. */
+ rs6000_select[0].string = default_cpu;
+ rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
+
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ {
+ ptr = &rs6000_select[i];
+ if (ptr->string != (char *)0 && ptr->string[0] != '\0')
+ {
+ for (j = 0; j < ptt_size; j++)
+ if (! strcmp (ptr->string, processor_target_table[j].name))
+ {
+ if (ptr->set_tune_p)
+ rs6000_cpu = processor_target_table[j].processor;
+
+ if (ptr->set_arch_p)
+ {
+ target_flags &= ~set_masks;
+ target_flags |= (processor_target_table[j].target_enable
+ & set_masks);
+ }
+ break;
+ }
+
+ if (j == ptt_size)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ if ((TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
+ && !rs6000_explicit_options.isel)
+ rs6000_isel = 1;
+
+ if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
+ || rs6000_cpu == PROCESSOR_PPCE500MC)
+ {
+ if (TARGET_ALTIVEC)
+ error ("AltiVec not supported in this target");
+ if (TARGET_SPE)
+ error ("Spe not supported in this target");
+ }
+
+ /* Disable Cell microcode if we are optimizing for the Cell
+ and not optimizing for size. */
+ if (rs6000_gen_cell_microcode == -1)
+ rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
+ && !optimize_size);
+
+ /* If we are optimizing big endian systems for space, use the
+ load/store multiple and string instructions, but only when we are
+ not generating Cell microcode.  */
+ if (BYTES_BIG_ENDIAN && optimize_size && !rs6000_gen_cell_microcode)
+ target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
+
+  /* Don't allow -mmultiple or -mstring on little endian systems
+     unless the cpu is a 750, because the hardware doesn't support
+     the instructions used in little endian mode, and they cause an
+     alignment trap.  The 750 does not cause an alignment trap
+     (except when the target is unaligned).  */
+
+ if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
+ {
+ if (TARGET_MULTIPLE)
+ {
+ target_flags &= ~MASK_MULTIPLE;
+ if ((target_flags_explicit & MASK_MULTIPLE) != 0)
+ warning (0, "-mmultiple is not supported on little endian systems");
+ }
+
+ if (TARGET_STRING)
+ {
+ target_flags &= ~MASK_STRING;
+ if ((target_flags_explicit & MASK_STRING) != 0)
+ warning (0, "-mstring is not supported on little endian systems");
+ }
+ }
+
+ /* Set debug flags */
+ if (rs6000_debug_name)
+ {
+ if (! strcmp (rs6000_debug_name, "all"))
+ rs6000_debug_stack = rs6000_debug_arg = 1;
+ else if (! strcmp (rs6000_debug_name, "stack"))
+ rs6000_debug_stack = 1;
+ else if (! strcmp (rs6000_debug_name, "arg"))
+ rs6000_debug_arg = 1;
+ else
+ error ("unknown -mdebug-%s switch", rs6000_debug_name);
+ }
+
+ if (rs6000_traceback_name)
+ {
+ if (! strncmp (rs6000_traceback_name, "full", 4))
+ rs6000_traceback = traceback_full;
+ else if (! strncmp (rs6000_traceback_name, "part", 4))
+ rs6000_traceback = traceback_part;
+ else if (! strncmp (rs6000_traceback_name, "no", 2))
+ rs6000_traceback = traceback_none;
+ else
+ error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
+ rs6000_traceback_name);
+ }
+
+ if (!rs6000_explicit_options.long_double)
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+
+#ifndef POWERPC_LINUX
+ if (!rs6000_explicit_options.ieee)
+ rs6000_ieeequad = 1;
+#endif
+
+  /* Enable the AltiVec ABI for AIX -maltivec.  */
+ if (TARGET_XCOFF && TARGET_ALTIVEC)
+ rs6000_altivec_abi = 1;
+
+ /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
+ PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
+ be explicitly overridden in either case. */
+ if (TARGET_ELF)
+ {
+ if (!rs6000_explicit_options.altivec_abi
+ && (TARGET_64BIT || TARGET_ALTIVEC))
+ rs6000_altivec_abi = 1;
+
+ /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden. */
+ if (!rs6000_explicit_options.vrsave)
+ TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
+ }
+
+ /* Set the Darwin64 ABI as default for 64-bit Darwin. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ {
+ rs6000_darwin64_abi = 1;
+#if TARGET_MACHO
+ darwin_one_byte_bool = 1;
+#endif
+ /* Default to natural alignment, for better performance. */
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ }
+
+  /* Place FP constants in the constant pool instead of the TOC
+     if section anchors are enabled.  */
+ if (flag_section_anchors)
+ TARGET_NO_FP_IN_TOC = 1;
+
+ /* Handle -mtls-size option. */
+ rs6000_parse_tls_size_option ();
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
+ SUBSUBTARGET_OVERRIDE_OPTIONS;
+#endif
+#ifdef SUB3TARGET_OVERRIDE_OPTIONS
+ SUB3TARGET_OVERRIDE_OPTIONS;
+#endif
+
+ if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
+ {
+ /* The e500 and e500mc do not have string instructions, and we set
+ MASK_STRING above when optimizing for size. */
+ if ((target_flags & MASK_STRING) != 0)
+ target_flags = target_flags & ~MASK_STRING;
+ }
+ else if (rs6000_select[1].string != NULL)
+ {
+ /* For the powerpc-eabispe configuration, we set all these by
+ default, so let's unset them if we manually set another
+ CPU that is not the E500. */
+ if (!rs6000_explicit_options.spe_abi)
+ rs6000_spe_abi = 0;
+ if (!rs6000_explicit_options.spe)
+ rs6000_spe = 0;
+ if (!rs6000_explicit_options.float_gprs)
+ rs6000_float_gprs = 0;
+ if (!rs6000_explicit_options.isel)
+ rs6000_isel = 0;
+ }
+
+ /* Detect invalid option combinations with E500. */
+ CHECK_E500_OPTIONS;
+
+ rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
+ && rs6000_cpu != PROCESSOR_POWER5
+ && rs6000_cpu != PROCESSOR_POWER6
+ && rs6000_cpu != PROCESSOR_CELL);
+ rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
+ || rs6000_cpu == PROCESSOR_POWER5);
+ rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
+ || rs6000_cpu == PROCESSOR_POWER5
+ || rs6000_cpu == PROCESSOR_POWER6);
+
+ rs6000_sched_restricted_insns_priority
+ = (rs6000_sched_groups ? 1 : 0);
+
+ /* Handle -msched-costly-dep option. */
+ rs6000_sched_costly_dep
+ = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);
+
+ if (rs6000_sched_costly_dep_str)
+ {
+ if (! strcmp (rs6000_sched_costly_dep_str, "no"))
+ rs6000_sched_costly_dep = no_dep_costly;
+ else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
+ rs6000_sched_costly_dep = all_deps_costly;
+ else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
+ rs6000_sched_costly_dep = true_store_to_load_dep_costly;
+ else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
+ rs6000_sched_costly_dep = store_to_load_dep_costly;
+ else
+ rs6000_sched_costly_dep = atoi (rs6000_sched_costly_dep_str);
+ }
+
+ /* Handle -minsert-sched-nops option. */
+ rs6000_sched_insert_nops
+ = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
+
+ if (rs6000_sched_insert_nops_str)
+ {
+ if (! strcmp (rs6000_sched_insert_nops_str, "no"))
+ rs6000_sched_insert_nops = sched_finish_none;
+ else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
+ rs6000_sched_insert_nops = sched_finish_pad_groups;
+ else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
+ rs6000_sched_insert_nops = sched_finish_regroup_exact;
+ else
+ rs6000_sched_insert_nops = atoi (rs6000_sched_insert_nops_str);
+ }
+
+#ifdef TARGET_REGNAMES
+ /* If the user desires alternate register names, copy in the
+ alternate names now. */
+ if (TARGET_REGNAMES)
+ memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
+#endif
+
+ /* Set aix_struct_return last, after the ABI is determined.
+ If -maix-struct-return or -msvr4-struct-return was explicitly
+ used, don't override with the ABI default. */
+ if (!rs6000_explicit_options.aix_struct_ret)
+ aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
+
+ if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
+ REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
+
+ if (TARGET_TOC)
+ ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
+
+ /* We can only guarantee the availability of DI pseudo-ops when
+ assembling for 64-bit targets. */
+ if (!TARGET_64BIT)
+ {
+ targetm.asm_out.aligned_op.di = NULL;
+ targetm.asm_out.unaligned_op.di = NULL;
+ }
+
+ /* Set branch target alignment, if not optimizing for size. */
+ if (!optimize_size)
+ {
+      /* Cell wants to be aligned to 8 bytes for dual issue.  */
+ if (rs6000_cpu == PROCESSOR_CELL)
+ {
+ if (align_functions <= 0)
+ align_functions = 8;
+ if (align_jumps <= 0)
+ align_jumps = 8;
+ if (align_loops <= 0)
+ align_loops = 8;
+ }
+ if (rs6000_align_branch_targets)
+ {
+ if (align_functions <= 0)
+ align_functions = 16;
+ if (align_jumps <= 0)
+ align_jumps = 16;
+ if (align_loops <= 0)
+ align_loops = 16;
+ }
+ if (align_jumps_max_skip <= 0)
+ align_jumps_max_skip = 15;
+ if (align_loops_max_skip <= 0)
+ align_loops_max_skip = 15;
+ }
+
+ /* Arrange to save and restore machine status around nested functions. */
+ init_machine_status = rs6000_init_machine_status;
+
+ /* We should always be splitting complex arguments, but we can't break
+ Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
+ if (DEFAULT_ABI != ABI_AIX)
+ targetm.calls.split_complex_arg = NULL;
+
+ /* Initialize rs6000_cost with the appropriate target costs. */
+ if (optimize_size)
+ rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
+ else
+ switch (rs6000_cpu)
+ {
+ case PROCESSOR_RIOS1:
+ rs6000_cost = &rios1_cost;
+ break;
+
+ case PROCESSOR_RIOS2:
+ rs6000_cost = &rios2_cost;
+ break;
+
+ case PROCESSOR_RS64A:
+ rs6000_cost = &rs64a_cost;
+ break;
+
+ case PROCESSOR_MPCCORE:
+ rs6000_cost = &mpccore_cost;
+ break;
+
+ case PROCESSOR_PPC403:
+ rs6000_cost = &ppc403_cost;
+ break;
+
+ case PROCESSOR_PPC405:
+ rs6000_cost = &ppc405_cost;
+ break;
+
+ case PROCESSOR_PPC440:
+ rs6000_cost = &ppc440_cost;
+ break;
+
+ case PROCESSOR_PPC601:
+ rs6000_cost = &ppc601_cost;
+ break;
+
+ case PROCESSOR_PPC603:
+ rs6000_cost = &ppc603_cost;
+ break;
+
+ case PROCESSOR_PPC604:
+ rs6000_cost = &ppc604_cost;
+ break;
+
+ case PROCESSOR_PPC604e:
+ rs6000_cost = &ppc604e_cost;
+ break;
+
+ case PROCESSOR_PPC620:
+ rs6000_cost = &ppc620_cost;
+ break;
+
+ case PROCESSOR_PPC630:
+ rs6000_cost = &ppc630_cost;
+ break;
+
+ case PROCESSOR_CELL:
+ rs6000_cost = &ppccell_cost;
+ break;
+
+ case PROCESSOR_PPC750:
+ case PROCESSOR_PPC7400:
+ rs6000_cost = &ppc750_cost;
+ break;
+
+ case PROCESSOR_PPC7450:
+ rs6000_cost = &ppc7450_cost;
+ break;
+
+ case PROCESSOR_PPC8540:
+ rs6000_cost = &ppc8540_cost;
+ break;
+
+ case PROCESSOR_PPCE300C2:
+ case PROCESSOR_PPCE300C3:
+ rs6000_cost = &ppce300c2c3_cost;
+ break;
+
+ case PROCESSOR_PPCE500MC:
+ rs6000_cost = &ppce500mc_cost;
+ break;
+
+ case PROCESSOR_POWER4:
+ case PROCESSOR_POWER5:
+ rs6000_cost = &power4_cost;
+ break;
+
+ case PROCESSOR_POWER6:
+ rs6000_cost = &power6_cost;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
+ set_param_value ("simultaneous-prefetches",
+ rs6000_cost->simultaneous_prefetches);
+ if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
+ set_param_value ("l1-cache-size", rs6000_cost->l1_cache_size);
+ if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
+ set_param_value ("l1-cache-line-size", rs6000_cost->cache_line_size);
+ if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
+ set_param_value ("l2-cache-size", rs6000_cost->l2_cache_size);
+
+ /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
+ can be optimized to ap = __builtin_next_arg (0). */
+ if (DEFAULT_ABI != ABI_V4)
+ targetm.expand_builtin_va_start = NULL;
+
+  /* Set up single/double float flags.
+     If TARGET_HARD_FLOAT is set, but neither single nor double is set,
+     then set both flags.  */
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && rs6000_single_float == 0 && rs6000_double_float == 0)
+ rs6000_single_float = rs6000_double_float = 1;
+
+ /* Reset single and double FP flags if target is E500. */
+ if (TARGET_E500)
+ {
+ rs6000_single_float = rs6000_double_float = 0;
+ if (TARGET_E500_SINGLE)
+ rs6000_single_float = 1;
+ if (TARGET_E500_DOUBLE)
+ rs6000_single_float = rs6000_double_float = 1;
+ }
+
+ /* If not explicitly specified via option, decide whether to generate indexed
+ load/store instructions. */
+ if (TARGET_AVOID_XFORM == -1)
+ /* Avoid indexed addressing when targeting Power6 in order to avoid
+ the DERAT mispredict penalty. */
+ TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB);
+
+ rs6000_init_hard_regno_mode_ok ();
+}
+
+/* Implement targetm.vectorize.builtin_mask_for_load. */
+static tree
+rs6000_builtin_mask_for_load (void)
+{
+ if (TARGET_ALTIVEC)
+ return altivec_builtin_mask_for_load;
+ else
+ return 0;
+}
+
+/* Implement targetm.vectorize.builtin_conversion.
+ Returns a decl of a function that implements conversion of an integer vector
+ into a floating-point vector, or vice-versa. TYPE is the type of the integer
+ side of the conversion.
+ Return NULL_TREE if it is not available. */
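+/* For example, a FLOAT_EXPR whose integer side is V4SImode maps to the
+   vcfsx builtin (vcfux for unsigned elements); the reverse FIX_TRUNC_EXPR
+   maps to vctsxs (vctuxs for unsigned), as selected below.  */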
+static tree
+rs6000_builtin_conversion (enum tree_code code, tree type)
+{
+ if (!TARGET_ALTIVEC)
+ return NULL_TREE;
+
+ switch (code)
+ {
+ case FIX_TRUNC_EXPR:
+ switch (TYPE_MODE (type))
+ {
+ case V4SImode:
+ return TYPE_UNSIGNED (type)
+ ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTUXS]
+ : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTSXS];
+ default:
+ return NULL_TREE;
+ }
+
+ case FLOAT_EXPR:
+ switch (TYPE_MODE (type))
+ {
+ case V4SImode:
+ return TYPE_UNSIGNED (type)
+ ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFUX]
+ : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFSX];
+ default:
+ return NULL_TREE;
+ }
+
+ default:
+ return NULL_TREE;
+ }
+}
+
+/* Implement targetm.vectorize.builtin_mul_widen_even. */
+static tree
+rs6000_builtin_mul_widen_even (tree type)
+{
+ if (!TARGET_ALTIVEC)
+ return NULL_TREE;
+
+ switch (TYPE_MODE (type))
+ {
+ case V8HImode:
+ return TYPE_UNSIGNED (type)
+ ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH]
+ : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];
+
+ case V16QImode:
+ return TYPE_UNSIGNED (type)
+ ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB]
+ : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];
+ default:
+ return NULL_TREE;
+ }
+}
+
+/* Implement targetm.vectorize.builtin_mul_widen_odd. */
+static tree
+rs6000_builtin_mul_widen_odd (tree type)
+{
+ if (!TARGET_ALTIVEC)
+ return NULL_TREE;
+
+ switch (TYPE_MODE (type))
+ {
+ case V8HImode:
+ return TYPE_UNSIGNED (type)
+ ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH]
+ : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];
+
+ case V16QImode:
+ return TYPE_UNSIGNED (type)
+ ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB]
+ : rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];
+ default:
+ return NULL_TREE;
+ }
+}
+
+
+/* Return true iff a data reference of TYPE can reach vector alignment (16)
+   after applying N iterations.  This routine does not determine how many
+   iterations are required to reach the desired alignment.  */
+
+static bool
+rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
+{
+ if (is_packed)
+ return false;
+
+ if (TARGET_32BIT)
+ {
+ if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
+ return true;
+
+ if (rs6000_alignment_flags == MASK_ALIGN_POWER)
+ return true;
+
+ return false;
+ }
+ else
+ {
+ if (TARGET_MACHO)
+ return false;
+
+      /* Assume that all other types are naturally aligned.  CHECKME!  */
+ return true;
+ }
+}
+
+/* Implement targetm.vectorize.builtin_vec_perm. */
+tree
+rs6000_builtin_vec_perm (tree type, tree *mask_element_type)
+{
+ tree d;
+
+ *mask_element_type = unsigned_char_type_node;
+
+ switch (TYPE_MODE (type))
+ {
+ case V16QImode:
+ d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_16QI];
+ break;
+
+ case V8HImode:
+ d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_8HI];
+ break;
+
+ case V4SImode:
+ d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SI];
+ break;
+
+ case V4SFmode:
+ d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SF];
+ break;
+
+ default:
+ return NULL_TREE;
+ }
+
+ gcc_assert (d);
+ return d;
+}
+
+/* Handle generic options of the form -mfoo=yes/no.
+ NAME is the option name.
+ VALUE is the option value.
+   FLAG is a pointer to the flag in which to store 1 or 0, depending on
+   whether the option value is 'yes' or 'no' respectively.  */
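+/* E.g. -mvrsave=yes reaches here via OPT_mvrsave_ with NAME "vrsave" and
+   sets TARGET_ALTIVEC_VRSAVE to 1; -mvrsave=no clears it.  */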
+static void
+rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
+{
+ if (value == 0)
+ return;
+ else if (!strcmp (value, "yes"))
+ *flag = 1;
+ else if (!strcmp (value, "no"))
+ *flag = 0;
+ else
+ error ("unknown -m%s= option specified: '%s'", name, value);
+}
+
+/* Validate and record the size specified with the -mtls-size option. */
+
+static void
+rs6000_parse_tls_size_option (void)
+{
+ if (rs6000_tls_size_string == 0)
+ return;
+ else if (strcmp (rs6000_tls_size_string, "16") == 0)
+ rs6000_tls_size = 16;
+ else if (strcmp (rs6000_tls_size_string, "32") == 0)
+ rs6000_tls_size = 32;
+ else if (strcmp (rs6000_tls_size_string, "64") == 0)
+ rs6000_tls_size = 64;
+ else
+ error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
+}
+
+void
+optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_DARWIN)
+ /* The Darwin libraries never set errno, so we might as well
+ avoid calling them when that's the only reason we would. */
+ flag_errno_math = 0;
+
+ /* Double growth factor to counter reduced min jump length. */
+ set_param_value ("max-grow-copy-bb-insns", 16);
+
+  /* Enable section anchors by default.
+     Skip section anchors for Objective C and Objective C++ until the
+     front ends are fixed: lang_hooks.name is "GNU C", "GNU Objective-C",
+     etc., so name[4] == 'O' identifies the Objective-C front ends.  */
+ if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
+ flag_section_anchors = 2;
+}
+
+static enum fpu_type_t
+rs6000_parse_fpu_option (const char *option)
+{
+  if (!strcmp ("none", option)) return FPU_NONE;
+  if (!strcmp ("sp_lite", option)) return FPU_SF_LITE;
+  if (!strcmp ("dp_lite", option)) return FPU_DF_LITE;
+  if (!strcmp ("sp_full", option)) return FPU_SF_FULL;
+  if (!strcmp ("dp_full", option)) return FPU_DF_FULL;
+  error ("unknown value %s for -mfpu", option);
+ return FPU_NONE;
+}
+
+/* Implement TARGET_HANDLE_OPTION. */
+
+static bool
+rs6000_handle_option (size_t code, const char *arg, int value)
+{
+ enum fpu_type_t fpu_type = FPU_NONE;
+
+ switch (code)
+ {
+ case OPT_mno_power:
+ target_flags &= ~(MASK_POWER | MASK_POWER2
+ | MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_POWER | MASK_POWER2
+ | MASK_MULTIPLE | MASK_STRING);
+ break;
+ case OPT_mno_powerpc:
+ target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
+ | MASK_PPC_GFXOPT | MASK_POWERPC64);
+ target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
+ | MASK_PPC_GFXOPT | MASK_POWERPC64);
+ break;
+ case OPT_mfull_toc:
+ target_flags &= ~MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+#ifdef TARGET_USES_SYSV4_OPT
+      /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc behave
+	 just the same as -mminimal-toc.  */
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+#endif
+ break;
+
+#ifdef TARGET_USES_SYSV4_OPT
+ case OPT_mtoc:
+ /* Make -mtoc behave like -mminimal-toc. */
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+ break;
+#endif
+
+#ifdef TARGET_USES_AIX64_OPT
+ case OPT_maix64:
+#else
+ case OPT_m64:
+#endif
+ target_flags |= MASK_POWERPC64 | MASK_POWERPC;
+ target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
+ target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
+ break;
+
+#ifdef TARGET_USES_AIX64_OPT
+ case OPT_maix32:
+#else
+ case OPT_m32:
+#endif
+ target_flags &= ~MASK_POWERPC64;
+ target_flags_explicit |= MASK_POWERPC64;
+ break;
+
+ case OPT_minsert_sched_nops_:
+ rs6000_sched_insert_nops_str = arg;
+ break;
+
+ case OPT_mminimal_toc:
+ if (value == 1)
+ {
+ TARGET_NO_FP_IN_TOC = 0;
+ TARGET_NO_SUM_IN_TOC = 0;
+ }
+ break;
+
+ case OPT_mpower:
+ if (value == 1)
+ {
+ target_flags |= (MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
+ }
+ break;
+
+ case OPT_mpower2:
+ if (value == 1)
+ {
+ target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
+ target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
+ }
+ break;
+
+ case OPT_mpowerpc_gpopt:
+ case OPT_mpowerpc_gfxopt:
+ if (value == 1)
+ {
+ target_flags |= MASK_POWERPC;
+ target_flags_explicit |= MASK_POWERPC;
+ }
+ break;
+
+ case OPT_maix_struct_return:
+ case OPT_msvr4_struct_return:
+ rs6000_explicit_options.aix_struct_ret = true;
+ break;
+
+ case OPT_mvrsave_:
+ rs6000_explicit_options.vrsave = true;
+ rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
+ break;
+
+ case OPT_misel:
+ rs6000_explicit_options.isel = true;
+ rs6000_isel = value;
+ break;
+
+ case OPT_misel_:
+ rs6000_explicit_options.isel = true;
+ rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
+ break;
+
+ case OPT_mspe:
+ rs6000_explicit_options.spe = true;
+ rs6000_spe = value;
+ break;
+
+ case OPT_mspe_:
+ rs6000_explicit_options.spe = true;
+ rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
+ break;
+
+ case OPT_mdebug_:
+ rs6000_debug_name = arg;
+ break;
+
+#ifdef TARGET_USES_SYSV4_OPT
+ case OPT_mcall_:
+ rs6000_abi_name = arg;
+ break;
+
+ case OPT_msdata_:
+ rs6000_sdata_name = arg;
+ break;
+
+ case OPT_mtls_size_:
+ rs6000_tls_size_string = arg;
+ break;
+
+ case OPT_mrelocatable:
+ if (value == 1)
+ {
+ target_flags |= MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 1;
+ }
+ break;
+
+ case OPT_mrelocatable_lib:
+ if (value == 1)
+ {
+ target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
+ target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
+ TARGET_NO_FP_IN_TOC = 1;
+ }
+ else
+ {
+ target_flags &= ~MASK_RELOCATABLE;
+ target_flags_explicit |= MASK_RELOCATABLE;
+ }
+ break;
+#endif
+
+ case OPT_mabi_:
+ if (!strcmp (arg, "altivec"))
+ {
+ rs6000_explicit_options.altivec_abi = true;
+ rs6000_altivec_abi = 1;
+
+ /* Enabling the AltiVec ABI turns off the SPE ABI. */
+ rs6000_spe_abi = 0;
+ }
+ else if (! strcmp (arg, "no-altivec"))
+ {
+ rs6000_explicit_options.altivec_abi = true;
+ rs6000_altivec_abi = 0;
+ }
+ else if (! strcmp (arg, "spe"))
+ {
+ rs6000_explicit_options.spe_abi = true;
+ rs6000_spe_abi = 1;
+ rs6000_altivec_abi = 0;
+ if (!TARGET_SPE_ABI)
+ error ("not configured for ABI: '%s'", arg);
+ }
+ else if (! strcmp (arg, "no-spe"))
+ {
+ rs6000_explicit_options.spe_abi = true;
+ rs6000_spe_abi = 0;
+ }
+
+ /* These are here for testing during development only, do not
+ document in the manual please. */
+ else if (! strcmp (arg, "d64"))
+ {
+ rs6000_darwin64_abi = 1;
+ warning (0, "Using darwin64 ABI");
+ }
+ else if (! strcmp (arg, "d32"))
+ {
+ rs6000_darwin64_abi = 0;
+ warning (0, "Using old darwin ABI");
+ }
+
+ else if (! strcmp (arg, "ibmlongdouble"))
+ {
+ rs6000_explicit_options.ieee = true;
+ rs6000_ieeequad = 0;
+ warning (0, "Using IBM extended precision long double");
+ }
+ else if (! strcmp (arg, "ieeelongdouble"))
+ {
+ rs6000_explicit_options.ieee = true;
+ rs6000_ieeequad = 1;
+ warning (0, "Using IEEE extended precision long double");
+ }
+
+ else
+ {
+ error ("unknown ABI specified: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_mcpu_:
+ rs6000_select[1].string = arg;
+ break;
+
+ case OPT_mtune_:
+ rs6000_select[2].string = arg;
+ break;
+
+ case OPT_mtraceback_:
+ rs6000_traceback_name = arg;
+ break;
+
+ case OPT_mfloat_gprs_:
+ rs6000_explicit_options.float_gprs = true;
+ if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
+ rs6000_float_gprs = 1;
+ else if (! strcmp (arg, "double"))
+ rs6000_float_gprs = 2;
+ else if (! strcmp (arg, "no"))
+ rs6000_float_gprs = 0;
+ else
+ {
+ error ("invalid option for -mfloat-gprs: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_mlong_double_:
+ rs6000_explicit_options.long_double = true;
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ if (value != 64 && value != 128)
+ {
+	  error ("unknown switch -mlong-double-%s", arg);
+ rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
+ return false;
+ }
+ else
+ rs6000_long_double_type_size = value;
+ break;
+
+ case OPT_msched_costly_dep_:
+ rs6000_sched_costly_dep_str = arg;
+ break;
+
+ case OPT_malign_:
+ rs6000_explicit_options.alignment = true;
+ if (! strcmp (arg, "power"))
+ {
+ /* On 64-bit Darwin, power alignment is ABI-incompatible with
+ some C library functions, so warn about it. The flag may be
+ useful for performance studies from time to time though, so
+ don't disable it entirely. */
+ if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
+ warning (0, "-malign-power is not supported for 64-bit Darwin;"
+ " it is incompatible with the installed C and C++ libraries");
+ rs6000_alignment_flags = MASK_ALIGN_POWER;
+ }
+ else if (! strcmp (arg, "natural"))
+ rs6000_alignment_flags = MASK_ALIGN_NATURAL;
+ else
+ {
+ error ("unknown -malign-XXXXX option specified: '%s'", arg);
+ return false;
+ }
+ break;
+
+ case OPT_msingle_float:
+ if (!TARGET_SINGLE_FPU)
+ warning (0, "-msingle-float option equivalent to -mhard-float");
+ /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT. */
+ rs6000_double_float = 0;
+ target_flags &= ~MASK_SOFT_FLOAT;
+ target_flags_explicit |= MASK_SOFT_FLOAT;
+ break;
+
+ case OPT_mdouble_float:
+ /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT. */
+ rs6000_single_float = 1;
+ target_flags &= ~MASK_SOFT_FLOAT;
+ target_flags_explicit |= MASK_SOFT_FLOAT;
+ break;
+
+ case OPT_msimple_fpu:
+ if (!TARGET_SINGLE_FPU)
+ warning (0, "-msimple-fpu option ignored");
+ break;
+
+ case OPT_mhard_float:
+      /* -mhard-float implies -msingle-float and -mdouble-float.  */
+ rs6000_single_float = rs6000_double_float = 1;
+ break;
+
+ case OPT_msoft_float:
+      /* -msoft-float implies -mno-single-float and -mno-double-float.  */
+ rs6000_single_float = rs6000_double_float = 0;
+ break;
+
+ case OPT_mfpu_:
+ fpu_type = rs6000_parse_fpu_option(arg);
+ if (fpu_type != FPU_NONE)
+ /* If -mfpu is not none, then turn off SOFT_FLOAT, turn on HARD_FLOAT. */
+ {
+ target_flags &= ~MASK_SOFT_FLOAT;
+ target_flags_explicit |= MASK_SOFT_FLOAT;
+ rs6000_xilinx_fpu = 1;
+ if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
+ rs6000_single_float = 1;
+ if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
+ rs6000_single_float = rs6000_double_float = 1;
+ if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
+ rs6000_simple_fpu = 1;
+ }
+ else
+ {
+ /* -mfpu=none is equivalent to -msoft-float */
+ target_flags |= MASK_SOFT_FLOAT;
+ target_flags_explicit |= MASK_SOFT_FLOAT;
+ rs6000_single_float = rs6000_double_float = 0;
+ }
+ break;
+ }
+ return true;
+}
+
+/* Do anything needed at the start of the asm file. */
+
+static void
+rs6000_file_start (void)
+{
+ size_t i;
+ char buffer[80];
+ const char *start = buffer;
+ struct rs6000_cpu_select *ptr;
+ const char *default_cpu = TARGET_CPU_DEFAULT;
+ FILE *file = asm_out_file;
+
+ default_file_start ();
+
+#ifdef TARGET_BI_ARCH
+ if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
+ default_cpu = 0;
+#endif
+
+ if (flag_verbose_asm)
+ {
+ sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
+ rs6000_select[0].string = default_cpu;
+
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ {
+ ptr = &rs6000_select[i];
+ if (ptr->string != (char *)0 && ptr->string[0] != '\0')
+ {
+ fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
+ start = "";
+ }
+ }
+
+ if (PPC405_ERRATUM77)
+ {
+ fprintf (file, "%s PPC405CR_ERRATUM77", start);
+ start = "";
+ }
+
+#ifdef USING_ELFOS_H
+ switch (rs6000_sdata)
+ {
+ case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
+ case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
+ case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
+ case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
+ }
+
+ if (rs6000_sdata && g_switch_value)
+ {
+ fprintf (file, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED, start,
+ g_switch_value);
+ start = "";
+ }
+#endif
+
+ if (*start == '\0')
+ putc ('\n', file);
+ }
+
+#ifdef HAVE_AS_GNU_ATTRIBUTE
+ if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
+ {
+ fprintf (file, "\t.gnu_attribute 4, %d\n",
+ ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
+ : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
+ : 2));
+ fprintf (file, "\t.gnu_attribute 8, %d\n",
+ (TARGET_ALTIVEC_ABI ? 2
+ : TARGET_SPE_ABI ? 3
+ : 1));
+ fprintf (file, "\t.gnu_attribute 12, %d\n",
+ aix_struct_return ? 2 : 1);
+
+ }
+#endif
+
+ if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
+ {
+ switch_to_section (toc_section);
+ switch_to_section (text_section);
+ }
+}
+
+
+/* Return nonzero if this function is known to have a null epilogue. */
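+/* That is, after reload, a function that saves no GPRs, FPRs or AltiVec
+   registers, does not save LR or CR, has an empty VRSAVE mask and pushes
+   no stack frame needs no epilogue code before its blr.  */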
+
+int
+direct_return (void)
+{
+ if (reload_completed)
+ {
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (info->first_gp_reg_save == 32
+ && info->first_fp_reg_save == 64
+ && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
+ && ! info->lr_save_p
+ && ! info->cr_save_p
+ && info->vrsave_mask == 0
+ && ! info->push_p)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return the number of instructions it takes to form a constant in an
+ integer register. */
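+/* For example, any value in [-0x8000, 0x7fff] loads with a single li/addi;
+   0x12340000 loads with a single lis/addis; 0x12345678 needs lis followed
+   by ori, i.e. two instructions; and on a 64-bit target 0x100000000 takes
+   li plus a shift, again two instructions.  */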
+
+int
+num_insns_constant_wide (HOST_WIDE_INT value)
+{
+ /* signed constant loadable with {cal|addi} */
+ if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
+ return 1;
+
+ /* constant loadable with {cau|addis} */
+ else if ((value & 0xffff) == 0
+ && (value >> 31 == -1 || value >> 31 == 0))
+ return 1;
+
+#if HOST_BITS_PER_WIDE_INT == 64
+ else if (TARGET_POWERPC64)
+ {
+ HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ HOST_WIDE_INT high = value >> 31;
+
+ if (high == 0 || high == -1)
+ return 2;
+
+ high >>= 1;
+
+ if (low == 0)
+ return num_insns_constant_wide (high) + 1;
+ else
+ return (num_insns_constant_wide (high)
+ + num_insns_constant_wide (low) + 1);
+ }
+#endif
+
+ else
+ return 2;
+}
+
+int
+num_insns_constant (rtx op, enum machine_mode mode)
+{
+ HOST_WIDE_INT low, high;
+
+ switch (GET_CODE (op))
+ {
+ case CONST_INT:
+#if HOST_BITS_PER_WIDE_INT == 64
+ if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
+ && mask64_operand (op, mode))
+ return 2;
+ else
+#endif
+ return num_insns_constant_wide (INTVAL (op));
+
+ case CONST_DOUBLE:
+ if (mode == SFmode || mode == SDmode)
+ {
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
+ else
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+ return num_insns_constant_wide ((HOST_WIDE_INT) l);
+ }
+
+ if (mode == VOIDmode || mode == DImode)
+ {
+ high = CONST_DOUBLE_HIGH (op);
+ low = CONST_DOUBLE_LOW (op);
+ }
+ else
+ {
+ long l[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
+ else
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+ high = l[WORDS_BIG_ENDIAN == 0];
+ low = l[WORDS_BIG_ENDIAN != 0];
+ }
+
+ if (TARGET_32BIT)
+ return (num_insns_constant_wide (low)
+ + num_insns_constant_wide (high));
+ else
+ {
+ if ((high == 0 && low >= 0)
+ || (high == -1 && low < 0))
+ return num_insns_constant_wide (low);
+
+ else if (mask64_operand (op, mode))
+ return 2;
+
+ else if (low == 0)
+ return num_insns_constant_wide (high) + 1;
+
+ else
+ return (num_insns_constant_wide (high)
+ + num_insns_constant_wide (low) + 1);
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
+ If the mode of OP is MODE_VECTOR_INT, this simply returns the
+ corresponding element of the vector, but for V4SFmode and V2SFmode,
+ the corresponding "float" is interpreted as an SImode integer. */
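+/* E.g. a V4SFmode element holding 1.0f is returned as 0x3f800000, its
+   IEEE single-precision bit pattern.  */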
+
+HOST_WIDE_INT
+const_vector_elt_as_int (rtx op, unsigned int elt)
+{
+ rtx tmp = CONST_VECTOR_ELT (op, elt);
+ if (GET_MODE (op) == V4SFmode
+ || GET_MODE (op) == V2SFmode)
+ tmp = gen_lowpart (SImode, tmp);
+ return INTVAL (tmp);
+}
+
+/* Return true if OP can be synthesized with a particular vspltisb, vspltish
+ or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
+ depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
+ all items are set to the same value and contain COPIES replicas of the
+   vsplt's operand; if STEP > 1, one in every STEP elements is set to the
+   vsplt's operand and the others are set to the value of the operand's msb.  */
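+/* For example, the V4SImode constant { 0x00050005, 0x00050005, 0x00050005,
+   0x00050005 } is accepted with STEP 1 and COPIES 2, since it is vspltish 5
+   reinterpreted as V4SImode; a V16QImode constant whose bytes 3, 7, 11 and
+   15 are 5 and whose other bytes are 0 is accepted with STEP 4 and
+   COPIES 1, since it is vspltisw 5 viewed as bytes.  */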
+
+static bool
+vspltis_constant (rtx op, unsigned step, unsigned copies)
+{
+ enum machine_mode mode = GET_MODE (op);
+ enum machine_mode inner = GET_MODE_INNER (mode);
+
+ unsigned i;
+ unsigned nunits = GET_MODE_NUNITS (mode);
+ unsigned bitsize = GET_MODE_BITSIZE (inner);
+ unsigned mask = GET_MODE_MASK (inner);
+
+ HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
+ HOST_WIDE_INT splat_val = val;
+ HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;
+
+ /* Construct the value to be splatted, if possible. If not, return 0. */
+ for (i = 2; i <= copies; i *= 2)
+ {
+ HOST_WIDE_INT small_val;
+ bitsize /= 2;
+ small_val = splat_val >> bitsize;
+ mask >>= bitsize;
+ if (splat_val != ((small_val << bitsize) | (small_val & mask)))
+ return false;
+ splat_val = small_val;
+ }
+
+ /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
+ if (EASY_VECTOR_15 (splat_val))
+ ;
+
+ /* Also check if we can splat, and then add the result to itself. Do so if
+     the value is positive, or if the splat instruction is using OP's mode;
+ for splat_val < 0, the splat and the add should use the same mode. */
+ else if (EASY_VECTOR_15_ADD_SELF (splat_val)
+ && (splat_val >= 0 || (step == 1 && copies == 1)))
+ ;
+
+ else
+ return false;
+
+ /* Check if VAL is present in every STEP-th element, and the
+ other elements are filled with its most significant bit. */
+ for (i = 0; i < nunits - 1; ++i)
+ {
+ HOST_WIDE_INT desired_val;
+ if (((i + 1) & (step - 1)) == 0)
+ desired_val = val;
+ else
+ desired_val = msb_val;
+
+ if (desired_val != const_vector_elt_as_int (op, i))
+ return false;
+ }
+
+ return true;
+}
+
+
+/* Return true if OP is of the given MODE and can be synthesized
+ with a vspltisb, vspltish or vspltisw. */
+
+bool
+easy_altivec_constant (rtx op, enum machine_mode mode)
+{
+ unsigned step, copies;
+
+ if (mode == VOIDmode)
+ mode = GET_MODE (op);
+ else if (mode != GET_MODE (op))
+ return false;
+
+ /* Start with a vspltisw. */
+ step = GET_MODE_NUNITS (mode) / 4;
+ copies = 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return true;
+
+ return false;
+}
+
+/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
+ result is OP. Abort if it is not possible. */
+
+rtx
+gen_easy_altivec_constant (rtx op)
+{
+ enum machine_mode mode = GET_MODE (op);
+ int nunits = GET_MODE_NUNITS (mode);
+ rtx last = CONST_VECTOR_ELT (op, nunits - 1);
+ unsigned step = nunits / 4;
+ unsigned copies = 1;
+
+ /* Start with a vspltisw. */
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));
+
+ /* Then try with a vspltish. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));
+
+ /* And finally a vspltisb. */
+ if (step == 1)
+ copies <<= 1;
+ else
+ step >>= 1;
+
+ if (vspltis_constant (op, step, copies))
+ return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));
+
+ gcc_unreachable ();
+}
+
+const char *
+output_vec_const_move (rtx *operands)
+{
+ int cst, cst2;
+ enum machine_mode mode;
+ rtx dest, vec;
+
+ dest = operands[0];
+ vec = operands[1];
+ mode = GET_MODE (dest);
+
+ if (TARGET_ALTIVEC)
+ {
+ rtx splat_vec;
+ if (zero_constant (vec, mode))
+ return "vxor %0,%0,%0";
+
+ splat_vec = gen_easy_altivec_constant (vec);
+ gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
+ operands[1] = XEXP (splat_vec, 0);
+ if (!EASY_VECTOR_15 (INTVAL (operands[1])))
+ return "#";
+
+ switch (GET_MODE (splat_vec))
+ {
+ case V4SImode:
+ return "vspltisw %0,%1";
+
+ case V8HImode:
+ return "vspltish %0,%1";
+
+ case V16QImode:
+ return "vspltisb %0,%1";
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ gcc_assert (TARGET_SPE);
+
+ /* Vector constant 0 is handled as a splitter of V2SI, and in the
+ pattern of V1DI, V4HI, and V2SF.
+
+ FIXME: We should probably return # and add post reload
+ splitters for these, but this way is so easy ;-). */
+ cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
+ cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
+ operands[1] = CONST_VECTOR_ELT (vec, 0);
+ operands[2] = CONST_VECTOR_ELT (vec, 1);
+ if (cst == cst2)
+ return "li %0,%1\n\tevmergelo %0,%0,%0";
+ else
+ return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
+}
+
+/* Initialize the paired-float vector TARGET to VALS.  */
+
+void
+paired_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0;
+ rtx x, new_rtx, tmp, constant_op, op1, op2;
+ int i;
+
+ for (i = 0; i < n_elts; ++i)
+ {
+ x = XVECEXP (vals, 0, i);
+ if (!CONSTANT_P (x))
+ ++n_var;
+ }
+ if (n_var == 0)
+ {
+ /* Load from constant pool. */
+ emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
+ return;
+ }
+
+ if (n_var == 2)
+ {
+ /* The vector is initialized only with non-constants. */
+ new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
+ XVECEXP (vals, 0, 1));
+
+ emit_move_insn (target, new_rtx);
+ return;
+ }
+
+ /* One field is non-constant and the other one is a constant. Load the
+     constant from the constant pool and use the ps_merge instruction to
+ construct the whole vector. */
+ op1 = XVECEXP (vals, 0, 0);
+ op2 = XVECEXP (vals, 0, 1);
+
+ constant_op = (CONSTANT_P (op1)) ? op1 : op2;
+
+ tmp = gen_reg_rtx (GET_MODE (constant_op));
+ emit_move_insn (tmp, constant_op);
+
+ if (CONSTANT_P (op1))
+ new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
+ else
+ new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
+
+ emit_move_insn (target, new_rtx);
+}
+
+void
+paired_expand_vector_move (rtx operands[])
+{
+ rtx op0 = operands[0], op1 = operands[1];
+
+ emit_move_insn (op0, op1);
+}
+
+/* Emit a vector compare for code RCODE.  DEST is the destination, OP1 and
+   OP2 are the two VEC_COND_EXPR operands, and CC_OP0 and CC_OP1 are the
+   two operands of the comparison RCODE.  This is a recursive function.  */
+
+static void
+paired_emit_vector_compare (enum rtx_code rcode,
+ rtx dest, rtx op0, rtx op1,
+ rtx cc_op0, rtx cc_op1)
+{
+ rtx tmp = gen_reg_rtx (V2SFmode);
+ rtx tmp1, max, min, equal_zero;
+
+ gcc_assert (TARGET_PAIRED_FLOAT);
+ gcc_assert (GET_MODE (op0) == GET_MODE (op1));
+
+ switch (rcode)
+ {
+ case LT:
+ case LTU:
+ paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ case GE:
+ case GEU:
+ emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
+ emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
+ return;
+ case LE:
+ case LEU:
+ paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
+ return;
+ case GT:
+ paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ case EQ:
+ tmp1 = gen_reg_rtx (V2SFmode);
+ max = gen_reg_rtx (V2SFmode);
+ min = gen_reg_rtx (V2SFmode);
+ equal_zero = gen_reg_rtx (V2SFmode);
+
+ emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
+ emit_insn (gen_selv2sf4
+ (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
+ emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
+ emit_insn (gen_selv2sf4
+ (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
+ emit_insn (gen_subv2sf3 (tmp1, min, max));
+ emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
+ return;
+ case NE:
+ paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ case UNLE:
+ paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ case UNLT:
+ paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ case UNGE:
+ paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ case UNGT:
+ paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
+ return;
+ default:
+ gcc_unreachable ();
+ }
+
+ return;
+}
+
+/* Emit vector conditional expression.
+   DEST is the destination.  OP1 and OP2 are the two VEC_COND_EXPR operands.
+ CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
+
+int
+paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
+ rtx cond, rtx cc_op0, rtx cc_op1)
+{
+ enum rtx_code rcode = GET_CODE (cond);
+
+ if (!TARGET_PAIRED_FLOAT)
+ return 0;
+
+ paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
+
+ return 1;
+}
+
+/* Initialize vector TARGET to VALS. */
+
+void
+rs6000_expand_vector_init (rtx target, rtx vals)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ int n_elts = GET_MODE_NUNITS (mode);
+ int n_var = 0, one_var = -1;
+ bool all_same = true, all_const_zero = true;
+ rtx x, mem;
+ int i;
+
+ for (i = 0; i < n_elts; ++i)
+ {
+ x = XVECEXP (vals, 0, i);
+ if (!CONSTANT_P (x))
+ ++n_var, one_var = i;
+ else if (x != CONST0_RTX (inner_mode))
+ all_const_zero = false;
+
+ if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
+ all_same = false;
+ }
+
+ if (n_var == 0)
+ {
+ rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
+ if (mode != V4SFmode && all_const_zero)
+ {
+ /* Zero register. */
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_XOR (mode, target, target)));
+ return;
+ }
+ else if (mode != V4SFmode && easy_vector_constant (const_vec, mode))
+ {
+ /* Splat immediate. */
+ emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
+ return;
+ }
+ else if (all_same)
+ ; /* Splat vector element. */
+ else
+ {
+ /* Load from constant pool. */
+ emit_move_insn (target, const_vec);
+ return;
+ }
+ }
+
+ /* Store value to stack temp. Load vector element. Splat. */
+ if (all_same)
+ {
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
+ XVECEXP (vals, 0, 0));
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ target, mem),
+ x)));
+ x = gen_rtx_VEC_SELECT (inner_mode, target,
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (1, const0_rtx)));
+ emit_insn (gen_rtx_SET (VOIDmode, target,
+ gen_rtx_VEC_DUPLICATE (mode, x)));
+ return;
+ }
+
+ /* One field is non-constant. Load constant then overwrite
+ varying field. */
+ if (n_var == 1)
+ {
+ rtx copy = copy_rtx (vals);
+
+ /* Load constant part of vector, substitute neighboring value for
+ varying element. */
+ XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
+ rs6000_expand_vector_init (target, copy);
+
+ /* Insert variable. */
+ rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
+ return;
+ }
+
+ /* Construct the vector in memory one field at a time
+ and load the whole vector. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+ for (i = 0; i < n_elts; i++)
+ emit_move_insn (adjust_address_nv (mem, inner_mode,
+ i * GET_MODE_SIZE (inner_mode)),
+ XVECEXP (vals, 0, i));
+ emit_move_insn (target, mem);
+}
+
+/* Set field ELT of TARGET to VAL. */
+
+void
+rs6000_expand_vector_set (rtx target, rtx val, int elt)
+{
+ enum machine_mode mode = GET_MODE (target);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx reg = gen_reg_rtx (mode);
+ rtx mask, mem, x;
+ int width = GET_MODE_SIZE (inner_mode);
+ int i;
+
+ /* Load single variable value. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
+ emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_LVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ reg, mem),
+ x)));
+
+ /* Linear sequence. */
+ mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
+ for (i = 0; i < 16; ++i)
+ XVECEXP (mask, 0, i) = GEN_INT (i);
+
+ /* Set permute mask to insert element into target. */
+ for (i = 0; i < width; ++i)
+ XVECEXP (mask, 0, elt*width + i)
+ = GEN_INT (i + 0x10);
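+  /* E.g. for V4SImode and ELT == 1 the mask becomes { 0, 1, 2, 3, 0x10,
+     0x11, 0x12, 0x13, 8, 9, 10, 11, 12, 13, 14, 15 }: the vperm below
+     copies the new value from REG into bytes 4-7 and keeps the remaining
+     bytes of TARGET.  */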
+ x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
+ x = gen_rtx_UNSPEC (mode,
+ gen_rtvec (3, target, reg,
+ force_reg (V16QImode, x)),
+ UNSPEC_VPERM);
+ emit_insn (gen_rtx_SET (VOIDmode, target, x));
+}
+
+/* Extract field ELT from VEC into TARGET. */
+
+void
+rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
+{
+ enum machine_mode mode = GET_MODE (vec);
+ enum machine_mode inner_mode = GET_MODE_INNER (mode);
+ rtx mem, x;
+
+ /* Allocate mode-sized buffer. */
+ mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
+
+ /* Add offset to field within buffer matching vector element. */
+ mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));
+
+ /* Store single field into mode-sized buffer. */
+ x = gen_rtx_UNSPEC (VOIDmode,
+ gen_rtvec (1, const0_rtx), UNSPEC_STVE);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ mem, vec),
+ x)));
+ emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
+}
+
+/* Generate the shifts and masks for a pair of rldicl or rldicr insns to
+   implement ANDing by the mask IN.  */
+void
+build_mask64_2_operands (rtx in, rtx *out)
+{
+#if HOST_BITS_PER_WIDE_INT >= 64
+ unsigned HOST_WIDE_INT c, lsb, m1, m2;
+ int shift;
+
+ gcc_assert (GET_CODE (in) == CONST_INT);
+
+ c = INTVAL (in);
+ if (c & 1)
+ {
+ /* Assume c initially something like 0x00fff000000fffff. The idea
+ is to rotate the word so that the middle ^^^^^^ group of zeros
+ is at the MS end and can be cleared with an rldicl mask. We then
+ rotate back and clear off the MS ^^ group of zeros with a
+ second rldicl. */
+ c = ~c; /* c == 0xff000ffffff00000 */
+ lsb = c & -c; /* lsb == 0x0000000000100000 */
+ m1 = -lsb; /* m1 == 0xfffffffffff00000 */
+ c = ~c; /* c == 0x00fff000000fffff */
+ c &= -lsb; /* c == 0x00fff00000000000 */
+ lsb = c & -c; /* lsb == 0x0000100000000000 */
+ c = ~c; /* c == 0xff000fffffffffff */
+ c &= -lsb; /* c == 0xff00000000000000 */
+ shift = 0;
+ while ((lsb >>= 1) != 0)
+ shift++; /* shift == 44 on exit from loop */
+ m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
+ m1 = ~m1; /* m1 == 0x000000ffffffffff */
+ m2 = ~c; /* m2 == 0x00ffffffffffffff */
+ }
+ else
+ {
+ /* Assume c initially something like 0xff000f0000000000. The idea
+ is to rotate the word so that the ^^^ middle group of zeros
+ is at the LS end and can be cleared with an rldicr mask. We then
+ rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
+ a second rldicr. */
+ lsb = c & -c; /* lsb == 0x0000010000000000 */
+ m2 = -lsb; /* m2 == 0xffffff0000000000 */
+ c = ~c; /* c == 0x00fff0ffffffffff */
+ c &= -lsb; /* c == 0x00fff00000000000 */
+ lsb = c & -c; /* lsb == 0x0000100000000000 */
+ c = ~c; /* c == 0xff000fffffffffff */
+ c &= -lsb; /* c == 0xff00000000000000 */
+ shift = 0;
+ while ((lsb >>= 1) != 0)
+ shift++; /* shift == 44 on exit from loop */
+ m1 = ~c; /* m1 == 0x00ffffffffffffff */
+ m1 >>= shift; /* m1 == 0x0000000000000fff */
+ m1 = ~m1; /* m1 == 0xfffffffffffff000 */
+ }
+
+ /* Note that when we only have two 0->1 and 1->0 transitions, one of the
+ masks will be all 1's. We are guaranteed more than one transition. */
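+  /* For the first example above (c == 0x00fff000000fffff), OUT ends up
+     as { 20, 0x000000ffffffffff, 44, 0x00ffffffffffffff }.  */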
+ out[0] = GEN_INT (64 - shift);
+ out[1] = GEN_INT (m1);
+ out[2] = GEN_INT (shift);
+ out[3] = GEN_INT (m2);
+#else
+ (void)in;
+ (void)out;
+ gcc_unreachable ();
+#endif
+}
+
+/* Return TRUE if OP is an invalid SUBREG operation on the e500. */
+
+bool
+invalid_e500_subreg (rtx op, enum machine_mode mode)
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
+ subreg:TI and reg:TF. Decimal float modes are like integer
+ modes (only low part of each register used) for this
+ purpose. */
+ if (GET_CODE (op) == SUBREG
+ && (mode == SImode || mode == DImode || mode == TImode
+ || mode == DDmode || mode == TDmode)
+ && REG_P (SUBREG_REG (op))
+ && (GET_MODE (SUBREG_REG (op)) == DFmode
+ || GET_MODE (SUBREG_REG (op)) == TFmode))
+ return true;
+
+ /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
+ reg:TI. */
+ if (GET_CODE (op) == SUBREG
+ && (mode == DFmode || mode == TFmode)
+ && REG_P (SUBREG_REG (op))
+ && (GET_MODE (SUBREG_REG (op)) == DImode
+ || GET_MODE (SUBREG_REG (op)) == TImode
+ || GET_MODE (SUBREG_REG (op)) == DDmode
+ || GET_MODE (SUBREG_REG (op)) == TDmode))
+ return true;
+ }
+
+ if (TARGET_SPE
+ && GET_CODE (op) == SUBREG
+ && mode == SImode
+ && REG_P (SUBREG_REG (op))
+ && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
+ return true;
+
+ return false;
+}
+
+/* AIX increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
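+/* For example, struct { double d; int i; } is raised to 64-bit alignment,
+   while struct { int i; double d; } keeps its computed alignment.  */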
+
+unsigned int
+rs6000_special_round_type_align (tree type, unsigned int computed,
+ unsigned int specified)
+{
+ unsigned int align = MAX (computed, specified);
+ tree field = TYPE_FIELDS (type);
+
+  /* Skip all non-field decls.  */
+ while (field != NULL && TREE_CODE (field) != FIELD_DECL)
+ field = TREE_CHAIN (field);
+
+ if (field != NULL && field != type)
+ {
+ type = TREE_TYPE (field);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ if (type != error_mark_node && TYPE_MODE (type) == DFmode)
+ align = MAX (align, 64);
+ }
+
+ return align;
+}
+
+/* Darwin increases record alignment to the natural alignment of
+ the first field. */
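+/* For example, struct { struct { double d; } x; char c; } is raised to
+   64-bit alignment, because descending into the aggregate finds a double
+   as the first scalar field.  */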
+
+unsigned int
+darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
+ unsigned int specified)
+{
+ unsigned int align = MAX (computed, specified);
+
+ if (TYPE_PACKED (type))
+ return align;
+
+ /* Find the first field, looking down into aggregates. */
+ do {
+ tree field = TYPE_FIELDS (type);
+    /* Skip all non-field decls.  */
+ while (field != NULL && TREE_CODE (field) != FIELD_DECL)
+ field = TREE_CHAIN (field);
+ if (! field)
+ break;
+ type = TREE_TYPE (field);
+ while (TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+ } while (AGGREGATE_TYPE_P (type));
+
+ if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
+ align = MAX (align, TYPE_ALIGN (type));
+
+ return align;
+}
+
+/* Return 1 for an operand in small memory on V.4/eabi. */
+
+int
+small_data_operand (rtx op ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED)
+{
+#if TARGET_ELF
+ rtx sym_ref;
+
+ if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
+ return 0;
+
+ if (DEFAULT_ABI != ABI_V4)
+ return 0;
+
+ /* Vector and float memory instructions have a limited offset on the
+ SPE, so using a vector or float variable directly as an operand is
+ not useful. */
+ if (TARGET_SPE
+ && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
+ return 0;
+
+ if (GET_CODE (op) == SYMBOL_REF)
+ sym_ref = op;
+
+ else if (GET_CODE (op) != CONST
+ || GET_CODE (XEXP (op, 0)) != PLUS
+ || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
+ return 0;
+
+ else
+ {
+ rtx sum = XEXP (op, 0);
+ HOST_WIDE_INT summand;
+
+ /* We have to be careful here, because it is the referenced address
+ that must be 32k from _SDA_BASE_, not just the symbol. */
+ summand = INTVAL (XEXP (sum, 1));
+ if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
+ return 0;
+
+ sym_ref = XEXP (sum, 0);
+ }
+
+ return SYMBOL_REF_SMALL_P (sym_ref);
+#else
+ return 0;
+#endif
+}
+
+/* Return true if either operand is a general purpose register. */
+
+bool
+gpr_or_gpr_p (rtx op0, rtx op1)
+{
+ return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
+ || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
+}
+
+
+/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address. */
+
+static bool
+constant_pool_expr_p (rtx op)
+{
+ rtx base, offset;
+
+ split_const (op, &base, &offset);
+ return (GET_CODE (base) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (base)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
+}
+
+bool
+toc_relative_expr_p (rtx op)
+{
+ rtx base, offset;
+
+ if (GET_CODE (op) != CONST)
+ return false;
+
+ split_const (op, &base, &offset);
+ return (GET_CODE (base) == UNSPEC
+ && XINT (base, 1) == UNSPEC_TOCREL);
+}
+
+bool
+legitimate_constant_pool_address_p (rtx x)
+{
+ return (TARGET_TOC
+ && GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && (TARGET_MINIMAL_TOC || REGNO (XEXP (x, 0)) == TOC_REGISTER)
+ && toc_relative_expr_p (XEXP (x, 1)));
+}
+
+static bool
+legitimate_small_data_p (enum machine_mode mode, rtx x)
+{
+ return (DEFAULT_ABI == ABI_V4
+ && !flag_pic && !TARGET_TOC
+ && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
+ && small_data_operand (x, mode));
+}
+
+/* SPE offset addressing is limited to 5 bits' worth of doublewords.  */
+#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
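+/* I.e. the valid offsets are the doubleword multiples 0, 8, 16, ..., 248.  */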
+
+bool
+rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
+{
+ unsigned HOST_WIDE_INT offset, extra;
+
+ if (GET_CODE (x) != PLUS)
+ return false;
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
+ return false;
+ if (legitimate_constant_pool_address_p (x))
+ return true;
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return false;
+
+ offset = INTVAL (XEXP (x, 1));
+ extra = 0;
+ switch (mode)
+ {
+ case V16QImode:
+ case V8HImode:
+ case V4SFmode:
+ case V4SImode:
+ /* AltiVec vector modes. Only reg+reg addressing is valid and
+ constant offset zero should not occur due to canonicalization. */
+ return false;
+
+ case V4HImode:
+ case V2SImode:
+ case V1DImode:
+ case V2SFmode:
+ /* Paired vector modes. Only reg+reg addressing is valid and
+ constant offset zero should not occur due to canonicalization. */
+ if (TARGET_PAIRED_FLOAT)
+ return false;
+ /* SPE vector modes. */
+ return SPE_CONST_OFFSET_OK (offset);
+
+ case DFmode:
+ if (TARGET_E500_DOUBLE)
+ return SPE_CONST_OFFSET_OK (offset);
+
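+      /* Fall through.  */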
+ case DDmode:
+ case DImode:
+      /* On e500v2, we may have
+
+	   (subreg:DF (mem:DI (plus (reg) (const_int))) 0),
+
+	 which gets addressed with evldd instructions.  */
+ if (TARGET_E500_DOUBLE)
+ return SPE_CONST_OFFSET_OK (offset);
+
+ if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
+ extra = 4;
+ else if (offset & 3)
+ return false;
+ break;
+
+ case TFmode:
+ if (TARGET_E500_DOUBLE)
+ return (SPE_CONST_OFFSET_OK (offset)
+ && SPE_CONST_OFFSET_OK (offset + 8));
+
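+      /* Fall through.  */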
+ case TDmode:
+ case TImode:
+ if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
+ extra = 12;
+ else if (offset & 3)
+ return false;
+ else
+ extra = 8;
+ break;
+
+ default:
+ break;
+ }
+
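+  /* Accept a signed 16-bit displacement whose last word (OFFSET + EXTRA)
+     is also reachable with a 16-bit displacement.  E.g. for DImode on a
+     32-bit target (EXTRA == 4), OFFSET 0x7ffc is rejected because the
+     second word would sit at 0x8000.  */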
+ offset += 0x8000;
+ return (offset < 0x10000) && (offset + extra < 0x10000);
+}
+
+bool
+legitimate_indexed_address_p (rtx x, int strict)
+{
+ rtx op0, op1;
+
+ if (GET_CODE (x) != PLUS)
+ return false;
+
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+
+ /* Recognize the rtl generated by reload which we know will later be
+ replaced with proper base and index regs. */
+ if (!strict
+ && reload_in_progress
+ && (REG_P (op0) || GET_CODE (op0) == PLUS)
+ && REG_P (op1))
+ return true;
+
+ return (REG_P (op0) && REG_P (op1)
+ && ((INT_REG_OK_FOR_BASE_P (op0, strict)
+ && INT_REG_OK_FOR_INDEX_P (op1, strict))
+ || (INT_REG_OK_FOR_BASE_P (op1, strict)
+ && INT_REG_OK_FOR_INDEX_P (op0, strict))));
+}
+
+bool
+avoiding_indexed_address_p (enum machine_mode mode)
+{
+ /* Avoid indexed addressing for modes that have non-indexed
+ load/store instruction forms. */
+ return TARGET_AVOID_XFORM && !ALTIVEC_VECTOR_MODE (mode);
+}
+
+inline bool
+legitimate_indirect_address_p (rtx x, int strict)
+{
+ return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
+}
+
+bool
+macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
+{
+ if (!TARGET_MACHO || !flag_pic
+ || mode != SImode || GET_CODE (x) != MEM)
+ return false;
+ x = XEXP (x, 0);
+
+ if (GET_CODE (x) != LO_SUM)
+ return false;
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
+ return false;
+ x = XEXP (x, 1);
+
+ return CONSTANT_P (x);
+}
+
+static bool
+legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
+{
+ if (GET_CODE (x) != LO_SUM)
+ return false;
+ if (GET_CODE (XEXP (x, 0)) != REG)
+ return false;
+ if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
+ return false;
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ || mode == DDmode || mode == TDmode
+ || mode == DImode))
+ return false;
+ x = XEXP (x, 1);
+
+ if (TARGET_ELF || TARGET_MACHO)
+ {
+ if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
+ return false;
+ if (TARGET_TOC)
+ return false;
+ if (GET_MODE_NUNITS (mode) != 1)
+ return false;
+ if (GET_MODE_BITSIZE (mode) > 64
+ || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
+ && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && (mode == DFmode || mode == DDmode))))
+ return false;
+
+ return CONSTANT_P (x);
+ }
+
+ return false;
+}
+
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This is used from only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was
+ called. In some cases it is useful to look at this to decide what
+ needs to be done.
+
+ MODE is passed so that this function can use GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this function to do nothing. It exists to
+ recognize opportunities to optimize the output.
+
+ On RS/6000, first check for the sum of a register with a constant
+ integer that is out of range. If so, generate code to add the
+ constant with the low-order 16 bits masked to the register and force
+ this result into another register (this can be done with `cau').
+ Then generate an address of REG+(CONST&0xffff), allowing for the
+ possibility of bit 16 being a one.
+
+ Then check for the sum of a register and something not constant, try to
+ load the other things into a register and return the sum. */
+
+rtx
+rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+ enum machine_mode mode)
+{
+ unsigned int extra = 0;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
+ if (model != 0)
+ return rs6000_legitimize_tls_address (x, model);
+ }
+
+ switch (mode)
+ {
+ case DFmode:
+ case DDmode:
+ extra = 4;
+ break;
+ case DImode:
+ if (!TARGET_POWERPC64)
+ extra = 4;
+ break;
+ case TFmode:
+ case TDmode:
+ extra = 12;
+ break;
+ case TImode:
+ extra = TARGET_POWERPC64 ? 8 : 12;
+ break;
+ default:
+ break;
+ }
+
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
+ >= 0x10000 - extra)
+ && !((TARGET_POWERPC64
+ && (mode == DImode || mode == TImode)
+ && (INTVAL (XEXP (x, 1)) & 3) != 0)
+ || SPE_VECTOR_MODE (mode)
+ || ALTIVEC_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ || mode == DImode || mode == DDmode
+ || mode == TDmode))))
+ {
+ HOST_WIDE_INT high_int, low_int;
+ rtx sum;
+ low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
+ if (low_int >= 0x8000 - extra)
+ low_int = 0;
+ high_int = INTVAL (XEXP (x, 1)) - low_int;
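+      /* Illustrative example (not from the original sources): for
+	 x = (plus reg 0x12345), low_int is 0x2345 and high_int is
+	 0x10000, so the addis-style add materializes reg + 0x10000
+	 and the mem keeps the 0x2345 displacement.  */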
+ sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
+ GEN_INT (high_int)), 0);
+ return plus_constant (sum, low_int);
+ }
+ else if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && GET_CODE (XEXP (x, 1)) != CONST_INT
+ && GET_MODE_NUNITS (mode) == 1
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+ || TARGET_POWERPC64
+ || ((mode != DImode && mode != DFmode && mode != DDmode)
+ || (TARGET_E500_DOUBLE && mode != DDmode)))
+ && (TARGET_POWERPC64 || mode != DImode)
+ && !avoiding_indexed_address_p (mode)
+ && mode != TImode
+ && mode != TFmode
+ && mode != TDmode)
+ {
+ return gen_rtx_PLUS (Pmode, XEXP (x, 0),
+ force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
+ }
+ else if (ALTIVEC_VECTOR_MODE (mode))
+ {
+ rtx reg;
+
+ /* Make sure both operands are registers. */
+ if (GET_CODE (x) == PLUS)
+ return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
+ force_reg (Pmode, XEXP (x, 1)));
+
+ reg = force_reg (Pmode, x);
+ return reg;
+ }
+ else if (SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ || mode == DDmode || mode == TDmode
+ || mode == DImode)))
+ {
+ if (mode == DImode)
+ return NULL_RTX;
+ /* We accept [reg + reg] and [reg + OFFSET]. */
+
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx op1 = XEXP (x, 0);
+ rtx op2 = XEXP (x, 1);
+ rtx y;
+
+ op1 = force_reg (Pmode, op1);
+
+ if (GET_CODE (op2) != REG
+ && (GET_CODE (op2) != CONST_INT
+ || !SPE_CONST_OFFSET_OK (INTVAL (op2))
+ || (GET_MODE_SIZE (mode) > 8
+ && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
+ op2 = force_reg (Pmode, op2);
+
+ /* We can't always do [reg + reg] for these, because [reg +
+ reg + offset] is not a legitimate addressing mode. */
+ y = gen_rtx_PLUS (Pmode, op1, op2);
+
+ if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
+ return force_reg (Pmode, y);
+ else
+ return y;
+ }
+
+ return force_reg (Pmode, x);
+ }
+ else if (TARGET_ELF
+ && TARGET_32BIT
+ && TARGET_NO_TOC
+ && ! flag_pic
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x)
+ && GET_MODE_NUNITS (mode) == 1
+ && (GET_MODE_BITSIZE (mode) <= 32
+ || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+ && (mode == DFmode || mode == DDmode))))
+ {
+ rtx reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_elf_high (reg, x));
+ return gen_rtx_LO_SUM (Pmode, reg, x);
+ }
+ else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
+ && ! flag_pic
+#if TARGET_MACHO
+ && ! MACHO_DYNAMIC_NO_PIC_P
+#endif
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x)
+ && GET_MODE_NUNITS (mode) == 1
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+ || (mode != DFmode && mode != DDmode))
+ && mode != DImode
+ && mode != TImode)
+ {
+ rtx reg = gen_reg_rtx (Pmode);
+ emit_insn (gen_macho_high (reg, x));
+ return gen_rtx_LO_SUM (Pmode, reg, x);
+ }
+ else if (TARGET_TOC
+ && GET_CODE (x) == SYMBOL_REF
+ && constant_pool_expr_p (x)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
+ {
+ return create_TOC_reference (x);
+ }
+ else
+ return NULL_RTX;
+}
+
+/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
+ We need to emit DTP-relative relocations. */
+
+static void
+rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
+{
+ switch (size)
+ {
+ case 4:
+ fputs ("\t.long\t", file);
+ break;
+ case 8:
+ fputs (DOUBLE_INT_ASM_OP, file);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ output_addr_const (file, x);
+ fputs ("@dtprel+0x8000", file);
+}
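+
+/* For example (illustrative), with SIZE == 4 and X a SYMBOL_REF for
+   `foo', the above emits:  .long  foo@dtprel+0x8000  */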
+
+/* Construct the SYMBOL_REF for the tls_get_addr function. */
+
+static GTY(()) rtx rs6000_tls_symbol;
+static rtx
+rs6000_tls_get_addr (void)
+{
+ if (!rs6000_tls_symbol)
+ rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
+
+ return rs6000_tls_symbol;
+}
+
+/* Construct the SYMBOL_REF for TLS GOT references. */
+
+static GTY(()) rtx rs6000_got_symbol;
+static rtx
+rs6000_got_sym (void)
+{
+ if (!rs6000_got_symbol)
+ {
+ rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+ SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
+ SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
+ }
+
+ return rs6000_got_symbol;
+}
+
+/* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
+ this (thread-local) address. */
+
+static rtx
+rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
+{
+ rtx dest, insn;
+
+ dest = gen_reg_rtx (Pmode);
+ if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
+ {
+ rtx tlsreg;
+
+ if (TARGET_64BIT)
+ {
+ tlsreg = gen_rtx_REG (Pmode, 13);
+ insn = gen_tls_tprel_64 (dest, tlsreg, addr);
+ }
+ else
+ {
+ tlsreg = gen_rtx_REG (Pmode, 2);
+ insn = gen_tls_tprel_32 (dest, tlsreg, addr);
+ }
+ emit_insn (insn);
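+      /* Illustratively, on 64-bit this is a single addi from the
+	 thread pointer, e.g. addi dest,r13,sym@tprel; 32-bit uses
+	 r2 instead of r13.  */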
+ }
+ else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
+ {
+ rtx tlsreg, tmp;
+
+ tmp = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ {
+ tlsreg = gen_rtx_REG (Pmode, 13);
+ insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
+ }
+ else
+ {
+ tlsreg = gen_rtx_REG (Pmode, 2);
+ insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
+ }
+ emit_insn (insn);
+ if (TARGET_64BIT)
+ insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
+ else
+ insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
+ emit_insn (insn);
+ }
+ else
+ {
+ rtx r3, got, tga, tmp1, tmp2, eqv;
+
+ /* We currently use relocations like @got@tlsgd for tls, which
+ means the linker will handle allocation of tls entries, placing
+ them in the .got section. So use a pointer to the .got section,
+ not one to secondary TOC sections used by 64-bit -mminimal-toc,
+ or to secondary GOT sections used by 32-bit -fPIC. */
+ if (TARGET_64BIT)
+ got = gen_rtx_REG (Pmode, 2);
+ else
+ {
+ if (flag_pic == 1)
+ got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+ else
+ {
+ rtx gsym = rs6000_got_sym ();
+ got = gen_reg_rtx (Pmode);
+ if (flag_pic == 0)
+ rs6000_emit_move (got, gsym, Pmode);
+ else
+ {
+ rtx tmp3, mem;
+ rtx first, last;
+
+ tmp1 = gen_reg_rtx (Pmode);
+ tmp2 = gen_reg_rtx (Pmode);
+ tmp3 = gen_reg_rtx (Pmode);
+ mem = gen_const_mem (Pmode, tmp1);
+
+ first = emit_insn (gen_load_toc_v4_PIC_1b (gsym));
+ emit_move_insn (tmp1,
+ gen_rtx_REG (Pmode, LR_REGNO));
+ emit_move_insn (tmp2, mem);
+ emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
+ last = emit_move_insn (got, tmp3);
+ set_unique_reg_note (last, REG_EQUAL, gsym);
+ }
+ }
+ }
+
+ if (model == TLS_MODEL_GLOBAL_DYNAMIC)
+ {
+ r3 = gen_rtx_REG (Pmode, 3);
+ tga = rs6000_tls_get_addr ();
+
+ if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
+ insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
+ insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
+ else if (DEFAULT_ABI == ABI_V4)
+ insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
+ else
+ gcc_unreachable ();
+
+ start_sequence ();
+ insn = emit_call_insn (insn);
+ RTL_CONST_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
+ if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
+ insn = get_insns ();
+ end_sequence ();
+ emit_libcall_block (insn, dest, r3, addr);
+ }
+ else if (model == TLS_MODEL_LOCAL_DYNAMIC)
+ {
+ r3 = gen_rtx_REG (Pmode, 3);
+ tga = rs6000_tls_get_addr ();
+
+ if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
+ insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
+ insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
+ else if (DEFAULT_ABI == ABI_V4)
+ insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
+ else
+ gcc_unreachable ();
+
+ start_sequence ();
+ insn = emit_call_insn (insn);
+ RTL_CONST_CALL_P (insn) = 1;
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
+ if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
+ insn = get_insns ();
+ end_sequence ();
+ tmp1 = gen_reg_rtx (Pmode);
+ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
+ UNSPEC_TLSLD);
+ emit_libcall_block (insn, tmp1, r3, eqv);
+ if (rs6000_tls_size == 16)
+ {
+ if (TARGET_64BIT)
+ insn = gen_tls_dtprel_64 (dest, tmp1, addr);
+ else
+ insn = gen_tls_dtprel_32 (dest, tmp1, addr);
+ }
+ else if (rs6000_tls_size == 32)
+ {
+ tmp2 = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
+ else
+ insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
+ emit_insn (insn);
+ if (TARGET_64BIT)
+ insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
+ else
+ insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
+ }
+ else
+ {
+ tmp2 = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
+ else
+ insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
+ emit_insn (insn);
+ insn = gen_rtx_SET (Pmode, dest,
+ gen_rtx_PLUS (Pmode, tmp2, tmp1));
+ }
+ emit_insn (insn);
+ }
+ else
+ {
+ /* IE, or 64-bit offset LE. */
+ tmp2 = gen_reg_rtx (Pmode);
+ if (TARGET_64BIT)
+ insn = gen_tls_got_tprel_64 (tmp2, got, addr);
+ else
+ insn = gen_tls_got_tprel_32 (tmp2, got, addr);
+ emit_insn (insn);
+ if (TARGET_64BIT)
+ insn = gen_tls_tls_64 (dest, tmp2, addr);
+ else
+ insn = gen_tls_tls_32 (dest, tmp2, addr);
+ emit_insn (insn);
+ }
+ }
+
+ return dest;
+}
+
+/* Return 1 if X contains a thread-local symbol. */
+
+bool
+rs6000_tls_referenced_p (rtx x)
+{
+ if (! TARGET_HAVE_TLS)
+ return false;
+
+ return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
+}
+
+/* Return 1 if *X is a thread-local symbol. This is the same as
+ rs6000_tls_symbol_ref except for the type of the unused argument. */
+
+static int
+rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
+{
+ return RS6000_SYMBOL_REF_TLS_P (*x);
+}
+
+/* The convention appears to be to define this wherever it is used.
+ Since legitimize_reload_address is now defined here,
+ REG_MODE_OK_FOR_BASE_P is used here as well. */
+#ifndef REG_MODE_OK_FOR_BASE_P
+#define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO)
+#endif
+
+/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
+ replace the input X, or the original X if no replacement is called for.
+ The output parameter *WIN is 1 if the calling macro should goto WIN,
+ 0 if it should not.
+
+ For RS/6000, we wish to handle large displacements off a base
+ register by splitting the addend across an addis and the mem insn.
+ This cuts the number of extra insns needed from 3 to 1.
+
+ On Darwin, we use this to generate code for floating point constants.
+ A movsf_low is generated so we wind up with 2 instructions rather than 3.
+ The Darwin code is inside #if TARGET_MACHO because only then are the
+ machopic_* functions defined. */
+rtx
+rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
+ int opnum, int type,
+ int ind_levels ATTRIBUTE_UNUSED, int *win)
+{
+ /* We must recognize output that we have already generated ourselves. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN && flag_pic
+ && GET_CODE (x) == LO_SUM
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
+ && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
+ && machopic_operand_p (XEXP (x, 1)))
+ {
+ /* Result of previous invocation of this function on Darwin
+ floating point constant. */
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+#endif
+
+ /* Force ld/std non-word aligned offset into base register by wrapping
+ in offset 0. */
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < 32
+ && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && (INTVAL (XEXP (x, 1)) & 3) != 0
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
+ && TARGET_POWERPC64)
+ {
+ x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+ *win = 1;
+ return x;
+ }
+
+ if (GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+ && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && !SPE_VECTOR_MODE (mode)
+ && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ || mode == DDmode || mode == TDmode
+ || mode == DImode))
+ && !ALTIVEC_VECTOR_MODE (mode))
+ {
+ HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT high
+ = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
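+      /* Illustrative example: val = 0x12345 splits into high = 0x10000
+	 and low = 0x2345; reload materializes base + high in a register
+	 while the mem keeps the low displacement.  */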
+
+ /* Check for 32-bit overflow. */
+ if (high + low != val)
+ {
+ *win = 0;
+ return x;
+ }
+
+ /* Reload the high part into a base reg; leave the low part
+ in the mem directly. */
+
+ x = gen_rtx_PLUS (GET_MODE (x),
+ gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+ GEN_INT (high)),
+ GEN_INT (low));
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+
+ if (GET_CODE (x) == SYMBOL_REF
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && !SPE_VECTOR_MODE (mode)
+#if TARGET_MACHO
+ && DEFAULT_ABI == ABI_DARWIN
+ && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
+#else
+ && DEFAULT_ABI == ABI_V4
+ && !flag_pic
+#endif
+ /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
+ The same goes for DImode without 64-bit gprs and DFmode and DDmode
+ without fprs. */
+ && mode != TFmode
+ && mode != TDmode
+ && (mode != DImode || TARGET_POWERPC64)
+ && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
+ || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
+ {
+#if TARGET_MACHO
+ if (flag_pic)
+ {
+ rtx offset = machopic_gen_offset (x);
+ x = gen_rtx_LO_SUM (GET_MODE (x),
+ gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
+ gen_rtx_HIGH (Pmode, offset)), offset);
+ }
+ else
+#endif
+ x = gen_rtx_LO_SUM (GET_MODE (x),
+ gen_rtx_HIGH (Pmode, x), x);
+
+ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+ opnum, (enum reload_type)type);
+ *win = 1;
+ return x;
+ }
+
+ /* Reload an offset address wrapped by an AND that represents the
+ masking of the lower bits. Strip the outer AND and let reload
+ convert the offset address into an indirect address. */
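+  /* (The AND with -16 mirrors the lvx/stvx instructions, which
+     ignore the low four bits of the effective address.)  */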
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ {
+ x = XEXP (x, 0);
+ *win = 1;
+ return x;
+ }
+
+ if (TARGET_TOC
+ && GET_CODE (x) == SYMBOL_REF
+ && constant_pool_expr_p (x)
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
+ {
+ x = create_TOC_reference (x);
+ *win = 1;
+ return x;
+ }
+ *win = 0;
+ return x;
+}
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On the RS/6000, there are four valid addresses: a SYMBOL_REF that
+ refers to a constant pool entry of an address (or the sum of it
+ plus a constant), a short (16-bit signed) constant plus a register,
+ the sum of two registers, or a register indirect, possibly with an
+ auto-increment. For DFmode, DDmode and DImode with a constant plus
+ register, we must ensure that both words are addressable, or on
+ PowerPC64 that the offset is word aligned.
+
+ For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
+ 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
+ because adjacent memory cells are accessed by adding word-sized offsets
+ during assembly output. */
+int
+rs6000_legitimate_address (enum machine_mode mode, rtx x, int reg_ok_strict)
+{
+ /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
+ if (TARGET_ALTIVEC
+ && ALTIVEC_VECTOR_MODE (mode)
+ && GET_CODE (x) == AND
+ && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && INTVAL (XEXP (x, 1)) == -16)
+ x = XEXP (x, 0);
+
+ if (RS6000_SYMBOL_REF_TLS_P (x))
+ return 0;
+ if (legitimate_indirect_address_p (x, reg_ok_strict))
+ return 1;
+ if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && !SPE_VECTOR_MODE (mode)
+ && mode != TFmode
+ && mode != TDmode
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ && !(TARGET_E500_DOUBLE
+ && (mode == DFmode || mode == DDmode || mode == DImode))
+ && TARGET_UPDATE
+ && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
+ return 1;
+ if (legitimate_small_data_p (mode, x))
+ return 1;
+ if (legitimate_constant_pool_address_p (x))
+ return 1;
+ /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
+ if (! reg_ok_strict
+ && GET_CODE (x) == PLUS
+ && GET_CODE (XEXP (x, 0)) == REG
+ && (XEXP (x, 0) == virtual_stack_vars_rtx
+ || XEXP (x, 0) == arg_pointer_rtx)
+ && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ return 1;
+ if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
+ return 1;
+ if (mode != TImode
+ && mode != TFmode
+ && mode != TDmode
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS)
+ || TARGET_POWERPC64
+ || (mode != DFmode && mode != DDmode)
+ || (TARGET_E500_DOUBLE && mode != DDmode))
+ && (TARGET_POWERPC64 || mode != DImode)
+ && !avoiding_indexed_address_p (mode)
+ && legitimate_indexed_address_p (x, reg_ok_strict))
+ return 1;
+ if (GET_CODE (x) == PRE_MODIFY
+ && mode != TImode
+ && mode != TFmode
+ && mode != TDmode
+ && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
+ || TARGET_POWERPC64
+ || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
+ && (TARGET_POWERPC64 || mode != DImode)
+ && !ALTIVEC_VECTOR_MODE (mode)
+ && !SPE_VECTOR_MODE (mode)
+ /* Restrict addressing for DI because of our SUBREG hackery. */
+ && !(TARGET_E500_DOUBLE
+ && (mode == DFmode || mode == DDmode || mode == DImode))
+ && TARGET_UPDATE
+ && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
+ && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
+ || (!avoiding_indexed_address_p (mode)
+ && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
+ && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
+ return 1;
+ if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
+ return 1;
+ return 0;
+}
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+
+ On the RS/6000 this is true of all integral offsets (since AltiVec
+ modes don't allow them) and of pre-increment and pre-decrement
+ addresses.
+
+ ??? Except that due to conceptual problems in offsettable_address_p
+ we can't really report the problems of integral offsets. So leave
+ this assuming that the adjustable offset must be valid for the
+ sub-words of a TFmode operand, which is what we had before. */
+
+bool
+rs6000_mode_dependent_address (rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case PLUS:
+ if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
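+	  /* The 12 allows for the largest mode handled here (16-byte
+	     TFmode/TDmode), whose last word is accessed at VAL + 12;
+	     the whole access must stay in the signed 16-bit range.  */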
+ return val + 12 + 0x8000 >= 0x10000;
+ }
+ break;
+
+ case LO_SUM:
+ return true;
+
+ /* Auto-increment cases are now treated generically in recog.c. */
+ case PRE_MODIFY:
+ return TARGET_UPDATE;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Implement FIND_BASE_TERM. */
+
+rtx
+rs6000_find_base_term (rtx op)
+{
+ rtx base, offset;
+
+ split_const (op, &base, &offset);
+ if (GET_CODE (base) == UNSPEC)
+ switch (XINT (base, 1))
+ {
+ case UNSPEC_TOCREL:
+ case UNSPEC_MACHOPIC_OFFSET:
+ /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
+ for aliasing purposes. */
+ return XVECEXP (base, 0, 0);
+ }
+
+ return op;
+}
+
+/* More elaborate version of recog's offsettable_memref_p predicate
+ that works around the ??? note of rs6000_mode_dependent_address.
+ In particular it accepts
+
+ (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
+
+ in 32-bit mode, which the recog predicate rejects. */
+
+bool
+rs6000_offsettable_memref_p (rtx op)
+{
+ if (!MEM_P (op))
+ return false;
+
+ /* First mimic offsettable_memref_p. */
+ if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
+ return true;
+
+ /* offsettable_address_p invokes rs6000_mode_dependent_address, but
+ the latter predicate knows nothing about the mode of the memory
+ reference and, therefore, assumes that it is the largest supported
+ mode (TFmode). As a consequence, legitimate offsettable memory
+ references are rejected. rs6000_legitimate_offset_address_p contains
+ the correct logic for the PLUS case of rs6000_mode_dependent_address. */
+ return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
+}
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ For the SPE, GPRs are 64 bits but only 32 bits are visible in
+ scalar instructions. The upper 32 bits are only available to the
+ SIMD instructions.
+
+ POWER and PowerPC GPRs hold 32 bits worth;
+ PowerPC64 GPRs and FPRs each hold 64 bits worth. */
+
+int
+rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
+{
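+  /* E.g. (illustrative): TFmode (16 bytes) needs two FPRs, four
+     32-bit GPRs, or two 64-bit GPRs.  */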
+ if (FP_REGNO_P (regno))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+
+ if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;
+
+ if (ALTIVEC_REGNO_P (regno))
+ return
+ (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD;
+
+ /* The value returned for SCmode in the E500 double case is 2 for
+ ABI compatibility; storing an SCmode value in a single register
+ would require function_arg and rs6000_spe_function_arg to handle
+ SCmode so as to pass the value correctly in a pair of
+ registers. */
+ if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
+ && !DECIMAL_FLOAT_MODE_P (mode))
+ return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
+
+ return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+}
+
+/* Change register usage conditional on target flags. */
+void
+rs6000_conditional_register_usage (void)
+{
+ int i;
+
+ /* Set MQ register fixed (already call_used) if not POWER
+ architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
+ be allocated. */
+ if (! TARGET_POWER)
+ fixed_regs[64] = 1;
+
+ /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
+ if (TARGET_64BIT)
+ fixed_regs[13] = call_used_regs[13]
+ = call_really_used_regs[13] = 1;
+
+ /* Conditionally disable FPRs. */
+ if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ for (i = 32; i < 64; i++)
+ fixed_regs[i] = call_used_regs[i]
+ = call_really_used_regs[i] = 1;
+
+ /* The TOC register is not killed across calls in a way that is
+ visible to the compiler. */
+ if (DEFAULT_ABI == ABI_AIX)
+ call_really_used_regs[2] = 0;
+
+ if (DEFAULT_ABI == ABI_V4
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && flag_pic == 2)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (DEFAULT_ABI == ABI_V4
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
+ && flag_pic == 1)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (DEFAULT_ABI == ABI_DARWIN
+ && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (TARGET_TOC && TARGET_MINIMAL_TOC)
+ fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
+ = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ if (TARGET_SPE)
+ {
+ global_regs[SPEFSCR_REGNO] = 1;
+ /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
+ registers in prologues and epilogues. We no longer use r14
+ for FIXED_SCRATCH, but we're keeping r14 out of the allocation
+ pool for link-compatibility with older versions of GCC. Once
+ "old" code has died out, we can return r14 to the allocation
+ pool. */
+ fixed_regs[14]
+ = call_used_regs[14]
+ = call_really_used_regs[14] = 1;
+ }
+
+ if (!TARGET_ALTIVEC)
+ {
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
+ call_really_used_regs[VRSAVE_REGNO] = 1;
+ }
+
+ if (TARGET_ALTIVEC)
+ global_regs[VSCR_REGNO] = 1;
+
+ if (TARGET_ALTIVEC_ABI)
+ {
+ for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
+ call_used_regs[i] = call_really_used_regs[i] = 1;
+
+ /* AIX reserves VR20:31 in non-extended ABI mode. */
+ if (TARGET_XCOFF)
+ for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
+ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
+ }
+}
+
+/* Try to output insns to set TARGET equal to the constant C if it can
+ be done in less than N insns. Do all computations in MODE.
+ Returns the place where the output has been placed if it can be
+ done and the insns have been emitted. If it would take more than N
+ insns, zero is returned and no insns are emitted. */
+
+rtx
+rs6000_emit_set_const (rtx dest, enum machine_mode mode,
+ rtx source, int n ATTRIBUTE_UNUSED)
+{
+ rtx result, insn, set;
+ HOST_WIDE_INT c0, c1;
+
+ switch (mode)
+ {
+ case QImode:
+ case HImode:
+ if (dest == NULL)
+ dest = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, source));
+ return dest;
+
+ case SImode:
+ result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
+
+ emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
+ GEN_INT (INTVAL (source)
+ & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IOR (SImode, copy_rtx (result),
+ GEN_INT (INTVAL (source) & 0xffff))));
+ result = dest;
+ break;
+
+ case DImode:
+ switch (GET_CODE (source))
+ {
+ case CONST_INT:
+ c0 = INTVAL (source);
+ c1 = -(c0 < 0);
+ break;
+
+ case CONST_DOUBLE:
+#if HOST_BITS_PER_WIDE_INT >= 64
+ c0 = CONST_DOUBLE_LOW (source);
+ c1 = -(c0 < 0);
+#else
+ c0 = CONST_DOUBLE_LOW (source);
+ c1 = CONST_DOUBLE_HIGH (source);
+#endif
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ result = rs6000_emit_set_long_const (dest, c0, c1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ insn = get_last_insn ();
+ set = single_set (insn);
+ if (! CONSTANT_P (SET_SRC (set)))
+ set_unique_reg_note (insn, REG_EQUAL, source);
+
+ return result;
+}
+
+/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
+ fall back to a straightforward decomposition. We do this to avoid
+ exponential run times encountered when looking for longer sequences
+ with rs6000_emit_set_const. */
+static rtx
+rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
+{
+ if (!TARGET_POWERPC64)
+ {
+ rtx operand1, operand2;
+
+ operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
+ DImode);
+ emit_move_insn (operand1, GEN_INT (c1));
+ emit_move_insn (operand2, GEN_INT (c2));
+ }
+ else
+ {
+ HOST_WIDE_INT ud1, ud2, ud3, ud4;
+
+ ud1 = c1 & 0xffff;
+ ud2 = (c1 & 0xffff0000) >> 16;
+#if HOST_BITS_PER_WIDE_INT >= 64
+ c2 = c1 >> 32;
+#endif
+ ud3 = c2 & 0xffff;
+ ud4 = (c2 & 0xffff0000) >> 16;
+
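+      /* Worked example (illustrative): c = 0x123456789abcdef0 falls
+	 through to the final case below and is built roughly as
+	 lis r,0x1234; ori r,r,0x5678; sldi r,r,32; oris r,r,0x9abc;
+	 ori r,r,0xdef0.  */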
+ if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
+ || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
+ {
+ if (ud1 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
+ else
+ emit_move_insn (dest, GEN_INT (ud1));
+ }
+
+ else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
+ || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
+ {
+ if (ud2 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud2 << 16));
+ if (ud1 != 0)
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_IOR (DImode, copy_rtx (dest),
+ GEN_INT (ud1)));
+ }
+ else if ((ud4 == 0xffff && (ud3 & 0x8000))
+ || (ud4 == 0 && ! (ud3 & 0x8000)))
+ {
+ if (ud3 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud3 << 16));
+
+ if (ud2 != 0)
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_IOR (DImode, copy_rtx (dest),
+ GEN_INT (ud2)));
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_ASHIFT (DImode, copy_rtx (dest),
+ GEN_INT (16)));
+ if (ud1 != 0)
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_IOR (DImode, copy_rtx (dest),
+ GEN_INT (ud1)));
+ }
+ else
+ {
+ if (ud4 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud4 << 16));
+
+ if (ud3 != 0)
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_IOR (DImode, copy_rtx (dest),
+ GEN_INT (ud3)));
+
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_ASHIFT (DImode, copy_rtx (dest),
+ GEN_INT (32)));
+ if (ud2 != 0)
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_IOR (DImode, copy_rtx (dest),
+ GEN_INT (ud2 << 16)));
+ if (ud1 != 0)
+ emit_move_insn (copy_rtx (dest),
+ gen_rtx_IOR (DImode, copy_rtx (dest), GEN_INT (ud1)));
+ }
+ }
+ return dest;
+}
+
+/* Helper for the following. Get rid of [r+r] memory refs
+ in cases where it won't work (TImode, TFmode, TDmode). */
+
+static void
+rs6000_eliminate_indexed_memrefs (rtx operands[2])
+{
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (XEXP (operands[0], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
+ && ! reload_in_progress)
+ operands[0]
+ = replace_equiv_address (operands[0],
+ copy_addr_to_reg (XEXP (operands[0], 0)));
+
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) != REG
+ && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
+ && ! reload_in_progress)
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_addr_to_reg (XEXP (operands[1], 0)));
+}
+
+/* Emit a move from SOURCE to DEST in mode MODE. */
+void
+rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
+{
+ rtx operands[2];
+ operands[0] = dest;
+ operands[1] = source;
+
+ /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && ! FLOAT_MODE_P (mode)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* FIXME. This should never happen. */
+ /* Since it seems that it does, do the safe thing and convert
+ to a CONST_INT. */
+ operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
+ }
+ gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
+ || FLOAT_MODE_P (mode)
+ || ((CONST_DOUBLE_HIGH (operands[1]) != 0
+ || CONST_DOUBLE_LOW (operands[1]) < 0)
+ && (CONST_DOUBLE_HIGH (operands[1]) != -1
+ || CONST_DOUBLE_LOW (operands[1]) >= 0)));
+
+ /* Check if GCC is setting up a block move that will end up using FP
+ registers as temporaries. We must make sure this is acceptable. */
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM
+ && mode == DImode
+ && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
+ || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
+ && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
+ ? 32 : MEM_ALIGN (operands[0])))
+ || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
+ ? 32
+ : MEM_ALIGN (operands[1]))))
+ && ! MEM_VOLATILE_P (operands [0])
+ && ! MEM_VOLATILE_P (operands [1]))
+ {
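+      /* Split the slow unaligned DImode copy into two naturally
+	 aligned SImode copies, at offsets 0 and 4.  */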
+ emit_move_insn (adjust_address (operands[0], SImode, 0),
+ adjust_address (operands[1], SImode, 0));
+ emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
+ adjust_address (copy_rtx (operands[1]), SImode, 4));
+ return;
+ }
+
+ if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
+ && !gpc_reg_operand (operands[1], mode))
+ operands[1] = force_reg (mode, operands[1]);
+
+ if (mode == SFmode && ! TARGET_POWERPC
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && GET_CODE (operands[0]) == MEM)
+ {
+ int regnum;
+
+ if (reload_in_progress || reload_completed)
+ regnum = true_regnum (operands[1]);
+ else if (GET_CODE (operands[1]) == REG)
+ regnum = REGNO (operands[1]);
+ else
+ regnum = -1;
+
+ /* If operands[1] is a register, on POWER it may have
+ double-precision data in it, so truncate it to single
+ precision. */
+ if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx newreg;
+ newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
+ : gen_reg_rtx (mode));
+ emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
+ operands[1] = newreg;
+ }
+ }
+
+ /* Recognize the case where operand[1] is a reference to thread-local
+ data and load its address to a register. */
+ if (rs6000_tls_referenced_p (operands[1]))
+ {
+ enum tls_model model;
+ rtx tmp = operands[1];
+ rtx addend = NULL;
+
+ if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
+ {
+ addend = XEXP (XEXP (tmp, 0), 1);
+ tmp = XEXP (XEXP (tmp, 0), 0);
+ }
+
+ gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
+ model = SYMBOL_REF_TLS_MODEL (tmp);
+ gcc_assert (model != 0);
+
+ tmp = rs6000_legitimize_tls_address (tmp, model);
+ if (addend)
+ {
+ tmp = gen_rtx_PLUS (mode, tmp, addend);
+ tmp = force_operand (tmp, operands[0]);
+ }
+ operands[1] = tmp;
+ }
+
+ /* Handle the case where reload calls us with an invalid address. */
+ if (reload_in_progress && mode == Pmode
+ && (! general_operand (operands[1], mode)
+ || ! nonimmediate_operand (operands[0], mode)))
+ goto emit_set;
+
+ /* 128-bit constant floating-point values on Darwin should really be
+ loaded as two parts. */
+ if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
+ && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
+ know how to get a DFmode SUBREG of a TFmode. */
+ enum machine_mode imode = (TARGET_E500_DOUBLE ? DFmode : DImode);
+ rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode, 0),
+ simplify_gen_subreg (imode, operands[1], mode, 0),
+ imode);
+ rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode,
+ GET_MODE_SIZE (imode)),
+ simplify_gen_subreg (imode, operands[1], mode,
+ GET_MODE_SIZE (imode)),
+ imode);
+ return;
+ }
+
+ if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
+ cfun->machine->sdmode_stack_slot =
+ eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
+
+ if (reload_in_progress
+ && mode == SDmode
+ && MEM_P (operands[0])
+ && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
+ && REG_P (operands[1]))
+ {
+ if (FP_REGNO_P (REGNO (operands[1])))
+ {
+ rtx mem = adjust_address_nv (operands[0], DDmode, 0);
+ mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
+ emit_insn (gen_movsd_store (mem, operands[1]));
+ }
+ else if (INT_REGNO_P (REGNO (operands[1])))
+ {
+ rtx mem = adjust_address_nv (operands[0], mode, 4);
+ mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
+ emit_insn (gen_movsd_hardfloat (mem, operands[1]));
+ }
+ else
+ gcc_unreachable();
+ return;
+ }
+ if (reload_in_progress
+ && mode == SDmode
+ && REG_P (operands[0])
+ && MEM_P (operands[1])
+ && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
+ {
+ if (FP_REGNO_P (REGNO (operands[0])))
+ {
+ rtx mem = adjust_address_nv (operands[1], DDmode, 0);
+ mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
+ emit_insn (gen_movsd_load (operands[0], mem));
+ }
+ else if (INT_REGNO_P (REGNO (operands[0])))
+ {
+ rtx mem = adjust_address_nv (operands[1], mode, 4);
+ mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
+ emit_insn (gen_movsd_hardfloat (operands[0], mem));
+ }
+ else
+ gcc_unreachable();
+ return;
+ }
+
+ /* FIXME: In the long term, this switch statement should go away
+ and be replaced by a sequence of tests based on things like
+ mode == Pmode. */
+ switch (mode)
+ {
+ case HImode:
+ case QImode:
+ if (CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != CONST_INT)
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case TFmode:
+ case TDmode:
+ rs6000_eliminate_indexed_memrefs (operands);
+ /* fall through */
+
+ case DFmode:
+ case DDmode:
+ case SFmode:
+ case SDmode:
+ if (CONSTANT_P (operands[1])
+ && ! easy_fp_constant (operands[1], mode))
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case V16QImode:
+ case V8HImode:
+ case V4SFmode:
+ case V4SImode:
+ case V4HImode:
+ case V2SFmode:
+ case V2SImode:
+ case V1DImode:
+ if (CONSTANT_P (operands[1])
+ && !easy_vector_constant (operands[1], mode))
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case SImode:
+ case DImode:
+ /* Use the default pattern for the address of ELF small data. */
+ if (TARGET_ELF
+ && mode == Pmode
+ && DEFAULT_ABI == ABI_V4
+ && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)
+ && small_data_operand (operands[1], mode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+
+ if (DEFAULT_ABI == ABI_V4
+ && mode == Pmode && mode == SImode
+ && flag_pic == 1 && got_operand (operands[1], mode))
+ {
+ emit_insn (gen_movsi_got (operands[0], operands[1]));
+ return;
+ }
+
+ if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
+ && TARGET_NO_TOC
+ && ! flag_pic
+ && mode == Pmode
+ && CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && GET_CODE (operands[1]) != CONST_INT)
+ {
+ rtx target = (!can_create_pseudo_p ()
+ ? operands[0]
+ : gen_reg_rtx (mode));
+
+ /* If this is a function address on -mcall-aixdesc,
+ convert it to the address of the descriptor. */
+ if (DEFAULT_ABI == ABI_AIX
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && XSTR (operands[1], 0)[0] == '.')
+ {
+ const char *name = XSTR (operands[1], 0);
+ rtx new_ref;
+ while (*name == '.')
+ name++;
+ new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
+ CONSTANT_POOL_ADDRESS_P (new_ref)
+ = CONSTANT_POOL_ADDRESS_P (operands[1]);
+ SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
+ SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
+ SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
+ operands[1] = new_ref;
+ }
+
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+#if TARGET_MACHO
+ if (MACHO_DYNAMIC_NO_PIC_P)
+ {
+ /* Take care of any required data indirection. */
+ operands[1] = rs6000_machopic_legitimize_pic_address (
+ operands[1], mode, operands[0]);
+ if (operands[0] != operands[1])
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0], operands[1]));
+ return;
+ }
+#endif
+ emit_insn (gen_macho_high (target, operands[1]));
+ emit_insn (gen_macho_low (operands[0], target, operands[1]));
+ return;
+ }
+
+ emit_insn (gen_elf_high (target, operands[1]));
+ emit_insn (gen_elf_low (operands[0], target, operands[1]));
+ return;
+ }
+
+ /* If this is a SYMBOL_REF that refers to a constant pool entry,
+ and we have put it in the TOC, we just need to make a TOC-relative
+ reference to it. */
+ if (TARGET_TOC
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && constant_pool_expr_p (operands[1])
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
+ get_pool_mode (operands[1])))
+ {
+ operands[1] = create_TOC_reference (operands[1]);
+ }
+ else if (mode == Pmode
+ && CONSTANT_P (operands[1])
+ && ((GET_CODE (operands[1]) != CONST_INT
+ && ! easy_fp_constant (operands[1], mode))
+ || (GET_CODE (operands[1]) == CONST_INT
+ && num_insns_constant (operands[1], mode) > 2)
+ || (GET_CODE (operands[0]) == REG
+ && FP_REGNO_P (REGNO (operands[0]))))
+ && GET_CODE (operands[1]) != HIGH
+ && ! legitimate_constant_pool_address_p (operands[1])
+ && ! toc_relative_expr_p (operands[1]))
+ {
+
+#if TARGET_MACHO
+ /* Darwin uses a special PIC legitimizer. */
+ if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
+ {
+ operands[1] =
+ rs6000_machopic_legitimize_pic_address (operands[1], mode,
+ operands[0]);
+ if (operands[0] != operands[1])
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+#endif
+
+ /* If we are to limit the number of things we put in the TOC and
+ this is a symbol plus a constant we can add in one insn,
+ just put the symbol in the TOC and add the constant. Don't do
+ this if reload is in progress. */
+ if (GET_CODE (operands[1]) == CONST
+ && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
+ && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
+ && ! side_effects_p (operands[0]))
+ {
+ rtx sym =
+ force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
+ rtx other = XEXP (XEXP (operands[1], 0), 1);
+
+ sym = force_reg (mode, sym);
+ if (mode == SImode)
+ emit_insn (gen_addsi3 (operands[0], sym, other));
+ else
+ emit_insn (gen_adddi3 (operands[0], sym, other));
+ return;
+ }
+
+ operands[1] = force_const_mem (mode, operands[1]);
+
+ if (TARGET_TOC
+ && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
+ && constant_pool_expr_p (XEXP (operands[1], 0))
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
+ get_pool_constant (XEXP (operands[1], 0)),
+ get_pool_mode (XEXP (operands[1], 0))))
+ {
+ operands[1]
+ = gen_const_mem (mode,
+ create_TOC_reference (XEXP (operands[1], 0)));
+ set_mem_alias_set (operands[1], get_TOC_alias_set ());
+ }
+ }
+ break;
+
+ case TImode:
+ rs6000_eliminate_indexed_memrefs (operands);
+
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode,
+ operands[0], operands[1]),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_SCRATCH (SImode)))));
+ return;
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Above, we may have called force_const_mem which may have returned
+ an invalid address. If we can, fix this up; otherwise, reload will
+ have to deal with it. */
+ if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
+ operands[1] = validize_mem (operands[1]);
+
+ emit_set:
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+}
+
+/* Nonzero if we can use a floating-point register to pass this arg. */
+#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
+ (SCALAR_FLOAT_MODE_P (MODE) \
+ && (CUM)->fregno <= FP_ARG_MAX_REG \
+ && TARGET_HARD_FLOAT && TARGET_FPRS)
+
+/* Nonzero if we can use an AltiVec register to pass this arg. */
+#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED) \
+ (ALTIVEC_VECTOR_MODE (MODE) \
+ && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
+ && TARGET_ALTIVEC_ABI \
+ && (NAMED))
+
+/* Return a nonzero value to say to return the function value in
+ memory, just as large structures are always returned. TYPE will be
+ the data type of the value, and FNTYPE will be the type of the
+ function doing the returning, or @code{NULL} for libcalls.
+
+ The AIX ABI for the RS/6000 specifies that all structures are
+ returned in memory. The Darwin ABI does the same. The SVR4 ABI
+ specifies that structures <= 8 bytes are returned in r3/r4, but a
+ draft put them in memory, and GCC used to implement the draft
+ instead of the final standard. Therefore, aix_struct_return
+ controls this instead of DEFAULT_ABI; V.4 targets needing backward
+ compatibility can change DRAFT_V4_STRUCT_RET to override the
+ default, and -m switches get the final word. See
+ rs6000_override_options for more details.
+
+ The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
+ long double support is enabled. These values are returned in memory.
+
+ int_size_in_bytes returns -1 for variable size objects, which go in
+ memory always. The cast to unsigned makes -1 > 8. */
+
+static bool
+rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ /* In the darwin64 abi, try to use registers for larger structs
+ if possible. */
+ if (rs6000_darwin64_abi
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed
+ as an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
+ if (valret)
+ return false;
+ /* Otherwise fall through to more conventional ABI rules. */
+ }
+
+ if (AGGREGATE_TYPE_P (type)
+ && (aix_struct_return
+ || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
+ return true;
+
+ /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
+ modes only exist for GCC vector types if -maltivec. */
+ if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
+ return false;
+
+ /* Return synthetic vectors in memory. */
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
+ {
+ static bool warned_for_return_big_vectors = false;
+ if (!warned_for_return_big_vectors)
+ {
+ warning (0, "GCC vector returned by reference: "
+ "non-standard ABI extension with no compatibility guarantee");
+ warned_for_return_big_vectors = true;
+ }
+ return true;
+ }
+
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
+ return true;
+
+ return false;
+}
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ For incoming args we set the number of arguments in the prototype large
+ so we never return a PARALLEL. */
+
+void
+init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
+ rtx libname ATTRIBUTE_UNUSED, int incoming,
+ int libcall, int n_named_args)
+{
+ static CUMULATIVE_ARGS zero_cumulative;
+
+ *cum = zero_cumulative;
+ cum->words = 0;
+ cum->fregno = FP_ARG_MIN_REG;
+ cum->vregno = ALTIVEC_ARG_MIN_REG;
+ cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
+ cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
+ ? CALL_LIBCALL : CALL_NORMAL);
+ cum->sysv_gregno = GP_ARG_MIN_REG;
+ cum->stdarg = fntype
+ && (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ cum->nargs_prototype = 0;
+ if (incoming || cum->prototype)
+ cum->nargs_prototype = n_named_args;
+
+ /* Check for a longcall attribute. */
+ if ((!fntype && rs6000_default_long_calls)
+ || (fntype
+ && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
+ && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
+ cum->call_cookie |= CALL_LONG;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "\ninit_cumulative_args:");
+ if (fntype)
+ {
+ tree ret_type = TREE_TYPE (fntype);
+ fprintf (stderr, " ret code = %s,",
+ tree_code_name[ (int)TREE_CODE (ret_type) ]);
+ }
+
+ if (cum->call_cookie & CALL_LONG)
+ fprintf (stderr, " longcall,");
+
+ fprintf (stderr, " proto = %d, nargs = %d\n",
+ cum->prototype, cum->nargs_prototype);
+ }
+
+ if (fntype
+ && !TARGET_ALTIVEC
+ && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
+ {
+ error ("cannot return value in vector register because"
+ " altivec instructions are disabled, use -maltivec"
+ " to enable them");
+ }
+}
+
+/* Return true if TYPE must be passed on the stack and not in registers. */
+
+static bool
+rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
+{
+ if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
+ return must_pass_in_stack_var_size (mode, type);
+ else
+ return must_pass_in_stack_var_size_or_pad (mode, type);
+}
+
+/* If defined, a C expression which determines whether, and in which
+ direction, to pad out an argument with extra space. The value
+ should be of type `enum direction': either `upward' to pad above
+ the argument, `downward' to pad below, or `none' to inhibit
+ padding.
+
+ For the AIX ABI structs are always stored left shifted in their
+ argument slot. */
+
+enum direction
+function_arg_padding (enum machine_mode mode, const_tree type)
+{
+#ifndef AGGREGATE_PADDING_FIXED
+#define AGGREGATE_PADDING_FIXED 0
+#endif
+#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
+#define AGGREGATES_PAD_UPWARD_ALWAYS 0
+#endif
+
+ if (!AGGREGATE_PADDING_FIXED)
+ {
+ /* GCC used to pass structures of the same size as integer types as
+ if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
+ i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
+ passed padded downward, except that -mstrict-align further
+ muddied the water in that multi-component structures of 2 and 4
+ bytes in size were passed padded upward.
+
+ The following arranges for best compatibility with previous
+ versions of gcc, but removes the -mstrict-align dependency. */
+ if (BYTES_BIG_ENDIAN)
+ {
+ HOST_WIDE_INT size = 0;
+
+ if (mode == BLKmode)
+ {
+ if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
+ size = int_size_in_bytes (type);
+ }
+ else
+ size = GET_MODE_SIZE (mode);
+
+ if (size == 1 || size == 2 || size == 4)
+ return downward;
+ }
+ return upward;
+ }
+
+ if (AGGREGATES_PAD_UPWARD_ALWAYS)
+ {
+ if (type != 0 && AGGREGATE_TYPE_P (type))
+ return upward;
+ }
+
+ /* Fall back to the default. */
+ return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
+}
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments.
+
+ V.4 wants long longs and doubles to be double word aligned. Just
+ testing the mode size is a boneheaded way to do this as it means
+ that other types such as complex int are also double word aligned.
+ However, we're stuck with this because changing the ABI might break
+ existing library interfaces.
+
+ Doubleword align SPE vectors.
+ Quadword align Altivec vectors.
+ Quadword align large synthetic vector types. */
+
+int
+function_arg_boundary (enum machine_mode mode, tree type)
+{
+ if (DEFAULT_ABI == ABI_V4
+ && (GET_MODE_SIZE (mode) == 8
+ || (TARGET_HARD_FLOAT
+ && TARGET_FPRS
+ && (mode == TFmode || mode == TDmode))))
+ return 64;
+ else if (SPE_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) >= 8
+ && int_size_in_bytes (type) < 16))
+ return 64;
+ else if (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) >= 16))
+ return 128;
+ else if (rs6000_darwin64_abi && mode == BLKmode
+ && type && TYPE_ALIGN (type) > 64)
+ return 128;
+ else
+ return PARM_BOUNDARY;
+}
+
+/* For a function parm of MODE and TYPE, return the starting word in
+ the parameter area. NWORDS of the parameter area are already used. */
+
+static unsigned int
+rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
+{
+ unsigned int align;
+ unsigned int parm_offset;
+
+ align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
+ parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
+ return nwords + (-(parm_offset + nwords) & align);
+}
+
+/* Compute the size (in words) of a function argument. */
+
+static unsigned long
+rs6000_arg_size (enum machine_mode mode, tree type)
+{
+ unsigned long size;
+
+ if (mode != BLKmode)
+ size = GET_MODE_SIZE (mode);
+ else
+ size = int_size_in_bytes (type);
+
+ if (TARGET_32BIT)
+ return (size + 3) >> 2;
+ else
+ return (size + 7) >> 3;
+}
+
+/* Use this to flush pending int fields. */
+
+static void
+rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos)
+{
+ unsigned int startbit, endbit;
+ int intregs, intoffset;
+ enum machine_mode mode;
+
+ if (cum->intoffset == -1)
+ return;
+
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+ Move intoffset back to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ }
+ }
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
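+  /* E.g. (illustrative, with 32-bit words): pending ints spanning
+     bits [0, 96) flush as three words.  */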
+ cum->words += intregs;
+}
+
+/* The darwin64 ABI calls for us to recurse down through structs,
+ looking for elements passed in registers. Unfortunately, we have
+ to track int register count here also because of misalignments
+ in powerpc alignment mode. */
+
+static void
+rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
+ tree type,
+ HOST_WIDE_INT startbitpos)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode;
+ if (ftype == error_mark_node)
+ continue;
+ mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
+ else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
+ }
+ else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
+ {
+ rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
+ cum->vregno++;
+ cum->words += 2;
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.)
+
+ Note that for args passed by reference, function_arg will be called
+ with MODE and TYPE set to that of the pointer to the arg, not the arg
+ itself. */
+
+void
+function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named, int depth)
+{
+ int size;
+
+ /* Only tick off an argument if we're not recursing. */
+ if (depth == 0)
+ cum->nargs_prototype--;
+
+ if (TARGET_ALTIVEC_ABI
+ && (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) == 16)))
+ {
+ bool stack = false;
+
+ if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
+ {
+ cum->vregno++;
+ if (!TARGET_ALTIVEC)
+ error ("cannot pass argument in vector register because"
+ " altivec instructions are disabled, use -maltivec"
+ " to enable them");
+
+ /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
+ even if it is going to be passed in a vector register.
+ Darwin does the same for variable-argument functions. */
+ if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
+ || (cum->stdarg && DEFAULT_ABI != ABI_V4))
+ stack = true;
+ }
+ else
+ stack = true;
+
+ if (stack)
+ {
+ int align;
+
+ /* Vector parameters must be 16-byte aligned. This places
+ them at 2 mod 4 in terms of words in 32-bit mode, since
+ the parameter save area starts at offset 24 from the
+ stack. In 64-bit mode, they just have to start on an
+ even word, since the parameter save area is 16-byte
+ aligned. Space for GPRs is reserved even if the argument
+ will be passed in memory. */
+ if (TARGET_32BIT)
+ align = (2 - cum->words) & 3;
+ else
+ align = cum->words & 1;
+ cum->words += align + rs6000_arg_size (mode, type);
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, align=%d, ",
+ cum->words, align);
+ fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
+ cum->nargs_prototype, cum->prototype,
+ GET_MODE_NAME (mode));
+ }
+ }
+ }
+ else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
+ && !cum->stdarg
+ && cum->sysv_gregno <= GP_ARG_MAX_REG)
+ cum->sysv_gregno++;
+
+ else if (rs6000_darwin64_abi
+ && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && (size = int_size_in_bytes (type)) > 0)
+ {
+ /* Variable-sized types have size == -1 and are
+ treated as if they consisted entirely of ints.
+ Pad to a 16-byte boundary if needed. */
+ if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+ /* For varargs, we can just go up by the size of the struct. */
+ if (!named)
+ cum->words += (size + 7) / 8;
+ else
+ {
+ /* It is tempting to say the int register count just goes up by
+ sizeof(type)/8, but this is wrong in a case such as
+ { int; double; int; } [powerpc alignment]. We have to
+ grovel through the fields for these too. */
+ cum->intoffset = 0;
+ rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
+ rs6000_darwin64_record_arg_advance_flush (cum,
+ size * BITS_PER_UNIT);
+ }
+ }
+ else if (DEFAULT_ABI == ABI_V4)
+ {
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && ((TARGET_SINGLE_FLOAT && mode == SFmode)
+ || (TARGET_DOUBLE_FLOAT && mode == DFmode)
+ || (mode == TFmode && !TARGET_IEEEQUAD)
+ || mode == SDmode || mode == DDmode || mode == TDmode))
+ {
+ /* _Decimal128 must use an even/odd register pair. This assumes
+ that the register number is odd when fregno is odd. */
+ if (mode == TDmode && (cum->fregno % 2) == 1)
+ cum->fregno++;
+
+ if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
+ <= FP_ARG_V4_MAX_REG)
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ else
+ {
+ cum->fregno = FP_ARG_V4_MAX_REG + 1;
+ if (mode == DFmode || mode == TFmode
+ || mode == DDmode || mode == TDmode)
+ cum->words += cum->words & 1;
+ cum->words += rs6000_arg_size (mode, type);
+ }
+ }
+ else
+ {
+ int n_words = rs6000_arg_size (mode, type);
+ int gregno = cum->sysv_gregno;
+
+ /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
+ (r7,r8) or (r9,r10). So is any other 2-word item, such
+ as complex int, due to a historical mistake. */
+ if (n_words == 2)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ {
+ /* Long long and SPE vectors are aligned on the stack.
+ So are other 2-word items, such as complex int, due to
+ a historical mistake. */
+ if (n_words == 2)
+ cum->words += cum->words & 1;
+ cum->words += n_words;
+ }
+
+ /* Note: we continue to accumulate gregno even once we have
+ started spilling to the stack; this indicates to
+ expand_builtin_saveregs that spilling has begun. */
+ cum->sysv_gregno = gregno + n_words;
+ }
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
+ cum->words, cum->fregno);
+ fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
+ cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
+ fprintf (stderr, "mode = %4s, named = %d\n",
+ GET_MODE_NAME (mode), named);
+ }
+ }
+ else
+ {
+ int n_words = rs6000_arg_size (mode, type);
+ int start_words = cum->words;
+ int align_words = rs6000_parm_start (mode, type, start_words);
+
+ cum->words = align_words + n_words;
+
+ if (SCALAR_FLOAT_MODE_P (mode)
+ && TARGET_HARD_FLOAT && TARGET_FPRS)
+ {
+ /* _Decimal128 must be passed in an even/odd float register pair.
+ This assumes that the register number is odd when fregno is
+ odd. */
+ if (mode == TDmode && (cum->fregno % 2) == 1)
+ cum->fregno++;
+ cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
+ }
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
+ cum->words, cum->fregno);
+ fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
+ cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
+ fprintf (stderr, "named = %d, align = %d, depth = %d\n",
+ named, align_words - start_words, depth);
+ }
+ }
+}
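+
+/* For illustration, the 32-bit vector alignment formula above: with
+ cum->words == 3, align = (2 - 3) & 3 == 3, so the vector starts at
+ word 6, i.e. byte offset 24 + 6*4 == 48 from the stack pointer,
+ which is 16-byte aligned (and 6 mod 4 == 2, as the comment says).
+ In 64-bit mode the save area itself is 16-byte aligned, so padding
+ to an even word (cum->words & 1) is sufficient. */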
+
+static rtx
+spe_build_register_parallel (enum machine_mode mode, int gregno)
+{
+ rtx r1, r3, r5, r7;
+
+ switch (mode)
+ {
+ case DFmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
+
+ case DCmode:
+ case TFmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ r3 = gen_rtx_REG (DImode, gregno + 2);
+ r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
+
+ case TCmode:
+ r1 = gen_rtx_REG (DImode, gregno);
+ r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
+ r3 = gen_rtx_REG (DImode, gregno + 2);
+ r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
+ r5 = gen_rtx_REG (DImode, gregno + 4);
+ r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
+ r7 = gen_rtx_REG (DImode, gregno + 6);
+ r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
+
+ default:
+ gcc_unreachable ();
+ }
+}
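+
+/* For illustration, for DCmode with a hypothetical gregno of 5 the
+ function above returns
+ (parallel:DC [(expr_list (reg:DI r5) (const_int 0))
+ (expr_list (reg:DI r7) (const_int 8))])
+ i.e. the low 8 bytes in the r5/r6 pair and the high 8 bytes in
+ r7/r8, each pair described by a single DImode reference. */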
+
+/* Determine where to put a SIMD argument on the SPE. */
+static rtx
+rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type)
+{
+ int gregno = cum->sysv_gregno;
+
+ /* On E500 v2, double arithmetic is done on the full 64-bit GPR,
+ but doubles are passed and returned in a pair of GPRs for ABI
+ compatibility. */
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
+ || mode == DCmode || mode == TCmode))
+ {
+ int n_words = rs6000_arg_size (mode, type);
+
+ /* Doubles go in an odd/even register pair (r5/r6, etc). */
+ if (mode == DFmode)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ return NULL_RTX;
+
+ return spe_build_register_parallel (mode, gregno);
+ }
+ if (cum->stdarg)
+ {
+ int n_words = rs6000_arg_size (mode, type);
+
+ /* SPE vectors are put in odd registers. */
+ if (n_words == 2 && (gregno & 1) == 0)
+ gregno += 1;
+
+ if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
+ {
+ rtx r1, r2;
+ enum machine_mode m = SImode;
+
+ r1 = gen_rtx_REG (m, gregno);
+ r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
+ r2 = gen_rtx_REG (m, gregno + 1);
+ r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
+ }
+ else
+ return NULL_RTX;
+ }
+ else
+ {
+ if (gregno <= GP_ARG_MAX_REG)
+ return gen_rtx_REG (mode, gregno);
+ else
+ return NULL_RTX;
+ }
+}
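+
+/* For illustration, in the stdarg case above with a hypothetical
+ gregno of 4, a 2-word SPE vector is bumped to the odd register r5
+ and comes back as
+ (parallel [(expr_list (reg:SI r5) (const_int 0))
+ (expr_list (reg:SI r6) (const_int 4))])
+ so that both halves land contiguously when spilled for va_arg. */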
+
+/* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
+ structure between cum->intoffset and bitpos to integer registers. */
+
+static void
+rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
+ HOST_WIDE_INT bitpos, rtx rvec[], int *k)
+{
+ enum machine_mode mode;
+ unsigned int regno;
+ unsigned int startbit, endbit;
+ int this_regno, intregs, intoffset;
+ rtx reg;
+
+ if (cum->intoffset == -1)
+ return;
+
+ intoffset = cum->intoffset;
+ cum->intoffset = -1;
+
+ /* If this is the trailing part of a word, try to load only that
+ much into the register. Otherwise load the whole register. Note
+ that in the latter case we may pick up unwanted bits. It's not a
+ problem at the moment, but we may wish to revisit this. */
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
+ MODE_INT, 0);
+ if (mode == BLKmode)
+ {
+ /* We couldn't find an appropriate mode, which happens,
+ e.g., in packed structs when there are 3 bytes to load.
+ Force intoffset back to the beginning of the word in this
+ case. */
+ intoffset = intoffset & -BITS_PER_WORD;
+ mode = word_mode;
+ }
+ }
+ else
+ mode = word_mode;
+
+ startbit = intoffset & -BITS_PER_WORD;
+ endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
+ intregs = (endbit - startbit) / BITS_PER_WORD;
+ this_regno = cum->words + intoffset / BITS_PER_WORD;
+
+ if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
+ cum->use_stack = 1;
+
+ intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
+ if (intregs <= 0)
+ return;
+
+ intoffset /= BITS_PER_UNIT;
+ do
+ {
+ regno = GP_ARG_MIN_REG + this_regno;
+ reg = gen_rtx_REG (mode, regno);
+ rvec[(*k)++] =
+ gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
+
+ this_regno += 1;
+ intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
+ mode = word_mode;
+ intregs -= 1;
+ }
+ while (intregs > 0);
+}
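+
+/* For illustration, assuming BITS_PER_WORD == 64: if the int fields
+ start at bit 96 and the record ends at bit 192, mode_for_size is
+ asked for 64 - 96 % 64 == 32 bits and picks SImode, so the first
+ element loads only the trailing 4 bytes at byte offset 12;
+ intoffset then rounds up to 16 and the second element loads a full
+ word. startbit == 64 and endbit == 192 give intregs == 2. */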
+
+/* Recursive workhorse for the following. */
+
+static void
+rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
+ HOST_WIDE_INT startbitpos, rtx rvec[],
+ int *k)
+{
+ tree f;
+
+ for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
+ if (TREE_CODE (f) == FIELD_DECL)
+ {
+ HOST_WIDE_INT bitpos = startbitpos;
+ tree ftype = TREE_TYPE (f);
+ enum machine_mode mode;
+ if (ftype == error_mark_node)
+ continue;
+ mode = TYPE_MODE (ftype);
+
+ if (DECL_SIZE (f) != 0
+ && host_integerp (bit_position (f), 1))
+ bitpos += int_bit_position (f);
+
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (ftype) == RECORD_TYPE)
+ rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
+ else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
+ {
+#if 0
+ switch (mode)
+ {
+ case SCmode: mode = SFmode; break;
+ case DCmode: mode = DFmode; break;
+ case TCmode: mode = TFmode; break;
+ default: break;
+ }
+#endif
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->fregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ if (mode == TFmode || mode == TDmode)
+ cum->fregno++;
+ }
+ else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
+ {
+ rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
+ rvec[(*k)++]
+ = gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno++),
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ }
+ else if (cum->intoffset == -1)
+ cum->intoffset = bitpos;
+ }
+}
+
+/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
+ the register(s) to be used for each field and subfield of a struct
+ being passed by value, along with the offset of where the
+ register's value may be found in the block. FP fields go in FP
+ register, vector fields go in vector registers, and everything
+ else goes in int registers, packed as in memory.
+
+ This code is also used for function return values. RETVAL indicates
+ whether this is the case.
+
+ Much of this is taken from the SPARC V9 port, which has a similar
+ calling convention. */
+
+static rtx
+rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
+ int named, bool retval)
+{
+ rtx rvec[FIRST_PSEUDO_REGISTER];
+ int k = 1, kbase = 1;
+ HOST_WIDE_INT typesize = int_size_in_bytes (type);
+ /* This is a copy; modifications are not visible to our caller. */
+ CUMULATIVE_ARGS copy_cum = *orig_cum;
+ CUMULATIVE_ARGS *cum = &copy_cum;
+
+ /* Pad to 16 byte boundary if needed. */
+ if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
+ && (cum->words % 2) != 0)
+ cum->words++;
+
+ cum->intoffset = 0;
+ cum->use_stack = 0;
+ cum->named = named;
+
+ /* Put entries into rvec[] for individual FP and vector fields, and
+ for the chunks of memory that go in int regs. Note we start at
+ element 1; 0 is reserved for an indication of using memory, and
+ may or may not be filled in below. */
+ rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
+ rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
+
+ /* If any part of the struct went on the stack put all of it there.
+ This hack is because the generic code for
+ FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
+ parts of the struct are not at the beginning. */
+ if (cum->use_stack)
+ {
+ if (retval)
+ return NULL_RTX; /* doesn't go in registers at all */
+ kbase = 0;
+ rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ }
+ if (k > 1 || cum->use_stack)
+ return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
+ else
+ return NULL_RTX;
+}
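+
+/* For illustration, assuming this is the first named argument:
+ struct { double d; int i; } yields
+ (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
+ (expr_list (reg:DI r4) (const_int 8))])
+ so d is fetched from f1 while the word holding i (and its padding)
+ is fetched from r4, matching the in-memory layout byte for byte. */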
+
+/* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
+
+static rtx
+rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
+{
+ int n_units;
+ int i, k;
+ rtx rvec[GP_ARG_NUM_REG + 1];
+
+ if (align_words >= GP_ARG_NUM_REG)
+ return NULL_RTX;
+
+ n_units = rs6000_arg_size (mode, type);
+
+ /* Optimize the simple case where the arg fits in one gpr, except in
+ the case of BLKmode due to assign_parms assuming that registers are
+ BITS_PER_WORD wide. */
+ if (n_units == 0
+ || (n_units == 1 && mode != BLKmode))
+ return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+
+ k = 0;
+ if (align_words + n_units > GP_ARG_NUM_REG)
+ /* Not all of the arg fits in gprs. Say that it goes in memory too,
+ using a magic NULL_RTX component.
+ This is not strictly correct. Only some of the arg belongs in
+ memory, not all of it. However, the normal scheme using
+ function_arg_partial_nregs can result in unusual subregs, e.g.
+ (subreg:SI (reg:DF) 4), which are not handled well. The code to
+ store the whole arg to memory is often more efficient than code
+ to store pieces, and we know that space is available in the right
+ place for the whole arg. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+
+ i = 0;
+ do
+ {
+ rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
+ rtx off = GEN_INT (i++ * 4);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
+ }
+ while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
+
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
+}
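+
+/* For illustration, with GP_ARG_NUM_REG == 8: a DFmode argument
+ (n_units == 2) arriving at align_words == 7 only half fits, so we
+ build
+ (parallel:DF [(expr_list (nil) (const_int 0))
+ (expr_list (reg:SI r10) (const_int 0))])
+ where the NULL_RTX element records that the value also lives in
+ memory and the second element names the half that is in r10. */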
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called. It is
+ not modified in this routine.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On RS/6000 the first eight words of non-FP are normally in registers
+ and the rest are pushed. Under AIX, the first 13 FP args are in registers.
+ Under V.4, the first 8 FP args are in registers.
+
+ If this is floating-point and no prototype is specified, we use
+ both an FP and integer register (or possibly FP reg and stack). Library
+ functions (when CALL_LIBCALL is set) always have the proper types for args,
+ so we can pass the FP value just in one register. emit_library_function
+ doesn't support PARALLEL anyway.
+
+ Note that for args passed by reference, function_arg will be called
+ with MODE and TYPE set to that of the pointer to the arg, not the arg
+ itself. */
+
+rtx
+function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int named)
+{
+ enum rs6000_abi abi = DEFAULT_ABI;
+
+ /* Return a marker to indicate whether the bit in CR1 that V.4 uses
+ to say fp args were passed in registers needs to be set or cleared.
+ Assume that we don't need the marker for software floating point,
+ or compiler-generated library calls. */
+ if (mode == VOIDmode)
+ {
+ if (abi == ABI_V4
+ && (cum->call_cookie & CALL_LIBCALL) == 0
+ && (cum->stdarg
+ || (cum->nargs_prototype < 0
+ && (cum->prototype || TARGET_NO_PROTOTYPE))))
+ {
+ /* For the SPE, we need to crxor CR6 always. */
+ if (TARGET_SPE_ABI)
+ return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
+ else if (TARGET_HARD_FLOAT && TARGET_FPRS)
+ return GEN_INT (cum->call_cookie
+ | ((cum->fregno == FP_ARG_MIN_REG)
+ ? CALL_V4_SET_FP_ARGS
+ : CALL_V4_CLEAR_FP_ARGS));
+ }
+
+ return GEN_INT (cum->call_cookie);
+ }
+
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE)
+ {
+ rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
+ if (rslt != NULL_RTX)
+ return rslt;
+ /* Else fall through to usual handling. */
+ }
+
+ if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
+ if (TARGET_64BIT && ! cum->prototype)
+ {
+ /* Vector parameters get passed in a vector register
+ and also in GPRs or memory, in the absence of a prototype. */
+ int align_words;
+ rtx slot;
+ align_words = (cum->words + 1) & ~1;
+
+ if (align_words >= GP_ARG_NUM_REG)
+ {
+ slot = NULL_RTX;
+ }
+ else
+ {
+ slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ }
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ slot, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->vregno),
+ const0_rtx)));
+ }
+ else
+ return gen_rtx_REG (mode, cum->vregno);
+ else if (TARGET_ALTIVEC_ABI
+ && (ALTIVEC_VECTOR_MODE (mode)
+ || (type && TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) == 16)))
+ {
+ if (named || abi == ABI_V4)
+ return NULL_RTX;
+ else
+ {
+ /* Vector parameters to varargs functions under AIX or Darwin
+ get passed in memory and possibly also in GPRs. */
+ int align, align_words, n_words;
+ enum machine_mode part_mode;
+
+ /* Vector parameters must be 16-byte aligned. This places them at
+ 2 mod 4 in terms of words in 32-bit mode, since the parameter
+ save area starts at offset 24 from the stack pointer. In 64-bit
+ mode, they just have to start on an even word, since the
+ parameter save area is 16-byte aligned. */
+ if (TARGET_32BIT)
+ align = (2 - cum->words) & 3;
+ else
+ align = cum->words & 1;
+ align_words = cum->words + align;
+
+ /* Out of registers? Memory, then. */
+ if (align_words >= GP_ARG_NUM_REG)
+ return NULL_RTX;
+
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type, align_words);
+
+ /* The vector value goes in GPRs. Only the part of the
+ value in GPRs is reported here. */
+ part_mode = mode;
+ n_words = rs6000_arg_size (mode, type);
+ if (align_words + n_words > GP_ARG_NUM_REG)
+ /* Fortunately, there are only two possibilities, the value
+ is either wholly in GPRs or half in GPRs and half not. */
+ part_mode = DImode;
+
+ return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
+ }
+ }
+ else if (TARGET_SPE_ABI && TARGET_SPE
+ && (SPE_VECTOR_MODE (mode)
+ || (TARGET_E500_DOUBLE && (mode == DFmode
+ || mode == DCmode
+ || mode == TFmode
+ || mode == TCmode))))
+ return rs6000_spe_function_arg (cum, mode, type);
+
+ else if (abi == ABI_V4)
+ {
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && ((TARGET_SINGLE_FLOAT && mode == SFmode)
+ || (TARGET_DOUBLE_FLOAT && mode == DFmode)
+ || (mode == TFmode && !TARGET_IEEEQUAD)
+ || mode == SDmode || mode == DDmode || mode == TDmode))
+ {
+ /* _Decimal128 must use an even/odd register pair. This assumes
+ that the register number is odd when fregno is odd. */
+ if (mode == TDmode && (cum->fregno % 2) == 1)
+ cum->fregno++;
+
+ if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
+ <= FP_ARG_V4_MAX_REG)
+ return gen_rtx_REG (mode, cum->fregno);
+ else
+ return NULL_RTX;
+ }
+ else
+ {
+ int n_words = rs6000_arg_size (mode, type);
+ int gregno = cum->sysv_gregno;
+
+ /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
+ (r7,r8) or (r9,r10). As does any other 2 word item such
+ as complex int due to a historical mistake. */
+ if (n_words == 2)
+ gregno += (1 - gregno) & 1;
+
+ /* Multi-reg args are not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ return NULL_RTX;
+
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type,
+ gregno - GP_ARG_MIN_REG);
+ return gen_rtx_REG (mode, gregno);
+ }
+ }
+ else
+ {
+ int align_words = rs6000_parm_start (mode, type, cum->words);
+
+ /* _Decimal128 must be passed in an even/odd float register pair.
+ This assumes that the register number is odd when fregno is odd. */
+ if (mode == TDmode && (cum->fregno % 2) == 1)
+ cum->fregno++;
+
+ if (USE_FP_FOR_ARG_P (cum, mode, type))
+ {
+ rtx rvec[GP_ARG_NUM_REG + 1];
+ rtx r;
+ int k;
+ bool needs_psave;
+ enum machine_mode fmode = mode;
+ unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
+
+ if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
+ {
+ /* Currently, we only ever need one reg here because complex
+ doubles are split. */
+ gcc_assert (cum->fregno == FP_ARG_MAX_REG
+ && (fmode == TFmode || fmode == TDmode));
+
+ /* Long double or _Decimal128 split over regs and memory. */
+ fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
+ }
+
+ /* Do we also need to pass this arg in the parameter save
+ area? */
+ needs_psave = (type
+ && (cum->nargs_prototype <= 0
+ || (DEFAULT_ABI == ABI_AIX
+ && TARGET_XL_COMPAT
+ && align_words >= GP_ARG_NUM_REG)));
+
+ if (!needs_psave && mode == fmode)
+ return gen_rtx_REG (fmode, cum->fregno);
+
+ k = 0;
+ if (needs_psave)
+ {
+ /* Describe the part that goes in gprs or the stack.
+ This piece must come first, before the fprs. */
+ if (align_words < GP_ARG_NUM_REG)
+ {
+ unsigned long n_words = rs6000_arg_size (mode, type);
+
+ if (align_words + n_words > GP_ARG_NUM_REG
+ || (TARGET_32BIT && TARGET_POWERPC64))
+ {
+ /* If this is partially on the stack, then we only
+ include the portion actually in registers here. */
+ enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
+ rtx off;
+ int i = 0;
+ if (align_words + n_words > GP_ARG_NUM_REG)
+ /* Not all of the arg fits in gprs. Say that it
+ goes in memory too, using a magic NULL_RTX
+ component. Also see comment in
+ rs6000_mixed_function_arg for why the normal
+ function_arg_partial_nregs scheme doesn't work
+ in this case. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
+ const0_rtx);
+ do
+ {
+ r = gen_rtx_REG (rmode,
+ GP_ARG_MIN_REG + align_words);
+ off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
+ }
+ while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
+ }
+ else
+ {
+ /* The whole arg fits in gprs. */
+ r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
+ }
+ }
+ else
+ /* It's entirely in memory. */
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ }
+
+ /* Describe where this piece goes in the fprs. */
+ r = gen_rtx_REG (fmode, cum->fregno);
+ rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
+
+ return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
+ }
+ else if (align_words < GP_ARG_NUM_REG)
+ {
+ if (TARGET_32BIT && TARGET_POWERPC64)
+ return rs6000_mixed_function_arg (mode, type, align_words);
+
+ if (mode == BLKmode)
+ mode = Pmode;
+
+ return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ }
+ else
+ return NULL_RTX;
+ }
+}
+
+/* For an arg passed partly in registers and partly in memory, this is
+ the number of bytes passed in registers. For args passed entirely in
+ registers or entirely in memory, zero. When an arg is described by a
+ PARALLEL, perhaps using more than one register type, this function
+ returns the number of bytes used by the first element of the PARALLEL. */
+
+static int
+rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, bool named)
+{
+ int ret = 0;
+ int align_words;
+
+ if (DEFAULT_ABI == ABI_V4)
+ return 0;
+
+ if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
+ && cum->nargs_prototype >= 0)
+ return 0;
+
+ /* In this complicated case we just disable the partial_nregs code. */
+ if (rs6000_darwin64_abi && mode == BLKmode
+ && TREE_CODE (type) == RECORD_TYPE
+ && int_size_in_bytes (type) > 0)
+ return 0;
+
+ align_words = rs6000_parm_start (mode, type, cum->words);
+
+ if (USE_FP_FOR_ARG_P (cum, mode, type))
+ {
+ /* If we are passing this arg in the fixed parameter save area
+ (gprs or memory) as well as fprs, then this function should
+ return the number of partial bytes passed in the parameter
+ save area rather than partial bytes passed in fprs. */
+ if (type
+ && (cum->nargs_prototype <= 0
+ || (DEFAULT_ABI == ABI_AIX
+ && TARGET_XL_COMPAT
+ && align_words >= GP_ARG_NUM_REG)))
+ return 0;
+ else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
+ > FP_ARG_MAX_REG + 1)
+ ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
+ else if (cum->nargs_prototype >= 0)
+ return 0;
+ }
+
+ if (align_words < GP_ARG_NUM_REG
+ && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
+ ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
+
+ if (ret != 0 && TARGET_DEBUG_ARG)
+ fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
+
+ return ret;
+}
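+
+/* For illustration: in 32-bit mode a 5-word BLKmode struct arriving
+ at align_words == 6 has (8 - 6) * 4 == 8 bytes in r9/r10, which is
+ what this returns; the remaining 3 words live in the parameter
+ save area in memory. */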
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type.
+
+ Under V.4, aggregates and long double are passed by reference.
+
+ As an extension to all 32-bit ABIs, AltiVec vectors are passed by
+ reference unless the AltiVec vector extension ABI is in force.
+
+ As an extension to all ABIs, variable sized types are passed by
+ reference. */
+
+static bool
+rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
+ return 1;
+ }
+
+ if (!type)
+ return 0;
+
+ if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
+ return 1;
+ }
+
+ if (int_size_in_bytes (type) < 0)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
+ return 1;
+ }
+
+ /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
+ modes only exist for GCC vector types if -maltivec. */
+ if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
+ return 1;
+ }
+
+ /* Pass synthetic vectors in memory. */
+ if (TREE_CODE (type) == VECTOR_TYPE
+ && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
+ {
+ static bool warned_for_pass_big_vectors = false;
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
+ if (!warned_for_pass_big_vectors)
+ {
+ warning (0, "GCC vector passed by reference: "
+ "non-standard ABI extension with no compatibility guarantee");
+ warned_for_pass_big_vectors = true;
+ }
+ return 1;
+ }
+
+ return 0;
+}
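+
+/* For illustration: given
+ struct big { int x[8]; };
+ void f (struct big b);
+ a V.4 caller copies b into a temporary and passes its address.
+ The same happens for IEEE-quad long double (TFmode), for
+ variable-sized types, and for the oversized synthetic vectors
+ warned about above; everything else falls through and is passed
+ by value. */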
+
+static void
+rs6000_move_block_from_reg (int regno, rtx x, int nregs)
+{
+ int i;
+ enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
+
+ if (nregs == 0)
+ return;
+
+ for (i = 0; i < nregs; i++)
+ {
+ rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
+ if (reload_completed)
+ {
+ if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
+ tem = NULL_RTX;
+ else
+ tem = simplify_gen_subreg (reg_mode, x, BLKmode,
+ i * GET_MODE_SIZE (reg_mode));
+ }
+ else
+ tem = replace_equiv_address (tem, XEXP (tem, 0));
+
+ gcc_assert (tem);
+
+ emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
+ }
+}
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed. */
+
+static void
+setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
+ tree type, int *pretend_size ATTRIBUTE_UNUSED,
+ int no_rtl)
+{
+ CUMULATIVE_ARGS next_cum;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ rtx save_area = NULL_RTX, mem;
+ int first_reg_offset;
+ alias_set_type set;
+
+ /* Skip the last named argument. */
+ next_cum = *cum;
+ function_arg_advance (&next_cum, mode, type, 1, 0);
+
+ if (DEFAULT_ABI == ABI_V4)
+ {
+ first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
+
+ if (! no_rtl)
+ {
+ int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
+ HOST_WIDE_INT offset = 0;
+
+ /* Try to optimize the size of the varargs save area.
+ The ABI requires that ap.reg_save_area is doubleword
+ aligned, but we don't need to allocate space for all
+ the bytes, only for those to which we will actually
+ save anything. */
+ if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
+ gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG
+ && cfun->va_list_fpr_size)
+ {
+ if (gpr_reg_num)
+ fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD;
+ if (cfun->va_list_fpr_size
+ < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
+ fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
+ else
+ fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
+ * UNITS_PER_FP_WORD;
+ }
+ if (gpr_reg_num)
+ {
+ offset = -((first_reg_offset * reg_size) & ~7);
+ if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
+ {
+ gpr_reg_num = cfun->va_list_gpr_size;
+ if (reg_size == 4 && (first_reg_offset & 1))
+ gpr_reg_num++;
+ }
+ gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
+ }
+ else if (fpr_size)
+ offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD
+ - (int) (GP_ARG_NUM_REG * reg_size);
+
+ if (gpr_size + fpr_size)
+ {
+ rtx reg_save_area
+ = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
+ gcc_assert (GET_CODE (reg_save_area) == MEM);
+ reg_save_area = XEXP (reg_save_area, 0);
+ if (GET_CODE (reg_save_area) == PLUS)
+ {
+ gcc_assert (XEXP (reg_save_area, 0)
+ == virtual_stack_vars_rtx);
+ gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
+ offset += INTVAL (XEXP (reg_save_area, 1));
+ }
+ else
+ gcc_assert (reg_save_area == virtual_stack_vars_rtx);
+ }
+
+ cfun->machine->varargs_save_offset = offset;
+ save_area = plus_constant (virtual_stack_vars_rtx, offset);
+ }
+ }
+ else
+ {
+ first_reg_offset = next_cum.words;
+ save_area = virtual_incoming_args_rtx;
+
+ if (targetm.calls.must_pass_in_stack (mode, type))
+ first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
+ }
+
+ set = get_varargs_alias_set ();
+ if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
+ && cfun->va_list_gpr_size)
+ {
+ int nregs = GP_ARG_NUM_REG - first_reg_offset;
+
+ if (va_list_gpr_counter_field)
+ {
+ /* V4 va_list_gpr_size counts number of registers needed. */
+ if (nregs > cfun->va_list_gpr_size)
+ nregs = cfun->va_list_gpr_size;
+ }
+ else
+ {
+ /* char * va_list instead counts number of bytes needed. */
+ if (nregs > cfun->va_list_gpr_size / reg_size)
+ nregs = cfun->va_list_gpr_size / reg_size;
+ }
+
+ mem = gen_rtx_MEM (BLKmode,
+ plus_constant (save_area,
+ first_reg_offset * reg_size));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
+ set_mem_align (mem, BITS_PER_WORD);
+
+ rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
+ nregs);
+ }
+
+ /* Save FP registers if needed. */
+ if (DEFAULT_ABI == ABI_V4
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && ! no_rtl
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG
+ && cfun->va_list_fpr_size)
+ {
+ int fregno = next_cum.fregno, nregs;
+ rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
+ rtx lab = gen_label_rtx ();
+ int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
+ * UNITS_PER_FP_WORD);
+
+ emit_jump_insn
+ (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_NE (VOIDmode, cr1,
+ const0_rtx),
+ gen_rtx_LABEL_REF (VOIDmode, lab),
+ pc_rtx)));
+
+ for (nregs = 0;
+ fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
+ fregno++, off += UNITS_PER_FP_WORD, nregs++)
+ {
+ mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode,
+ plus_constant (save_area, off));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, set);
+ set_mem_align (mem, GET_MODE_ALIGNMENT (
+ (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode));
+ emit_move_insn (mem, gen_rtx_REG (
+ (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode, fregno));
+ }
+
+ emit_label (lab);
+ }
+}
+
+/* Create the va_list data type. */
+
+static tree
+rs6000_build_builtin_va_list (void)
+{
+ tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
+
+ /* For AIX, prefer 'char *' because that's what the system
+ header files like. */
+ if (DEFAULT_ABI != ABI_V4)
+ return build_pointer_type (char_type_node);
+
+ record = (*lang_hooks.types.make_type) (RECORD_TYPE);
+ type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
+ unsigned_char_type_node);
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
+ unsigned_char_type_node);
+ /* Give the two bytes of padding a name, so that -Wpadded won't warn on
+ every user file. */
+ f_res = build_decl (FIELD_DECL, get_identifier ("reserved"),
+ short_unsigned_type_node);
+ f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
+ ptr_type_node);
+ f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
+ ptr_type_node);
+
+ va_list_gpr_counter_field = f_gpr;
+ va_list_fpr_counter_field = f_fpr;
+
+ DECL_FIELD_CONTEXT (f_gpr) = record;
+ DECL_FIELD_CONTEXT (f_fpr) = record;
+ DECL_FIELD_CONTEXT (f_res) = record;
+ DECL_FIELD_CONTEXT (f_ovf) = record;
+ DECL_FIELD_CONTEXT (f_sav) = record;
+
+ TREE_CHAIN (record) = type_decl;
+ TYPE_NAME (record) = type_decl;
+ TYPE_FIELDS (record) = f_gpr;
+ TREE_CHAIN (f_gpr) = f_fpr;
+ TREE_CHAIN (f_fpr) = f_res;
+ TREE_CHAIN (f_res) = f_ovf;
+ TREE_CHAIN (f_ovf) = f_sav;
+
+ layout_type (record);
+
+ /* The correct type is an array type of one element. */
+ return build_array_type (record, build_index_type (size_zero_node));
+}
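+
+/* For reference, the record built above corresponds roughly to the
+ familiar SysV PowerPC va_list:
+ typedef struct __va_list_tag {
+ unsigned char gpr; (GPRs consumed so far, 0..8)
+ unsigned char fpr; (FPRs consumed so far, 0..8)
+ unsigned short reserved; (named padding only)
+ void *overflow_arg_area; (next stack-passed argument)
+ void *reg_save_area; (where incoming regs were dumped)
+ } va_list[1];
+ The one-element array type makes va_list decay to a pointer, so it
+ is effectively passed by reference, as the ABI requires. */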
+
+/* Implement va_start. */
+
+static void
+rs6000_va_start (tree valist, rtx nextarg)
+{
+ HOST_WIDE_INT words, n_gpr, n_fpr;
+ tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, t;
+
+ /* Only SVR4 needs something special. */
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ std_expand_builtin_va_start (valist, nextarg);
+ return;
+ }
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_res = TREE_CHAIN (f_fpr);
+ f_ovf = TREE_CHAIN (f_res);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
+ f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
+ f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
+ f_sav, NULL_TREE);
+
+ /* Count number of gp and fp argument registers used. */
+ words = crtl->args.info.words;
+ n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
+ GP_ARG_NUM_REG);
+ n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
+ FP_ARG_NUM_REG);
+
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
+ HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
+ words, n_gpr, n_fpr);
+
+ if (cfun->va_list_gpr_size)
+ {
+ t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
+ build_int_cst (NULL_TREE, n_gpr));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ if (cfun->va_list_fpr_size)
+ {
+ t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
+ build_int_cst (NULL_TREE, n_fpr));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ /* Find the overflow area. */
+ t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
+ if (words != 0)
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
+ size_int (words * UNITS_PER_WORD));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* If there were no va_arg invocations, don't set up the register
+ save area. */
+ if (!cfun->va_list_gpr_size
+ && !cfun->va_list_fpr_size
+ && n_gpr < GP_ARG_NUM_REG
+ && n_fpr < FP_ARG_V4_MAX_REG)
+ return;
+
+ /* Find the register save area. */
+ t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
+ if (cfun->machine->varargs_save_offset)
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
+ size_int (cfun->machine->varargs_save_offset));
+ t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
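+
+/* For illustration: for void f (int a, ...) compiled for V.4,
+ va_start leaves gpr == 1 (r3 was consumed by a), fpr == 0,
+ overflow_arg_area pointing at the incoming stack arguments
+ (words == 0 here), and reg_save_area pointing at the block that
+ setup_incoming_varargs spilled, adjusted by varargs_save_offset. */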
+
+/* Implement va_arg. */
+
+tree
+rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ gimple_seq *post_p)
+{
+ tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, reg, t, u;
+ int size, rsize, n_reg, sav_ofs, sav_scale;
+ tree lab_false, lab_over, addr;
+ int align;
+ tree ptrtype = build_pointer_type (type);
+ int regalign = 0;
+ gimple stmt;
+
+ if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
+ {
+ t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
+ return build_va_arg_indirect_ref (t);
+ }
+
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
+ {
+ tree elem_type = TREE_TYPE (type);
+ enum machine_mode elem_mode = TYPE_MODE (elem_type);
+ int elem_size = GET_MODE_SIZE (elem_mode);
+
+ if (elem_size < UNITS_PER_WORD)
+ {
+ tree real_part, imag_part;
+ gimple_seq post = NULL;
+
+ real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ &post);
+ /* Copy the value into a temporary, lest the formal temporary
+ be reused out from under us. */
+ real_part = get_initialized_tmp_var (real_part, pre_p, &post);
+ gimple_seq_add_seq (pre_p, post);
+
+ imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
+ post_p);
+
+ return build2 (COMPLEX_EXPR, type, real_part, imag_part);
+ }
+ }
+
+ return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
+ }
+
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_res = TREE_CHAIN (f_fpr);
+ f_ovf = TREE_CHAIN (f_res);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build_va_arg_indirect_ref (valist);
+ gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
+ fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
+ f_fpr, NULL_TREE);
+ ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
+ f_ovf, NULL_TREE);
+ sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
+ f_sav, NULL_TREE);
+
+ size = int_size_in_bytes (type);
+ rsize = (size + 3) / 4;
+ align = 1;
+
+ if (TARGET_HARD_FLOAT && TARGET_FPRS
+ && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
+ || (TARGET_DOUBLE_FLOAT
+ && (TYPE_MODE (type) == DFmode
+ || TYPE_MODE (type) == TFmode
+ || TYPE_MODE (type) == SDmode
+ || TYPE_MODE (type) == DDmode
+ || TYPE_MODE (type) == TDmode))))
+ {
+ /* FP args go in FP registers, if present. */
+ reg = fpr;
+ n_reg = (size + 7) / 8;
+ sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
+ sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
+ if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
+ align = 8;
+ }
+ else
+ {
+ /* Otherwise into GP registers. */
+ reg = gpr;
+ n_reg = rsize;
+ sav_ofs = 0;
+ sav_scale = 4;
+ if (n_reg == 2)
+ align = 8;
+ }
+
+ /* Pull the value out of the saved registers.... */
+
+ lab_over = NULL;
+ addr = create_tmp_var (ptr_type_node, "addr");
+ DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();
+
+ /* AltiVec vectors never go in registers when -mabi=altivec. */
+ if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
+ align = 16;
+ else
+ {
+ lab_false = create_artificial_label ();
+ lab_over = create_artificial_label ();
+
+ /* Long long and SPE vectors are aligned in the registers.
+ So is any other 2-GPR item, such as complex int, due to a
+ historical mistake. */
+ u = reg;
+ if (n_reg == 2 && reg == gpr)
+ {
+ regalign = 1;
+ u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
+ build_int_cst (TREE_TYPE (reg), n_reg - 1));
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
+ unshare_expr (reg), u);
+ }
+ /* _Decimal128 is passed in even/odd fpr pairs; the stored
+ reg number is 0 for f1, so we want to make it odd. */
+ else if (reg == fpr && TYPE_MODE (type) == TDmode)
+ {
+ regalign = 1;
+ t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
+ build_int_cst (TREE_TYPE (reg), 1));
+ u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
+ }
+
+ t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
+ t = build2 (GE_EXPR, boolean_type_node, u, t);
+ u = build1 (GOTO_EXPR, void_type_node, lab_false);
+ t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
+ gimplify_and_add (t, pre_p);
+
+ t = sav;
+ if (sav_ofs)
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));
+
+ u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
+ build_int_cst (TREE_TYPE (reg), n_reg));
+ u = fold_convert (sizetype, u);
+ u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
+ t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, u);
+
+ /* _Decimal32 varargs are located in the second word of the 64-bit
+ FP register for 32-bit binaries. */
+ if (!TARGET_POWERPC64
+ && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TYPE_MODE (type) == SDmode)
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
+
+ gimplify_assign (addr, t, pre_p);
+
+ gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
+
+ stmt = gimple_build_label (lab_false);
+ gimple_seq_add_stmt (pre_p, stmt);
+
+ if ((n_reg == 2 && !regalign) || n_reg > 2)
+ {
+ /* Ensure that we don't find any more args in regs.
+ Alignment has already been taken care of for the special
+ cases. */
+ gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
+ }
+ }
+
+ /* ... otherwise out of the overflow area. */
+
+ /* Care for on-stack alignment if needed. */
+ t = ovf;
+ if (align != 1)
+ {
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
+ t = fold_convert (sizetype, t);
+ t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
+ size_int (-align));
+ t = fold_convert (TREE_TYPE (ovf), t);
+ }
+ gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
+
+ gimplify_assign (unshare_expr (addr), t, pre_p);
+
+ t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
+ gimplify_assign (unshare_expr (ovf), t, pre_p);
+
+ if (lab_over)
+ {
+ stmt = gimple_build_label (lab_over);
+ gimple_seq_add_stmt (pre_p, stmt);
+ }
+
+ if (STRICT_ALIGNMENT
+ && (TYPE_ALIGN (type)
+ > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
+ {
+ /* The value (of type complex double, for example) may not be
+ aligned in memory in the saved registers, so copy via a
+ temporary. (This is the same code as used for SPARC.) */
+ tree tmp = create_tmp_var (type, "va_arg_tmp");
+ tree dest_addr = build_fold_addr_expr (tmp);
+
+ tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
+ 3, dest_addr, addr, size_int (rsize * 4));
+
+ gimplify_and_add (copy, pre_p);
+ addr = dest_addr;
+ }
+
+ addr = fold_convert (ptrtype, addr);
+ return build_va_arg_indirect_ref (addr);
+}
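+
+/* For illustration, va_arg (ap, double) under V.4 hard float runs
+ through the code above with size == 8, n_reg == 1, sav_ofs == 32
+ and sav_scale == 8: while fpr < 8 the value is fetched from
+ reg_save_area + 32 + fpr++ * 8 (the FPR block sits after the
+ 8 * 4 bytes of saved GPRs); once the FPRs are exhausted it is
+ read from the 8-byte-aligned overflow area instead. */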
+
+/* Builtins. */
+
+static void
+def_builtin (int mask, const char *name, tree type, int code)
+{
+ if ((mask & target_flags) || TARGET_PAIRED_FLOAT)
+ {
+ if (rs6000_builtin_decls[code])
+ abort ();
+
+ rs6000_builtin_decls[code] =
+ add_builtin_function (name, type, code, BUILT_IN_MD,
+ NULL, NULL_TREE);
+ }
+}
+
+/* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
+
+static const struct builtin_description bdesc_3arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM},
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },
+
+ { 0, CODE_FOR_paired_msub, "__builtin_paired_msub", PAIRED_BUILTIN_MSUB },
+ { 0, CODE_FOR_paired_madd, "__builtin_paired_madd", PAIRED_BUILTIN_MADD },
+ { 0, CODE_FOR_paired_madds0, "__builtin_paired_madds0", PAIRED_BUILTIN_MADDS0 },
+ { 0, CODE_FOR_paired_madds1, "__builtin_paired_madds1", PAIRED_BUILTIN_MADDS1 },
+ { 0, CODE_FOR_paired_nmsub, "__builtin_paired_nmsub", PAIRED_BUILTIN_NMSUB },
+ { 0, CODE_FOR_paired_nmadd, "__builtin_paired_nmadd", PAIRED_BUILTIN_NMADD },
+ { 0, CODE_FOR_paired_sum0, "__builtin_paired_sum0", PAIRED_BUILTIN_SUM0 },
+ { 0, CODE_FOR_paired_sum1, "__builtin_paired_sum1", PAIRED_BUILTIN_SUM1 },
+ { 0, CODE_FOR_selv2sf4, "__builtin_paired_selv2sf4", PAIRED_BUILTIN_SELV2SF4 },
+};
+
+/* DST operations: void foo (void *, const int, const char). */
+
+static const struct builtin_description bdesc_dst[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
+};
+
+/* Simple binary operations: VECc = foo (VECa, VECb). */
+
+static struct builtin_description bdesc_2arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_addv16qi3, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },
+ { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
+ { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
+ { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vadduhs, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddshs, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
+ { MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
+ { MASK_ALTIVEC, CODE_FOR_andcv4si3, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsh, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavguw, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsw, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
+ { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
+ { MASK_ALTIVEC, CODE_FOR_smaxv16qi3, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB },
+ { MASK_ALTIVEC, CODE_FOR_umaxv8hi3, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH },
+ { MASK_ALTIVEC, CODE_FOR_smaxv8hi3, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH },
+ { MASK_ALTIVEC, CODE_FOR_umaxv4si3, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW },
+ { MASK_ALTIVEC, CODE_FOR_smaxv4si3, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW },
+ { MASK_ALTIVEC, CODE_FOR_smaxv4sf3, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghb, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghh, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghw, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglb, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglh, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglw, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW },
+ { MASK_ALTIVEC, CODE_FOR_uminv16qi3, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB },
+ { MASK_ALTIVEC, CODE_FOR_sminv16qi3, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB },
+ { MASK_ALTIVEC, CODE_FOR_uminv8hi3, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH },
+ { MASK_ALTIVEC, CODE_FOR_sminv8hi3, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH },
+ { MASK_ALTIVEC, CODE_FOR_uminv4si3, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW },
+ { MASK_ALTIVEC, CODE_FOR_sminv4si3, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW },
+ { MASK_ALTIVEC, CODE_FOR_sminv4sf3, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulesb, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulesh, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_norv4si3, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
+ { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwus, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkswus, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlb, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlh, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlw, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW },
+ { MASK_ALTIVEC, CODE_FOR_vashlv16qi3, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB },
+ { MASK_ALTIVEC, CODE_FOR_vashlv8hi3, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH },
+ { MASK_ALTIVEC, CODE_FOR_vashlv4si3, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
+ { MASK_ALTIVEC, CODE_FOR_vlshrv16qi3, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_vlshrv8hi3, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_vlshrv4si3, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_vashrv16qi3, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_vashrv8hi3, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_vashrv4si3, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
+ { MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
+ { MASK_ALTIVEC, CODE_FOR_subv8hi3, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM },
+ { MASK_ALTIVEC, CODE_FOR_subv4si3, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM },
+ { MASK_ALTIVEC, CODE_FOR_subv4sf3, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubcuw, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsububs, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubsbs, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubuhs, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubshs, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubuws, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubsws, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4ubs, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4sbs, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4shs, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
+ { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR },
+
+ { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
+ { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
+ { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
+ { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
+ { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
+ { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
+ { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
+ { 0, CODE_FOR_paired_merge01, "__builtin_paired_merge01", PAIRED_BUILTIN_MERGE01 },
+ { 0, CODE_FOR_paired_merge10, "__builtin_paired_merge10", PAIRED_BUILTIN_MERGE10 },
+ { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
+
+ /* Place-holder. Leave as first binary SPE builtin. */
+ { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
+ { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
+ { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
+ { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
+ { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
+ { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
+ { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
+ { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
+ { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
+ { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
+ { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
+ { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
+ { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
+ { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
+ { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
+ { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
+ { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
+ { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
+ { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
+ { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
+ { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
+ { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
+ { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
+ { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
+ { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
+ { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
+ { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
+ { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
+ { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
+ { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
+ { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
+ { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
+ { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
+ { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
+ { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
+ { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
+ { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
+ { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
+ { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
+ { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
+ { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
+ { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
+ { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
+ { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
+ { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
+ { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
+ { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
+ { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
+ { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
+ { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
+ { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
+ { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
+ { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
+ { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
+ { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
+ { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
+ { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
+ { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
+ { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
+ { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
+ { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
+ { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
+ { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
+ { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
+ { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
+ { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
+ { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
+ { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
+ { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
+ { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
+ { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
+ { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
+ { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
+ { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
+ { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
+ { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
+ { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
+ { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
+ { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
+ { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
+ { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
+ { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
+ { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
+ { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
+ { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
+ { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
+ { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
+ { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
+ { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
+ { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
+ { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
+ { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
+ { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
+ { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
+ { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
+ { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
+ { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
+ { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
+ { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
+ { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
+ { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
+ { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
+ { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
+ { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
+ { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
+ { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
+ { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
+ { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
+ { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },
+
+ /* SPE binary operations expecting a 5-bit unsigned literal. */
+ { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },
+
+ { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
+ { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
+ { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
+ { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
+ { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
+ { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
+ { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
+ { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
+ { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
+ { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
+ { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
+ { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
+ { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
+ { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
+ { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
+ { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
+ { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
+ { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
+ { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
+ { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
+ { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
+ { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
+ { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
+ { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
+ { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
+ { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },
+
+ /* Place-holder. Leave as last binary SPE builtin. */
+ { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
+};
+
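+/* Editorial note, hedged (not part of the original patch): each entry in
+   the table above pairs a target-flag mask (MASK_ALTIVEC, or 0 for the
+   SPE and paired entries), the insn code used to expand the builtin
+   (CODE_FOR_nothing for the overloaded __builtin_vec_* stubs, which are
+   resolved to a specific builtin before expansion), the user-visible
+   builtin name, and its rs6000_builtins enum value.  */
+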
+/* AltiVec predicates. */
+
+struct builtin_description_predicates
+{
+ const unsigned int mask;
+ const enum insn_code icode;
+ const char *opcode;
+ const char *const name;
+ const enum rs6000_builtins code;
+};
+
+static const struct builtin_description_predicates bdesc_altivec_preds[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
+
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
+ { MASK_ALTIVEC, 0, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
+};
+
+/* SPE predicates. */
+static struct builtin_description bdesc_spe_predicates[] =
+{
+ /* Place-holder. Leave as first. */
+ { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
+ { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
+ { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
+ { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
+ { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
+ { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
+ { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
+ { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
+ { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
+ { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
+ /* Place-holder. Leave as last. */
+ { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
+};
+
+/* SPE evsel predicates. */
+static struct builtin_description bdesc_spe_evsel[] =
+{
+ /* Place-holder. Leave as first. */
+ { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
+ { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
+ { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
+ { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
+ { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
+ { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
+ { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
+ { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
+ { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
+ { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
+ /* Place-holder. Leave as last. */
+ { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
+};
+
+/* PAIRED predicates. */
+static const struct builtin_description bdesc_paired_preds[] =
+{
+ /* Place-holder. Leave as first. */
+ { 0, CODE_FOR_paired_cmpu0, "__builtin_paired_cmpu0", PAIRED_BUILTIN_CMPU0 },
+ /* Place-holder. Leave as last. */
+ { 0, CODE_FOR_paired_cmpu1, "__builtin_paired_cmpu1", PAIRED_BUILTIN_CMPU1 },
+};
+
+/* ABS* operations. */
+
+static const struct builtin_description bdesc_abs[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
+ { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
+ { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
+ { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
+};
+
+/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
+ foo (VECa). */
+
+static struct builtin_description bdesc_1arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
+ { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
+
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
+ { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },
+
+ /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
+ end with SPE_BUILTIN_EVSUBFUSIAAW. */
+ { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
+ { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
+ { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
+ { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
+ { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
+ { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
+ { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
+ { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
+ { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
+ { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
+ { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
+ { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
+ { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
+ { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
+ { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
+ { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
+ { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
+ { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
+ { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
+ { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
+ { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
+ { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
+ { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
+ { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
+ { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
+ { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
+ { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
+ { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },
+
+ /* Place-holder. Leave as last unary SPE builtin. */
+ { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },
+
+ { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
+ { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
+ { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
+ { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
+ { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
+};
+
+static rtx
+rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vspltisb
+ || icode == CODE_FOR_altivec_vspltish
+ || icode == CODE_FOR_altivec_vspltisw
+ || icode == CODE_FOR_spe_evsplatfi
+ || icode == CODE_FOR_spe_evsplati)
+ {
+ /* Only allow 5-bit *signed* literals. */
+ if (GET_CODE (op0) != CONST_INT
+ || INTVAL (op0) > 15
+ || INTVAL (op0) < -16)
+ {
+ error ("argument 1 must be a 5-bit signed literal");
+ return const0_rtx;
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
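+/* Editorial sketch, hedged: at the source level, the 5-bit *signed*
+   literal check above accepts e.g.
+
+     vector signed char v = __builtin_altivec_vspltisb (-4);
+
+   but rejects __builtin_altivec_vspltisb (17) with "argument 1 must be a
+   5-bit signed literal", since the operand must be a compile-time
+   constant in [-16, 15].  */
+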
+static rtx
+altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, scratch1, scratch2;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_normal (arg0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ scratch1 = gen_reg_rtx (mode0);
+ scratch2 = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vcfux
+ || icode == CODE_FOR_altivec_vcfsx
+ || icode == CODE_FOR_altivec_vctsxs
+ || icode == CODE_FOR_altivec_vctuxs
+ || icode == CODE_FOR_altivec_vspltb
+ || icode == CODE_FOR_altivec_vsplth
+ || icode == CODE_FOR_altivec_vspltw
+ || icode == CODE_FOR_spe_evaddiw
+ || icode == CODE_FOR_spe_evldd
+ || icode == CODE_FOR_spe_evldh
+ || icode == CODE_FOR_spe_evldw
+ || icode == CODE_FOR_spe_evlhhesplat
+ || icode == CODE_FOR_spe_evlhhossplat
+ || icode == CODE_FOR_spe_evlhhousplat
+ || icode == CODE_FOR_spe_evlwhe
+ || icode == CODE_FOR_spe_evlwhos
+ || icode == CODE_FOR_spe_evlwhou
+ || icode == CODE_FOR_spe_evlwhsplat
+ || icode == CODE_FOR_spe_evlwwsplat
+ || icode == CODE_FOR_spe_evrlwi
+ || icode == CODE_FOR_spe_evslwi
+ || icode == CODE_FOR_spe_evsrwis
+ || icode == CODE_FOR_spe_evsubifw
+ || icode == CODE_FOR_spe_evsrwiu)
+ {
+ /* Only allow 5-bit unsigned literals. */
+ STRIP_NOPS (arg1);
+ if (TREE_CODE (arg1) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg1) & ~0x1f)
+ {
+ error ("argument 2 must be a 5-bit unsigned literal");
+ return const0_rtx;
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (target, op0, op1);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
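+/* Editorial sketch, hedged: the unsigned-literal check above means e.g.
+   __builtin_altivec_vspltw (v, 1) expands normally, while
+   __builtin_altivec_vspltw (v, 40) is rejected with "argument 2 must be
+   a 5-bit unsigned literal", because 40 & ~0x1f is nonzero.  */
+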
+static rtx
+altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
+ tree exp, rtx target)
+{
+ rtx pat, scratch;
+ tree cr6_form = CALL_EXPR_ARG (exp, 0);
+ tree arg0 = CALL_EXPR_ARG (exp, 1);
+ tree arg1 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode tmode = SImode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ int cr6_form_int;
+
+ if (TREE_CODE (cr6_form) != INTEGER_CST)
+ {
+ error ("argument 1 of __builtin_altivec_predicate must be a constant");
+ return const0_rtx;
+ }
+ else
+ cr6_form_int = TREE_INT_CST_LOW (cr6_form);
+
+ gcc_assert (mode0 == mode1);
+
+ /* If we have invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ scratch = gen_reg_rtx (mode0);
+
+ pat = GEN_FCN (icode) (scratch, op0, op1,
+ gen_rtx_SYMBOL_REF (Pmode, opcode));
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ /* The vec_any* and vec_all* predicates use the same opcodes for two
+ different operations, but the bits in CR6 will be different
+ depending on what information we want. So we have to play tricks
+ with CR6 to get the right bits out.
+
+ If you think this is disgusting, look at the specs for the
+ AltiVec predicates. */
+
+ switch (cr6_form_int)
+ {
+ case 0:
+ emit_insn (gen_cr6_test_for_zero (target));
+ break;
+ case 1:
+ emit_insn (gen_cr6_test_for_zero_reverse (target));
+ break;
+ case 2:
+ emit_insn (gen_cr6_test_for_lt (target));
+ break;
+ case 3:
+ emit_insn (gen_cr6_test_for_lt_reverse (target));
+ break;
+ default:
+ error ("argument 1 of __builtin_altivec_predicate is out of range");
+ break;
+ }
+
+ return target;
+}
+
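+/* Editorial note, hedged: altivec.h encodes the CR6 selector as the first
+   argument of the *_p builtins, e.g. (sketch)
+
+     vec_all_eq (a, b)  ->  __builtin_altivec_vcmpequw_p (__CR6_LT, a, b)
+     vec_any_eq (a, b)  ->  __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, a, b)
+
+   with __CR6_EQ == 0, __CR6_EQ_REV == 1, __CR6_LT == 2 and
+   __CR6_LT_REV == 3 selecting among the gen_cr6_test_* sequences above.  */
+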
+static rtx
+paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, addr;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = Pmode;
+ enum machine_mode mode1 = Pmode;
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ if (op0 == const0_rtx)
+ {
+ addr = gen_rtx_MEM (tmode, op1);
+ }
+ else
+ {
+ op0 = copy_to_mode_reg (mode0, op0);
+ addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
+ }
+
+ pat = GEN_FCN (icode) (target, addr);
+
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
+static rtx
+altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
+{
+ rtx pat, addr;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = Pmode;
+ enum machine_mode mode1 = Pmode;
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ if (op0 == const0_rtx)
+ {
+ addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
+ }
+ else
+ {
+ op0 = copy_to_mode_reg (mode0, op0);
+ addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
+ }
+
+ pat = GEN_FCN (icode) (target, addr);
+
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
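+/* Editorial sketch, hedged: this expander produces the two addressing
+   shapes of an lvx-style load, e.g.
+
+     v = vec_ld (0, p);   ->  (mem (reg p))
+     v = vec_ld (16, p);  ->  (mem (plus (reg r) (reg p))), with r = 16
+
+   where a nonzero offset is first forced into a register, since the
+   underlying instructions only support register-indexed addressing.  */
+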
+static rtx
+spe_expand_stv_builtin (enum insn_code icode, tree exp)
+{
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx pat;
+ enum machine_mode mode0 = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[2].mode;
+
+ /* Invalid arguments; bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
+ op0 = copy_to_mode_reg (mode2, op0);
+ if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
+ op1 = copy_to_mode_reg (mode0, op1);
+ if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
+ op2 = copy_to_mode_reg (mode1, op2);
+
+ pat = GEN_FCN (icode) (op1, op2, op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+}
+
+static rtx
+paired_expand_stv_builtin (enum insn_code icode, tree exp)
+{
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx pat, addr;
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = Pmode;
+ enum machine_mode mode2 = Pmode;
+
+ /* Invalid arguments; bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
+ op0 = copy_to_mode_reg (tmode, op0);
+
+ op2 = copy_to_mode_reg (mode2, op2);
+
+ if (op1 == const0_rtx)
+ {
+ addr = gen_rtx_MEM (tmode, op2);
+ }
+ else
+ {
+ op1 = copy_to_mode_reg (mode1, op1);
+ addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
+ }
+
+ pat = GEN_FCN (icode) (addr, op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+}
+
+static rtx
+altivec_expand_stv_builtin (enum insn_code icode, tree exp)
+{
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx pat, addr;
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = Pmode;
+ enum machine_mode mode2 = Pmode;
+
+ /* Invalid arguments; bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
+ op0 = copy_to_mode_reg (tmode, op0);
+
+ op2 = copy_to_mode_reg (mode2, op2);
+
+ if (op1 == const0_rtx)
+ {
+ addr = gen_rtx_MEM (tmode, op2);
+ }
+ else
+ {
+ op1 = copy_to_mode_reg (mode1, op1);
+ addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
+ }
+
+ pat = GEN_FCN (icode) (addr, op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+}
+
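+/* Editorial note, hedged: the store expanders mirror the load case, e.g.
+   vec_st (v, 0, p) stores through a plain (mem (reg p)), while
+   vec_st (v, 16, p) forces the offset into a register and stores through
+   the reg+reg address built above.  */
+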
+static rtx
+rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[3].mode;
+
+ if (icode == CODE_FOR_nothing)
+ /* Builtin not supported on this processor. */
+ return 0;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ if (icode == CODE_FOR_altivec_vsldoi_v4sf
+ || icode == CODE_FOR_altivec_vsldoi_v4si
+ || icode == CODE_FOR_altivec_vsldoi_v8hi
+ || icode == CODE_FOR_altivec_vsldoi_v16qi)
+ {
+ /* Only allow 4-bit unsigned literals. */
+ STRIP_NOPS (arg2);
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg2) & ~0xf)
+ {
+ error ("argument 3 must be a 4-bit unsigned literal");
+ return const0_rtx;
+ }
+ }
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+
+ if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
+ pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
+ else
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+
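+/* Editorial sketch, hedged: the 4-bit check above accepts e.g.
+   vec_sld (a, b, 3) but rejects vec_sld (a, b, 16) with "argument 3 must
+   be a 4-bit unsigned literal", since the vsldoi shift count must be a
+   compile-time constant in 0..15.  */
+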
+/* Expand the lvx builtins. */
+static rtx
+altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0;
+ enum machine_mode tmode, mode0;
+ rtx pat, op0;
+ enum insn_code icode;
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
+ icode = CODE_FOR_altivec_lvx_v16qi;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
+ icode = CODE_FOR_altivec_lvx_v8hi;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
+ icode = CODE_FOR_altivec_lvx_v4si;
+ break;
+ case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
+ icode = CODE_FOR_altivec_lvx_v4sf;
+ break;
+ default:
+ *expandedp = false;
+ return NULL_RTX;
+ }
+
+ *expandedp = true;
+
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
+/* Expand the stvx builtins. */
+static rtx
+altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+ bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1;
+ enum machine_mode mode0, mode1;
+ rtx pat, op0, op1;
+ enum insn_code icode;
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
+ icode = CODE_FOR_altivec_stvx_v16qi;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
+ icode = CODE_FOR_altivec_stvx_v8hi;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
+ icode = CODE_FOR_altivec_stvx_v4si;
+ break;
+ case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
+ icode = CODE_FOR_altivec_stvx_v4sf;
+ break;
+ default:
+ *expandedp = false;
+ return NULL_RTX;
+ }
+
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+
+ *expandedp = true;
+ return NULL_RTX;
+}
+
+/* Expand the dst builtins. */
+static rtx
+altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+ bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree arg0, arg1, arg2;
+ enum machine_mode mode0, mode1, mode2;
+ rtx pat, op0, op1, op2;
+ const struct builtin_description *d;
+ size_t i;
+
+ *expandedp = false;
+
+ /* Handle DST variants. */
+ d = bdesc_dst;
+ for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
+ if (d->code == fcode)
+ {
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
+ op0 = expand_normal (arg0);
+ op1 = expand_normal (arg1);
+ op2 = expand_normal (arg2);
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+
+ /* Invalid arguments; bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return const0_rtx;
+
+ *expandedp = true;
+ STRIP_NOPS (arg2);
+ if (TREE_CODE (arg2) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg2) & ~0x3)
+ {
+ error ("argument to %qs must be a 2-bit unsigned literal", d->name);
+ return const0_rtx;
+ }
+
+ if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (Pmode, op0);
+ if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (d->icode) (op0, op1, op2);
+ if (pat != 0)
+ emit_insn (pat);
+
+ return NULL_RTX;
+ }
+
+ return NULL_RTX;
+}
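+
+/* Worked example (hypothetical arguments): the last operand of the dst
+ builtins selects one of four data-stream channels, so
+
+ __builtin_altivec_dst (p, ctl, 3); accepted: 3 fits in 2 bits
+ __builtin_altivec_dst (p, ctl, 4); rejected: 4 & ~0x3 is nonzero
+
+ and the error above is issued before any rtl is generated. */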
+
+/* Expand vec_init builtin. */
+static rtx
+altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
+{
+ enum machine_mode tmode = TYPE_MODE (type);
+ enum machine_mode inner_mode = GET_MODE_INNER (tmode);
+ int i, n_elt = GET_MODE_NUNITS (tmode);
+ rtvec v = rtvec_alloc (n_elt);
+
+ gcc_assert (VECTOR_MODE_P (tmode));
+ gcc_assert (n_elt == call_expr_nargs (exp));
+
+ for (i = 0; i < n_elt; ++i)
+ {
+ rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
+ RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
+ }
+
+ if (!target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
+ return target;
+}
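+
+/* Illustrative sketch (hypothetical call): for
+
+ vector int v = __builtin_vec_init_v4si (1, 2, 3, 4);
+
+ n_elt is 4, each argument is expanded and narrowed to SImode with
+ gen_lowpart, and the resulting (parallel [...]) is handed to
+ rs6000_expand_vector_init to construct the V4SImode value. */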
+
+/* Return the integer constant in ARG. Constrain it to be in the range
+ of the subparts of VEC_TYPE; issue an error if not. */
+
+static int
+get_element_number (tree vec_type, tree arg)
+{
+ unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
+
+ if (!host_integerp (arg, 1)
+ || (elt = tree_low_cst (arg, 1), elt > max))
+ {
+ error ("selector must be an integer constant in the range 0..%wi", max);
+ return 0;
+ }
+
+ return elt;
+}
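+
+/* For example (hypothetical call): with a V4SI vector argument,
+ TYPE_VECTOR_SUBPARTS is 4, so selectors 0..3 are accepted, while
+
+ __builtin_vec_ext_v4si (v, 7)
+
+ draws the "selector must be an integer constant in the range 0..3"
+ error and falls back to element 0. */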
+
+/* Expand vec_set builtin. */
+static rtx
+altivec_expand_vec_set_builtin (tree exp)
+{
+ enum machine_mode tmode, mode1;
+ tree arg0, arg1, arg2;
+ int elt;
+ rtx op0, op1;
+
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+ arg2 = CALL_EXPR_ARG (exp, 2);
+
+ tmode = TYPE_MODE (TREE_TYPE (arg0));
+ mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ gcc_assert (VECTOR_MODE_P (tmode));
+
+ op0 = expand_expr (arg0, NULL_RTX, tmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, mode1, 0);
+ elt = get_element_number (TREE_TYPE (arg0), arg2);
+
+ if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
+ op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
+
+ op0 = force_reg (tmode, op0);
+ op1 = force_reg (mode1, op1);
+
+ rs6000_expand_vector_set (op0, op1, elt);
+
+ return op0;
+}
+
+/* Expand vec_ext builtin. */
+static rtx
+altivec_expand_vec_ext_builtin (tree exp, rtx target)
+{
+ enum machine_mode tmode, mode0;
+ tree arg0, arg1;
+ int elt;
+ rtx op0;
+
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ arg1 = CALL_EXPR_ARG (exp, 1);
+
+ op0 = expand_normal (arg0);
+ elt = get_element_number (TREE_TYPE (arg0), arg1);
+
+ tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
+ mode0 = TYPE_MODE (TREE_TYPE (arg0));
+ gcc_assert (VECTOR_MODE_P (mode0));
+
+ op0 = force_reg (mode0, op0);
+
+ if (optimize || !target || !register_operand (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ rs6000_expand_vector_extract (target, op0, elt);
+
+ return target;
+}
+
+/* Expand the builtin in EXP and store the result in TARGET. Store
+ true in *EXPANDEDP if we found a builtin to expand. */
+static rtx
+altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
+{
+ const struct builtin_description *d;
+ const struct builtin_description_predicates *dp;
+ size_t i;
+ enum insn_code icode;
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ tree arg0;
+ rtx op0, pat;
+ enum machine_mode tmode, mode0;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
+ {
+ *expandedp = true;
+ error ("unresolved overload for Altivec builtin %qF", fndecl);
+ return const0_rtx;
+ }
+
+ target = altivec_expand_ld_builtin (exp, target, expandedp);
+ if (*expandedp)
+ return target;
+
+ target = altivec_expand_st_builtin (exp, target, expandedp);
+ if (*expandedp)
+ return target;
+
+ target = altivec_expand_dst_builtin (exp, target, expandedp);
+ if (*expandedp)
+ return target;
+
+ *expandedp = true;
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_STVX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
+ case ALTIVEC_BUILTIN_STVEBX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
+ case ALTIVEC_BUILTIN_STVEHX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
+ case ALTIVEC_BUILTIN_STVEWX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
+ case ALTIVEC_BUILTIN_STVXL:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);
+
+ case ALTIVEC_BUILTIN_STVLX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
+ case ALTIVEC_BUILTIN_STVLXL:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
+ case ALTIVEC_BUILTIN_STVRX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
+ case ALTIVEC_BUILTIN_STVRXL:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
+
+ case ALTIVEC_BUILTIN_MFVSCR:
+ icode = CODE_FOR_altivec_mfvscr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ pat = GEN_FCN (icode) (target);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_MTVSCR:
+ icode = CODE_FOR_altivec_mtvscr;
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_DSSALL:
+ emit_insn (gen_altivec_dssall ());
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_DSS:
+ icode = CODE_FOR_altivec_dss;
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ STRIP_NOPS (arg0);
+ op0 = expand_normal (arg0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ /* If we got invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (TREE_CODE (arg0) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg0) & ~0x3)
+ {
+ error ("argument to dss must be a 2-bit unsigned literal");
+ return const0_rtx;
+ }
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ emit_insn (gen_altivec_dss (op0));
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
+ return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
+
+ case ALTIVEC_BUILTIN_VEC_SET_V4SI:
+ case ALTIVEC_BUILTIN_VEC_SET_V8HI:
+ case ALTIVEC_BUILTIN_VEC_SET_V16QI:
+ case ALTIVEC_BUILTIN_VEC_SET_V4SF:
+ return altivec_expand_vec_set_builtin (exp);
+
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
+ case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
+ return altivec_expand_vec_ext_builtin (exp, target);
+
+ default:
+ /* Fall through to the table-driven expanders below. */
+ break;
+ }
+
+ /* Expand abs* operations. */
+ d = bdesc_abs;
+ for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
+ if (d->code == fcode)
+ return altivec_expand_abs_builtin (d->icode, exp, target);
+
+ /* Expand the AltiVec predicates. */
+ dp = bdesc_altivec_preds;
+ for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
+ if (dp->code == fcode)
+ return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
+ exp, target);
+
+ /* The LV* builtins are funky: they were initialized differently from
+ the rest, so expand them here by hand. */
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_LVSL:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVSR:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVEBX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVEHX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVEWX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVXL:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
+ exp, target, false);
+ case ALTIVEC_BUILTIN_LVLX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
+ exp, target, true);
+ case ALTIVEC_BUILTIN_LVLXL:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
+ exp, target, true);
+ case ALTIVEC_BUILTIN_LVRX:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
+ exp, target, true);
+ case ALTIVEC_BUILTIN_LVRXL:
+ return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
+ exp, target, true);
+ default:
+ /* Not an LV* builtin; fall through to the failure return below. */
+ break;
+ }
+
+ *expandedp = false;
+ return NULL_RTX;
+}
+
+/* Expand the builtin in EXP and store the result in TARGET. Store
+ true in *EXPANDEDP if we found a builtin to expand. */
+static rtx
+paired_expand_builtin (tree exp, rtx target, bool * expandedp)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ const struct builtin_description *d;
+ size_t i;
+
+ *expandedp = true;
+
+ switch (fcode)
+ {
+ case PAIRED_BUILTIN_STX:
+ return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
+ case PAIRED_BUILTIN_LX:
+ return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
+ default:
+ /* Fall through to the predicate table below. */
+ break;
+ }
+
+ /* Expand the paired predicates. */
+ d = bdesc_paired_preds;
+ for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
+ if (d->code == fcode)
+ return paired_expand_predicate_builtin (d->icode, exp, target);
+
+ *expandedp = false;
+ return NULL_RTX;
+}
+
+/* Binops that need to be initialized manually, but can be expanded
+ automagically by rs6000_expand_binop_builtin. */
+static struct builtin_description bdesc_2arg_spe[] =
+{
+ { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
+ { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
+ { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
+ { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
+ { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
+ { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
+ { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
+ { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
+ { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
+ { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
+ { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
+ { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
+ { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
+ { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
+ { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
+ { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
+ { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
+ { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
+ { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
+ { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
+ { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
+ { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
+};
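+
+/* Illustrative note: although these are loads, each takes two operands
+ (a base pointer and an offset or index), so they fit the generic binop
+ expander. A hypothetical call such as
+
+ __ev64_opaque__ d = __builtin_spe_evlddx (pv, i);
+
+ is matched in the table above and routed through
+ rs6000_expand_binop_builtin like any other two-operand builtin. */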
+
+/* Expand the builtin in EXP and store the result in TARGET. Store
+ true in *EXPANDEDP if we found a builtin to expand.
+
+ This expands the SPE builtins that are not simple unary and binary
+ operations. */
+static rtx
+spe_expand_builtin (tree exp, rtx target, bool *expandedp)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ tree arg1, arg0;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ enum insn_code icode;
+ enum machine_mode tmode, mode0;
+ rtx pat, op0;
+ struct builtin_description *d;
+ size_t i;
+
+ *expandedp = true;
+
+ /* Syntax check for a 5-bit unsigned immediate. */
+ switch (fcode)
+ {
+ case SPE_BUILTIN_EVSTDD:
+ case SPE_BUILTIN_EVSTDH:
+ case SPE_BUILTIN_EVSTDW:
+ case SPE_BUILTIN_EVSTWHE:
+ case SPE_BUILTIN_EVSTWHO:
+ case SPE_BUILTIN_EVSTWWE:
+ case SPE_BUILTIN_EVSTWWO:
+ arg1 = CALL_EXPR_ARG (exp, 2);
+ if (TREE_CODE (arg1) != INTEGER_CST
+ || TREE_INT_CST_LOW (arg1) & ~0x1f)
+ {
+ error ("argument 2 must be a 5-bit unsigned literal");
+ return const0_rtx;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* The evsplat*i instructions are not quite generic. */
+ switch (fcode)
+ {
+ case SPE_BUILTIN_EVSPLATFI:
+ return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
+ exp, target);
+ case SPE_BUILTIN_EVSPLATI:
+ return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
+ exp, target);
+ default:
+ break;
+ }
+
+ d = (struct builtin_description *) bdesc_2arg_spe;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
+ if (d->code == fcode)
+ return rs6000_expand_binop_builtin (d->icode, exp, target);
+
+ d = (struct builtin_description *) bdesc_spe_predicates;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
+ if (d->code == fcode)
+ return spe_expand_predicate_builtin (d->icode, exp, target);
+
+ d = (struct builtin_description *) bdesc_spe_evsel;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
+ if (d->code == fcode)
+ return spe_expand_evsel_builtin (d->icode, exp, target);
+
+ switch (fcode)
+ {
+ case SPE_BUILTIN_EVSTDDX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
+ case SPE_BUILTIN_EVSTDHX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
+ case SPE_BUILTIN_EVSTDWX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
+ case SPE_BUILTIN_EVSTWHEX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
+ case SPE_BUILTIN_EVSTWHOX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
+ case SPE_BUILTIN_EVSTWWEX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
+ case SPE_BUILTIN_EVSTWWOX:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
+ case SPE_BUILTIN_EVSTDD:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
+ case SPE_BUILTIN_EVSTDH:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
+ case SPE_BUILTIN_EVSTDW:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
+ case SPE_BUILTIN_EVSTWHE:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
+ case SPE_BUILTIN_EVSTWHO:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
+ case SPE_BUILTIN_EVSTWWE:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
+ case SPE_BUILTIN_EVSTWWO:
+ return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
+ case SPE_BUILTIN_MFSPEFSCR:
+ icode = CODE_FOR_spe_mfspefscr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ pat = GEN_FCN (icode) (target);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+ case SPE_BUILTIN_MTSPEFSCR:
+ icode = CODE_FOR_spe_mtspefscr;
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_normal (arg0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ if (arg0 == error_mark_node)
+ return const0_rtx;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+ default:
+ break;
+ }
+
+ *expandedp = false;
+ return NULL_RTX;
+}
+
+static rtx
+paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, scratch, tmp;
+ tree form = CALL_EXPR_ARG (exp, 0);
+ tree arg0 = CALL_EXPR_ARG (exp, 1);
+ tree arg1 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ int form_int;
+ enum rtx_code code;
+
+ if (TREE_CODE (form) != INTEGER_CST)
+ {
+ error ("argument 1 of __builtin_paired_predicate must be a constant");
+ return const0_rtx;
+ }
+ else
+ form_int = TREE_INT_CST_LOW (form);
+
+ gcc_assert (mode0 == mode1);
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != SImode
+ || !(*insn_data[icode].operand[0].predicate) (target, SImode))
+ target = gen_reg_rtx (SImode);
+ if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ scratch = gen_reg_rtx (CCFPmode);
+
+ pat = GEN_FCN (icode) (scratch, op0, op1);
+ if (!pat)
+ return const0_rtx;
+
+ emit_insn (pat);
+
+ switch (form_int)
+ {
+ /* LT bit. */
+ case 0:
+ code = LT;
+ break;
+ /* GT bit. */
+ case 1:
+ code = GT;
+ break;
+ /* EQ bit. */
+ case 2:
+ code = EQ;
+ break;
+ /* UN bit. */
+ case 3:
+ emit_insn (gen_move_from_CR_ov_bit (target, scratch));
+ return target;
+ default:
+ error ("argument 1 of __builtin_paired_predicate is out of range");
+ return const0_rtx;
+ }
+
+ tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
+ emit_move_insn (target, tmp);
+ return target;
+}
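+
+/* Illustrative note (hypothetical call): the FORM operand picks which
+ bit of the single CCFP compare is read back, so a call expanded with
+ form 2 materializes the EQ bit as a 0/1 SImode value, while form 3
+ reads the unordered bit via gen_move_from_CR_ov_bit. */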
+
+static rtx
+spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, scratch, tmp;
+ tree form = CALL_EXPR_ARG (exp, 0);
+ tree arg0 = CALL_EXPR_ARG (exp, 1);
+ tree arg1 = CALL_EXPR_ARG (exp, 2);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ int form_int;
+ enum rtx_code code;
+
+ if (TREE_CODE (form) != INTEGER_CST)
+ {
+ error ("argument 1 of __builtin_spe_predicate must be a constant");
+ return const0_rtx;
+ }
+ else
+ form_int = TREE_INT_CST_LOW (form);
+
+ gcc_assert (mode0 == mode1);
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != SImode
+ || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
+ target = gen_reg_rtx (SImode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ scratch = gen_reg_rtx (CCmode);
+
+ pat = GEN_FCN (icode) (scratch, op0, op1);
+ if (! pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ /* There are 4 variants for each predicate: _any_, _all_, _upper_,
+ _lower_. We use one compare, but look in different bits of the
+ CR for each variant.
+
+ There are 2 elements in each SPE SIMD type (upper/lower). The CR
+ bits are set as follows:
+
+ BIT 0 | BIT 1 | BIT 2 | BIT 3
+ U | L | (U | L) | (U & L)
+
+ So, for an "all" relationship, BIT 3 would be set.
+ For an "any" relationship, BIT 2 would be set. Etc.
+
+ Following traditional nomenclature, these bits map to:
+
+ BIT 0 | BIT 1 | BIT 2 | BIT 3
+ LT | GT | EQ | OV
+
+ Later, we generate rtl that reads the OV, EQ, LT or GT bit,
+ matching the all/any/upper/lower variants in the switch below.
+ */
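+
+ /* Worked example (hypothetical values): for evcmpgts comparing
+ a = {5, 1} with b = {3, 2}, the upper compare (5 > 3) is true and
+ the lower compare (1 > 2) is false, so the CR field holds U=1, L=0,
+ (U|L)=1, (U&L)=0: the "any" variant (EQ bit) reads true while the
+ "all" variant (OV bit) reads false. */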
+
+ switch (form_int)
+ {
+ /* All variant. OV bit. */
+ case 0:
+ /* We need to get to the OV bit, which is the ORDERED bit. We
+ could generate (ordered:SI (reg:CC xx) (const_int 0)), but
+ that's ugly and will make validate_condition_mode die.
+ So let's just use another pattern. */
+ emit_insn (gen_move_from_CR_ov_bit (target, scratch));
+ return target;
+ /* Any variant. EQ bit. */
+ case 1:
+ code = EQ;
+ break;
+ /* Upper variant. LT bit. */
+ case 2:
+ code = LT;
+ break;
+ /* Lower variant. GT bit. */
+ case 3:
+ code = GT;
+ break;
+ default:
+ error ("argument 1 of __builtin_spe_predicate is out of range");
+ return const0_rtx;
+ }
+
+ tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
+ emit_move_insn (target, tmp);
+
+ return target;
+}
+
+/* The evsel builtins look like this:
+
+ e = __builtin_spe_evsel_OP (a, b, c, d);
+
+ and work like this:
+
+ e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
+ e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
+*/
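+
+/* Concrete instance (hypothetical values and OP spelling): with OP = gts,
+ a = {5, 1}, b = {3, 2}, c = {10, 20}, d = {30, 40}, the upper compare
+ (5 > 3) holds and the lower (1 > 2) does not, so
+
+ e = __builtin_spe_evsel_gts (a, b, c, d);
+
+ yields e = {10, 40}. */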
+
+static rtx
+spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, scratch;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ tree arg2 = CALL_EXPR_ARG (exp, 2);
+ tree arg3 = CALL_EXPR_ARG (exp, 3);
+ rtx op0 = expand_normal (arg0);
+ rtx op1 = expand_normal (arg1);
+ rtx op2 = expand_normal (arg2);
+ rtx op3 = expand_normal (arg3);
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+
+ gcc_assert (mode0 == mode1);
+
+ if (arg0 == error_mark_node || arg1 == error_mark_node
+ || arg2 == error_mark_node || arg3 == error_mark_node)
+ return const0_rtx;
+
+ if (target == 0
+ || GET_MODE (target) != mode0
+ || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
+ target = gen_reg_rtx (mode0);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode0, op1);
+ if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
+ op2 = copy_to_mode_reg (mode0, op2);
+ if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
+ op3 = copy_to_mode_reg (mode0, op3);
+
+ /* Generate the compare. */
+ scratch = gen_reg_rtx (CCmode);
+ pat = GEN_FCN (icode) (scratch, op0, op1);
+ if (! pat)
+ return const0_rtx;
+ emit_insn (pat);
+
+ if (mode0 == V2SImode)
+ emit_insn (gen_spe_evsel (target, op2, op3, scratch));
+ else
+ emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
+
+ return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ const struct builtin_description *d;
+ size_t i;
+ rtx ret;
+ bool success;
+
+ if (fcode == RS6000_BUILTIN_RECIP)
+ return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
+
+ if (fcode == RS6000_BUILTIN_RECIPF)
+ return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
+
+ if (fcode == RS6000_BUILTIN_RSQRTF)
+ return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
+
+ if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
+ || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
+ {
+ int icode = (int) CODE_FOR_altivec_lvsr;
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode = insn_data[icode].operand[1].mode;
+ tree arg;
+ rtx op, addr, pat;
+
+ gcc_assert (TARGET_ALTIVEC);
+
+ arg = CALL_EXPR_ARG (exp, 0);
+ gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
+ op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
+ addr = memory_address (mode, op);
+ if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
+ op = addr;
+ else
+ {
+ /* For the load case, we need to negate the address. */
+ op = gen_reg_rtx (GET_MODE (addr));
+ emit_insn (gen_rtx_SET (VOIDmode, op,
+ gen_rtx_NEG (GET_MODE (addr), addr)));
+ }
+ op = gen_rtx_MEM (mode, op);
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ /* This is equivalent to gen_altivec_lvsr (target, op). */
+ pat = GEN_FCN (icode) (target, op);
+ if (!pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+ }
+
+ /* FIXME: There's got to be a nicer way to handle this case than
+ constructing a new CALL_EXPR. */
+ if (fcode == ALTIVEC_BUILTIN_VCFUX
+ || fcode == ALTIVEC_BUILTIN_VCFSX
+ || fcode == ALTIVEC_BUILTIN_VCTUXS
+ || fcode == ALTIVEC_BUILTIN_VCTSXS)
+ {
+ if (call_expr_nargs (exp) == 1)
+ exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
+ 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
+ }
+
+ if (TARGET_ALTIVEC)
+ {
+ ret = altivec_expand_builtin (exp, target, &success);
+
+ if (success)
+ return ret;
+ }
+ if (TARGET_SPE)
+ {
+ ret = spe_expand_builtin (exp, target, &success);
+
+ if (success)
+ return ret;
+ }
+ if (TARGET_PAIRED_FLOAT)
+ {
+ ret = paired_expand_builtin (exp, target, &success);
+
+ if (success)
+ return ret;
+ }
+
+ gcc_assert (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT);
+
+ /* Handle simple unary operations. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_unop_builtin (d->icode, exp, target);
+
+ /* Handle simple binary operations. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_binop_builtin (d->icode, exp, target);
+
+ /* Handle simple ternary operations. */
+ d = bdesc_3arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ if (d->code == fcode)
+ return rs6000_expand_ternop_builtin (d->icode, exp, target);
+
+ gcc_unreachable ();
+}
+
+static tree
+build_opaque_vector_type (tree node, int nunits)
+{
+ node = copy_node (node);
+ TYPE_MAIN_VARIANT (node) = node;
+ TYPE_CANONICAL (node) = node;
+ return build_vector_type (node, nunits);
+}
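+
+/* Note: copying the element type and making the copy its own main
+ variant and canonical type yields a vector type that is structurally
+ identical to, yet distinct from, the ordinary one; this appears to be
+ what lets the opaque SPE/paired vector types stand in for several
+ concrete vector types. */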
+
+static void
+rs6000_init_builtins (void)
+{
+ V2SI_type_node = build_vector_type (intSI_type_node, 2);
+ V2SF_type_node = build_vector_type (float_type_node, 2);
+ V4HI_type_node = build_vector_type (intHI_type_node, 4);
+ V4SI_type_node = build_vector_type (intSI_type_node, 4);
+ V4SF_type_node = build_vector_type (float_type_node, 4);
+ V8HI_type_node = build_vector_type (intHI_type_node, 8);
+ V16QI_type_node = build_vector_type (intQI_type_node, 16);
+
+ unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
+ unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
+ unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
+
+ opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
+ opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
+ opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
+ opaque_V4SI_type_node = copy_node (V4SI_type_node);
+
+ /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
+ types, especially in C++ land. Similarly, 'vector pixel' is distinct from
+ 'vector unsigned short'. */
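+
+ /* For instance (hypothetical C++ user code), the distinct copies keep
+ these two overloads distinguishable:
+
+ void f (vector bool char);
+ void f (vector unsigned char); */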
+
+ bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
+ bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
+ bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
+ pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
+
+ long_integer_type_internal_node = long_integer_type_node;
+ long_unsigned_type_internal_node = long_unsigned_type_node;
+ intQI_type_internal_node = intQI_type_node;
+ uintQI_type_internal_node = unsigned_intQI_type_node;
+ intHI_type_internal_node = intHI_type_node;
+ uintHI_type_internal_node = unsigned_intHI_type_node;
+ intSI_type_internal_node = intSI_type_node;
+ uintSI_type_internal_node = unsigned_intSI_type_node;
+ float_type_internal_node = float_type_node;
+ void_type_internal_node = void_type_node;
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool char"),
+ bool_char_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool short"),
+ bool_short_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__bool int"),
+ bool_int_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__pixel"),
+ pixel_type_node));
+
+ bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
+ bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
+ bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
+ pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned char"),
+ unsigned_V16QI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed char"),
+ V16QI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool char"),
+ bool_V16QI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned short"),
+ unsigned_V8HI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed short"),
+ V8HI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool short"),
+ bool_V8HI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector unsigned int"),
+ unsigned_V4SI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector signed int"),
+ V4SI_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __bool int"),
+ bool_V4SI_type_node));
+
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector float"),
+ V4SF_type_node));
+ (*lang_hooks.decls.pushdecl) (build_decl (TYPE_DECL,
+ get_identifier ("__vector __pixel"),
+ pixel_V8HI_type_node));
+
+ if (TARGET_PAIRED_FLOAT)
+ paired_init_builtins ();
+ if (TARGET_SPE)
+ spe_init_builtins ();
+ if (TARGET_ALTIVEC)
+ altivec_init_builtins ();
+ if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT)
+ rs6000_common_init_builtins ();
+ if (TARGET_PPC_GFXOPT)
+ {
+ tree ftype = build_function_type_list (float_type_node,
+ float_type_node,
+ float_type_node,
+ NULL_TREE);
+ def_builtin (MASK_PPC_GFXOPT, "__builtin_recipdivf", ftype,
+ RS6000_BUILTIN_RECIPF);
+
+ ftype = build_function_type_list (float_type_node,
+ float_type_node,
+ NULL_TREE);
+ def_builtin (MASK_PPC_GFXOPT, "__builtin_rsqrtf", ftype,
+ RS6000_BUILTIN_RSQRTF);
+ }
+ if (TARGET_POPCNTB)
+ {
+ tree ftype = build_function_type_list (double_type_node,
+ double_type_node,
+ double_type_node,
+ NULL_TREE);
+ def_builtin (MASK_POPCNTB, "__builtin_recipdiv", ftype,
+ RS6000_BUILTIN_RECIP);
+ }
+
+#if TARGET_XCOFF
+ /* AIX libm provides clog as __clog. */
+ if (built_in_decls [BUILT_IN_CLOG])
+ set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
+#endif
+
+#ifdef SUBTARGET_INIT_BUILTINS
+ SUBTARGET_INIT_BUILTINS;
+#endif
+}
+
+/* Search through a set of builtins and enable the mask bits.
+ DESC is an array of builtins.
+ SIZE is the total number of builtins.
+ START is the builtin enum at which to start.
+ END is the builtin enum at which to end. */
+static void
+enable_mask_for_builtins (struct builtin_description *desc, int size,
+ enum rs6000_builtins start,
+ enum rs6000_builtins end)
+{
+ int i;
+
+ for (i = 0; i < size; ++i)
+ if (desc[i].code == start)
+ break;
+
+ if (i == size)
+ return;
+
+ for (; i < size; ++i)
+ {
+ /* Copy the current target_flags so these builtins are always
+ enabled. */
+ desc[i].mask = target_flags;
+ if (desc[i].code == end)
+ break;
+ }
+}
+
+static void
+spe_init_builtins (void)
+{
+ tree endlink = void_list_node;
+ tree puint_type_node = build_pointer_type (unsigned_type_node);
+ tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
+ struct builtin_description *d;
+ size_t i;
+
+ tree v2si_ftype_4_v2si
+ = build_function_type
+ (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ endlink)))));
+
+ tree v2sf_ftype_4_v2sf
+ = build_function_type
+ (opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ endlink)))));
+
+ tree int_ftype_int_v2si_v2si
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ endlink))));
+
+ tree int_ftype_int_v2sf_v2sf
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ tree_cons (NULL_TREE, opaque_V2SF_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_puint_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, puint_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_puint_char
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, puint_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_pv2si_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
+ tree_cons (NULL_TREE,
+ integer_type_node,
+ endlink))));
+
+ tree void_ftype_v2si_pv2si_char
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree void_ftype_int
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, integer_type_node, endlink));
+
+ tree int_ftype_void
+ = build_function_type (integer_type_node, endlink);
+
+ tree v2si_ftype_pv2si_int
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+
+ tree v2si_ftype_puint_int
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, puint_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+
+ tree v2si_ftype_pushort_int
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, pushort_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ endlink)));
+
+ tree v2si_ftype_signed_char
+ = build_function_type (opaque_V2SI_type_node,
+ tree_cons (NULL_TREE, signed_char_type_node,
+ endlink));
+
+ /* The initialization of the simple binary and unary builtins is
+ done in rs6000_common_init_builtins, but we have to enable the
+ mask bits here manually because we have run out of `target_flags'
+ bits. We really need to redesign this mask business. */
+
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_2arg,
+ ARRAY_SIZE (bdesc_2arg),
+ SPE_BUILTIN_EVADDW,
+ SPE_BUILTIN_EVXOR);
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_1arg,
+ ARRAY_SIZE (bdesc_1arg),
+ SPE_BUILTIN_EVABS,
+ SPE_BUILTIN_EVSUBFUSIAAW);
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_predicates,
+ ARRAY_SIZE (bdesc_spe_predicates),
+ SPE_BUILTIN_EVCMPEQ,
+ SPE_BUILTIN_EVFSTSTLT);
+ enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_evsel,
+ ARRAY_SIZE (bdesc_spe_evsel),
+ SPE_BUILTIN_EVSEL_CMPGTS,
+ SPE_BUILTIN_EVSEL_FSTSTEQ);
+
+ (*lang_hooks.decls.pushdecl)
+ (build_decl (TYPE_DECL, get_identifier ("__ev64_opaque__"),
+ opaque_V2SI_type_node));
+
+ /* Initialize irregular SPE builtins. */
+
+ def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
+ def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
+ def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
+ def_builtin (target_flags, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
+ def_builtin (target_flags, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
+ def_builtin (target_flags, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
+ def_builtin (target_flags, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
+ def_builtin (target_flags, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
+ def_builtin (target_flags, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
+ def_builtin (target_flags, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
+ def_builtin (target_flags, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
+ def_builtin (target_flags, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
+ def_builtin (target_flags, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
+ def_builtin (target_flags, "__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
+ def_builtin (target_flags, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
+ def_builtin (target_flags, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
+ def_builtin (target_flags, "__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
+ def_builtin (target_flags, "__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
+
+ /* Loads. */
+ def_builtin (target_flags, "__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
+ def_builtin (target_flags, "__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
+ def_builtin (target_flags, "__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
+ def_builtin (target_flags, "__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
+ def_builtin (target_flags, "__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
+ def_builtin (target_flags, "__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
+ def_builtin (target_flags, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
+ def_builtin (target_flags, "__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
+ def_builtin (target_flags, "__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
+ def_builtin (target_flags, "__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
+ def_builtin (target_flags, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
+ def_builtin (target_flags, "__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
+ def_builtin (target_flags, "__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
+ def_builtin (target_flags, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
+ def_builtin (target_flags, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
+
+ /* Predicates. */
+ d = (struct builtin_description *) bdesc_spe_predicates;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
+ {
+ tree type;
+
+ switch (insn_data[d->icode].operand[1].mode)
+ {
+ case V2SImode:
+ type = int_ftype_int_v2si_v2si;
+ break;
+ case V2SFmode:
+ type = int_ftype_int_v2sf_v2sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Evsel predicates. */
+ d = (struct builtin_description *) bdesc_spe_evsel;
+ for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
+ {
+ tree type;
+
+ switch (insn_data[d->icode].operand[1].mode)
+ {
+ case V2SImode:
+ type = v2si_ftype_4_v2si;
+ break;
+ case V2SFmode:
+ type = v2sf_ftype_4_v2sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+}
+
+static void
+paired_init_builtins (void)
+{
+ const struct builtin_description *d;
+ size_t i;
+ tree endlink = void_list_node;
+
+ tree int_ftype_int_v2sf_v2sf
+ = build_function_type
+ (integer_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, V2SF_type_node,
+ tree_cons (NULL_TREE, V2SF_type_node,
+ endlink))));
+ tree pcfloat_type_node =
+ build_pointer_type (build_qualified_type
+ (float_type_node, TYPE_QUAL_CONST));
+
+ tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
+ long_integer_type_node,
+ pcfloat_type_node,
+ NULL_TREE);
+ tree void_ftype_v2sf_long_pcfloat =
+ build_function_type_list (void_type_node,
+ V2SF_type_node,
+ long_integer_type_node,
+ pcfloat_type_node,
+ NULL_TREE);
+
+ def_builtin (0, "__builtin_paired_lx", v2sf_ftype_long_pcfloat,
+ PAIRED_BUILTIN_LX);
+
+ def_builtin (0, "__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
+ PAIRED_BUILTIN_STX);
+
+ /* Predicates. */
+ d = bdesc_paired_preds;
+ for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
+ {
+ tree type;
+
+ switch (insn_data[d->icode].operand[1].mode)
+ {
+ case V2SFmode:
+ type = int_ftype_int_v2sf_v2sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+}
+
+static void
+altivec_init_builtins (void)
+{
+ const struct builtin_description *d;
+ const struct builtin_description_predicates *dp;
+ size_t i;
+ tree ftype;
+
+ tree pfloat_type_node = build_pointer_type (float_type_node);
+ tree pint_type_node = build_pointer_type (integer_type_node);
+ tree pshort_type_node = build_pointer_type (short_integer_type_node);
+ tree pchar_type_node = build_pointer_type (char_type_node);
+
+ tree pvoid_type_node = build_pointer_type (void_type_node);
+
+ tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
+ tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
+ tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
+ tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));
+
+ tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));
+
+ tree int_ftype_opaque
+ = build_function_type_list (integer_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+ tree opaque_ftype_opaque
+ = build_function_type (integer_type_node,
+ NULL_TREE);
+ tree opaque_ftype_opaque_int
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, integer_type_node, NULL_TREE);
+ tree opaque_ftype_opaque_opaque_int
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ tree int_ftype_int_opaque_opaque
+ = build_function_type_list (integer_type_node,
+ integer_type_node, opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+ tree int_ftype_int_v4si_v4si
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V4SI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_pcfloat
+ = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
+ tree void_ftype_pfloat_v4sf
+ = build_function_type_list (void_type_node,
+ pfloat_type_node, V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_pcint
+ = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
+ tree void_ftype_pint_v4si
+ = build_function_type_list (void_type_node,
+ pint_type_node, V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_pcshort
+ = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
+ tree void_ftype_pshort_v8hi
+ = build_function_type_list (void_type_node,
+ pshort_type_node, V8HI_type_node, NULL_TREE);
+ tree v16qi_ftype_pcchar
+ = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
+ tree void_ftype_pchar_v16qi
+ = build_function_type_list (void_type_node,
+ pchar_type_node, V16QI_type_node, NULL_TREE);
+ tree void_ftype_v4si
+ = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_void
+ = build_function_type (V8HI_type_node, void_list_node);
+ tree void_ftype_void
+ = build_function_type (void_type_node, void_list_node);
+ tree void_ftype_int
+ = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
+
+ tree opaque_ftype_long_pcvoid
+ = build_function_type_list (opaque_V4SI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree v16qi_ftype_long_pcvoid
+ = build_function_type_list (V16QI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree v8hi_ftype_long_pcvoid
+ = build_function_type_list (V8HI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+ tree v4si_ftype_long_pcvoid
+ = build_function_type_list (V4SI_type_node,
+ long_integer_type_node, pcvoid_type_node, NULL_TREE);
+
+ tree void_ftype_opaque_long_pvoid
+ = build_function_type_list (void_type_node,
+ opaque_V4SI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v4si_long_pvoid
+ = build_function_type_list (void_type_node,
+ V4SI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v16qi_long_pvoid
+ = build_function_type_list (void_type_node,
+ V16QI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree void_ftype_v8hi_long_pvoid
+ = build_function_type_list (void_type_node,
+ V8HI_type_node, long_integer_type_node,
+ pvoid_type_node, NULL_TREE);
+ tree int_ftype_int_v8hi_v8hi
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V8HI_type_node,
+ V8HI_type_node, NULL_TREE);
+ tree int_ftype_int_v16qi_v16qi
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V16QI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree int_ftype_int_v4sf_v4sf
+ = build_function_type_list (integer_type_node,
+ integer_type_node, V4SF_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_v4si
+ = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi
+ = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi
+ = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf
+ = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree void_ftype_pcvoid_int_int
+ = build_function_type_list (void_type_node,
+ pcvoid_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4si);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4si);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
+ ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
+ ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
+
+ if (rs6000_cpu == PROCESSOR_CELL)
+ {
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
+ }
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
+
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
+
+ /* Add the DST variants. */
+ d = bdesc_dst;
+ for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
+ def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);
+
+ /* Initialize the predicates. */
+ dp = bdesc_altivec_preds;
+ for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
+ {
+ enum machine_mode mode1;
+ tree type;
+ bool is_overloaded = dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ mode1 = VOIDmode;
+ else
+ mode1 = insn_data[dp->icode].operand[1].mode;
+
+ switch (mode1)
+ {
+ case VOIDmode:
+ type = int_ftype_int_opaque_opaque;
+ break;
+ case V4SImode:
+ type = int_ftype_int_v4si_v4si;
+ break;
+ case V8HImode:
+ type = int_ftype_int_v8hi_v8hi;
+ break;
+ case V16QImode:
+ type = int_ftype_int_v16qi_v16qi;
+ break;
+ case V4SFmode:
+ type = int_ftype_int_v4sf_v4sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (dp->mask, dp->name, type, dp->code);
+ }
+
+ /* Initialize the abs* operators. */
+ d = bdesc_abs;
+ for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
+ {
+ enum machine_mode mode0;
+ tree type;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ if (TARGET_ALTIVEC)
+ {
+ tree decl;
+
+ /* Initialize target builtin that implements
+ targetm.vectorize.builtin_mask_for_load. */
+
+ decl = add_builtin_function ("__builtin_altivec_mask_for_load",
+ v16qi_ftype_long_pcvoid,
+ ALTIVEC_BUILTIN_MASK_FOR_LOAD,
+ BUILT_IN_MD, NULL, NULL_TREE);
+ TREE_READONLY (decl) = 1;
+ /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
+ altivec_builtin_mask_for_load = decl;
+ }
+
+ /* Access to the vec_init patterns. */
+ ftype = build_function_type_list (V4SI_type_node, integer_type_node,
+ integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node,
+ short_integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI);
+
+ ftype = build_function_type_list (V16QI_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, char_type_node,
+ char_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, float_type_node,
+ float_type_node, float_type_node,
+ float_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF);
+
+ /* Access to the vec_set patterns. */
+ ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
+ intSI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI);
+
+ ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
+ intHI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI);
+
+ ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
+ intQI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI);
+
+ ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
+ float_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF);
+
+ /* Access to the vec_extract patterns. */
+ ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI);
+
+ ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI);
+
+ ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI);
+
+ ftype = build_function_type_list (float_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF);
+}
+
+static void
+rs6000_common_init_builtins (void)
+{
+ const struct builtin_description *d;
+ size_t i;
+
+ tree v2sf_ftype_v2sf_v2sf_v2sf
+ = build_function_type_list (V2SF_type_node,
+ V2SF_type_node, V2SF_type_node,
+ V2SF_type_node, NULL_TREE);
+
+ tree v4sf_ftype_v4sf_v4sf_v16qi
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_v16qi
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_v16qi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_v16qi
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_int
+ = build_function_type_list (V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_int
+ = build_function_type_list (V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_int
+ = build_function_type_list (V16QI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v16qi
+ = build_function_type_list (V8HI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf
+ = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2si_v2si
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ opaque_V2SI_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2sf_v2sf_spe
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SF_type_node,
+ opaque_V2SF_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2sf_v2sf
+ = build_function_type_list (V2SF_type_node,
+ V2SF_type_node,
+ V2SF_type_node, NULL_TREE);
+
+ tree v2si_ftype_int_int
+ = build_function_type_list (opaque_V2SI_type_node,
+ integer_type_node, integer_type_node,
+ NULL_TREE);
+
+ tree opaque_ftype_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2si
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2sf_spe
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SF_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2sf
+ = build_function_type_list (V2SF_type_node,
+ V2SF_type_node, NULL_TREE);
+
+ tree v2sf_ftype_v2si
+ = build_function_type_list (opaque_V2SF_type_node,
+ opaque_V2SI_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2sf
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SF_type_node, NULL_TREE);
+
+ tree v2si_ftype_v2si_char
+ = build_function_type_list (opaque_V2SI_type_node,
+ opaque_V2SI_type_node,
+ char_type_node, NULL_TREE);
+
+ tree v2si_ftype_int_char
+ = build_function_type_list (opaque_V2SI_type_node,
+ integer_type_node, char_type_node, NULL_TREE);
+
+ tree v2si_ftype_char
+ = build_function_type_list (opaque_V2SI_type_node,
+ char_type_node, NULL_TREE);
+
+ tree int_ftype_int_int
+ = build_function_type_list (integer_type_node,
+ integer_type_node, integer_type_node,
+ NULL_TREE);
+
+ tree opaque_ftype_opaque_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4si_int
+ = build_function_type_list (V4SF_type_node,
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf_int
+ = build_function_type_list (V4SI_type_node,
+ V4SF_type_node, integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_int
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_int
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_int
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, integer_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi_int
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ integer_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_int
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ integer_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_int
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node,
+ integer_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_int
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ integer_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree opaque_ftype_opaque_opaque_opaque
+ = build_function_type_list (opaque_V4SI_type_node,
+ opaque_V4SI_type_node, opaque_V4SI_type_node,
+ opaque_V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_v4si
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v4sf_ftype_v4sf_v4sf_v4sf
+ = build_function_type_list (V4SF_type_node,
+ V4SF_type_node, V4SF_type_node,
+ V4SF_type_node, NULL_TREE);
+ tree v4si_ftype_v4si_v4si_v4si
+ = build_function_type_list (V4SI_type_node,
+ V4SI_type_node, V4SI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v8hi_ftype_v8hi_v8hi_v8hi
+ = build_function_type_list (V8HI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ V8HI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v8hi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V8HI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v16qi_v16qi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V16QI_type_node, V16QI_type_node,
+ V4SI_type_node, NULL_TREE);
+ tree v16qi_ftype_v16qi_v16qi
+ = build_function_type_list (V16QI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v4sf_v4sf
+ = build_function_type_list (V4SI_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree v8hi_ftype_v16qi_v16qi
+ = build_function_type_list (V8HI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v8hi
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v8hi_ftype_v4si_v4si
+ = build_function_type_list (V8HI_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree v16qi_ftype_v8hi_v8hi
+ = build_function_type_list (V16QI_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+ tree v4si_ftype_v16qi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V16QI_type_node, V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v16qi_v16qi
+ = build_function_type_list (V4SI_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi_v4si
+ = build_function_type_list (V4SI_type_node,
+ V8HI_type_node, V4SI_type_node, NULL_TREE);
+ tree v4si_ftype_v8hi
+ = build_function_type_list (V4SI_type_node, V8HI_type_node, NULL_TREE);
+ tree int_ftype_v4si_v4si
+ = build_function_type_list (integer_type_node,
+ V4SI_type_node, V4SI_type_node, NULL_TREE);
+ tree int_ftype_v4sf_v4sf
+ = build_function_type_list (integer_type_node,
+ V4SF_type_node, V4SF_type_node, NULL_TREE);
+ tree int_ftype_v16qi_v16qi
+ = build_function_type_list (integer_type_node,
+ V16QI_type_node, V16QI_type_node, NULL_TREE);
+ tree int_ftype_v8hi_v8hi
+ = build_function_type_list (integer_type_node,
+ V8HI_type_node, V8HI_type_node, NULL_TREE);
+
+ /* Add the simple ternary operators. */
+ d = bdesc_3arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
+ {
+ enum machine_mode mode0, mode1, mode2, mode3;
+ tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ mode2 = VOIDmode;
+ mode3 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ mode3 = insn_data[d->icode].operand[3].mode;
+ }
+
+ /* When all four are of the same mode. */
+ if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
+ {
+ switch (mode0)
+ {
+ case VOIDmode:
+ type = opaque_ftype_opaque_opaque_opaque;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_v4si;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_v4sf;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_v8hi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ case V2SFmode:
+ type = v2sf_ftype_v2sf_v2sf_v2sf;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
+ {
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_v16qi;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_v16qi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
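+ /* For example, the vperm patterns take three operands of a single
+ mode plus a V16QImode permute control, and so select one of the
+ cases above. */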
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
+ && mode3 == V4SImode)
+ type = v4si_ftype_v16qi_v16qi_v4si;
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
+ && mode3 == V4SImode)
+ type = v4si_ftype_v8hi_v8hi_v4si;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
+ && mode3 == V4SImode)
+ type = v4sf_ftype_v4sf_v4sf_v4si;
+
+ /* vchar, vchar, vchar, 4-bit literal. */
+ else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v16qi_ftype_v16qi_v16qi_int;
+
+ /* vshort, vshort, vshort, 4-bit literal. */
+ else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v8hi_ftype_v8hi_v8hi_int;
+
+ /* vint, vint, vint, 4-bit literal. */
+ else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v4si_ftype_v4si_v4si_int;
+
+ /* vfloat, vfloat, vfloat, 4-bit literal. */
+ else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v4sf_ftype_v4sf_v4sf_int;
+
+ else
+ gcc_unreachable ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the simple binary operators. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ {
+ enum machine_mode mode0, mode1, mode2;
+ tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ mode2 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ }
+
+ /* When all three operands are of the same mode. */
+ if (mode0 == mode1 && mode1 == mode2)
+ {
+ switch (mode0)
+ {
+ case VOIDmode:
+ type = opaque_ftype_opaque_opaque;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi;
+ break;
+ case V2SImode:
+ type = v2si_ftype_v2si_v2si;
+ break;
+ case V2SFmode:
+ if (TARGET_PAIRED_FLOAT)
+ type = v2sf_ftype_v2sf_v2sf;
+ else
+ type = v2sf_ftype_v2sf_v2sf_spe;
+ break;
+ case SImode:
+ type = int_ftype_int_int;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ /* A few other combos we really don't want to do manually. */
+
+ /* vint, vfloat, vfloat. */
+ else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == V4SFmode)
+ type = v4si_ftype_v4sf_v4sf;
+
+ /* vshort, vchar, vchar. */
+ else if (mode0 == V8HImode && mode1 == V16QImode && mode2 == V16QImode)
+ type = v8hi_ftype_v16qi_v16qi;
+
+ /* vint, vshort, vshort. */
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode)
+ type = v4si_ftype_v8hi_v8hi;
+
+ /* vshort, vint, vint. */
+ else if (mode0 == V8HImode && mode1 == V4SImode && mode2 == V4SImode)
+ type = v8hi_ftype_v4si_v4si;
+
+ /* vchar, vshort, vshort. */
+ else if (mode0 == V16QImode && mode1 == V8HImode && mode2 == V8HImode)
+ type = v16qi_ftype_v8hi_v8hi;
+
+ /* vint, vchar, vint. */
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V4SImode)
+ type = v4si_ftype_v16qi_v4si;
+
+ /* vint, vchar, vchar. */
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode)
+ type = v4si_ftype_v16qi_v16qi;
+
+ /* vint, vshort, vint. */
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
+ type = v4si_ftype_v8hi_v4si;
+
+ /* vint, vint, 5-bit literal. */
+ else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
+ type = v4si_ftype_v4si_int;
+
+ /* vshort, vshort, 5-bit literal. */
+ else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
+ type = v8hi_ftype_v8hi_int;
+
+ /* vchar, vchar, 5-bit literal. */
+ else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
+ type = v16qi_ftype_v16qi_int;
+
+ /* vfloat, vint, 5-bit literal. */
+ else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
+ type = v4sf_ftype_v4si_int;
+
+ /* vint, vfloat, 5-bit literal. */
+ else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
+ type = v4si_ftype_v4sf_int;
+
+ else if (mode0 == V2SImode && mode1 == SImode && mode2 == SImode)
+ type = v2si_ftype_int_int;
+
+ else if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
+ type = v2si_ftype_v2si_char;
+
+ else if (mode0 == V2SImode && mode1 == SImode && mode2 == QImode)
+ type = v2si_ftype_int_char;
+
+ else
+ {
+ /* int, x, x. */
+ gcc_assert (mode0 == SImode);
+ switch (mode1)
+ {
+ case V4SImode:
+ type = int_ftype_v4si_v4si;
+ break;
+ case V4SFmode:
+ type = int_ftype_v4sf_v4sf;
+ break;
+ case V16QImode:
+ type = int_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = int_ftype_v8hi_v8hi;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the simple unary operators. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ {
+ enum machine_mode mode0, mode1;
+ tree type;
+ bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
+ && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;
+
+ if (is_overloaded)
+ {
+ mode0 = VOIDmode;
+ mode1 = VOIDmode;
+ }
+ else
+ {
+ if (d->name == 0 || d->icode == CODE_FOR_nothing)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ }
+
+ if (mode0 == V4SImode && mode1 == QImode)
+ type = v4si_ftype_int;
+ else if (mode0 == V8HImode && mode1 == QImode)
+ type = v8hi_ftype_int;
+ else if (mode0 == V16QImode && mode1 == QImode)
+ type = v16qi_ftype_int;
+ else if (mode0 == VOIDmode && mode1 == VOIDmode)
+ type = opaque_ftype_opaque;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode)
+ type = v4sf_ftype_v4sf;
+ else if (mode0 == V8HImode && mode1 == V16QImode)
+ type = v8hi_ftype_v16qi;
+ else if (mode0 == V4SImode && mode1 == V8HImode)
+ type = v4si_ftype_v8hi;
+ else if (mode0 == V2SImode && mode1 == V2SImode)
+ type = v2si_ftype_v2si;
+ else if (mode0 == V2SFmode && mode1 == V2SFmode)
+ {
+ if (TARGET_PAIRED_FLOAT)
+ type = v2sf_ftype_v2sf;
+ else
+ type = v2sf_ftype_v2sf_spe;
+ }
+ else if (mode0 == V2SFmode && mode1 == V2SImode)
+ type = v2sf_ftype_v2si;
+ else if (mode0 == V2SImode && mode1 == V2SFmode)
+ type = v2si_ftype_v2sf;
+ else if (mode0 == V2SImode && mode1 == QImode)
+ type = v2si_ftype_char;
+ else
+ gcc_unreachable ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+}
+
+static void
+rs6000_init_libfuncs (void)
+{
+ if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
+ && !TARGET_POWER2 && !TARGET_POWERPC)
+ {
+ /* AIX library routines for float->int conversion. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
+ }
+
+ if (!TARGET_IEEEQUAD)
+ /* AIX/Darwin/64-bit Linux quad floating point routines. */
+ if (!TARGET_XL_COMPAT)
+ {
+ set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
+ set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
+ set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
+
+ if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
+ {
+ set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
+ set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
+ set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
+ set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
+ set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
+ set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
+ set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
+
+ set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
+ set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
+ set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
+ set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
+ set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
+ }
+
+ if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
+ set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
+ }
+ else
+ {
+ set_optab_libfunc (add_optab, TFmode, "_xlqadd");
+ set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
+ set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
+ }
+ else
+ {
+ /* 32-bit SVR4 quad floating point routines. */
+
+ set_optab_libfunc (add_optab, TFmode, "_q_add");
+ set_optab_libfunc (sub_optab, TFmode, "_q_sub");
+ set_optab_libfunc (neg_optab, TFmode, "_q_neg");
+ set_optab_libfunc (smul_optab, TFmode, "_q_mul");
+ set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
+ if (TARGET_PPC_GPOPT || TARGET_POWER2)
+ set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
+
+ set_optab_libfunc (eq_optab, TFmode, "_q_feq");
+ set_optab_libfunc (ne_optab, TFmode, "_q_fne");
+ set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
+ set_optab_libfunc (ge_optab, TFmode, "_q_fge");
+ set_optab_libfunc (lt_optab, TFmode, "_q_flt");
+ set_optab_libfunc (le_optab, TFmode, "_q_fle");
+
+ set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
+ set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
+ set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
+ set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
+ set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
+ set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
+ set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
+ set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
+ }
+}
+
+
+/* Expand a block clear operation, and return 1 if successful. Return 0
+ if we should let the compiler generate normal code.
+
+ operands[0] is the destination
+ operands[1] is the length
+ operands[3] is the alignment */
+
+int
+expand_block_clear (rtx operands[])
+{
+ rtx orig_dest = operands[0];
+ rtx bytes_rtx = operands[1];
+ rtx align_rtx = operands[3];
+ bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ HOST_WIDE_INT align;
+ HOST_WIDE_INT bytes;
+ int offset;
+ int clear_bytes;
+ int clear_step;
+
+ /* If this is not a fixed size clear, just call memset. */
+ if (! constp)
+ return 0;
+
+ /* This must be a fixed size alignment. */
+ gcc_assert (GET_CODE (align_rtx) == CONST_INT);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
+
+ /* Anything to clear? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* Use the builtin memset after a point, to avoid huge code bloat.
+ When optimize_size, avoid any significant code bloat; calling
+ memset is about 4 instructions, so allow for one instruction to
+ load zero and three to do clearing. */
+ if (TARGET_ALTIVEC && align >= 128)
+ clear_step = 16;
+ else if (TARGET_POWERPC64 && align >= 32)
+ clear_step = 8;
+ else if (TARGET_SPE && align >= 64)
+ clear_step = 8;
+ else
+ clear_step = 4;
+
+ if (optimize_size && bytes > 3 * clear_step)
+ return 0;
+ if (! optimize_size && bytes > 8 * clear_step)
+ return 0;
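+
+ /* For example, with word alignment on a 32-bit target (clear_step 4)
+ this expands clears of up to 12 bytes inline at -Os and up to 32
+ bytes otherwise; anything larger falls back to the memset call. */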
+
+ for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
+ {
+ enum machine_mode mode = BLKmode;
+ rtx dest;
+
+ if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
+ {
+ clear_bytes = 16;
+ mode = V4SImode;
+ }
+ else if (bytes >= 8 && TARGET_SPE && align >= 64)
+ {
+ clear_bytes = 8;
+ mode = V2SImode;
+ }
+ else if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
+ {
+ clear_bytes = 8;
+ mode = DImode;
+ }
+ else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
+ { /* clear 4 bytes */
+ clear_bytes = 4;
+ mode = SImode;
+ }
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ { /* clear 2 bytes */
+ clear_bytes = 2;
+ mode = HImode;
+ }
+ else /* clear 1 byte at a time */
+ {
+ clear_bytes = 1;
+ mode = QImode;
+ }
+
+ dest = adjust_address (orig_dest, mode, offset);
+
+ emit_move_insn (dest, CONST0_RTX (mode));
+ }
+
+ return 1;
+}
+
+
+/* Expand a block move operation, and return 1 if successful. Return 0
+ if we should let the compiler generate normal code.
+
+ operands[0] is the destination
+ operands[1] is the source
+ operands[2] is the length
+ operands[3] is the alignment */
+
+#define MAX_MOVE_REG 4
+
+int
+expand_block_move (rtx operands[])
+{
+ rtx orig_dest = operands[0];
+ rtx orig_src = operands[1];
+ rtx bytes_rtx = operands[2];
+ rtx align_rtx = operands[3];
+ int constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ int align;
+ int bytes;
+ int offset;
+ int move_bytes;
+ rtx stores[MAX_MOVE_REG];
+ int num_reg = 0;
+
+ /* If this is not a fixed size move, just call memcpy. */
+ if (! constp)
+ return 0;
+
+ /* This must be a fixed size alignment. */
+ gcc_assert (GET_CODE (align_rtx) == CONST_INT);
+ align = INTVAL (align_rtx) * BITS_PER_UNIT;
+
+ /* Anything to move? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* store_one_arg depends on expand_block_move to handle at least the size of
+ reg_parm_stack_space. */
+ if (bytes > (TARGET_POWERPC64 ? 64 : 32))
+ return 0;
+
+ for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
+ {
+ union {
+ rtx (*movmemsi) (rtx, rtx, rtx, rtx);
+ rtx (*mov) (rtx, rtx);
+ } gen_func;
+ enum machine_mode mode = BLKmode;
+ rtx src, dest;
+
+ /* Altivec first, since it will be faster than a string move
+ when it applies, and usually not significantly larger. */
+ if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
+ {
+ move_bytes = 16;
+ mode = V4SImode;
+ gen_func.mov = gen_movv4si;
+ }
+ else if (TARGET_SPE && bytes >= 8 && align >= 64)
+ {
+ move_bytes = 8;
+ mode = V2SImode;
+ gen_func.mov = gen_movv2si;
+ }
+ else if (TARGET_STRING
+ && bytes > 24 /* move up to 32 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8]
+ && ! fixed_regs[9]
+ && ! fixed_regs[10]
+ && ! fixed_regs[11]
+ && ! fixed_regs[12])
+ {
+ move_bytes = (bytes > 32) ? 32 : bytes;
+ gen_func.movmemsi = gen_movmemsi_8reg;
+ }
+ else if (TARGET_STRING
+ && bytes > 16 /* move up to 24 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8]
+ && ! fixed_regs[9]
+ && ! fixed_regs[10])
+ {
+ move_bytes = (bytes > 24) ? 24 : bytes;
+ gen_func.movmemsi = gen_movmemsi_6reg;
+ }
+ else if (TARGET_STRING
+ && bytes > 8 /* move up to 16 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8])
+ {
+ move_bytes = (bytes > 16) ? 16 : bytes;
+ gen_func.movmemsi = gen_movmemsi_4reg;
+ }
+ else if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
+ {
+ move_bytes = 8;
+ mode = DImode;
+ gen_func.mov = gen_movdi;
+ }
+ else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
+ { /* move up to 8 bytes at a time */
+ move_bytes = (bytes > 8) ? 8 : bytes;
+ gen_func.movmemsi = gen_movmemsi_2reg;
+ }
+ else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
+ { /* move 4 bytes */
+ move_bytes = 4;
+ mode = SImode;
+ gen_func.mov = gen_movsi;
+ }
+ else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
+ { /* move 2 bytes */
+ move_bytes = 2;
+ mode = HImode;
+ gen_func.mov = gen_movhi;
+ }
+ else if (TARGET_STRING && bytes > 1)
+ { /* move up to 4 bytes at a time */
+ move_bytes = (bytes > 4) ? 4 : bytes;
+ gen_func.movmemsi = gen_movmemsi_1reg;
+ }
+ else /* move 1 byte at a time */
+ {
+ move_bytes = 1;
+ mode = QImode;
+ gen_func.mov = gen_movqi;
+ }
+
+ src = adjust_address (orig_src, mode, offset);
+ dest = adjust_address (orig_dest, mode, offset);
+
+ if (mode != BLKmode)
+ {
+ rtx tmp_reg = gen_reg_rtx (mode);
+
+ emit_insn ((*gen_func.mov) (tmp_reg, src));
+ stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
+ }
+
+ if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
+ {
+ int i;
+ for (i = 0; i < num_reg; i++)
+ emit_insn (stores[i]);
+ num_reg = 0;
+ }
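+
+ /* For example, a 12-byte word-aligned copy on a 32-bit target
+ without string instructions becomes three SImode load/store
+ pairs; the loads are emitted immediately while the stores are
+ batched above, so up to MAX_MOVE_REG loads issue ahead of their
+ stores. */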
+
+ if (mode == BLKmode)
+ {
+ /* Move the address into scratch registers. The movmemsi
+ patterns require zero offset. */
+ if (!REG_P (XEXP (src, 0)))
+ {
+ rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
+ src = replace_equiv_address (src, src_reg);
+ }
+ set_mem_size (src, GEN_INT (move_bytes));
+
+ if (!REG_P (XEXP (dest, 0)))
+ {
+ rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
+ dest = replace_equiv_address (dest, dest_reg);
+ }
+ set_mem_size (dest, GEN_INT (move_bytes));
+
+ emit_insn ((*gen_func.movmemsi) (dest, src,
+ GEN_INT (move_bytes & 31),
+ align_rtx));
+ }
+ }
+
+ return 1;
+}
+
+
+/* Return a string to perform a load_multiple operation.
+ operands[0] is the vector.
+ operands[1] is the source address.
+ operands[2] is the first destination register. */
+
+const char *
+rs6000_output_load_multiple (rtx operands[3])
+{
+ /* We have to handle the case where the pseudo used to contain the address
+ is assigned to one of the output registers. */
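+ /* For example, if the base register is also the third of four
+ target registers, the loads below are reordered so that the
+ overlapping word is loaded last, after the base register has
+ served all the other loads. */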
+ int i, j;
+ int words = XVECLEN (operands[0], 0);
+ rtx xop[10];
+
+ if (XVECLEN (operands[0], 0) == 1)
+ return "{l|lwz} %2,0(%1)";
+
+ for (i = 0; i < words; i++)
+ if (refers_to_regno_p (REGNO (operands[2]) + i,
+ REGNO (operands[2]) + i + 1, operands[1], 0))
+ {
+ if (i == words-1)
+ {
+ xop[0] = GEN_INT (4 * (words-1));
+ xop[1] = operands[1];
+ xop[2] = operands[2];
+ output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
+ return "";
+ }
+ else if (i == 0)
+ {
+ xop[0] = GEN_INT (4 * (words-1));
+ xop[1] = operands[1];
+ xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
+ output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
+ return "";
+ }
+ else
+ {
+ for (j = 0; j < words; j++)
+ if (j != i)
+ {
+ xop[0] = GEN_INT (j * 4);
+ xop[1] = operands[1];
+ xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
+ output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
+ }
+ xop[0] = GEN_INT (i * 4);
+ xop[1] = operands[1];
+ output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
+ return "";
+ }
+ }
+
+ return "{lsi|lswi} %2,%1,%N0";
+}
+
+
+/* A validation routine: say whether CODE, a condition code, and MODE
+ match. The other alternatives either don't make sense or should
+ never be generated. */
+
+void
+validate_condition_mode (enum rtx_code code, enum machine_mode mode)
+{
+ gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
+ || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
+ && GET_MODE_CLASS (mode) == MODE_CC);
+
+ /* These don't make sense. */
+ gcc_assert ((code != GT && code != LT && code != GE && code != LE)
+ || mode != CCUNSmode);
+
+ gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
+ || mode == CCUNSmode);
+
+ gcc_assert (mode == CCFPmode
+ || (code != ORDERED && code != UNORDERED
+ && code != UNEQ && code != LTGT
+ && code != UNGT && code != UNLT
+ && code != UNGE && code != UNLE));
+
+ /* These should never be generated except for
+ flag_finite_math_only. */
+ gcc_assert (mode != CCFPmode
+ || flag_finite_math_only
+ || (code != LE && code != GE
+ && code != UNEQ && code != LTGT
+ && code != UNGT && code != UNLT));
+
+ /* These are invalid; the information is not there. */
+ gcc_assert (mode != CCEQmode || code == EQ || code == NE);
+}
+
+
+/* Return 1 if ANDOP is a mask whose set bits all lie within the mask
+ required to convert the result of a rotate insn into a shift left
+ insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
+
+int
+includes_lshift_p (rtx shiftop, rtx andop)
+{
+ unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
+
+ shift_mask <<= INTVAL (shiftop);
+
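+ /* For example, SHIFTOP = 4 gives shift_mask = ...fffffff0; ANDOP =
+ 0xffffff00 has no bits in the low four positions that the rotate
+ would wrap around, so the rotate-and-mask can become a left shift
+ of 4. */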
+ return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
+}
+
+/* Similar, but for right shift. */
+
+int
+includes_rshift_p (rtx shiftop, rtx andop)
+{
+ unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
+
+ shift_mask >>= INTVAL (shiftop);
+
+ return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
+}
+
+/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
+ to perform a left shift. It must have exactly SHIFTOP least
+ significant 0's, then one or more 1's, then zero or more 0's. */
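+/* For example, with SHIFTOP = 8 the mask 0x0000ff00 is accepted:
+ eight low 0's, eight 1's, then 0's. The lsb = c & -c trick below
+ isolates each transition in turn. */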
+
+int
+includes_rldic_lshift_p (rtx shiftop, rtx andop)
+{
+ if (GET_CODE (andop) == CONST_INT)
+ {
+ HOST_WIDE_INT c, lsb, shift_mask;
+
+ c = INTVAL (andop);
+ if (c == 0 || c == ~0)
+ return 0;
+
+ shift_mask = ~0;
+ shift_mask <<= INTVAL (shiftop);
+
+ /* Find the least significant one bit. */
+ lsb = c & -c;
+
+ /* It must coincide with the LSB of the shift mask. */
+ if (-lsb != shift_mask)
+ return 0;
+
+ /* Invert to look for the next transition (if any). */
+ c = ~c;
+
+ /* Remove the low group of ones (originally low group of zeros). */
+ c &= -lsb;
+
+ /* Again find the lsb, and check we have all 1's above. */
+ lsb = c & -c;
+ return c == -lsb;
+ }
+ else if (GET_CODE (andop) == CONST_DOUBLE
+ && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
+ {
+ HOST_WIDE_INT low, high, lsb;
+ HOST_WIDE_INT shift_mask_low, shift_mask_high;
+
+ low = CONST_DOUBLE_LOW (andop);
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ high = CONST_DOUBLE_HIGH (andop);
+
+ if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
+ || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
+ return 0;
+
+ if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
+ {
+ shift_mask_high = ~0;
+ if (INTVAL (shiftop) > 32)
+ shift_mask_high <<= INTVAL (shiftop) - 32;
+
+ lsb = high & -high;
+
+ if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
+ return 0;
+
+ high = ~high;
+ high &= -lsb;
+
+ lsb = high & -high;
+ return high == -lsb;
+ }
+
+ shift_mask_low = ~0;
+ shift_mask_low <<= INTVAL (shiftop);
+
+ lsb = low & -low;
+
+ if (-lsb != shift_mask_low)
+ return 0;
+
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ high = ~high;
+ low = ~low;
+ low &= -lsb;
+
+ if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
+ {
+ lsb = high & -high;
+ return high == -lsb;
+ }
+
+ lsb = low & -low;
+ return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
+ }
+ else
+ return 0;
+}
+
+/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
+ to perform a left shift. It must have SHIFTOP or more least
+ significant 0's, with the remainder of the word 1's. */
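+/* For example, with SHIFTOP = 16 the mask -0x10000 (sixteen low 0's,
+ all 1's above) is accepted, while 0xffff0000 is rejected on a
+ 64-bit host because its high bits are 0's, not 1's. */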
+
+int
+includes_rldicr_lshift_p (rtx shiftop, rtx andop)
+{
+ if (GET_CODE (andop) == CONST_INT)
+ {
+ HOST_WIDE_INT c, lsb, shift_mask;
+
+ shift_mask = ~0;
+ shift_mask <<= INTVAL (shiftop);
+ c = INTVAL (andop);
+
+ /* Find the least significant one bit. */
+ lsb = c & -c;
+
+ /* It must be covered by the shift mask.
+ This test also rejects c == 0. */
+ if ((lsb & shift_mask) == 0)
+ return 0;
+
+ /* Check we have all 1's above the transition, and reject all 1's. */
+ return c == -lsb && lsb != 1;
+ }
+ else if (GET_CODE (andop) == CONST_DOUBLE
+ && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
+ {
+ HOST_WIDE_INT low, lsb, shift_mask_low;
+
+ low = CONST_DOUBLE_LOW (andop);
+
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ {
+ HOST_WIDE_INT high, shift_mask_high;
+
+ high = CONST_DOUBLE_HIGH (andop);
+
+ if (low == 0)
+ {
+ shift_mask_high = ~0;
+ if (INTVAL (shiftop) > 32)
+ shift_mask_high <<= INTVAL (shiftop) - 32;
+
+ lsb = high & -high;
+
+ if ((lsb & shift_mask_high) == 0)
+ return 0;
+
+ return high == -lsb;
+ }
+ if (high != ~0)
+ return 0;
+ }
+
+ shift_mask_low = ~0;
+ shift_mask_low <<= INTVAL (shiftop);
+
+ lsb = low & -low;
+
+ if ((lsb & shift_mask_low) == 0)
+ return 0;
+
+ return low == -lsb && lsb != 1;
+ }
+ else
+ return 0;
+}
+
+/* Return 1 if the operands will generate valid arguments to the rlwimi
+ instruction for an insert with right shift in 64-bit mode. The mask
+ may not start on the first bit or stop on the last bit because the
+ wrap-around effects of the instruction do not correspond to the
+ semantics of the RTL insn. */
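+/* For example, SIZEOP = 8, STARTOP = 40, SHIFTOP = 8 is accepted: the
+ eight inserted bits land at positions 40..47, entirely within the
+ low word, and the shifted source bits do not wrap. */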
+
+int
+insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
+{
+ if (INTVAL (startop) > 32
+ && INTVAL (startop) < 64
+ && INTVAL (sizeop) > 1
+ && INTVAL (sizeop) + INTVAL (startop) < 64
+ && INTVAL (shiftop) > 0
+ && INTVAL (sizeop) + INTVAL (shiftop) < 32
+ && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
+ for lfq and stfq insns iff the registers are hard registers. */
+
+int
+registers_ok_for_quad_peep (rtx reg1, rtx reg2)
+{
+ /* We might have been passed a SUBREG. */
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ return 0;
+
+ /* We might have been passed non floating point registers. */
+ if (!FP_REGNO_P (REGNO (reg1))
+ || !FP_REGNO_P (REGNO (reg2)))
+ return 0;
+
+ return (REGNO (reg1) == REGNO (reg2) - 1);
+}
+
+/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
+ addr1 and addr2 must be in consecutive memory locations
+ (addr2 == addr1 + 8). */
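+/* For example, (mem (plus (reg 9) (const_int 16))) and
+ (mem (plus (reg 9) (const_int 24))) qualify, assuming neither is
+ volatile: same base register, offsets differing by exactly 8. */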
+
+int
+mems_ok_for_quad_peep (rtx mem1, rtx mem2)
+{
+ rtx addr1, addr2;
+ unsigned int reg1, reg2;
+ int offset1, offset2;
+
+ /* The mems cannot be volatile. */
+ if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
+ return 0;
+
+ addr1 = XEXP (mem1, 0);
+ addr2 = XEXP (mem2, 0);
+
+ /* Extract an offset (if used) from the first addr. */
+ if (GET_CODE (addr1) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr1, 0)) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (XEXP (addr1, 0));
+ /* The offset must be constant! */
+ if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
+ return 0;
+ offset1 = INTVAL (XEXP (addr1, 1));
+ }
+ }
+ else if (GET_CODE (addr1) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (addr1);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset1 = 0;
+ }
+
+ /* And now for the second addr. */
+ if (GET_CODE (addr2) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr2, 0)) != REG)
+ return 0;
+ else
+ {
+ reg2 = REGNO (XEXP (addr2, 0));
+ /* The offset must be constant. */
+ if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ return 0;
+ offset2 = INTVAL (XEXP (addr2, 1));
+ }
+ }
+ else if (GET_CODE (addr2) != REG)
+ return 0;
+ else
+ {
+ reg2 = REGNO (addr2);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset2 = 0;
+ }
+
+ /* Both of these must have the same base register. */
+ if (reg1 != reg2)
+ return 0;
+
+ /* The offset for the second addr must be 8 more than the first addr. */
+ if (offset2 != offset1 + 8)
+ return 0;
+
+ /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
+ instructions. */
+ return 1;
+}
+
+
+rtx
+rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
+{
+ static bool eliminated = false;
+ if (mode != SDmode)
+ return assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
+ else
+ {
+ rtx mem = cfun->machine->sdmode_stack_slot;
+ gcc_assert (mem != NULL_RTX);
+
+ if (!eliminated)
+ {
+ mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
+ cfun->machine->sdmode_stack_slot = mem;
+ eliminated = true;
+ }
+ return mem;
+ }
+}
+
+static tree
+rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
+{
+ /* Don't walk into types. */
+ if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ switch (TREE_CODE (*tp))
+ {
+ case VAR_DECL:
+ case PARM_DECL:
+ case FIELD_DECL:
+ case RESULT_DECL:
+ case REAL_CST:
+ case INDIRECT_REF:
+ case ALIGN_INDIRECT_REF:
+ case MISALIGNED_INDIRECT_REF:
+ case VIEW_CONVERT_EXPR:
+ if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
+ return *tp;
+ break;
+ default:
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+
+/* Allocate a 64-bit stack slot to be used for copying SDmode
+ values through if this function has any SDmode references. */
+
+static void
+rs6000_alloc_sdmode_stack_slot (void)
+{
+ tree t;
+ basic_block bb;
+ gimple_stmt_iterator gsi;
+
+ gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
+
+ FOR_EACH_BB (bb)
+ for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ {
+ tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
+ if (ret)
+ {
+ rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
+ cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
+ SDmode, 0);
+ return;
+ }
+ }
+
+ /* Check for any SDmode parameters of the function. */
+ for (t = DECL_ARGUMENTS (cfun->decl); t; t = TREE_CHAIN (t))
+ {
+ if (TREE_TYPE (t) == error_mark_node)
+ continue;
+
+ if (TYPE_MODE (TREE_TYPE (t)) == SDmode
+ || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
+ {
+ rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
+ cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
+ SDmode, 0);
+ return;
+ }
+ }
+}
+
+static void
+rs6000_instantiate_decls (void)
+{
+ if (cfun->machine->sdmode_stack_slot != NULL_RTX)
+ instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
+}
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in RCLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+
+enum reg_class
+rs6000_secondary_reload_class (enum reg_class rclass,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx in)
+{
+ int regno;
+
+ if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
+#if TARGET_MACHO
+ && MACHOPIC_INDIRECT
+#endif
+ ))
+ {
+ /* We cannot copy a symbolic operand directly into anything
+ other than BASE_REGS for TARGET_ELF. So indicate that a
+ register from BASE_REGS is needed as an intermediate
+ register.
+
+ On Darwin, pic addresses require a load from memory, which
+ needs a base register. */
+ if (rclass != BASE_REGS
+ && (GET_CODE (in) == SYMBOL_REF
+ || GET_CODE (in) == HIGH
+ || GET_CODE (in) == LABEL_REF
+ || GET_CODE (in) == CONST))
+ return BASE_REGS;
+ }
+
+ if (GET_CODE (in) == REG)
+ {
+ regno = REGNO (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ regno = true_regnum (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = -1;
+ }
+ }
+ else if (GET_CODE (in) == SUBREG)
+ {
+ regno = true_regnum (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = -1;
+ }
+ else
+ regno = -1;
+
+ /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
+ into anything. */
+ if (rclass == GENERAL_REGS || rclass == BASE_REGS
+ || (regno >= 0 && INT_REGNO_P (regno)))
+ return NO_REGS;
+
+ /* Constants, memory, and FP registers can go into FP registers. */
+ if ((regno == -1 || FP_REGNO_P (regno))
+ && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
+ return (mode != SDmode) ? NO_REGS : GENERAL_REGS;
+
+ /* Memory and AltiVec registers can go into AltiVec registers. */
+ if ((regno == -1 || ALTIVEC_REGNO_P (regno))
+ && rclass == ALTIVEC_REGS)
+ return NO_REGS;
+
+ /* We can copy among the CR registers. */
+ if ((rclass == CR_REGS || rclass == CR0_REGS)
+ && regno >= 0 && CR_REGNO_P (regno))
+ return NO_REGS;
+
+ /* Otherwise, we need GENERAL_REGS. */
+ return GENERAL_REGS;
+}
+
+/* Given a comparison operation, return the bit number in CCR to test. We
+ know this is a valid comparison.
+
+ SCC_P is 1 if this is for an scc. That means that %D will have been
+ used instead of %C, so the bits will be in different places.
+
+ Return -1 if OP isn't a valid comparison for some reason. */
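+/* For example, a comparison in CR1 gives base_bit = 4: EQ tests bit 6,
+ and an scc GE tests bit 7, the unordered position set up by the
+ preceding cror. */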
+
+int
+ccr_bit (rtx op, int scc_p)
+{
+ enum rtx_code code = GET_CODE (op);
+ enum machine_mode cc_mode;
+ int cc_regnum;
+ int base_bit;
+ rtx reg;
+
+ if (!COMPARISON_P (op))
+ return -1;
+
+ reg = XEXP (op, 0);
+
+ gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
+
+ cc_mode = GET_MODE (reg);
+ cc_regnum = REGNO (reg);
+ base_bit = 4 * (cc_regnum - CR0_REGNO);
+
+ validate_condition_mode (code, cc_mode);
+
+ /* When generating a sCOND operation, only positive conditions are
+ allowed. */
+ gcc_assert (!scc_p
+ || code == EQ || code == GT || code == LT || code == UNORDERED
+ || code == GTU || code == LTU);
+
+ switch (code)
+ {
+ case NE:
+ return scc_p ? base_bit + 3 : base_bit + 2;
+ case EQ:
+ return base_bit + 2;
+ case GT: case GTU: case UNLE:
+ return base_bit + 1;
+ case LT: case LTU: case UNGE:
+ return base_bit;
+ case ORDERED: case UNORDERED:
+ return base_bit + 3;
+
+ case GE: case GEU:
+ /* If scc, we will have done a cror to put the bit in the
+ unordered position. So test that bit. For integer, this is ! LT
+ unless this is an scc insn. */
+ return scc_p ? base_bit + 3 : base_bit;
+
+ case LE: case LEU:
+ return scc_p ? base_bit + 3 : base_bit + 1;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Return the GOT register. */
+
+rtx
+rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
+{
+ /* The second flow pass currently (June 1999) can't update
+ regs_ever_live without disturbing other parts of the compiler, so
+ update it here to make the prolog/epilogue code happy. */
+ if (!can_create_pseudo_p ()
+ && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
+ df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
+
+ crtl->uses_pic_offset_table = 1;
+
+ return pic_offset_table_rtx;
+}
+
+/* Function to init struct machine_function.
+ This will be called, via a pointer variable,
+ from push_function_context. */
+
+static struct machine_function *
+rs6000_init_machine_status (void)
+{
+ return GGC_CNEW (machine_function);
+}
+
+/* These macros test for integers and extract the low-order bits. */
+#define INT_P(X) \
+((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
+ && GET_MODE (X) == VOIDmode)
+
+#define INT_LOWPART(X) \
+ (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
+
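+/* For example, the rlwinm mask 0x0ff00000 has MB = 4 and ME = 11:
+ bits are numbered from the most significant end, as the PowerPC
+ mask fields expect. */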
+int
+extract_MB (rtx op)
+{
+ int i;
+ unsigned long val = INT_LOWPART (op);
+
+ /* If the high bit is zero, the value is the first 1 bit we find
+ from the left. */
+ if ((val & 0x80000000) == 0)
+ {
+ gcc_assert (val & 0xffffffff);
+
+ i = 1;
+ while (((val <<= 1) & 0x80000000) == 0)
+ ++i;
+ return i;
+ }
+
+ /* If the high bit is set and the low bit is not, or the mask is all
+ 1's, the value is zero. */
+ if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
+ return 0;
+
+ /* Otherwise we have a wrap-around mask. Look for the first 0 bit
+ from the right. */
+ i = 31;
+ while (((val >>= 1) & 1) != 0)
+ --i;
+
+ return i;
+}
+
+int
+extract_ME (rtx op)
+{
+ int i;
+ unsigned long val = INT_LOWPART (op);
+
+ /* If the low bit is zero, the value is the first 1 bit we find from
+ the right. */
+ if ((val & 1) == 0)
+ {
+ gcc_assert (val & 0xffffffff);
+
+ i = 30;
+ while (((val >>= 1) & 1) == 0)
+ --i;
+
+ return i;
+ }
+
+ /* If the low bit is set and the high bit is not, or the mask is all
+ 1's, the value is 31. */
+ if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
+ return 31;
+
+ /* Otherwise we have a wrap-around mask. Look for the first 0 bit
+ from the left. */
+ i = 0;
+ while (((val <<= 1) & 0x80000000) != 0)
+ ++i;
+
+ return i;
+}
+
+/* Locate some local-dynamic symbol still in use by this function
+ so that we can print its name in some tls_ld pattern. */
+
+static const char *
+rs6000_get_some_local_dynamic_name (void)
+{
+ rtx insn;
+
+ if (cfun->machine->some_ld_name)
+ return cfun->machine->some_ld_name;
+
+ for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
+ if (INSN_P (insn)
+ && for_each_rtx (&PATTERN (insn),
+ rs6000_get_some_local_dynamic_name_1, 0))
+ return cfun->machine->some_ld_name;
+
+ gcc_unreachable ();
+}
+
+/* Helper function for rs6000_get_some_local_dynamic_name. */
+
+static int
+rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
+{
+ rtx x = *px;
+
+ if (GET_CODE (x) == SYMBOL_REF)
+ {
+ const char *str = XSTR (x, 0);
+ if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
+ {
+ cfun->machine->some_ld_name = str;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/* Write out a function code label. */
+
+void
+rs6000_output_function_entry (FILE *file, const char *fname)
+{
+ if (fname[0] != '.')
+ {
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ if (DOT_SYMBOLS)
+ putc ('.', file);
+ else
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
+ break;
+
+ case ABI_V4:
+ case ABI_DARWIN:
+ break;
+ }
+ }
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+}
+
+/* Print an operand. Recognize special options, documented below. */
+
+#if TARGET_ELF
+#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
+#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
+#else
+#define SMALL_DATA_RELOC "sda21"
+#define SMALL_DATA_REG 0
+#endif
+
+void
+print_operand (FILE *file, rtx x, int code)
+{
+ int i;
+ HOST_WIDE_INT val;
+ unsigned HOST_WIDE_INT uval;
+
+ switch (code)
+ {
+ case '.':
+ /* Write out an instruction after the call which may be replaced
+ with glue code by the loader. This depends on the AIX version. */
+ asm_fprintf (file, RS6000_CALL_GLUE);
+ return;
+
+ /* %a is output_address. */
+
+ case 'A':
+ /* If X is a constant integer whose low-order 5 bits are zero,
+ write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
+ in the AIX assembler where "sri" with a zero shift count
+ writes a trash instruction. */
+ if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
+ putc ('l', file);
+ else
+ putc ('r', file);
+ return;
+
+ case 'b':
+ /* If constant, low-order 16 bits of constant, unsigned.
+ Otherwise, write normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'B':
+ /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
+ for 64-bit mask direction. */
+ putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
+ return;
+
+ /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
+ output_operand. */
+
+ case 'c':
+ /* X is a CR register. Print the number of the GT bit of the CR. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%E value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
+ return;
+
+ case 'D':
+ /* Like 'J' but get to the GT bit only. */
+ gcc_assert (GET_CODE (x) == REG);
+
+ /* Bit 1 is GT bit. */
+ i = 4 * (REGNO (x) - CR0_REGNO) + 1;
+
+ /* Add one for shift count in rlinm for scc. */
+ fprintf (file, "%d", i + 1);
+ return;
+
+ case 'E':
+ /* X is a CR register. Print the number of the EQ bit of the CR. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%E value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
+ return;
+
+ case 'f':
+ /* X is a CR register. Print the shift count needed to move it
+ to the high-order four bits. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%f value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 'F':
+ /* Similar, but print the count for the rotate in the opposite
+ direction. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%F value");
+ else
+ fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 'G':
+ /* X is a constant integer. If it is negative, print "m",
+ otherwise print "z". This is to make an aze or ame insn. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%G value");
+ else if (INTVAL (x) >= 0)
+ putc ('z', file);
+ else
+ putc ('m', file);
+ return;
+
+ case 'h':
+ /* If constant, output low-order five bits. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'H':
+ /* If constant, output low-order six bits. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'I':
+ /* Print `i' if this is a constant, else nothing. */
+ if (INT_P (x))
+ putc ('i', file);
+ return;
+
+ case 'j':
+ /* Write the bit number in CCR for jump. */
+ i = ccr_bit (x, 0);
+ if (i == -1)
+ output_operand_lossage ("invalid %%j code");
+ else
+ fprintf (file, "%d", i);
+ return;
+
+ case 'J':
+ /* Similar, but add one for shift count in rlinm for scc and pass
+ scc flag to `ccr_bit'. */
+ i = ccr_bit (x, 1);
+ if (i == -1)
+ output_operand_lossage ("invalid %%J code");
+ else
+ /* If we want bit 31, write a shift count of zero, not 32. */
+ fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ return;
+
+ case 'k':
+ /* X must be a constant. Write the 1's complement of the
+ constant. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%k value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
+ return;
+
+ case 'K':
+ /* X must be a symbolic constant on ELF. Write an
+ expression suitable for an 'addi' that adds in the low 16
+ bits of the MEM. */
+ if (GET_CODE (x) != CONST)
+ {
+ print_operand_address (file, x);
+ fputs ("@l", file);
+ }
+ else
+ {
+ if (GET_CODE (XEXP (x, 0)) != PLUS
+ || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
+ output_operand_lossage ("invalid %%K value");
+ print_operand_address (file, XEXP (XEXP (x, 0), 0));
+ fputs ("@l", file);
+ /* For GNU as, there must be a non-alphanumeric character
+ between 'l' and the number. The '-' is added by
+ print_operand() already. */
+ if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
+ fputs ("+", file);
+ print_operand (file, XEXP (XEXP (x, 0), 1), 0);
+ }
+ return;
+
+ /* %l is output_asm_label. */
+
+ case 'L':
+ /* Write second word of DImode or DFmode reference. Works on register
+ or non-indexed memory only. */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x) + 1], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ /* Handle possible auto-increment. Since it is pre-increment and
+ we have already done it, we can just use an offset of word. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0),
+ UNITS_PER_WORD));
+ else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0),
+ UNITS_PER_WORD));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode,
+ UNITS_PER_WORD),
+ 0));
+
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ case 'm':
+ /* MB value for a mask operand. */
+ if (! mask_operand (x, SImode))
+ output_operand_lossage ("invalid %%m value");
+
+ fprintf (file, "%d", extract_MB (x));
+ return;
+
+ case 'M':
+ /* ME value for a mask operand. */
+ if (! mask_operand (x, SImode))
+ output_operand_lossage ("invalid %%M value");
+
+ fprintf (file, "%d", extract_ME (x));
+ return;
+
+ /* %n outputs the negative of its operand. */
+
+ case 'N':
+ /* Write the number of elements in the vector times 4. */
+ if (GET_CODE (x) != PARALLEL)
+ output_operand_lossage ("invalid %%N value");
+ else
+ fprintf (file, "%d", XVECLEN (x, 0) * 4);
+ return;
+
+ case 'O':
+ /* Similar, but subtract 1 first. */
+ if (GET_CODE (x) != PARALLEL)
+ output_operand_lossage ("invalid %%O value");
+ else
+ fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
+ return;
+
+ case 'p':
+ /* X is a CONST_INT that is a power of two. Output the logarithm. */
+ if (! INT_P (x)
+ || INT_LOWPART (x) < 0
+ || (i = exact_log2 (INT_LOWPART (x))) < 0)
+ output_operand_lossage ("invalid %%p value");
+ else
+ fprintf (file, "%d", i);
+ return;
+
+ case 'P':
+ /* The operand must be an indirect memory reference. The result
+ is the register name. */
+ if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
+ || REGNO (XEXP (x, 0)) >= 32)
+ output_operand_lossage ("invalid %%P value");
+ else
+ fputs (reg_names[REGNO (XEXP (x, 0))], file);
+ return;
+
+ case 'q':
+ /* This outputs the logical code corresponding to a boolean
+ expression. The expression may have one or both operands
+ negated (if one, only the first one). For condition register
+ logical operations, it will also treat the negated
+ CR codes as NOTs, but not handle NOTs of them. */
+ {
+ const char *const *t = 0;
+ const char *s;
+ enum rtx_code code = GET_CODE (x);
+ static const char * const tbl[3][3] = {
+ { "and", "andc", "nor" },
+ { "or", "orc", "nand" },
+ { "xor", "eqv", "xor" } };
+
+ if (code == AND)
+ t = tbl[0];
+ else if (code == IOR)
+ t = tbl[1];
+ else if (code == XOR)
+ t = tbl[2];
+ else
+ output_operand_lossage ("invalid %%q value");
+
+ if (GET_CODE (XEXP (x, 0)) != NOT)
+ s = t[0];
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) == NOT)
+ s = t[2];
+ else
+ s = t[1];
+ }
+
+ fputs (s, file);
+ }
+ return;
+
+ case 'Q':
+ if (!TARGET_MFCRF)
+ return;
+ fputc (',', file);
+ /* FALLTHRU */
+
+ case 'R':
+ /* X is a CR register. Print the mask for `mtcrf'. */
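+ /* CR0 yields 128 and CR7 yields 1; mtcrf expects one bit per CR
+ field with CR0 in the most significant position. */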
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%R value");
+ else
+ fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
+ return;
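+
+ /* For example, CR field 2 gives 128 >> 2 == 32, the field mask that
+ makes "mtcrf 32,rS" update only CR2. */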
+
+ case 's':
+ /* Low 5 bits of 32 - value */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%s value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
+ return;
+
+ case 'S':
+ /* PowerPC64 mask position. All 0's is excluded.
+ CONST_INT 32-bit mask is considered sign-extended so any
+ transition must occur within the CONST_INT, not on the boundary. */
+ if (! mask64_operand (x, DImode))
+ output_operand_lossage ("invalid %%S value");
+
+ uval = INT_LOWPART (x);
+
+ if (uval & 1) /* Clear Left */
+ {
+#if HOST_BITS_PER_WIDE_INT > 64
+ uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
+#endif
+ i = 64;
+ }
+ else /* Clear Right */
+ {
+ uval = ~uval;
+#if HOST_BITS_PER_WIDE_INT > 64
+ uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
+#endif
+ i = 63;
+ }
+ while (uval != 0)
+ --i, uval >>= 1;
+ gcc_assert (i >= 0);
+ fprintf (file, "%d", i);
+ return;
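+
+ /* For example, 0x00000000ffffffff (a "clear left" mask, low bit set)
+ prints 32, the MB value for rldicl, while 0xffffffff00000000
+ (a "clear right" mask) prints 31, the ME value for rldicr. */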
+
+ case 't':
+ /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
+ gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);
+
+ /* Bit 3 is OV bit. */
+ i = 4 * (REGNO (x) - CR0_REGNO) + 3;
+
+ /* If we want bit 31, write a shift count of zero, not 32. */
+ fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ return;
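+
+ /* For example, the OV/UNORDERED bit of CR1 is CR bit 4*1 + 3 == 7,
+ so %t prints the shift count 8 that brings it into the low bit. */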
+
+ case 'T':
+ /* Print the symbolic name of a branch target register. */
+ if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
+ && REGNO (x) != CTR_REGNO))
+ output_operand_lossage ("invalid %%T value");
+ else if (REGNO (x) == LR_REGNO)
+ fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
+ else
+ fputs ("ctr", file);
+ return;
+
+ case 'u':
+ /* High-order 16 bits of constant for use in unsigned operand. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%u value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (INT_LOWPART (x) >> 16) & 0xffff);
+ return;
+
+ case 'v':
+ /* High-order 16 bits of constant for use in signed operand. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%v value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (INT_LOWPART (x) >> 16) & 0xffff);
+ return;
+
+ case 'U':
+ /* Print `u' if this has an auto-increment or auto-decrement. */
+ if (GET_CODE (x) == MEM
+ && (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC
+ || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
+ putc ('u', file);
+ return;
+
+ case 'V':
+ /* Print the trap code for this operand. */
+ switch (GET_CODE (x))
+ {
+ case EQ:
+ fputs ("eq", file); /* 4 */
+ break;
+ case NE:
+ fputs ("ne", file); /* 24 */
+ break;
+ case LT:
+ fputs ("lt", file); /* 16 */
+ break;
+ case LE:
+ fputs ("le", file); /* 20 */
+ break;
+ case GT:
+ fputs ("gt", file); /* 8 */
+ break;
+ case GE:
+ fputs ("ge", file); /* 12 */
+ break;
+ case LTU:
+ fputs ("llt", file); /* 2 */
+ break;
+ case LEU:
+ fputs ("lle", file); /* 6 */
+ break;
+ case GTU:
+ fputs ("lgt", file); /* 1 */
+ break;
+ case GEU:
+ fputs ("lge", file); /* 5 */
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case 'w':
+ /* If constant, low-order 16 bits of constant, signed. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
+ else
+ print_operand (file, x, 0);
+ return;
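+
+ /* The ((v & 0xffff) ^ 0x8000) - 0x8000 idiom sign-extends the low 16
+ bits: 0xffff prints as -1 while 0x7fff stays 32767. */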
+
+ case 'W':
+ /* MB value for a PowerPC64 rldic operand. */
+ val = (GET_CODE (x) == CONST_INT
+ ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
+
+ if (val < 0)
+ i = -1;
+ else
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
+ if ((val <<= 1) < 0)
+ break;
+
+#if HOST_BITS_PER_WIDE_INT == 32
+ if (GET_CODE (x) == CONST_INT && i >= 0)
+ i += 32; /* zero-extend high-part was all 0's */
+ else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
+ {
+ val = CONST_DOUBLE_LOW (x);
+
+ gcc_assert (val);
+ if (val < 0)
+ --i;
+ else
+ for ( ; i < 64; i++)
+ if ((val <<= 1) < 0)
+ break;
+ }
+#endif
+
+ fprintf (file, "%d", i + 1);
+ return;
+
+ case 'X':
+ if (GET_CODE (x) == MEM
+ && (legitimate_indexed_address_p (XEXP (x, 0), 0)
+ || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
+ && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
+ putc ('x', file);
+ return;
+
+ case 'Y':
+ /* Like 'L', for third word of TImode */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x) + 2], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
+ else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ case 'z':
+ /* X is a SYMBOL_REF. Write out the name preceded by a
+ period and without any trailing data in brackets. Used for function
+ names. If we are configured for System V (or the embedded ABI) on
+ the PowerPC, do not emit the period, since those systems do not use
+ TOCs and the like. */
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+ /* Mark the decl as referenced so that cgraph will output the
+ function. */
+ if (SYMBOL_REF_DECL (x))
+ mark_decl_referenced (SYMBOL_REF_DECL (x));
+
+ /* For macho, check to see if we need a stub. */
+ if (TARGET_MACHO)
+ {
+ const char *name = XSTR (x, 0);
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT
+ && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
+ name = machopic_indirection_name (x, /*stub_p=*/true);
+#endif
+ assemble_name (file, name);
+ }
+ else if (!DOT_SYMBOLS)
+ assemble_name (file, XSTR (x, 0));
+ else
+ rs6000_output_function_entry (file, XSTR (x, 0));
+ return;
+
+ case 'Z':
+ /* Like 'L', for last word of TImode. */
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x) + 3], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
+ else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ /* Print AltiVec or SPE memory operand. */
+ case 'y':
+ {
+ rtx tmp;
+
+ gcc_assert (GET_CODE (x) == MEM);
+
+ tmp = XEXP (x, 0);
+
+ /* Ugly hack because %y is overloaded. */
+ if ((TARGET_SPE || TARGET_E500_DOUBLE)
+ && (GET_MODE_SIZE (GET_MODE (x)) == 8
+ || GET_MODE (x) == TFmode
+ || GET_MODE (x) == TImode))
+ {
+ /* Handle [reg]. */
+ if (GET_CODE (tmp) == REG)
+ {
+ fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
+ break;
+ }
+ /* Handle [reg+UIMM]. */
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ int x;
+
+ gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);
+
+ x = INTVAL (XEXP (tmp, 1));
+ fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
+ break;
+ }
+
+ /* Fall through. Must be [reg+reg]. */
+ }
+ if (TARGET_ALTIVEC
+ && GET_CODE (tmp) == AND
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT
+ && INTVAL (XEXP (tmp, 1)) == -16)
+ tmp = XEXP (tmp, 0);
+ if (GET_CODE (tmp) == REG)
+ fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
+ else
+ {
+ if (GET_CODE (tmp) != PLUS
+ || !REG_P (XEXP (tmp, 0))
+ || !REG_P (XEXP (tmp, 1)))
+ {
+ output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
+ break;
+ }
+
+ if (REGNO (XEXP (tmp, 0)) == 0)
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
+ reg_names[ REGNO (XEXP (tmp, 0)) ]);
+ else
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
+ reg_names[ REGNO (XEXP (tmp, 1)) ]);
+ }
+ break;
+ }
+
+ case 0:
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (GET_CODE (x) == MEM)
+ {
+ /* We need to handle PRE_INC and PRE_DEC here, since we need to
+ know the width from the mode. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC)
+ fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
+ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
+ else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
+ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
+ else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
+ output_address (XEXP (XEXP (x, 0), 1));
+ else
+ output_address (XEXP (x, 0));
+ }
+ else
+ output_addr_const (file, x);
+ return;
+
+ case '&':
+ assemble_name (file, rs6000_get_some_local_dynamic_name ());
+ return;
+
+ default:
+ output_operand_lossage ("invalid %%xn code");
+ }
+}
+
+/* Print the address of an operand. */
+
+void
+print_operand_address (FILE *file, rtx x)
+{
+ if (GET_CODE (x) == REG)
+ fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
+ || GET_CODE (x) == LABEL_REF)
+ {
+ output_addr_const (file, x);
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ else
+ gcc_assert (!TARGET_TOC);
+ }
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
+ {
+ gcc_assert (REG_P (XEXP (x, 0)));
+ if (REGNO (XEXP (x, 0)) == 0)
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
+ reg_names[ REGNO (XEXP (x, 0)) ]);
+ else
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
+ reg_names[ REGNO (XEXP (x, 1)) ]);
+ }
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
+ INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
+#if TARGET_ELF
+ else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+#if TARGET_MACHO
+ else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ fprintf (file, "lo16(");
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+ else if (legitimate_constant_pool_address_p (x))
+ {
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
+ }
+ else
+ gcc_unreachable ();
+}
+
+/* Implement OUTPUT_ADDR_CONST_EXTRA for address X. */
+
+bool
+rs6000_output_addr_const_extra (FILE *file, rtx x)
+{
+ if (GET_CODE (x) == UNSPEC)
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_TOCREL:
+ x = XVECEXP (x, 0, 0);
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+ output_addr_const (file, x);
+ if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
+ {
+ putc ('-', file);
+ assemble_name (file, toc_label_name);
+ }
+ else if (TARGET_ELF)
+ fputs ("@toc", file);
+ return true;
+
+#if TARGET_MACHO
+ case UNSPEC_MACHOPIC_OFFSET:
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ putc ('-', file);
+ machopic_output_function_base_name (file);
+ return true;
+#endif
+ }
+ return false;
+}
+
+/* Target hook for assembling integer objects. The PowerPC version has
+ to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
+ is defined. It also needs to handle DI-mode objects on 64-bit
+ targets. */
+
+static bool
+rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+#ifdef RELOCATABLE_NEEDS_FIXUP
+ /* Special handling for SI values. */
+ if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
+ {
+ static int recurse = 0;
+
+ /* For -mrelocatable, we mark all addresses that need to be fixed up
+ in the .fixup section. */
+ if (TARGET_RELOCATABLE
+ && in_section != toc_section
+ && in_section != text_section
+ && !unlikely_text_section_p (in_section)
+ && !recurse
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x))
+ {
+ char buf[256];
+
+ recurse = 1;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
+ fixuplabelno++;
+ ASM_OUTPUT_LABEL (asm_out_file, buf);
+ fprintf (asm_out_file, "\t.long\t(");
+ output_addr_const (asm_out_file, x);
+ fprintf (asm_out_file, ")@fixup\n");
+ fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
+ ASM_OUTPUT_ALIGN (asm_out_file, 2);
+ fprintf (asm_out_file, "\t.long\t");
+ assemble_name (asm_out_file, buf);
+ fprintf (asm_out_file, "\n\t.previous\n");
+ recurse = 0;
+ return true;
+ }
+ /* Remove initial .'s to turn a -mcall-aixdesc function
+ address into the address of the descriptor, not the function
+ itself. */
+ else if (GET_CODE (x) == SYMBOL_REF
+ && XSTR (x, 0)[0] == '.'
+ && DEFAULT_ABI == ABI_AIX)
+ {
+ const char *name = XSTR (x, 0);
+ while (*name == '.')
+ name++;
+
+ fprintf (asm_out_file, "\t.long\t%s\n", name);
+ return true;
+ }
+ }
+#endif /* RELOCATABLE_NEEDS_FIXUP */
+ return default_assemble_integer (x, size, aligned_p);
+}
+
+#ifdef HAVE_GAS_HIDDEN
+/* Emit an assembler directive to set symbol visibility for DECL to
+ VISIBILITY_TYPE. */
+
+static void
+rs6000_assemble_visibility (tree decl, int vis)
+{
+ /* Functions need to have their entry point symbol visibility set as
+ well as their descriptor symbol visibility. */
+ if (DEFAULT_ABI == ABI_AIX
+ && DOT_SYMBOLS
+ && TREE_CODE (decl) == FUNCTION_DECL)
+ {
+ static const char * const visibility_types[] = {
+ NULL, "internal", "hidden", "protected"
+ };
+
+ const char *name, *type;
+
+ name = ((* targetm.strip_name_encoding)
+ (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
+ type = visibility_types[vis];
+
+ fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
+ fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
+ }
+ else
+ default_assemble_visibility (decl, vis);
+}
+#endif
+
+enum rtx_code
+rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
+{
+ /* Reversal of FP compares needs care -- an ordered compare
+ becomes an unordered compare and vice versa. */
+ if (mode == CCFPmode
+ && (!flag_finite_math_only
+ || code == UNLT || code == UNLE || code == UNGT || code == UNGE
+ || code == UNEQ || code == LTGT))
+ return reverse_condition_maybe_unordered (code);
+ else
+ return reverse_condition (code);
+}
+
+/* Generate a compare for CODE. Return a brand-new rtx that
+ represents the result of the compare. */
+
+static rtx
+rs6000_generate_compare (enum rtx_code code)
+{
+ enum machine_mode comp_mode;
+ rtx compare_result;
+
+ if (rs6000_compare_fp_p)
+ comp_mode = CCFPmode;
+ else if (code == GTU || code == LTU
+ || code == GEU || code == LEU)
+ comp_mode = CCUNSmode;
+ else if ((code == EQ || code == NE)
+ && GET_CODE (rs6000_compare_op0) == SUBREG
+ && GET_CODE (rs6000_compare_op1) == SUBREG
+ && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op0)
+ && SUBREG_PROMOTED_UNSIGNED_P (rs6000_compare_op1))
+ /* These are unsigned values; perhaps there will be a later
+ ordering compare that can be shared with this one.
+ Unfortunately we cannot detect the signedness of the operands
+ for non-subregs. */
+ comp_mode = CCUNSmode;
+ else
+ comp_mode = CCmode;
+
+ /* First, the compare. */
+ compare_result = gen_reg_rtx (comp_mode);
+
+ /* E500 FP compare instructions on the GPRs. Yuck! */
+ if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
+ && rs6000_compare_fp_p)
+ {
+ rtx cmp, or_result, compare_result2;
+ enum machine_mode op_mode = GET_MODE (rs6000_compare_op0);
+
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (rs6000_compare_op1);
+
+ /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
+ This explains the following mess. */
+
+ switch (code)
+ {
+ case EQ: case UNEQ: case NE: case LTGT:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstsfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstdfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case TFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tsttfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmptfeq_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstsfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstdfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case TFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tsttfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmptfgt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstsflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstdflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case TFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tsttflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmptflt_gpr (compare_result, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Synthesize LE and GE from LT/GT || EQ. */
+ if (code == LE || code == GE || code == LEU || code == GEU)
+ {
+ emit_insn (cmp);
+
+ switch (code)
+ {
+ case LE: code = LT; break;
+ case GE: code = GT; break;
+ case LEU: code = LT; break;
+ case GEU: code = GT; break;
+ default: gcc_unreachable ();
+ }
+
+ compare_result2 = gen_reg_rtx (CCFPmode);
+
+ /* Do the EQ. */
+ switch (op_mode)
+ {
+ case SFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstsfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpsfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case DFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tstdfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmpdfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ case TFmode:
+ cmp = (flag_finite_math_only && !flag_trapping_math)
+ ? gen_tsttfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1)
+ : gen_cmptfeq_gpr (compare_result2, rs6000_compare_op0,
+ rs6000_compare_op1);
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ emit_insn (cmp);
+
+ /* OR them together. */
+ or_result = gen_reg_rtx (CCFPmode);
+ cmp = gen_e500_cr_ior_compare (or_result, compare_result,
+ compare_result2);
+ compare_result = or_result;
+ code = EQ;
+ }
+ else
+ {
+ if (code == NE || code == LTGT)
+ code = NE;
+ else
+ code = EQ;
+ }
+
+ emit_insn (cmp);
+ }
+ else
+ {
+ /* Generate XLC-compatible TFmode compare as PARALLEL with extra
+ CLOBBERs to match cmptf_internal2 pattern. */
+ if (comp_mode == CCFPmode && TARGET_XL_COMPAT
+ && GET_MODE (rs6000_compare_op0) == TFmode
+ && !TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
+ emit_insn (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (9,
+ gen_rtx_SET (VOIDmode,
+ compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
+ else if (GET_CODE (rs6000_compare_op1) == UNSPEC
+ && XINT (rs6000_compare_op1, 1) == UNSPEC_SP_TEST)
+ {
+ rtx op1 = XVECEXP (rs6000_compare_op1, 0, 0);
+ comp_mode = CCEQmode;
+ compare_result = gen_reg_rtx (CCEQmode);
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_testdi (compare_result,
+ rs6000_compare_op0, op1));
+ else
+ emit_insn (gen_stack_protect_testsi (compare_result,
+ rs6000_compare_op0, op1));
+ }
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)));
+ }
+
+ /* Some kinds of FP comparisons need an OR operation;
+ under flag_finite_math_only we don't bother. */
+ if (rs6000_compare_fp_p
+ && !flag_finite_math_only
+ && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
+ && (code == LE || code == GE
+ || code == UNEQ || code == LTGT
+ || code == UNGT || code == UNLT))
+ {
+ enum rtx_code or1, or2;
+ rtx or1_rtx, or2_rtx, compare2_rtx;
+ rtx or_result = gen_reg_rtx (CCEQmode);
+
+ switch (code)
+ {
+ case LE: or1 = LT; or2 = EQ; break;
+ case GE: or1 = GT; or2 = EQ; break;
+ case UNEQ: or1 = UNORDERED; or2 = EQ; break;
+ case LTGT: or1 = LT; or2 = GT; break;
+ case UNGT: or1 = UNORDERED; or2 = GT; break;
+ case UNLT: or1 = UNORDERED; or2 = LT; break;
+ default: gcc_unreachable ();
+ }
+ validate_condition_mode (or1, comp_mode);
+ validate_condition_mode (or2, comp_mode);
+ or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
+ or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
+ compare2_rtx = gen_rtx_COMPARE (CCEQmode,
+ gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
+ const_true_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
+
+ compare_result = or_result;
+ code = EQ;
+ }
+
+ validate_condition_mode (code, GET_MODE (compare_result));
+
+ return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
+}
+
+
+/* Emit the RTL for an sCOND pattern. */
+
+void
+rs6000_emit_sCOND (enum rtx_code code, rtx result)
+{
+ rtx condition_rtx;
+ enum machine_mode op_mode;
+ enum rtx_code cond_code;
+
+ condition_rtx = rs6000_generate_compare (code);
+ cond_code = GET_CODE (condition_rtx);
+
+ if (rs6000_compare_fp_p
+ && !TARGET_FPRS && TARGET_HARD_FLOAT)
+ {
+ rtx t;
+
+ PUT_MODE (condition_rtx, SImode);
+ t = XEXP (condition_rtx, 0);
+
+ gcc_assert (cond_code == NE || cond_code == EQ);
+
+ if (cond_code == NE)
+ emit_insn (gen_e500_flip_gt_bit (t, t));
+
+ emit_insn (gen_move_from_CR_gt_bit (result, t));
+ return;
+ }
+
+ if (cond_code == NE
+ || cond_code == GE || cond_code == LE
+ || cond_code == GEU || cond_code == LEU
+ || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
+ {
+ rtx not_result = gen_reg_rtx (CCEQmode);
+ rtx not_op, rev_cond_rtx;
+ enum machine_mode cc_mode;
+
+ cc_mode = GET_MODE (XEXP (condition_rtx, 0));
+
+ rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
+ SImode, XEXP (condition_rtx, 0), const0_rtx);
+ not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
+ condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
+ }
+
+ op_mode = GET_MODE (rs6000_compare_op0);
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (rs6000_compare_op1);
+
+ if (TARGET_POWERPC64 && (op_mode == DImode || rs6000_compare_fp_p))
+ {
+ PUT_MODE (condition_rtx, DImode);
+ convert_move (result, condition_rtx, 0);
+ }
+ else
+ {
+ PUT_MODE (condition_rtx, SImode);
+ emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
+ }
+}
+
+/* Emit a branch of kind CODE to location LOC. */
+
+void
+rs6000_emit_cbranch (enum rtx_code code, rtx loc)
+{
+ rtx condition_rtx, loc_ref;
+
+ condition_rtx = rs6000_generate_compare (code);
+ loc_ref = gen_rtx_LABEL_REF (VOIDmode, loc);
+ emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
+ loc_ref, pc_rtx)));
+}
+
+/* Return the string to output a conditional branch to LABEL, the
+ assembler text for the branch target, or NULL if the branch is
+ really a conditional return.
+
+ OP is the conditional expression. XEXP (OP, 0) is assumed to be a
+ condition code register and its mode specifies what kind of
+ comparison we made.
+
+ REVERSED is nonzero if we should reverse the sense of the comparison.
+
+ INSN is the insn. */
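+
+/* For example, a branch predicted taken might come out as "bne+ 0,L42";
+ if the target is out of conditional-branch range, the sense is
+ reversed and "beq 0,$+8" followed by "b L42" is emitted instead
+ (the label name here is illustrative). */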
+
+char *
+output_cbranch (rtx op, const char *label, int reversed, rtx insn)
+{
+ static char string[64];
+ enum rtx_code code = GET_CODE (op);
+ rtx cc_reg = XEXP (op, 0);
+ enum machine_mode mode = GET_MODE (cc_reg);
+ int cc_regno = REGNO (cc_reg) - CR0_REGNO;
+ int need_longbranch = label != NULL && get_attr_length (insn) == 8;
+ int really_reversed = reversed ^ need_longbranch;
+ char *s = string;
+ const char *ccode;
+ const char *pred;
+ rtx note;
+
+ validate_condition_mode (code, mode);
+
+ /* Work out which way this really branches. We could use
+ reverse_condition_maybe_unordered here always but this
+ makes the resulting assembler clearer. */
+ if (really_reversed)
+ {
+ /* Reversal of FP compares needs care -- an ordered compare
+ becomes an unordered compare and vice versa. */
+ if (mode == CCFPmode)
+ code = reverse_condition_maybe_unordered (code);
+ else
+ code = reverse_condition (code);
+ }
+
+ if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
+ {
+ /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
+ to the GT bit. */
+ switch (code)
+ {
+ case EQ:
+ /* Opposite of GT. */
+ code = GT;
+ break;
+
+ case NE:
+ code = UNLE;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+
+ switch (code)
+ {
+ /* Not all of these are actually distinct opcodes, but
+ we distinguish them for clarity of the resulting assembler. */
+ case NE: case LTGT:
+ ccode = "ne"; break;
+ case EQ: case UNEQ:
+ ccode = "eq"; break;
+ case GE: case GEU:
+ ccode = "ge"; break;
+ case GT: case GTU: case UNGT:
+ ccode = "gt"; break;
+ case LE: case LEU:
+ ccode = "le"; break;
+ case LT: case LTU: case UNLT:
+ ccode = "lt"; break;
+ case UNORDERED: ccode = "un"; break;
+ case ORDERED: ccode = "nu"; break;
+ case UNGE: ccode = "nl"; break;
+ case UNLE: ccode = "ng"; break;
+ default:
+ gcc_unreachable ();
+ }
+
+ /* Maybe we have a guess as to how likely the branch is.
+ The old mnemonics don't have a way to specify this information. */
+ pred = "";
+ note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
+ if (note != NULL_RTX)
+ {
+ /* PROB is the difference from 50%. */
+ int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
+
+ /* Only hint for highly probable/improbable branches on newer
+ cpus as static prediction overrides processor dynamic
+ prediction. For older cpus we may as well always hint, but
+ assume not taken for branches that are very close to 50% as a
+ mispredicted taken branch is more expensive than a
+ mispredicted not-taken branch. */
+ if (rs6000_always_hint
+ || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
+ && br_prob_note_reliable_p (note)))
+ {
+ if (abs (prob) > REG_BR_PROB_BASE / 20
+ && ((prob > 0) ^ need_longbranch))
+ pred = "+";
+ else
+ pred = "-";
+ }
+ }
+
+ if (label == NULL)
+ s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
+ else
+ s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
+
+ /* We need to escape any '%' characters in the reg_names string.
+ Assume they'd only be the first character.... */
+ if (reg_names[cc_regno + CR0_REGNO][0] == '%')
+ *s++ = '%';
+ s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
+
+ if (label != NULL)
+ {
+ /* If the branch distance was too far, we may have to use an
+ unconditional branch to go the distance. */
+ if (need_longbranch)
+ s += sprintf (s, ",$+8\n\tb %s", label);
+ else
+ s += sprintf (s, ",%s", label);
+ }
+
+ return string;
+}
+
+/* Return the string to flip the GT bit on a CR. */
+char *
+output_e500_flip_gt_bit (rtx dst, rtx src)
+{
+ static char string[64];
+ int a, b;
+
+ gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
+ && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
+
+ /* GT bit. */
+ a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
+ b = 4 * (REGNO (src) - CR0_REGNO) + 1;
+
+ sprintf (string, "crnot %d,%d", a, b);
+ return string;
+}
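+
+/* For example, flipping the GT bit of CR3 in place yields
+ "crnot 13,13", since 4*3 + 1 == 13. */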
+
+/* Return the insn index of the vector compare instruction for the given
+ CODE, DEST_MODE and OP_MODE. Return INSN_NOT_AVAILABLE if no valid
+ insn is available. */
+
+static int
+get_vec_cmp_insn (enum rtx_code code,
+ enum machine_mode dest_mode,
+ enum machine_mode op_mode)
+{
+ if (!TARGET_ALTIVEC)
+ return INSN_NOT_AVAILABLE;
+
+ switch (code)
+ {
+ case EQ:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPEQUB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPEQUH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPEQUW;
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPEQFP;
+ break;
+ case GE:
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPGEFP;
+ /* Integer GE is synthesized from GT and EQ by the caller. */
+ break;
+ case GT:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPGTSB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPGTSH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPGTSW;
+ if (dest_mode == V4SImode && op_mode == V4SFmode)
+ return UNSPEC_VCMPGTFP;
+ break;
+ case GTU:
+ if (dest_mode == V16QImode && op_mode == V16QImode)
+ return UNSPEC_VCMPGTUB;
+ if (dest_mode == V8HImode && op_mode == V8HImode)
+ return UNSPEC_VCMPGTUH;
+ if (dest_mode == V4SImode && op_mode == V4SImode)
+ return UNSPEC_VCMPGTUW;
+ break;
+ default:
+ break;
+ }
+ return INSN_NOT_AVAILABLE;
+}
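+
+/* For example, get_vec_cmp_insn (GT, V4SImode, V4SImode) returns
+ UNSPEC_VCMPGTSW, which the caller wraps in an UNSPEC to emit the
+ signed-word compare vcmpgtsw. */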
+
+/* Emit a vector compare for operands OP0 and OP1 using code RCODE.
+ DMODE is the expected destination mode. This is a recursive function. */
+
+static rtx
+rs6000_emit_vector_compare (enum rtx_code rcode,
+ rtx op0, rtx op1,
+ enum machine_mode dmode)
+{
+ int vec_cmp_insn;
+ rtx mask;
+ enum machine_mode dest_mode;
+ enum machine_mode op_mode = GET_MODE (op1);
+
+ gcc_assert (TARGET_ALTIVEC);
+ gcc_assert (GET_MODE (op0) == GET_MODE (op1));
+
+ /* Floating-point vector compare instructions use V4SImode as the
+ destination; move the result to the appropriate mode later. */
+ if (dmode == V4SFmode)
+ dest_mode = V4SImode;
+ else
+ dest_mode = dmode;
+
+ mask = gen_reg_rtx (dest_mode);
+ vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+
+ if (vec_cmp_insn == INSN_NOT_AVAILABLE)
+ {
+ bool swap_operands = false;
+ bool try_again = false;
+ switch (rcode)
+ {
+ case LT:
+ rcode = GT;
+ swap_operands = true;
+ try_again = true;
+ break;
+ case LTU:
+ rcode = GTU;
+ swap_operands = true;
+ try_again = true;
+ break;
+ case NE:
+ case UNLE:
+ case UNLT:
+ case UNGE:
+ case UNGT:
+ /* Invert condition and try again.
+ e.g., A != B becomes ~(A==B). */
+ {
+ enum rtx_code rev_code;
+ enum insn_code nor_code;
+ rtx eq_rtx;
+
+ rev_code = reverse_condition_maybe_unordered (rcode);
+ eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
+ dest_mode);
+
+ nor_code = optab_handler (one_cmpl_optab, (int)dest_mode)->insn_code;
+ gcc_assert (nor_code != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));
+
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+ }
+ break;
+ case GE:
+ case GEU:
+ case LE:
+ case LEU:
+ /* Try GT/GTU/LT/LTU OR EQ */
+ {
+ rtx c_rtx, eq_rtx;
+ enum insn_code ior_code;
+ enum rtx_code new_code;
+
+ switch (rcode)
+ {
+ case GE:
+ new_code = GT;
+ break;
+
+ case GEU:
+ new_code = GTU;
+ break;
+
+ case LE:
+ new_code = LT;
+ break;
+
+ case LEU:
+ new_code = LTU;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ c_rtx = rs6000_emit_vector_compare (new_code,
+ op0, op1, dest_mode);
+ eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
+ dest_mode);
+
+ ior_code = optab_handler (ior_optab, (int)dest_mode)->insn_code;
+ gcc_assert (ior_code != CODE_FOR_nothing);
+ emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+ }
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ if (try_again)
+ {
+ vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
+ /* You only get two chances. */
+ gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
+ }
+
+ if (swap_operands)
+ {
+ rtx tmp;
+ tmp = op0;
+ op0 = op1;
+ op1 = tmp;
+ }
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, mask,
+ gen_rtx_UNSPEC (dest_mode,
+ gen_rtvec (2, op0, op1),
+ vec_cmp_insn)));
+ if (dmode != dest_mode)
+ {
+ rtx temp = gen_reg_rtx (dest_mode);
+ convert_move (temp, mask, 0);
+ return temp;
+ }
+ return mask;
+}
+
+/* Return the vector select instruction for MODE. Return
+ INSN_NOT_AVAILABLE if no valid insn exists for the given mode. */
+
+static int
+get_vsel_insn (enum machine_mode mode)
+{
+ switch (mode)
+ {
+ case V4SImode:
+ return UNSPEC_VSEL4SI;
+ break;
+ case V4SFmode:
+ return UNSPEC_VSEL4SF;
+ break;
+ case V8HImode:
+ return UNSPEC_VSEL8HI;
+ break;
+ case V16QImode:
+ return UNSPEC_VSEL16QI;
+ break;
+ default:
+ return INSN_NOT_AVAILABLE;
+ break;
+ }
+ return INSN_NOT_AVAILABLE;
+}
+
+/* Emit vector select insn where DEST is destination using
+ operands OP1, OP2 and MASK. */
+
+static void
+rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
+{
+ rtx t, temp;
+ enum machine_mode dest_mode = GET_MODE (dest);
+ int vsel_insn_index = get_vsel_insn (GET_MODE (dest));
+
+ temp = gen_reg_rtx (dest_mode);
+
+ /* For each vector element, select op1 when the mask is 1; otherwise
+ select op2. */
+ t = gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_UNSPEC (dest_mode,
+ gen_rtvec (3, op2, op1, mask),
+ vsel_insn_index));
+ emit_insn (t);
+ emit_move_insn (dest, temp);
+ return;
+}
+
+/* Emit vector conditional expression.
+ DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
+ CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
+
+int
+rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
+ rtx cond, rtx cc_op0, rtx cc_op1)
+{
+ enum machine_mode dest_mode = GET_MODE (dest);
+ enum rtx_code rcode = GET_CODE (cond);
+ rtx mask;
+
+ if (!TARGET_ALTIVEC)
+ return 0;
+
+ /* Get the vector mask for the given relational operation. */
+ mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
+
+ rs6000_emit_vector_select (dest, op1, op2, mask);
+
+ return 1;
+}
+
+/* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
+ operands of the last comparison is nonzero/true, FALSE_COND if it
+ is zero/false. Return 0 if the hardware has no such operation. */
+
+int
+rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
+{
+ enum rtx_code code = GET_CODE (op);
+ rtx op0 = rs6000_compare_op0;
+ rtx op1 = rs6000_compare_op1;
+ REAL_VALUE_TYPE c1;
+ enum machine_mode compare_mode = GET_MODE (op0);
+ enum machine_mode result_mode = GET_MODE (dest);
+ rtx temp;
+ bool is_against_zero;
+
+ /* These modes should always match. */
+ if (GET_MODE (op1) != compare_mode
+ /* In the isel case however, we can use a compare immediate, so
+ op1 may be a small constant. */
+ && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
+ return 0;
+ if (GET_MODE (true_cond) != result_mode)
+ return 0;
+ if (GET_MODE (false_cond) != result_mode)
+ return 0;
+
+ /* First, work out if the hardware can do this at all, or
+ if it's too slow.... */
+ if (! rs6000_compare_fp_p)
+ {
+ if (TARGET_ISEL)
+ return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
+ return 0;
+ }
+ else if (TARGET_HARD_FLOAT && !TARGET_FPRS
+ && SCALAR_FLOAT_MODE_P (compare_mode))
+ return 0;
+
+ is_against_zero = op1 == CONST0_RTX (compare_mode);
+
+ /* A floating-point subtract might overflow, underflow, or produce
+ an inexact result, thus changing the floating-point flags, so it
+ can't be generated if we care about that. It's safe if one side
+ of the construct is zero, since then no subtract will be
+ generated. */
+ if (SCALAR_FLOAT_MODE_P (compare_mode)
+ && flag_trapping_math && ! is_against_zero)
+ return 0;
+
+ /* Eliminate half of the comparisons by switching operands; this
+ makes the remaining code simpler. */
+ if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
+ || code == LTGT || code == LT || code == UNLE)
+ {
+ code = reverse_condition_maybe_unordered (code);
+ temp = true_cond;
+ true_cond = false_cond;
+ false_cond = temp;
+ }
+
+ /* UNEQ and LTGT take four instructions for a comparison with zero,
+ so it'll probably be faster to use a branch here too. */
+ if (code == UNEQ && HONOR_NANS (compare_mode))
+ return 0;
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
+
+ /* We're going to try to implement comparisons by performing
+ a subtract, then comparing against zero. Unfortunately,
+ Inf - Inf is NaN which is not zero, and so if we don't
+ know that the operand is finite and the comparison
+ would treat EQ differently from UNORDERED, we can't do it. */
+ if (HONOR_INFINITIES (compare_mode)
+ && code != GT && code != UNGE
+ && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
+ /* Constructs of the form (a OP b ? a : b) are safe. */
+ && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
+ || (! rtx_equal_p (op0, true_cond)
+ && ! rtx_equal_p (op1, true_cond))))
+ return 0;
+
+ /* At this point we know we can use fsel. */
+
+ /* Reduce the comparison to a comparison against zero. */
+ if (! is_against_zero)
+ {
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_MINUS (compare_mode, op0, op1)));
+ op0 = temp;
+ op1 = CONST0_RTX (compare_mode);
+ }
+
+ /* If we don't care about NaNs we can reduce some of the comparisons
+ down to faster ones. */
+ if (! HONOR_NANS (compare_mode))
+ switch (code)
+ {
+ case GT:
+ code = LE;
+ temp = true_cond;
+ true_cond = false_cond;
+ false_cond = temp;
+ break;
+ case UNGE:
+ code = GE;
+ break;
+ case UNEQ:
+ code = EQ;
+ break;
+ default:
+ break;
+ }
+
+ /* Now, reduce everything down to a GE. */
+ switch (code)
+ {
+ case GE:
+ break;
+
+ case LE:
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ case ORDERED:
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ case EQ:
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_NEG (compare_mode,
+ gen_rtx_ABS (compare_mode, op0))));
+ op0 = temp;
+ break;
+
+ case UNGE:
+ /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
+ temp = gen_reg_rtx (result_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ false_cond = true_cond;
+ true_cond = temp;
+
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ case GT:
+ /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
+ temp = gen_reg_rtx (result_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ true_cond = false_cond;
+ false_cond = temp;
+
+ temp = gen_reg_rtx (compare_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
+ op0 = temp;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IF_THEN_ELSE (result_mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ return 1;
+}
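+
+/* For example, "a <= 0 ? x : y" is reduced above to "-a >= 0 ? x : y",
+ which maps directly onto a single fsel. */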
+
+/* Same as above, but for ints (isel). */
+
+static int
+rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
+{
+ rtx condition_rtx, cr;
+
+ /* All isel implementations thus far are 32-bit. */
+ if (GET_MODE (rs6000_compare_op0) != SImode)
+ return 0;
+
+ /* We still have to do the compare, because isel doesn't do a
+ compare, it just looks at the CRx bits set by a previous compare
+ instruction. */
+ condition_rtx = rs6000_generate_compare (GET_CODE (op));
+ cr = XEXP (condition_rtx, 0);
+
+ if (GET_MODE (cr) == CCmode)
+ emit_insn (gen_isel_signed (dest, condition_rtx,
+ true_cond, false_cond, cr));
+ else
+ emit_insn (gen_isel_unsigned (dest, condition_rtx,
+ true_cond, false_cond, cr));
+
+ return 1;
+}
+
+const char *
+output_isel (rtx *operands)
+{
+ enum rtx_code code;
+
+ code = GET_CODE (operands[1]);
+ if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
+ {
+ PUT_CODE (operands[1], reverse_condition (code));
+ return "isel %0,%3,%2,%j1";
+ }
+ else
+ return "isel %0,%2,%3,%j1";
+}
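+
+/* isel has no direct GE/GEU/LE/LEU/NE forms, so those conditions are
+ printed by reversing the condition and swapping the two source
+ operands. */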
+
+void
+rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
+{
+ enum machine_mode mode = GET_MODE (op0);
+ enum rtx_code c;
+ rtx target;
+
+ if (code == SMAX || code == SMIN)
+ c = GE;
+ else
+ c = GEU;
+
+ if (code == SMAX || code == UMAX)
+ target = emit_conditional_move (dest, c, op0, op1, mode,
+ op0, op1, mode, 0);
+ else
+ target = emit_conditional_move (dest, c, op0, op1, mode,
+ op1, op0, mode, 0);
+ gcc_assert (target);
+ if (target != dest)
+ emit_move_insn (dest, target);
+}
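+
+/* For example, SMAX becomes a GE conditional move that selects op0 when
+ op0 >= op1 (signed) and op1 otherwise. */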
+
+/* Emit instructions to perform a load-reserved/store-conditional operation.
+ The operation performed is an atomic
+ (set M (CODE:MODE M OP))
+ If not NULL, BEFORE is atomically set to M before the operation, and
+ AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
+ If SYNC_P then a memory barrier is emitted before the operation.
+ Either OP or M may be wrapped in a NOT operation. */
+
+void
+rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
+ rtx m, rtx op, rtx before_param, rtx after_param,
+ bool sync_p)
+{
+ enum machine_mode used_mode;
+ rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
+ rtx used_m;
+ rtvec vec;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+ rtx shift = NULL_RTX;
+
+ if (sync_p)
+ emit_insn (gen_lwsync ());
+
+ used_m = m;
+
+ /* If this is smaller than SImode, we'll have to use SImode with
+ adjustments. */
+ if (mode == QImode || mode == HImode)
+ {
+ rtx newop, oldop;
+
+ if (MEM_ALIGN (used_m) >= 32)
+ {
+ int ishift = 0;
+ if (BYTES_BIG_ENDIAN)
+ ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);
+
+ shift = GEN_INT (ishift);
+ used_m = change_address (used_m, SImode, 0);
+ }
+ else
+ {
+ rtx addrSI, aligned_addr;
+ int shift_mask = mode == QImode ? 0x18 : 0x10;
+
+ addrSI = gen_lowpart_common (SImode,
+ force_reg (Pmode, XEXP (used_m, 0)));
+ addrSI = force_reg (SImode, addrSI);
+ shift = gen_reg_rtx (SImode);
+
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
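+ /* The rlwinm/xor pair above converts the byte offset within the
+ aligned word into a big-endian bit shift: for QImode, byte 0
+ maps to shift 24 and byte 3 to shift 0. */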
+
+ aligned_addr = expand_binop (Pmode, and_optab,
+ XEXP (used_m, 0),
+ GEN_INT (-4), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ used_m = change_address (used_m, SImode, aligned_addr);
+ set_mem_align (used_m, 32);
+ }
+ /* It's safe to keep the old alias set of USED_M, because
+ the operation is atomic and only affects the original
+ USED_M. */
+ m = used_m;
+
+ if (GET_CODE (op) == NOT)
+ {
+ oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
+ oldop = gen_rtx_NOT (SImode, oldop);
+ }
+ else
+ oldop = lowpart_subreg (SImode, op, mode);
+
+ switch (code)
+ {
+ case IOR:
+ case XOR:
+ newop = expand_binop (SImode, and_optab,
+ oldop, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newop, newop, shift));
+ break;
+
+ case NOT: /* NAND */
+ newop = expand_binop (SImode, ior_optab,
+ oldop, GEN_INT (~imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_rotlsi3 (newop, newop, shift));
+ break;
+
+ case AND:
+ newop = expand_binop (SImode, ior_optab,
+ oldop, GEN_INT (~imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_rotlsi3 (newop, newop, shift));
+ break;
+
+ case PLUS:
+ case MINUS:
+ {
+ rtx mask;
+
+ newop = expand_binop (SImode, and_optab,
+ oldop, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newop, newop, shift));
+
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ if (code == PLUS)
+ newop = gen_rtx_PLUS (SImode, m, newop);
+ else
+ newop = gen_rtx_MINUS (SImode, m, newop);
+ newop = gen_rtx_AND (SImode, newop, mask);
+ newop = gen_rtx_IOR (SImode, newop,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask),
+ m));
+ break;
+ }
+
+ default:
+ gcc_unreachable ();
+ }
+
+ op = newop;
+ used_mode = SImode;
+ before = gen_reg_rtx (used_mode);
+ after = gen_reg_rtx (used_mode);
+ }
+ else
+ {
+ used_mode = mode;
+ before = before_param;
+ after = after_param;
+
+ if (before == NULL_RTX)
+ before = gen_reg_rtx (used_mode);
+ if (after == NULL_RTX)
+ after = gen_reg_rtx (used_mode);
+ }
+
+ if ((code == PLUS || code == MINUS)
+ && used_mode != mode)
+ the_op = op; /* Computed above. */
+ else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
+ the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
+ else if (code == NOT)
+ the_op = gen_rtx_fmt_ee (IOR, used_mode,
+ gen_rtx_NOT (used_mode, m),
+ gen_rtx_NOT (used_mode, op));
+ else
+ the_op = gen_rtx_fmt_ee (code, used_mode, m, op);
+
+ set_after = gen_rtx_SET (VOIDmode, after, the_op);
+ set_before = gen_rtx_SET (VOIDmode, before, used_m);
+ set_atomic = gen_rtx_SET (VOIDmode, used_m,
+ gen_rtx_UNSPEC (used_mode,
+ gen_rtvec (1, the_op),
+ UNSPEC_SYNC_OP));
+ cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
+
+ if ((code == PLUS || code == MINUS) && used_mode != mode)
+ vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
+ else
+ vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+
+ /* Shift and mask the return values properly. */
+ if (used_mode != mode && before_param)
+ {
+ emit_insn (gen_lshrsi3 (before, before, shift));
+ convert_move (before_param, before, 1);
+ }
+
+ if (used_mode != mode && after_param)
+ {
+ emit_insn (gen_lshrsi3 (after, after, shift));
+ convert_move (after_param, after, 1);
+ }
+
+ /* The previous sequence will end with a branch that's dependent on
+ the conditional store, so placing an isync will ensure that no
+ other instructions (especially, no load or store instructions)
+ can start before the atomic operation completes. */
+ if (sync_p)
+ emit_insn (gen_isync ());
+}
+
+/* A subroutine of the atomic operation splitters. Jump to LABEL if
+ COND is true. Mark the jump as unlikely to be taken. */
+
+static void
+emit_unlikely_jump (rtx cond, rtx label)
+{
+ rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
+ rtx x;
+
+ x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
+ x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
+ REG_NOTES (x) = gen_rtx_EXPR_LIST (REG_BR_PROB, very_unlikely, NULL_RTX);
+}
+
+/* A subroutine of the atomic operation splitters. Emit a load-locked
+ instruction in MODE. */
+
+static void
+emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
+{
+ rtx (*fn) (rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_load_locked_si;
+ else if (mode == DImode)
+ fn = gen_load_locked_di;
+ emit_insn (fn (reg, mem));
+}
+
+/* A subroutine of the atomic operation splitters. Emit a store-conditional
+ instruction in MODE. */
+
+static void
+emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
+{
+ rtx (*fn) (rtx, rtx, rtx) = NULL;
+ if (mode == SImode)
+ fn = gen_store_conditional_si;
+ else if (mode == DImode)
+ fn = gen_store_conditional_di;
+
+ /* Emit a sync before stwcx. to work around PPC405 erratum 77. */
+ if (PPC405_ERRATUM77)
+ emit_insn (gen_memory_barrier ());
+
+ emit_insn (fn (res, mem, val));
+}
+
+/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
+ to perform. MEM is the memory on which to operate. VAL is the second
+ operand of the binary operator. BEFORE and AFTER are optional locations to
+ return the value of MEM either before or after the operation. SCRATCH is
+ a scratch register. */
+
+void
+rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
+ rtx before, rtx after, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_lwsync ());
+
+ label = gen_label_rtx ();
+ emit_label (label);
+ label = gen_rtx_LABEL_REF (VOIDmode, label);
+
+ if (before == NULL_RTX)
+ before = scratch;
+ emit_load_locked (mode, before, mem);
+
+ if (code == NOT)
+ x = gen_rtx_IOR (mode,
+ gen_rtx_NOT (mode, before),
+ gen_rtx_NOT (mode, val));
+ else if (code == AND)
+ x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
+ else
+ x = gen_rtx_fmt_ee (code, mode, before, val);
+
+ if (after != NULL_RTX)
+ emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
+ emit_insn (gen_rtx_SET (VOIDmode, scratch, x));
+
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_isync ());
+}
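+
+/* Roughly, the sequence emitted above is:
+
+ lwsync
+ 1: lwarx rTMP,0,rMEM
+ <code> rSCRATCH,rTMP,rVAL
+ stwcx. rSCRATCH,0,rMEM
+ bne- 1b
+ isync
+
+ with the register names purely illustrative. */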
+
+/* Expand an atomic compare and swap operation. MEM is the memory on which
+ to operate. OLDVAL is the old value to be compared. NEWVAL is the new
+ value to be stored. SCRATCH is a scratch GPR. */
+
+void
+rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
+ rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_lwsync ());
+
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (mode, retval, mem);
+
+ x = gen_rtx_COMPARE (CCmode, retval, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ emit_move_insn (scratch, newval);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
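+
+/* Roughly, with illustrative register names, the sequence is:
+
+ lwsync
+ 1: lwarx rRET,0,rMEM
+ cmpw cr0,rRET,rOLD
+ bne- 2f
+ mr rSCRATCH,rNEW
+ stwcx. rSCRATCH,0,rMEM
+ bne- 1b
+ isync
+ 2: */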
+
+/* Expand an atomic test and set operation. MEM is the memory on which
+ to operate. VAL is the value set. SCRATCH is a scratch GPR. */
+
+void
+rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label, 0));
+
+ emit_load_locked (mode, retval, mem);
+ emit_move_insn (scratch, val);
+ emit_store_conditional (mode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label);
+
+ emit_insn (gen_isync ());
+}
+
+void
+rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
+{
+ enum machine_mode mode = GET_MODE (mem);
+ rtx addrSI, align, wdst, shift, mask;
+ HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
+ HOST_WIDE_INT imask = GET_MODE_MASK (mode);
+
+ /* Shift amount for subword relative to aligned word. */
+ addrSI = force_reg (GET_MODE (XEXP (mem, 0)), XEXP (mem, 0));
+ addrSI = force_reg (SImode, gen_lowpart_common (SImode, addrSI));
+ shift = gen_reg_rtx (SImode);
+ emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
+ GEN_INT (shift_mask)));
+ emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));
+
+ /* Shift and mask old value into position within word. */
+ oldval = convert_modes (SImode, mode, oldval, 1);
+ oldval = expand_binop (SImode, and_optab,
+ oldval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (oldval, oldval, shift));
+
+ /* Shift and mask new value into position within word. */
+ newval = convert_modes (SImode, mode, newval, 1);
+ newval = expand_binop (SImode, and_optab,
+ newval, GEN_INT (imask), NULL_RTX,
+ 1, OPTAB_LIB_WIDEN);
+ emit_insn (gen_ashlsi3 (newval, newval, shift));
+
+ /* Mask for insertion. */
+ mask = gen_reg_rtx (SImode);
+ emit_move_insn (mask, GEN_INT (imask));
+ emit_insn (gen_ashlsi3 (mask, mask, shift));
+
+ /* Address of aligned word containing subword. */
+ align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
+ NULL_RTX, 1, OPTAB_LIB_WIDEN);
+ mem = change_address (mem, SImode, align);
+ set_mem_align (mem, 32);
+ MEM_VOLATILE_P (mem) = 1;
+
+ wdst = gen_reg_rtx (SImode);
+ emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
+ oldval, newval, mem));
+
+ /* Shift the result back. */
+ emit_insn (gen_lshrsi3 (wdst, wdst, shift));
+
+ emit_move_insn (dst, gen_lowpart (mode, wdst));
+}
+
+void
+rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
+ rtx oldval, rtx newval, rtx mem,
+ rtx scratch)
+{
+ rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);
+
+ emit_insn (gen_lwsync ());
+ label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+ emit_label (XEXP (label1, 0));
+
+ emit_load_locked (SImode, scratch, mem);
+
+ /* Mask subword within loaded value for comparison with oldval.
+ Use UNSPEC_AND to avoid a clobber. */
+ emit_insn (gen_rtx_SET (SImode, dest,
+ gen_rtx_UNSPEC (SImode,
+ gen_rtvec (2, scratch, mask),
+ UNSPEC_AND)));
+
+ x = gen_rtx_COMPARE (CCmode, dest, oldval);
+ emit_insn (gen_rtx_SET (VOIDmode, cond, x));
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label2);
+
+ /* Clear subword within loaded value for insertion of new value. */
+ emit_insn (gen_rtx_SET (SImode, scratch,
+ gen_rtx_AND (SImode,
+ gen_rtx_NOT (SImode, mask), scratch)));
+ emit_insn (gen_iorsi3 (scratch, scratch, newval));
+ emit_store_conditional (SImode, cond, mem, scratch);
+
+ x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (x, label1);
+
+ emit_insn (gen_isync ());
+ emit_label (XEXP (label2, 0));
+}
+
+
+ /* Emit instructions to move SRC to DST. Called by splitters for
+ multi-register moves. It will emit at most one instruction for
+ each register that is accessed; that is, it won't emit li/lis pairs
+ (or equivalent for 64-bit code). One of SRC or DST must be a hard
+ register. */
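+
+/* For example, a TImode copy between GPRs on 32-bit code becomes four
+ SImode moves, emitted highest register first when the source range
+ overlaps the destination from below. */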
+
+void
+rs6000_split_multireg_move (rtx dst, rtx src)
+{
+ /* The register number of the first register being moved. */
+ int reg;
+ /* The mode that is to be moved. */
+ enum machine_mode mode;
+ /* The mode that the move is being done in, and its size. */
+ enum machine_mode reg_mode;
+ int reg_mode_size;
+ /* The number of registers that will be moved. */
+ int nregs;
+
+ reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
+ mode = GET_MODE (dst);
+ nregs = hard_regno_nregs[reg][mode];
+ if (FP_REGNO_P (reg))
+ reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
+ ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
+ else if (ALTIVEC_REGNO_P (reg))
+ reg_mode = V16QImode;
+ else if (TARGET_E500_DOUBLE && mode == TFmode)
+ reg_mode = DFmode;
+ else
+ reg_mode = word_mode;
+ reg_mode_size = GET_MODE_SIZE (reg_mode);
+
+ gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
+
+ if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
+ {
+ /* Move register range backwards, if we might have destructive
+ overlap. */
+ int i;
+ for (i = nregs - 1; i >= 0; i--)
+ emit_insn (gen_rtx_SET (VOIDmode,
+ simplify_gen_subreg (reg_mode, dst, mode,
+ i * reg_mode_size),
+ simplify_gen_subreg (reg_mode, src, mode,
+ i * reg_mode_size)));
+ }
+ else
+ {
+ int i;
+ int j = -1;
+ bool used_update = false;
+
+ if (MEM_P (src) && INT_REGNO_P (reg))
+ {
+ rtx breg;
+
+ if (GET_CODE (XEXP (src, 0)) == PRE_INC
+ || GET_CODE (XEXP (src, 0)) == PRE_DEC)
+ {
+ rtx delta_rtx;
+ breg = XEXP (XEXP (src, 0), 0);
+ delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (breg, breg, delta_rtx)
+ : gen_adddi3 (breg, breg, delta_rtx));
+ src = replace_equiv_address (src, breg);
+ }
+ else if (! rs6000_offsettable_memref_p (src))
+ {
+ rtx basereg;
+ basereg = gen_rtx_REG (Pmode, reg);
+ emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
+ src = replace_equiv_address (src, basereg);
+ }
+
+ breg = XEXP (src, 0);
+ if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
+ breg = XEXP (breg, 0);
+
+ /* If the base register we are using to address memory is
+ also a destination reg, then change that register last. */
+ if (REG_P (breg)
+ && REGNO (breg) >= REGNO (dst)
+ && REGNO (breg) < REGNO (dst) + nregs)
+ j = REGNO (breg) - REGNO (dst);
+ }
+
+ if (MEM_P (dst) && INT_REGNO_P (reg))
+ {
+ rtx breg;
+
+ if (GET_CODE (XEXP (dst, 0)) == PRE_INC
+ || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
+ {
+ rtx delta_rtx;
+ breg = XEXP (XEXP (dst, 0), 0);
+ delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
+ ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
+ : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
+
+ /* We have to update the breg before doing the store.
+ Use store with update, if available. */
+
+ if (TARGET_UPDATE)
+ {
+ rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
+ emit_insn (TARGET_32BIT
+ ? (TARGET_POWERPC64
+ ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
+ : gen_movsi_update (breg, breg, delta_rtx, nsrc))
+ : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
+ used_update = true;
+ }
+ else
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (breg, breg, delta_rtx)
+ : gen_adddi3 (breg, breg, delta_rtx));
+ dst = replace_equiv_address (dst, breg);
+ }
+ else
+ gcc_assert (rs6000_offsettable_memref_p (dst));
+ }
+
+ for (i = 0; i < nregs; i++)
+ {
+ /* Calculate index to next subword. */
+ ++j;
+ if (j == nregs)
+ j = 0;
+
+ /* If compiler already emitted move of first word by
+ store with update, no need to do anything. */
+ if (j == 0 && used_update)
+ continue;
+
+ emit_insn (gen_rtx_SET (VOIDmode,
+ simplify_gen_subreg (reg_mode, dst, mode,
+ j * reg_mode_size),
+ simplify_gen_subreg (reg_mode, src, mode,
+ j * reg_mode_size)));
+ }
+ }
+}
+
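+/* Illustrative sketch, not from the original source: on a 32-bit
+ target (word_mode == SImode) the splitter above turns the 8-byte move
+
+ (set (reg:DI 3) (reg:DI 5))
+
+ into one word-sized move per hard register,
+
+ (set (reg:SI 3) (reg:SI 5))
+ (set (reg:SI 4) (reg:SI 6))
+
+ and walks the range backwards when the source and destination
+ register ranges overlap destructively. */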
+
+/* This page contains routines that are used to determine what the
+ function prologue and epilogue code will do and write them out. */
+
+/* Return the first fixed-point register that is required to be
+ saved. 32 if none. */
+
+int
+first_reg_to_save (void)
+{
+ int first_reg;
+
+ /* Find lowest numbered live register. */
+ for (first_reg = 13; first_reg <= 31; first_reg++)
+ if (df_regs_ever_live_p (first_reg)
+ && (! call_used_regs[first_reg]
+ || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
+ break;
+
+#if TARGET_MACHO
+ if (flag_pic
+ && crtl->uses_pic_offset_table
+ && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
+ return RS6000_PIC_OFFSET_TABLE_REGNUM;
+#endif
+
+ return first_reg;
+}
+
+/* Similar, for FP regs. */
+
+int
+first_fp_reg_to_save (void)
+{
+ int first_reg;
+
+ /* Find lowest numbered live register. */
+ for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
+ if (df_regs_ever_live_p (first_reg))
+ break;
+
+ return first_reg;
+}
+
+/* Similar, for AltiVec regs. */
+
+static int
+first_altivec_reg_to_save (void)
+{
+ int i;
+
+ /* Stack frame remains as is unless we are in AltiVec ABI. */
+ if (! TARGET_ALTIVEC_ABI)
+ return LAST_ALTIVEC_REGNO + 1;
+
+ /* On Darwin, the unwind routines are compiled without
+ TARGET_ALTIVEC, and use save_world to save/restore the
+ altivec registers when necessary. */
+ if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
+ && ! TARGET_ALTIVEC)
+ return FIRST_ALTIVEC_REGNO + 20;
+
+ /* Find lowest numbered live register. */
+ for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (df_regs_ever_live_p (i))
+ break;
+
+ return i;
+}
+
+/* Return a 32-bit mask of the AltiVec registers we need to set in
+ VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
+ the 32-bit word is 0. */
+
+static unsigned int
+compute_vrsave_mask (void)
+{
+ unsigned int i, mask = 0;
+
+ /* On Darwin, the unwind routines are compiled without
+ TARGET_ALTIVEC, and use save_world to save/restore the
+ call-saved altivec registers when necessary. */
+ if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
+ && ! TARGET_ALTIVEC)
+ mask |= 0xFFF;
+
+ /* First, find out if we use _any_ altivec registers. */
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (df_regs_ever_live_p (i))
+ mask |= ALTIVEC_REG_BIT (i);
+
+ if (mask == 0)
+ return mask;
+
+ /* Next, remove the argument registers from the set. These must
+ be in the VRSAVE mask set by the caller, so we don't need to add
+ them in again. More importantly, the mask we compute here is
+ used to generate CLOBBERs in the set_vrsave insn, and we do not
+ wish the argument registers to die. */
+ for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
+ mask &= ~ALTIVEC_REG_BIT (i);
+
+ /* Similarly, remove the return value from the set. */
+ {
+ bool yes = false;
+ diddle_return_value (is_altivec_return_reg, &yes);
+ if (yes)
+ mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
+ }
+
+ return mask;
+}
+
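+/* Worked example, not from the original source: ALTIVEC_REG_BIT maps
+ V0 to the most significant bit, so a function in which only V20 and
+ V21 are live would produce the mask
+
+ (0x80000000 >> 20) | (0x80000000 >> 21) == 0x00000c00
+
+ before the argument and return-value registers are masked off. */
+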
+/* For a very restricted set of circumstances, we can cut down the
+ size of prologues/epilogues by calling our own save/restore-the-world
+ routines. */
+
+static void
+compute_save_world_info (rs6000_stack_t *info_ptr)
+{
+ /* Start out assuming we can save the world; WORLD_SAVE_P reads this
+ field, so it must be set before the tests below can narrow it. */
+ info_ptr->world_save_p = 1;
+ info_ptr->world_save_p
+ = (WORLD_SAVE_P (info_ptr)
+ && DEFAULT_ABI == ABI_DARWIN
+ && ! (cfun->calls_setjmp && flag_exceptions)
+ && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
+ && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
+ && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
+ && info_ptr->cr_save_p);
+
+ /* This will not work in conjunction with sibcalls. Make sure there
+ are none. (This check is expensive, but seldom executed.) */
+ if (WORLD_SAVE_P (info_ptr))
+ {
+ rtx insn;
+ for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
+ if ( GET_CODE (insn) == CALL_INSN
+ && SIBLING_CALL_P (insn))
+ {
+ info_ptr->world_save_p = 0;
+ break;
+ }
+ }
+
+ if (WORLD_SAVE_P (info_ptr))
+ {
+ /* Even if we're not touching VRsave, make sure there's room on the
+ stack for it, if it looks like we're calling SAVE_WORLD, which
+ will attempt to save it. */
+ info_ptr->vrsave_size = 4;
+
+ /* If we are going to save the world, we need to save the link register too. */
+ info_ptr->lr_save_p = 1;
+
+ /* "Save" the VRsave register too if we're saving the world. */
+ if (info_ptr->vrsave_mask == 0)
+ info_ptr->vrsave_mask = compute_vrsave_mask ();
+
+ /* Because the Darwin register save/restore routines only handle
+ F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
+ check. */
+ gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
+ && (info_ptr->first_altivec_reg_save
+ >= FIRST_SAVED_ALTIVEC_REGNO));
+ }
+ return;
+}
+
+
+static void
+is_altivec_return_reg (rtx reg, void *xyes)
+{
+ bool *yes = (bool *) xyes;
+ if (REGNO (reg) == ALTIVEC_ARG_RETURN)
+ *yes = true;
+}
+
+
+/* Calculate the stack information for the current function. This is
+ complicated by having two separate calling sequences, the AIX calling
+ sequence and the V.4 calling sequence.
+
+ AIX (and Darwin/Mac OS X) stack frames look like:
+ 32-bit 64-bit
+ SP----> +---------------------------------------+
+ | back chain to caller | 0 0
+ +---------------------------------------+
+ | saved CR | 4 8 (8-11)
+ +---------------------------------------+
+ | saved LR | 8 16
+ +---------------------------------------+
+ | reserved for compilers | 12 24
+ +---------------------------------------+
+ | reserved for binders | 16 32
+ +---------------------------------------+
+ | saved TOC pointer | 20 40
+ +---------------------------------------+
+ | Parameter save area (P) | 24 48
+ +---------------------------------------+
+ | Alloca space (A) | 24+P etc.
+ +---------------------------------------+
+ | Local variable space (L) | 24+P+A
+ +---------------------------------------+
+ | Float/int conversion temporary (X) | 24+P+A+L
+ +---------------------------------------+
+ | Save area for AltiVec registers (W) | 24+P+A+L+X
+ +---------------------------------------+
+ | AltiVec alignment padding (Y) | 24+P+A+L+X+W
+ +---------------------------------------+
+ | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
+ +---------------------------------------+
+ | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
+ +---------------------------------------+
+ | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
+ +---------------------------------------+
+ old SP->| back chain to caller's caller |
+ +---------------------------------------+
+
+ The required alignment for AIX configurations is two words (i.e., 8
+ or 16 bytes).
+
+
+ V.4 stack frames look like:
+
+ SP----> +---------------------------------------+
+ | back chain to caller | 0
+ +---------------------------------------+
+ | caller's saved LR | 4
+ +---------------------------------------+
+ | Parameter save area (P) | 8
+ +---------------------------------------+
+ | Alloca space (A) | 8+P
+ +---------------------------------------+
+ | Varargs save area (V) | 8+P+A
+ +---------------------------------------+
+ | Local variable space (L) | 8+P+A+V
+ +---------------------------------------+
+ | Float/int conversion temporary (X) | 8+P+A+V+L
+ +---------------------------------------+
+ | Save area for AltiVec registers (W) | 8+P+A+V+L+X
+ +---------------------------------------+
+ | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
+ +---------------------------------------+
+ | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
+ +---------------------------------------+
+ | SPE: area for 64-bit GP registers |
+ +---------------------------------------+
+ | SPE alignment padding |
+ +---------------------------------------+
+ | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
+ +---------------------------------------+
+ | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
+ +---------------------------------------+
+ | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
+ +---------------------------------------+
+ old SP->| back chain to caller's caller |
+ +---------------------------------------+
+
+ The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
+ given. (But note below and in sysv4.h that we require only 8 and
+ may round up the size of our stack frame anyways. The historical
+ reason is early versions of powerpc-linux which didn't properly
+ align the stack at program startup. A happy side-effect is that
+ -mno-eabi libraries can be used with -meabi programs.)
+
+ The EABI configuration defaults to the V.4 layout. However,
+ the stack alignment requirements may differ. If -mno-eabi is not
+ given, the required stack alignment is 8 bytes; if -mno-eabi is
+ given, the required alignment is 16 bytes. (But see V.4 comment
+ above.) */
+
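+/* Worked example, not from the original source: in the 32-bit AIX
+ layout above, with P = 64 bytes of parameter save area, A = 0,
+ L = 16 and X = 8, the AltiVec save area (W) starts at
+ 24+P+A+L+X = 24+64+0+16+8 = 112 bytes from SP. */
+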
+#ifndef ABI_STACK_BOUNDARY
+#define ABI_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+static rs6000_stack_t *
+rs6000_stack_info (void)
+{
+ static rs6000_stack_t info;
+ rs6000_stack_t *info_ptr = &info;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ int ehrd_size;
+ int save_align;
+ int first_gp;
+ HOST_WIDE_INT non_fixed_size;
+
+ memset (&info, 0, sizeof (info));
+
+ if (TARGET_SPE)
+ {
+ /* Cache value so we don't rescan instruction chain over and over. */
+ if (cfun->machine->insn_chain_scanned_p == 0)
+ cfun->machine->insn_chain_scanned_p
+ = spe_func_has_64bit_regs_p () + 1;
+ info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
+ }
+
+ /* Select which calling sequence. */
+ info_ptr->abi = DEFAULT_ABI;
+
+ /* Calculate which registers need to be saved & save area size. */
+ info_ptr->first_gp_reg_save = first_reg_to_save ();
+ /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
+ even if it currently looks like we won't. Reload may need it to
+ get at a constant; if so, it will have already created a constant
+ pool entry for it. */
+ if (((TARGET_TOC && TARGET_MINIMAL_TOC)
+ || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
+ || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
+ && crtl->uses_const_pool
+ && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
+ first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
+ else
+ first_gp = info_ptr->first_gp_reg_save;
+
+ info_ptr->gp_size = reg_size * (32 - first_gp);
+
+ /* For the SPE, we have an additional upper 32-bits on each GPR.
+ Ideally we should save the entire 64-bits only when the upper
+ half is used in SIMD instructions. Since we only record
+ registers live (not the size they are used in), this proves
+ difficult because we'd have to traverse the instruction chain at
+ the right time, taking reload into account. This is a real pain,
+ so we opt to save all the GPRs in 64 bits if even one register
+ gets used in 64 bits. Otherwise, all the registers in the frame
+ get saved in 32 bits.
+
+ Consequently, when we save all GPRs (except the SP) in 64 bits, the
+ traditional GP save area will be empty. */
+ if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
+ info_ptr->gp_size = 0;
+
+ info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
+ info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
+
+ info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
+ info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
+ - info_ptr->first_altivec_reg_save);
+
+ /* Does this function call anything? */
+ info_ptr->calls_p = (! current_function_is_leaf
+ || cfun->machine->ra_needs_full_frame);
+
+ /* Determine if we need to save the link register. */
+ if ((DEFAULT_ABI == ABI_AIX
+ && crtl->profile
+ && !TARGET_PROFILE_KERNEL)
+#ifdef TARGET_RELOCATABLE
+ || (TARGET_RELOCATABLE && (get_pool_size () != 0))
+#endif
+ || (info_ptr->first_fp_reg_save != 64
+ && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
+ || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
+ || info_ptr->calls_p
+ || rs6000_ra_ever_killed ())
+ {
+ info_ptr->lr_save_p = 1;
+ df_set_regs_ever_live (LR_REGNO, true);
+ }
+
+ /* Determine if we need to save the condition code registers. */
+ if (df_regs_ever_live_p (CR2_REGNO)
+ || df_regs_ever_live_p (CR3_REGNO)
+ || df_regs_ever_live_p (CR4_REGNO))
+ {
+ info_ptr->cr_save_p = 1;
+ if (DEFAULT_ABI == ABI_V4)
+ info_ptr->cr_size = reg_size;
+ }
+
+ /* If the current function calls __builtin_eh_return, then we need
+ to allocate stack space for registers that will hold data for
+ the exception handler. */
+ if (crtl->calls_eh_return)
+ {
+ unsigned int i;
+ for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
+ continue;
+
+ /* SPE saves EH registers in 64-bits. */
+ ehrd_size = i * (TARGET_SPE_ABI
+ && info_ptr->spe_64bit_regs_used != 0
+ ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
+ }
+ else
+ ehrd_size = 0;
+
+ /* Determine various sizes. */
+ info_ptr->reg_size = reg_size;
+ info_ptr->fixed_size = RS6000_SAVE_AREA;
+ info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
+ info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
+ TARGET_ALTIVEC ? 16 : 8);
+ if (FRAME_GROWS_DOWNWARD)
+ info_ptr->vars_size
+ += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
+ + info_ptr->parm_size,
+ ABI_STACK_BOUNDARY / BITS_PER_UNIT)
+ - (info_ptr->fixed_size + info_ptr->vars_size
+ + info_ptr->parm_size);
+
+ if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
+ info_ptr->spe_gp_size = 8 * (32 - first_gp);
+ else
+ info_ptr->spe_gp_size = 0;
+
+ if (TARGET_ALTIVEC_ABI)
+ info_ptr->vrsave_mask = compute_vrsave_mask ();
+ else
+ info_ptr->vrsave_mask = 0;
+
+ if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
+ info_ptr->vrsave_size = 4;
+ else
+ info_ptr->vrsave_size = 0;
+
+ compute_save_world_info (info_ptr);
+
+ /* Calculate the offsets. */
+ switch (DEFAULT_ABI)
+ {
+ case ABI_NONE:
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ case ABI_DARWIN:
+ info_ptr->fp_save_offset = - info_ptr->fp_size;
+ info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
+
+ if (TARGET_ALTIVEC_ABI)
+ {
+ info_ptr->vrsave_save_offset
+ = info_ptr->gp_save_offset - info_ptr->vrsave_size;
+
+ /* Align stack so vector save area is on a quadword boundary.
+ The padding goes above the vectors. */
+ if (info_ptr->altivec_size != 0)
+ info_ptr->altivec_padding_size
+ = info_ptr->vrsave_save_offset & 0xF;
+ else
+ info_ptr->altivec_padding_size = 0;
+
+ info_ptr->altivec_save_offset
+ = info_ptr->vrsave_save_offset
+ - info_ptr->altivec_padding_size
+ - info_ptr->altivec_size;
+ gcc_assert (info_ptr->altivec_size == 0
+ || info_ptr->altivec_save_offset % 16 == 0);
+
+ /* Adjust for AltiVec case. */
+ info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
+ }
+ else
+ info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
+ info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
+ info_ptr->lr_save_offset = 2*reg_size;
+ break;
+
+ case ABI_V4:
+ info_ptr->fp_save_offset = - info_ptr->fp_size;
+ info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
+ info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
+
+ if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
+ {
+ /* Align stack so SPE GPR save area is aligned on a
+ double-word boundary. */
+ if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
+ info_ptr->spe_padding_size
+ = 8 - (-info_ptr->cr_save_offset % 8);
+ else
+ info_ptr->spe_padding_size = 0;
+
+ info_ptr->spe_gp_save_offset
+ = info_ptr->cr_save_offset
+ - info_ptr->spe_padding_size
+ - info_ptr->spe_gp_size;
+
+ /* Adjust for SPE case. */
+ info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
+ }
+ else if (TARGET_ALTIVEC_ABI)
+ {
+ info_ptr->vrsave_save_offset
+ = info_ptr->cr_save_offset - info_ptr->vrsave_size;
+
+ /* Align stack so vector save area is on a quadword boundary. */
+ if (info_ptr->altivec_size != 0)
+ info_ptr->altivec_padding_size
+ = 16 - (-info_ptr->vrsave_save_offset % 16);
+ else
+ info_ptr->altivec_padding_size = 0;
+
+ info_ptr->altivec_save_offset
+ = info_ptr->vrsave_save_offset
+ - info_ptr->altivec_padding_size
+ - info_ptr->altivec_size;
+
+ /* Adjust for AltiVec case. */
+ info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
+ }
+ else
+ info_ptr->ehrd_offset = info_ptr->cr_save_offset;
+ info_ptr->ehrd_offset -= ehrd_size;
+ info_ptr->lr_save_offset = reg_size;
+ break;
+ }
+
+ save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
+ info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
+ + info_ptr->gp_size
+ + info_ptr->altivec_size
+ + info_ptr->altivec_padding_size
+ + info_ptr->spe_gp_size
+ + info_ptr->spe_padding_size
+ + ehrd_size
+ + info_ptr->cr_size
+ + info_ptr->vrsave_size,
+ save_align);
+
+ non_fixed_size = (info_ptr->vars_size
+ + info_ptr->parm_size
+ + info_ptr->save_size);
+
+ info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
+ ABI_STACK_BOUNDARY / BITS_PER_UNIT);
+
+ /* Determine if we need to allocate any stack frame:
+
+ For AIX we need to push the stack if a frame pointer is needed
+ (because the stack might be dynamically adjusted), if we are
+ debugging, if we make calls, or if the sum of fp_save, gp_save,
+ and local variables are more than the space needed to save all
+ non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
+ + 18*8 = 288 (GPR13 reserved).
+
+ For V.4 we don't have the stack cushion that AIX uses, but assume
+ that the debugger can handle stackless frames. */
+
+ if (info_ptr->calls_p)
+ info_ptr->push_p = 1;
+
+ else if (DEFAULT_ABI == ABI_V4)
+ info_ptr->push_p = non_fixed_size != 0;
+
+ else if (frame_pointer_needed)
+ info_ptr->push_p = 1;
+
+ else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
+ info_ptr->push_p = 1;
+
+ else
+ info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
+
+ /* Zero offsets if we're not saving those registers. */
+ if (info_ptr->fp_size == 0)
+ info_ptr->fp_save_offset = 0;
+
+ if (info_ptr->gp_size == 0)
+ info_ptr->gp_save_offset = 0;
+
+ if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
+ info_ptr->altivec_save_offset = 0;
+
+ if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
+ info_ptr->vrsave_save_offset = 0;
+
+ if (! TARGET_SPE_ABI
+ || info_ptr->spe_64bit_regs_used == 0
+ || info_ptr->spe_gp_size == 0)
+ info_ptr->spe_gp_save_offset = 0;
+
+ if (! info_ptr->lr_save_p)
+ info_ptr->lr_save_offset = 0;
+
+ if (! info_ptr->cr_save_p)
+ info_ptr->cr_save_offset = 0;
+
+ return info_ptr;
+}
+
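+/* Note, not from the original source: the 220/288 cushion used for
+ push_p above is the size of the AIX register save area: 32-bit,
+ 18 FPRs * 8 + 19 GPRs * 4 = 220; 64-bit, 18 FPRs * 8 + 18 GPRs * 8
+ = 288 (GPR13 being reserved). A leaf function whose saves and
+ locals fit in that area need not push a frame. */
+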
+/* Return true if the current function uses any GPRs in 64-bit SIMD
+ mode. */
+
+static bool
+spe_func_has_64bit_regs_p (void)
+{
+ rtx insns, insn;
+
+ /* Functions that save and restore all the call-saved registers will
+ need to save/restore the registers in 64-bits. */
+ if (crtl->calls_eh_return
+ || cfun->calls_setjmp
+ || crtl->has_nonlocal_goto)
+ return true;
+
+ insns = get_insns ();
+
+ for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn))
+ {
+ rtx i;
+
+ /* FIXME: This should be implemented with attributes...
+
+ (set_attr "spe64" "true")....then,
+ if (get_spe64(insn)) return true;
+
+ It's the only reliable way to do the stuff below. */
+
+ i = PATTERN (insn);
+ if (GET_CODE (i) == SET)
+ {
+ enum machine_mode mode = GET_MODE (SET_SRC (i));
+
+ if (SPE_VECTOR_MODE (mode))
+ return true;
+ if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static void
+debug_stack_info (rs6000_stack_t *info)
+{
+ const char *abi_string;
+
+ if (! info)
+ info = rs6000_stack_info ();
+
+ fprintf (stderr, "\nStack information for function %s:\n",
+ ((current_function_decl && DECL_NAME (current_function_decl))
+ ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
+ : "<unknown>"));
+
+ switch (info->abi)
+ {
+ default: abi_string = "Unknown"; break;
+ case ABI_NONE: abi_string = "NONE"; break;
+ case ABI_AIX: abi_string = "AIX"; break;
+ case ABI_DARWIN: abi_string = "Darwin"; break;
+ case ABI_V4: abi_string = "V.4"; break;
+ }
+
+ fprintf (stderr, "\tABI = %5s\n", abi_string);
+
+ if (TARGET_ALTIVEC_ABI)
+ fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
+
+ if (TARGET_SPE_ABI)
+ fprintf (stderr, "\tSPE ABI extensions enabled.\n");
+
+ if (info->first_gp_reg_save != 32)
+ fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
+
+ if (info->first_fp_reg_save != 64)
+ fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
+
+ if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
+ fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
+ info->first_altivec_reg_save);
+
+ if (info->lr_save_p)
+ fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
+
+ if (info->cr_save_p)
+ fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
+
+ if (info->vrsave_mask)
+ fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
+
+ if (info->push_p)
+ fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
+
+ if (info->calls_p)
+ fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
+
+ if (info->gp_save_offset)
+ fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
+
+ if (info->fp_save_offset)
+ fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
+
+ if (info->altivec_save_offset)
+ fprintf (stderr, "\taltivec_save_offset = %5d\n",
+ info->altivec_save_offset);
+
+ if (info->spe_gp_save_offset)
+ fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
+ info->spe_gp_save_offset);
+
+ if (info->vrsave_save_offset)
+ fprintf (stderr, "\tvrsave_save_offset = %5d\n",
+ info->vrsave_save_offset);
+
+ if (info->lr_save_offset)
+ fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
+
+ if (info->cr_save_offset)
+ fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
+
+ if (info->varargs_save_offset)
+ fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
+
+ if (info->total_size)
+ fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
+ info->total_size);
+
+ if (info->vars_size)
+ fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
+ info->vars_size);
+
+ if (info->parm_size)
+ fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
+
+ if (info->fixed_size)
+ fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
+
+ if (info->gp_size)
+ fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
+
+ if (info->spe_gp_size)
+ fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
+
+ if (info->fp_size)
+ fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
+
+ if (info->altivec_size)
+ fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
+
+ if (info->vrsave_size)
+ fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
+
+ if (info->altivec_padding_size)
+ fprintf (stderr, "\taltivec_padding_size= %5d\n",
+ info->altivec_padding_size);
+
+ if (info->spe_padding_size)
+ fprintf (stderr, "\tspe_padding_size = %5d\n",
+ info->spe_padding_size);
+
+ if (info->cr_size)
+ fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
+
+ if (info->save_size)
+ fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
+
+ if (info->reg_size != 4)
+ fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
+
+ fprintf (stderr, "\n");
+}
+
+rtx
+rs6000_return_addr (int count, rtx frame)
+{
+ /* Currently we don't optimize very well between prolog and body
+ code, and for PIC code the generated code can actually be quite
+ bad, so don't try to be too clever here. */
+ if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
+ {
+ cfun->machine->ra_needs_full_frame = 1;
+
+ return
+ gen_rtx_MEM
+ (Pmode,
+ memory_address
+ (Pmode,
+ plus_constant (copy_to_reg
+ (gen_rtx_MEM (Pmode,
+ memory_address (Pmode, frame))),
+ RETURN_ADDRESS_OFFSET)));
+ }
+
+ cfun->machine->ra_need_lr = 1;
+ return get_hard_reg_initial_val (Pmode, LR_REGNO);
+}
+
+/* Say whether a function is a candidate for sibcall handling or not.
+ We do not allow indirect calls to be optimized into sibling calls.
+ Also, we can't do it if there are any vector parameters; there's
+ nowhere to put the VRsave code where it would work; note that functions with
+ vector parameters are required to have a prototype, so the argument
+ type info must be available here. (The tail recursion case can work
+ with vector parameters, but there's no way to distinguish here.) */
+static bool
+rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
+{
+ tree type;
+ if (decl)
+ {
+ if (TARGET_ALTIVEC_VRSAVE)
+ {
+ for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ type; type = TREE_CHAIN (type))
+ {
+ if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
+ return false;
+ }
+ }
+ if (DEFAULT_ABI == ABI_DARWIN
+ || ((*targetm.binds_local_p) (decl)
+ && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
+ {
+ tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));
+
+ if (!lookup_attribute ("longcall", attr_list)
+ || lookup_attribute ("shortcall", attr_list))
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Return NULL if INSN is valid within a low-overhead loop.
+ Otherwise return a string describing why doloop cannot be applied.
+ PowerPC uses the COUNT register for branch on table instructions. */
+
+static const char *
+rs6000_invalid_within_doloop (const_rtx insn)
+{
+ if (CALL_P (insn))
+ return "Function call in the loop.";
+
+ if (JUMP_P (insn)
+ && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC))
+ return "Computed branch in the loop.";
+
+ return NULL;
+}
+
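+/* Example, not from the original source: a loop body such as
+
+ for (i = 0; i < n; i++)
+ f (i);
+
+ is rejected by rs6000_invalid_within_doloop because the call may
+ clobber the COUNT register, so no bdnz-style low-overhead loop can
+ be generated for it. */
+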
+static int
+rs6000_ra_ever_killed (void)
+{
+ rtx top;
+ rtx reg;
+ rtx insn;
+
+ if (cfun->is_thunk)
+ return 0;
+
+ /* regs_ever_live has LR marked as used if any sibcalls are present,
+ but this should not force saving and restoring in the
+ pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
+ clobbers LR, so that is inappropriate. */
+
+ /* Also, the prologue can generate a store into LR that
+ doesn't really count, like this:
+
+ move LR->R0
+ bcl to set PIC register
+ move LR->R31
+ move R0->LR
+
+ When we're called from the epilogue, we need to avoid counting
+ this as a store. */
+
+ push_topmost_sequence ();
+ top = get_insns ();
+ pop_topmost_sequence ();
+ reg = gen_rtx_REG (Pmode, LR_REGNO);
+
+ for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ {
+ if (INSN_P (insn))
+ {
+ if (CALL_P (insn))
+ {
+ if (!SIBLING_CALL_P (insn))
+ return 1;
+ }
+ else if (find_regno_note (insn, REG_INC, LR_REGNO))
+ return 1;
+ else if (set_of (reg, insn) != NULL_RTX
+ && !prologue_epilogue_contains (insn))
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Emit instructions needed to load the TOC register.
+ This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
+ a constant pool; or for SVR4 -fpic. */
+
+void
+rs6000_emit_load_toc_table (int fromprolog)
+{
+ rtx dest;
+ dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+
+ if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
+ {
+ char buf[30];
+ rtx lab, tmp1, tmp2, got;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ if (flag_pic == 2)
+ got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
+ else
+ got = rs6000_got_sym ();
+ tmp1 = tmp2 = dest;
+ if (!fromprolog)
+ {
+ tmp1 = gen_reg_rtx (Pmode);
+ tmp2 = gen_reg_rtx (Pmode);
+ }
+ emit_insn (gen_load_toc_v4_PIC_1 (lab));
+ emit_move_insn (tmp1,
+ gen_rtx_REG (Pmode, LR_REGNO));
+ emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
+ emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
+ }
+ else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ {
+ emit_insn (gen_load_toc_v4_pic_si ());
+ emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
+ }
+ else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
+ {
+ char buf[30];
+ rtx temp0 = (fromprolog
+ ? gen_rtx_REG (Pmode, 0)
+ : gen_reg_rtx (Pmode));
+
+ if (fromprolog)
+ {
+ rtx symF, symL;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
+ symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+ emit_insn (gen_load_toc_v4_PIC_1 (symF));
+ emit_move_insn (dest,
+ gen_rtx_REG (Pmode, LR_REGNO));
+ emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
+ }
+ else
+ {
+ rtx tocsym;
+
+ tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
+ emit_insn (gen_load_toc_v4_PIC_1b (tocsym));
+ emit_move_insn (dest,
+ gen_rtx_REG (Pmode, LR_REGNO));
+ emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
+ }
+ emit_insn (gen_addsi3 (dest, temp0, dest));
+ }
+ else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
+ {
+ /* This is for AIX code running in non-PIC ELF32. */
+ char buf[30];
+ rtx realsym;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
+ realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+ emit_insn (gen_elf_high (dest, realsym));
+ emit_insn (gen_elf_low (dest, dest, realsym));
+ }
+ else
+ {
+ gcc_assert (DEFAULT_ABI == ABI_AIX);
+
+ if (TARGET_32BIT)
+ emit_insn (gen_load_toc_aix_si (dest));
+ else
+ emit_insn (gen_load_toc_aix_di (dest));
+ }
+}
+
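+/* Note, not from the original source: for the SVR4 -fPIC secure-PLT
+ case above, the patterns expand to roughly the standard GOT idiom
+
+ bcl 20,31,.LCF0
+ .LCF0: mflr 30
+ addis 30,30,.got-.LCF0@ha
+ addi 30,30,.got-.LCF0@l
+
+ where ".got" stands for whichever GOT/TOC symbol was chosen above. */
+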
+/* Emit instructions to restore the link register after determining where
+ its value has been stored. */
+
+void
+rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+ rtx operands[2];
+
+ operands[0] = source;
+ operands[1] = scratch;
+
+ if (info->lr_save_p)
+ {
+ rtx frame_rtx = stack_pointer_rtx;
+ HOST_WIDE_INT sp_offset = 0;
+ rtx tmp;
+
+ if (frame_pointer_needed
+ || cfun->calls_alloca
+ || info->total_size > 32767)
+ {
+ tmp = gen_frame_mem (Pmode, frame_rtx);
+ emit_move_insn (operands[1], tmp);
+ frame_rtx = operands[1];
+ }
+ else if (info->push_p)
+ sp_offset = info->total_size;
+
+ tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
+ tmp = gen_frame_mem (Pmode, tmp);
+ emit_move_insn (tmp, operands[0]);
+ }
+ else
+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
+}
+
+static GTY(()) alias_set_type set = -1;
+
+alias_set_type
+get_TOC_alias_set (void)
+{
+ if (set == -1)
+ set = new_alias_set ();
+ return set;
+}
+
+/* This returns nonzero if the current function uses the TOC. This is
+ determined by the presence of (use (unspec ... UNSPEC_TOC)), which
+ is generated by the ABI_V4 load_toc_* patterns. */
+#if TARGET_ELF
+static int
+uses_TOC (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (INSN_P (insn))
+ {
+ rtx pat = PATTERN (insn);
+ int i;
+
+ if (GET_CODE (pat) == PARALLEL)
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx sub = XVECEXP (pat, 0, i);
+ if (GET_CODE (sub) == USE)
+ {
+ sub = XEXP (sub, 0);
+ if (GET_CODE (sub) == UNSPEC
+ && XINT (sub, 1) == UNSPEC_TOC)
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+#endif
+
+rtx
+create_TOC_reference (rtx symbol)
+{
+ if (!can_create_pseudo_p ())
+ df_set_regs_ever_live (TOC_REGISTER, true);
+ return gen_rtx_PLUS (Pmode,
+ gen_rtx_REG (Pmode, TOC_REGISTER),
+ gen_rtx_CONST (Pmode,
+ gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_TOCREL)));
+}
+
+/* If _Unwind_* has been called from within the same module,
+ the TOC register is not guaranteed to have been saved to 40(1) on
+ function entry. Save it there in that case. */
+
+void
+rs6000_aix_emit_builtin_unwind_init (void)
+{
+ rtx mem;
+ rtx stack_top = gen_reg_rtx (Pmode);
+ rtx opcode_addr = gen_reg_rtx (Pmode);
+ rtx opcode = gen_reg_rtx (SImode);
+ rtx tocompare = gen_reg_rtx (SImode);
+ rtx no_toc_save_needed = gen_label_rtx ();
+
+ mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
+ emit_move_insn (stack_top, mem);
+
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+ emit_move_insn (opcode_addr, mem);
+ emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
+ emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
+ : 0xE8410028, SImode));
+
+ do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
+ SImode, NULL_RTX, NULL_RTX,
+ no_toc_save_needed);
+
+ mem = gen_frame_mem (Pmode,
+ gen_rtx_PLUS (Pmode, stack_top,
+ GEN_INT (5 * GET_MODE_SIZE (Pmode))));
+ emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
+ emit_label (no_toc_save_needed);
+}
+
+/* This ties together stack memory (MEM with an alias set of frame_alias_set)
+ and the change to the stack pointer. */
+
+static void
+rs6000_emit_stack_tie (void)
+{
+ rtx mem = gen_frame_mem (BLKmode,
+ gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
+
+ emit_insn (gen_stack_tie (mem));
+}
+
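+/* Sketch, not from the original source: the tie emitted above is a
+ zero-length insn whose pattern is roughly
+
+ (set (mem:BLK (reg 1)) (unspec:BLK [(mem:BLK (reg 1))] UNSPEC_TIE))
+
+ giving the scheduler a dependency that keeps frame loads and stores
+ from migrating across the stack pointer update. */
+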
+/* Emit the correct code for allocating stack space, as insns.
+ If COPY_R12, make sure a copy of the old frame is left in r12.
+ If COPY_R11, make sure a copy of the old frame is left in r11,
+ in preference to r12 if COPY_R12.
+ The generated code may use hard register 0 as a temporary. */
+
+static void
+rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12, int copy_r11)
+{
+ rtx insn;
+ rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ rtx tmp_reg = gen_rtx_REG (Pmode, 0);
+ rtx todec = gen_int_mode (-size, Pmode);
+ rtx par, set, mem;
+
+ if (INTVAL (todec) != -size)
+ {
+ warning (0, "stack frame too large");
+ emit_insn (gen_trap ());
+ return;
+ }
+
+ if (crtl->limit_stack)
+ {
+ if (REG_P (stack_limit_rtx)
+ && REGNO (stack_limit_rtx) > 1
+ && REGNO (stack_limit_rtx) <= 31)
+ {
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (tmp_reg,
+ stack_limit_rtx,
+ GEN_INT (size))
+ : gen_adddi3 (tmp_reg,
+ stack_limit_rtx,
+ GEN_INT (size)));
+
+ emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
+ const0_rtx));
+ }
+ else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
+ && TARGET_32BIT
+ && DEFAULT_ABI == ABI_V4)
+ {
+ rtx toload = gen_rtx_CONST (VOIDmode,
+ gen_rtx_PLUS (Pmode,
+ stack_limit_rtx,
+ GEN_INT (size)));
+
+ emit_insn (gen_elf_high (tmp_reg, toload));
+ emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
+ emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
+ const0_rtx));
+ }
+ else
+ warning (0, "stack limit expression is not supported");
+ }
+
+ if (copy_r12 || copy_r11)
+ emit_move_insn (copy_r11
+ ? gen_rtx_REG (Pmode, 11)
+ : gen_rtx_REG (Pmode, 12),
+ stack_reg);
+
+ if (size > 32767)
+ {
+ /* Need a note here so that try_split doesn't get confused. */
+ if (get_last_insn () == NULL_RTX)
+ emit_note (NOTE_INSN_DELETED);
+ insn = emit_move_insn (tmp_reg, todec);
+ try_split (PATTERN (insn), insn, 0);
+ todec = tmp_reg;
+ }
+
+ insn = emit_insn (TARGET_32BIT
+ ? gen_movsi_update_stack (stack_reg, stack_reg,
+ todec, stack_reg)
+ : gen_movdi_di_update_stack (stack_reg, stack_reg,
+ todec, stack_reg));
+ /* Since we didn't use gen_frame_mem to generate the MEM, grab
+ it now and set the alias set/attributes. The above gen_*_update
+ calls will generate a PARALLEL with the MEM set being the first
+ operation. */
+ par = PATTERN (insn);
+ gcc_assert (GET_CODE (par) == PARALLEL);
+ set = XVECEXP (par, 0, 0);
+ gcc_assert (GET_CODE (set) == SET);
+ mem = SET_DEST (set);
+ gcc_assert (MEM_P (mem));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, get_frame_alias_set ());
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) =
+ gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_reg,
+ gen_rtx_PLUS (Pmode, stack_reg,
+ GEN_INT (-size))),
+ REG_NOTES (insn));
+}
+
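+/* Note, not from the original source: for frames up to 32767 bytes
+ the insn emitted above is the single atomic allocate-and-link
+ instruction, e.g. on 32-bit
+
+ stwu 1,-64(1)
+
+ which stores the back chain at the new SP while decrementing it;
+ larger frames load the delta into r0 first and use the indexed
+ form stwux. */
+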
+/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
+ with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
+ is not NULL. It would be nice if dwarf2out_frame_debug_expr could
+ deduce these equivalences by itself so it wasn't necessary to hold
+ its hand so much. */
+
+static void
+rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
+ rtx reg2, rtx rreg)
+{
+ rtx real, temp;
+
+ /* copy_rtx will not make unique copies of registers, so we need to
+ ensure we don't have unwanted sharing here. */
+ if (reg == reg2)
+ reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
+
+ if (reg == rreg)
+ reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
+
+ real = copy_rtx (PATTERN (insn));
+
+ if (reg2 != NULL_RTX)
+ real = replace_rtx (real, reg2, rreg);
+
+ real = replace_rtx (real, reg,
+ gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
+ STACK_POINTER_REGNUM),
+ GEN_INT (val)));
+
+ /* We expect that 'real' is either a SET or a PARALLEL containing
+ SETs (and possibly other stuff). In a PARALLEL, all the SETs
+ are important so they all have to be marked RTX_FRAME_RELATED_P. */
+
+ if (GET_CODE (real) == SET)
+ {
+ rtx set = real;
+
+ temp = simplify_rtx (SET_SRC (set));
+ if (temp)
+ SET_SRC (set) = temp;
+ temp = simplify_rtx (SET_DEST (set));
+ if (temp)
+ SET_DEST (set) = temp;
+ if (GET_CODE (SET_DEST (set)) == MEM)
+ {
+ temp = simplify_rtx (XEXP (SET_DEST (set), 0));
+ if (temp)
+ XEXP (SET_DEST (set), 0) = temp;
+ }
+ }
+ else
+ {
+ int i;
+
+ gcc_assert (GET_CODE (real) == PARALLEL);
+ for (i = 0; i < XVECLEN (real, 0); i++)
+ if (GET_CODE (XVECEXP (real, 0, i)) == SET)
+ {
+ rtx set = XVECEXP (real, 0, i);
+
+ temp = simplify_rtx (SET_SRC (set));
+ if (temp)
+ SET_SRC (set) = temp;
+ temp = simplify_rtx (SET_DEST (set));
+ if (temp)
+ SET_DEST (set) = temp;
+ if (GET_CODE (SET_DEST (set)) == MEM)
+ {
+ temp = simplify_rtx (XEXP (SET_DEST (set), 0));
+ if (temp)
+ XEXP (SET_DEST (set), 0) = temp;
+ }
+ RTX_FRAME_RELATED_P (set) = 1;
+ }
+ }
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ real,
+ REG_NOTES (insn));
+}
+
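+/* Illustrative sketch, not from the original source: if the insn
+ stores r0 at 8 bytes above a frame register r11 that itself sits
+ VAL = 16384 bytes above the stack pointer,
+
+ (set (mem (plus (reg 11) (const_int 8))) (reg 0))
+
+ the attached REG_FRAME_RELATED_EXPR note rewrites the address in
+ terms of r1, simplifying to
+
+ (set (mem (plus (reg 1) (const_int 16392))) (reg 0))
+
+ which is what the DWARF CFI machinery records. */
+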
+/* Returns an insn that has a vrsave set operation with the
+ appropriate CLOBBERs. */
+
+static rtx
+generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
+{
+ int nclobs, i;
+ rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
+ rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
+
+ clobs[0]
+ = gen_rtx_SET (VOIDmode,
+ vrsave,
+ gen_rtx_UNSPEC_VOLATILE (SImode,
+ gen_rtvec (2, reg, vrsave),
+ UNSPECV_SET_VRSAVE));
+
+ nclobs = 1;
+
+ /* We need to clobber the registers in the mask so the scheduler
+ does not move sets to VRSAVE before sets of AltiVec registers.
+
+ However, if the function receives nonlocal gotos, reload will set
+ all call saved registers live. We will end up with:
+
+ (set (reg 999) (mem))
+ (parallel [ (set (reg vrsave) (unspec blah))
+ (clobber (reg 999))])
+
+ The clobber will cause the store into reg 999 to be dead, and
+ flow will attempt to delete an epilogue insn. In this case, we
+ need an unspec use/set of the register. */
+
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ if (!epiloguep || call_used_regs [i])
+ clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (V4SImode, i));
+ else
+ {
+ rtx reg = gen_rtx_REG (V4SImode, i);
+
+ clobs[nclobs++]
+ = gen_rtx_SET (VOIDmode,
+ reg,
+ gen_rtx_UNSPEC (V4SImode,
+ gen_rtvec (1, reg), 27));
+ }
+ }
+
+ insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
+
+ for (i = 0; i < nclobs; ++i)
+ XVECEXP (insn, 0, i) = clobs[i];
+
+ return insn;
+}
+
+/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
+ Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
+
+static void
+emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
+ unsigned int regno, int offset, HOST_WIDE_INT total_size)
+{
+ rtx reg, offset_rtx, insn, mem, addr, int_rtx;
+ rtx replacea, replaceb;
+
+ int_rtx = GEN_INT (offset);
+
+ /* Some cases that need register indexed addressing. */
+ if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ || (TARGET_E500_DOUBLE && mode == DFmode)
+ || (TARGET_SPE_ABI
+ && SPE_VECTOR_MODE (mode)
+ && !SPE_CONST_OFFSET_OK (offset)))
+ {
+ /* Whoever calls us must make sure r11 is available in the
+ flow path of instructions in the prologue. */
+ offset_rtx = gen_rtx_REG (Pmode, 11);
+ emit_move_insn (offset_rtx, int_rtx);
+
+ replacea = offset_rtx;
+ replaceb = int_rtx;
+ }
+ else
+ {
+ offset_rtx = int_rtx;
+ replacea = NULL_RTX;
+ replaceb = NULL_RTX;
+ }
+
+ reg = gen_rtx_REG (mode, regno);
+ addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
+ mem = gen_frame_mem (mode, addr);
+
+ insn = emit_move_insn (mem, reg);
+
+ rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
+}
+
+/* Emit an offset memory reference suitable for a frame store, while
+ converting to a valid addressing mode. */
+
+static rtx
+gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
+{
+ rtx int_rtx, offset_rtx;
+
+ int_rtx = GEN_INT (offset);
+
+ if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
+ || (TARGET_E500_DOUBLE && mode == DFmode))
+ {
+ offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
+ emit_move_insn (offset_rtx, int_rtx);
+ }
+ else
+ offset_rtx = int_rtx;
+
+ return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
+}
+
+/* Look for user-defined global regs. We should not save and restore these,
+ and cannot use stmw/lmw if there are any in its range. */
+
+static bool
+no_global_regs_above (int first, bool gpr)
+{
+ int i;
+ for (i = first; i < (gpr ? 32 : 64); i++)
+ if (global_regs[i])
+ return false;
+ return true;
+}
+
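+/* Example, not from the original source: a user-level global register
+ variable such as
+
+ register int counter asm ("r30");
+
+ sets global_regs[30], so no_global_regs_above (29, true) returns
+ false and neither stmw/lmw nor the out-of-line routines may touch
+ r30. */
+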
+#ifndef TARGET_FIX_AND_CONTINUE
+#define TARGET_FIX_AND_CONTINUE 0
+#endif
+
+/* It's really GPR 13 and FPR 14, but we need the smaller of the two. */
+#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
+#define LAST_SAVRES_REGISTER 31
+#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
+
+static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
+
+/* Return the symbol for an out-of-line register save/restore routine.
+ We are saving/restoring GPRs if GPR is true. */
+
+static rtx
+rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp)
+{
+ int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
+ rtx sym;
+ int select = ((savep ? 1 : 0) << 2
+ | (gpr
+ /* On the SPE, we never have any FPRs, but we do have
+ 32/64-bit versions of the routines. */
+ ? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
+ : 0) << 1
+ | (exitp ? 1 : 0));
+
+ /* Don't generate bogus routine names. */
+ gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER);
+
+ sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
+
+ if (sym == NULL)
+ {
+ char name[30];
+ const char *action;
+ const char *regkind;
+ const char *exit_suffix;
+
+ action = savep ? "save" : "rest";
+
+ /* SPE has slightly different names for its routines depending on
+ whether we are saving 32-bit or 64-bit registers. */
+ if (TARGET_SPE_ABI)
+ {
+ /* No floating point saves on the SPE. */
+ gcc_assert (gpr);
+
+ regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr";
+ }
+ else
+ regkind = gpr ? "gpr" : "fpr";
+
+ exit_suffix = exitp ? "_x" : "";
+
+ sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix);
+
+ sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
+ = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
+ }
+
+ return sym;
+}
+
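+/* Examples, not from the original source, of names built by
+ rs6000_savres_routine_sym: saving GPRs from r29 gives "_savegpr_29";
+ an exiting FPR restore from f14 gives "_restfpr_14_x"; the SPE
+ 64-bit variant of an exiting GPR restore would be "_rest64gpr_29_x". */
+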
+/* Emit a sequence of insns, including a stack tie if needed, for
+ resetting the stack pointer. If SAVRES is true, then don't reset the
+ stack pointer, but move the base of the frame into r11 for use by
+ out-of-line register restore routines. */
+
+static void
+rs6000_emit_stack_reset (rs6000_stack_t *info,
+ rtx sp_reg_rtx, rtx frame_reg_rtx,
+ int sp_offset, bool savres)
+{
+ /* This blockage is needed so that sched doesn't decide to move
+ the sp change before the register restores. */
+ if (frame_reg_rtx != sp_reg_rtx
+ || (TARGET_SPE_ABI
+ && info->spe_64bit_regs_used != 0
+ && info->first_gp_reg_save != 32))
+ rs6000_emit_stack_tie ();
+
+ if (frame_reg_rtx != sp_reg_rtx)
+ {
+ if (sp_offset != 0)
+ emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
+ GEN_INT (sp_offset)));
+ else if (!savres)
+ emit_move_insn (sp_reg_rtx, frame_reg_rtx);
+ }
+ else if (sp_offset != 0)
+ {
+ /* If we are restoring registers out-of-line, we will be using the
+ "exit" variants of the restore routines, which will reset the
+ stack for us. But we do need to point r11 into the right place
+ for those routines. */
+ rtx dest_reg = (savres
+ ? gen_rtx_REG (Pmode, 11)
+ : sp_reg_rtx);
+
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (dest_reg, sp_reg_rtx,
+ GEN_INT (sp_offset))
+ : gen_adddi3 (dest_reg, sp_reg_rtx,
+ GEN_INT (sp_offset)));
+ }
+}
+
+/* Construct a parallel rtx describing the effect of a call to an
+ out-of-line register save/restore routine. */
+
+static rtx
+rs6000_make_savres_rtx (rs6000_stack_t *info,
+ rtx frame_reg_rtx, int save_area_offset,
+ enum machine_mode reg_mode,
+ bool savep, bool gpr, bool exitp)
+{
+ int i;
+ int offset, start_reg, end_reg, n_regs;
+ int reg_size = GET_MODE_SIZE (reg_mode);
+ rtx sym;
+ rtvec p;
+
+ offset = 0;
+ start_reg = (gpr
+ ? info->first_gp_reg_save
+ : info->first_fp_reg_save);
+ end_reg = gpr ? 32 : 64;
+ n_regs = end_reg - start_reg;
+ p = rtvec_alloc ((exitp ? 4 : 3) + n_regs);
+
+ /* If we're saving registers, then we should never say we're exiting. */
+ gcc_assert ((savep && !exitp) || !savep);
+
+ if (exitp)
+ RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);
+
+ RTVEC_ELT (p, offset++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));
+
+ sym = rs6000_savres_routine_sym (info, savep, gpr, exitp);
+ RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
+ RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11));
+
+ for (i = 0; i < end_reg - start_reg; i++)
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, start_reg + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (save_area_offset + reg_size*i));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode,
+ savep ? mem : reg,
+ savep ? reg : mem);
+ }
+
+ return gen_rtx_PARALLEL (VOIDmode, p);
+}
+
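+/* Illustrative sketch, not from the original source: for an exiting
+ GPR restore starting at r29, with 4-byte registers and
+ save_area_offset 0, the PARALLEL built above has the shape
+
+ (parallel [(return)
+ (clobber (reg:SI 65))
+ (use (symbol_ref "_restgpr_29_x"))
+ (use (reg:SI 11))
+ (set (reg:SI 29) (mem (plus (reg 11) (const_int 0))))
+ (set (reg:SI 30) (mem (plus (reg 11) (const_int 4))))
+ (set (reg:SI 31) (mem (plus (reg 11) (const_int 8))))]) */
+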
+/* Determine whether the gp REG is really used. */
+
+static bool
+rs6000_reg_live_or_pic_offset_p (int reg)
+{
+ return ((df_regs_ever_live_p (reg)
+ && (!call_used_regs[reg]
+ || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && TARGET_TOC && TARGET_MINIMAL_TOC)))
+ || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
+}
+
+enum {
+ SAVRES_MULTIPLE = 0x1,
+ SAVRES_INLINE_FPRS = 0x2,
+ SAVRES_INLINE_GPRS = 0x4
+};
+
+/* Determine the strategy for saving/restoring registers. */
+
+static int
+rs6000_savres_strategy (rs6000_stack_t *info, bool savep,
+ int using_static_chain_p, int sibcall)
+{
+ bool using_multiple_p;
+ bool common;
+ bool savres_fprs_inline;
+ bool savres_gprs_inline;
+ bool noclobber_global_gprs
+ = no_global_regs_above (info->first_gp_reg_save, /*gpr=*/true);
+
+ using_multiple_p = (TARGET_MULTIPLE && ! TARGET_POWERPC64
+ && (!TARGET_SPE_ABI
+ || info->spe_64bit_regs_used == 0)
+ && info->first_gp_reg_save < 31
+ && noclobber_global_gprs);
+ /* Don't bother to try to save things out-of-line if r11 is occupied
+ by the static chain. It would require too much fiddling and the
+ static chain is rarely used anyway. */
+ common = (using_static_chain_p
+ || sibcall
+ || crtl->calls_eh_return
+ || !info->lr_save_p
+ || cfun->machine->ra_need_lr
+ || info->total_size > 32767);
+ savres_fprs_inline = (common
+ || info->first_fp_reg_save == 64
+ || !no_global_regs_above (info->first_fp_reg_save,
+ /*gpr=*/false)
+ || FP_SAVE_INLINE (info->first_fp_reg_save));
+ savres_gprs_inline = (common
+ /* Saving CR interferes with the exit routines
+ used on the SPE, so just punt here. */
+ || (!savep
+ && TARGET_SPE_ABI
+ && info->spe_64bit_regs_used != 0
+ && info->cr_save_p != 0)
+ || info->first_gp_reg_save == 32
+ || !noclobber_global_gprs
+ || GP_SAVE_INLINE (info->first_gp_reg_save));
+
+ if (savep)
+ /* If we are going to use store multiple, then don't even bother
+ with the out-of-line routines, since the store-multiple instruction
+ will always be smaller. */
+ savres_gprs_inline = savres_gprs_inline || using_multiple_p;
+ else
+ {
+ /* The situation is more complicated with load multiple. We'd
+ prefer to use the out-of-line routines for restores, since the
+ "exit" out-of-line routines can handle the restore of LR and
+ the frame teardown. But we can only use the out-of-line
+ routines if we know that we've used store multiple or
+ out-of-line routines in the prologue, i.e. if we've saved all
+ the registers from first_gp_reg_save. Otherwise, we risk
+ loading garbage from the stack. Furthermore, we can only use
+ the "exit" out-of-line gpr restore if we haven't saved any
+ fprs. */
+ bool saved_all = !savres_gprs_inline || using_multiple_p;
+
+ if (saved_all && info->first_fp_reg_save != 64)
+ /* We can't use the exit routine; use load multiple if it's
+ available. */
+ savres_gprs_inline = savres_gprs_inline || using_multiple_p;
+ }
+
+ return (using_multiple_p
+ | (savres_fprs_inline << 1)
+ | (savres_gprs_inline << 2));
+}
+
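+/* Usage sketch, not from the original source: callers unpack the
+ returned flags with the enum above, e.g.
+
+ strategy = rs6000_savres_strategy (info, true, 0, 0);
+ using_multiple = (strategy & SAVRES_MULTIPLE) != 0;
+ fprs_inline = (strategy & SAVRES_INLINE_FPRS) != 0;
+ gprs_inline = (strategy & SAVRES_INLINE_GPRS) != 0;
+
+ mirroring the decoding in rs6000_emit_prologue below. */
+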
+/* Emit function prologue as insns. */
+
+void
+rs6000_emit_prologue (void)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+ enum machine_mode reg_mode = Pmode;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
+ rtx frame_reg_rtx = sp_reg_rtx;
+ rtx cr_save_rtx = NULL_RTX;
+ rtx insn;
+ int strategy;
+ int saving_FPRs_inline;
+ int saving_GPRs_inline;
+ int using_store_multiple;
+ int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
+ && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
+ && !call_used_regs[STATIC_CHAIN_REGNUM]);
+ HOST_WIDE_INT sp_offset = 0;
+
+ if (TARGET_FIX_AND_CONTINUE)
+ {
+ /* gdb on darwin arranges to forward a function from the old
+ address by modifying the first 5 instructions of the function
+ to branch to the overriding function. This is necessary to
+ permit function pointers that point to the old function to
+ actually forward to the new function. */
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ emit_insn (gen_nop ());
+ }
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ reg_mode = V2SImode;
+ reg_size = 8;
+ }
+
+ strategy = rs6000_savres_strategy (info, /*savep=*/true,
+ /*static_chain_p=*/using_static_chain_p,
+ /*sibcall=*/0);
+ using_store_multiple = strategy & SAVRES_MULTIPLE;
+ saving_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
+ saving_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
+
+ /* For V.4, update stack before we do any saving and set back pointer. */
+ if (! WORLD_SAVE_P (info)
+ && info->push_p
+ && (DEFAULT_ABI == ABI_V4
+ || crtl->calls_eh_return))
+ {
+ bool need_r11 = (TARGET_SPE
+ ? (!saving_GPRs_inline
+ && info->spe_64bit_regs_used == 0)
+ : (!saving_FPRs_inline || !saving_GPRs_inline));
+ if (info->total_size < 32767)
+ sp_offset = info->total_size;
+ else
+ frame_reg_rtx = (need_r11
+ ? gen_rtx_REG (Pmode, 11)
+ : frame_ptr_rtx);
+ rs6000_emit_allocate_stack (info->total_size,
+ (frame_reg_rtx != sp_reg_rtx
+ && (info->cr_save_p
+ || info->lr_save_p
+ || info->first_fp_reg_save < 64
+ || info->first_gp_reg_save < 32
+ )),
+ need_r11);
+ if (frame_reg_rtx != sp_reg_rtx)
+ rs6000_emit_stack_tie ();
+ }
+
+ /* Handle world saves specially here. */
+ if (WORLD_SAVE_P (info))
+ {
+ int i, j, sz;
+ rtx treg;
+ rtvec p;
+ rtx reg0;
+
+ /* save_world expects lr in r0. */
+ reg0 = gen_rtx_REG (Pmode, 0);
+ if (info->lr_save_p)
+ {
+ insn = emit_move_insn (reg0,
+ gen_rtx_REG (Pmode, LR_REGNO));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
+ assumptions about the offsets of various bits of the stack
+ frame. */
+ gcc_assert (info->gp_save_offset == -220
+ && info->fp_save_offset == -144
+ && info->lr_save_offset == 8
+ && info->cr_save_offset == 4
+ && info->push_p
+ && info->lr_save_p
+ && (!crtl->calls_eh_return
+ || info->ehrd_offset == -432)
+ && info->vrsave_save_offset == -224
+ && info->altivec_save_offset == -416);
+
+ treg = gen_rtx_REG (SImode, 11);
+ emit_move_insn (treg, GEN_INT (-info->total_size));
+
+ /* SAVE_WORLD takes the caller's LR in R0 and the frame size
+ in R11. It also clobbers R12, so beware! */
+
+ /* Preserve CR2 for save_world prologues. */
+ sz = 5;
+ sz += 32 - info->first_gp_reg_save;
+ sz += 64 - info->first_fp_reg_save;
+ sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
+ p = rtvec_alloc (sz);
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (SImode,
+ LR_REGNO));
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ "*save_world"));
+ /* We do floats first so that the instruction pattern matches
+ properly. */
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode),
+ info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset + 8 * i));
+ rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode), addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + sp_offset + 16 * i));
+ rtx mem = gen_frame_mem (V4SImode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset
+ + sp_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ /* Explain about use of R0. */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset
+ + sp_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, mem, reg0);
+ }
+ /* Explain what happens to the stack pointer. */
+ {
+ rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
+ }
+
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ treg, GEN_INT (-info->total_size));
+ sp_offset = info->total_size;
+ }
+
+ /* If we use the link register, get it into r0. */
+ if (!WORLD_SAVE_P (info) && info->lr_save_p)
+ {
+ rtx addr, reg, mem;
+
+ insn = emit_move_insn (gen_rtx_REG (Pmode, 0),
+ gen_rtx_REG (Pmode, LR_REGNO));
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+ reg = gen_rtx_REG (Pmode, 0);
+ mem = gen_rtx_MEM (Pmode, addr);
+ /* This should not be of rs6000_sr_alias_set, because of
+ __builtin_return_address. */
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* If we need to save CR, put it into r12. */
+ if (!WORLD_SAVE_P (info) && info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
+ {
+ rtx set;
+
+ cr_save_rtx = gen_rtx_REG (SImode, 12);
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ /* Now, there's no way that dwarf2out_frame_debug_expr is going
+ to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
+ But that's OK. All we have to do is specify that _one_ condition
+ code register is saved in this stack slot. The thrower's epilogue
+ will then restore all the call-saved registers.
+ We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
+ set = gen_rtx_SET (VOIDmode, cr_save_rtx,
+ gen_rtx_REG (SImode, CR2_REGNO));
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ set,
+ REG_NOTES (insn));
+ }
+
+ /* Do any required saving of fpr's.  If we are saving them inline,
+ emit individual stores; otherwise call an out-of-line routine.  */
+ if (!WORLD_SAVE_P (info) && saving_FPRs_inline)
+ {
+ int i;
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
+ && ! call_used_regs[info->first_fp_reg_save+i]))
+ emit_frame_save (frame_reg_rtx, frame_ptr_rtx,
+ (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode,
+ info->first_fp_reg_save + i,
+ info->fp_save_offset + sp_offset + 8 * i,
+ info->total_size);
+ }
+ else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
+ {
+ rtx par;
+
+ par = rs6000_make_savres_rtx (info, frame_reg_rtx,
+ info->fp_save_offset + sp_offset,
+ DFmode,
+ /*savep=*/true, /*gpr=*/false,
+ /*exitp=*/false);
+ insn = emit_insn (par);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* Save GPRs. This is done as a PARALLEL if we are using
+ the store-multiple instructions. */
+ if (!WORLD_SAVE_P (info)
+ && TARGET_SPE_ABI
+ && info->spe_64bit_regs_used != 0
+ && info->first_gp_reg_save != 32)
+ {
+ int i;
+ rtx spe_save_area_ptr;
+
+ /* Determine whether we can address all of the registers that need
+ to be saved with an offset from the stack pointer that fits in
+ the small const field for SPE memory instructions. */
+ int spe_regs_addressable_via_sp
+ = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
+ + (32 - info->first_gp_reg_save - 1) * reg_size)
+ && saving_GPRs_inline);
+ int spe_offset;
+
+ if (spe_regs_addressable_via_sp)
+ {
+ spe_save_area_ptr = frame_reg_rtx;
+ spe_offset = info->spe_gp_save_offset + sp_offset;
+ }
+ else
+ {
+ /* Make r11 point to the start of the SPE save area. We need
+ to be careful here if r11 is holding the static chain. If
+ it is, then temporarily save it in r0. We would use r0 as
+ our base register here, but using r0 as a base register in
+ loads and stores means something different from what we
+ would like. */
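+ /* The out-of-line routines store register N at offset
+ (N - (FIRST_SAVRES_REGISTER + 1)) * 8 from their base register,
+ so bias the base downward to keep first_gp_reg_save at its
+ normal slot.  */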
+ int ool_adjust = (saving_GPRs_inline
+ ? 0
+ : (info->first_gp_reg_save
+ - (FIRST_SAVRES_REGISTER+1))*8);
+ HOST_WIDE_INT offset = (info->spe_gp_save_offset
+ + sp_offset - ool_adjust);
+
+ if (using_static_chain_p)
+ {
+ rtx r0 = gen_rtx_REG (Pmode, 0);
+ gcc_assert (info->first_gp_reg_save > 11);
+
+ emit_move_insn (r0, gen_rtx_REG (Pmode, 11));
+ }
+
+ spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
+ insn = emit_insn (gen_addsi3 (spe_save_area_ptr,
+ frame_reg_rtx,
+ GEN_INT (offset)));
+ /* We need to make sure the move to r11 gets noted for
+ properly outputting unwind information. */
+ if (!saving_GPRs_inline)
+ rs6000_frame_related (insn, frame_reg_rtx, offset,
+ NULL_RTX, NULL_RTX);
+ spe_offset = 0;
+ }
+
+ if (saving_GPRs_inline)
+ {
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx offset, addr, mem;
+
+ /* We're doing all this to ensure that the offset fits into
+ the immediate offset field of 'evstdd'.  */
+ gcc_assert (SPE_CONST_OFFSET_OK (reg_size * i + spe_offset));
+
+ offset = GEN_INT (reg_size * i + spe_offset);
+ addr = gen_rtx_PLUS (Pmode, spe_save_area_ptr, offset);
+ mem = gen_rtx_MEM (V2SImode, addr);
+
+ insn = emit_move_insn (mem, reg);
+
+ rs6000_frame_related (insn, spe_save_area_ptr,
+ info->spe_gp_save_offset
+ + sp_offset + reg_size * i,
+ offset, const0_rtx);
+ }
+ }
+ else
+ {
+ rtx par;
+
+ par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
+ 0, reg_mode,
+ /*savep=*/true, /*gpr=*/true,
+ /*exitp=*/false);
+ insn = emit_insn (par);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+
+ /* Move the static chain pointer back. */
+ if (using_static_chain_p && !spe_regs_addressable_via_sp)
+ emit_move_insn (gen_rtx_REG (Pmode, 11), gen_rtx_REG (Pmode, 0));
+ }
+ else if (!WORLD_SAVE_P (info) && !saving_GPRs_inline)
+ {
+ rtx par;
+
+ /* Need to adjust r11 if we saved any FPRs. */
+ if (info->first_fp_reg_save != 64)
+ {
+ rtx r11 = gen_rtx_REG (reg_mode, 11);
+ rtx offset = GEN_INT (info->total_size
+ + (-8 * (64-info->first_fp_reg_save)));
+ rtx ptr_reg = (sp_reg_rtx == frame_reg_rtx
+ ? sp_reg_rtx : r11);
+
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (r11, ptr_reg, offset)
+ : gen_adddi3 (r11, ptr_reg, offset));
+ }
+
+ par = rs6000_make_savres_rtx (info, frame_reg_rtx,
+ info->gp_save_offset + sp_offset,
+ reg_mode,
+ /*savep=*/true, /*gpr=*/true,
+ /*exitp=*/false);
+ insn = emit_insn (par);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ else if (!WORLD_SAVE_P (info) && using_store_multiple)
+ {
+ rtvec p;
+ int i;
+ p = rtvec_alloc (32 - info->first_gp_reg_save);
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ else if (!WORLD_SAVE_P (info))
+ {
+ int i;
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ }
+
+ /* ??? There's no need to emit actual instructions here, but it's the
+ easiest way to get the frame unwind information emitted. */
+ if (crtl->calls_eh_return)
+ {
+ unsigned int i, regno;
+
+ /* In AIX ABI we need to pretend we save r2 here. */
+ if (TARGET_AIX)
+ {
+ rtx addr, reg, mem;
+
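+ /* The slot at 5 * reg_size from the frame base is the ABI's
+ reserved TOC save area (offset 20 on 32-bit, 40 on 64-bit).  */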
+ reg = gen_rtx_REG (reg_mode, 2);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (sp_offset + 5 * reg_size));
+ mem = gen_frame_mem (reg_mode, addr);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
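+ /* Discard the store itself; only the frame-related notes attached
+ to the insn are wanted (see the ??? comment above).  */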
+ PATTERN (insn) = gen_blockage ();
+ }
+
+ for (i = 0; ; ++i)
+ {
+ regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+
+ emit_frame_save (frame_reg_rtx, frame_ptr_rtx, reg_mode, regno,
+ info->ehrd_offset + sp_offset
+ + reg_size * (int) i,
+ info->total_size);
+ }
+ }
+
+ /* Save CR if we use any that must be preserved. */
+ if (!WORLD_SAVE_P (info) && info->cr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset + sp_offset));
+ rtx mem = gen_frame_mem (SImode, addr);
+ /* See the large comment above about why CR2_REGNO is used. */
+ rtx magic_eh_cr_reg = gen_rtx_REG (SImode, CR2_REGNO);
+
+ /* If r12 was used to hold the original sp, copy cr into r0 now
+ that it's free. */
+ if (REGNO (frame_reg_rtx) == 12)
+ {
+ rtx set;
+
+ cr_save_rtx = gen_rtx_REG (SImode, 0);
+ insn = emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ set = gen_rtx_SET (VOIDmode, cr_save_rtx, magic_eh_cr_reg);
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+ set,
+ REG_NOTES (insn));
+
+ }
+ insn = emit_move_insn (mem, cr_save_rtx);
+
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* Update the stack pointer and set the backchain pointer, unless
+ this is V.4, for which it was done previously.  */
+ if (!WORLD_SAVE_P (info) && info->push_p
+ && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
+ {
+ if (info->total_size < 32767)
+ sp_offset = info->total_size;
+ else
+ frame_reg_rtx = frame_ptr_rtx;
+ rs6000_emit_allocate_stack (info->total_size,
+ (frame_reg_rtx != sp_reg_rtx
+ && ((info->altivec_size != 0)
+ || (info->vrsave_mask != 0)
+ )),
+ FALSE);
+ if (frame_reg_rtx != sp_reg_rtx)
+ rs6000_emit_stack_tie ();
+ }
+
+ /* Set frame pointer, if needed. */
+ if (frame_pointer_needed)
+ {
+ insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
+ sp_reg_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* Save AltiVec registers if needed. Save here because the red zone does
+ not include AltiVec registers. */
+ if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+ {
+ int i;
+
+ /* There should be a non-inline version of this, for when we
+ are saving lots of vector registers.  */
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx areg, savereg, mem;
+ int offset;
+
+ offset = info->altivec_save_offset + sp_offset
+ + 16 * (i - info->first_altivec_reg_save);
+
+ savereg = gen_rtx_REG (V4SImode, i);
+
+ areg = gen_rtx_REG (Pmode, 0);
+ emit_move_insn (areg, GEN_INT (offset));
+
+ /* AltiVec addressing mode is [reg+reg]. */
+ mem = gen_frame_mem (V4SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
+
+ insn = emit_move_insn (mem, savereg);
+
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ areg, GEN_INT (offset));
+ }
+ }
+
+ /* VRSAVE is a bit vector representing which AltiVec registers
+ are used. The OS uses this to determine which vector
+ registers to save on a context switch. We need to save
+ VRSAVE on the stack frame, add whatever AltiVec registers we
+ used in this function, and do the corresponding magic in the
+ epilogue. */
+
+ if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
+ && info->vrsave_mask != 0)
+ {
+ rtx reg, mem, vrsave;
+ int offset;
+
+ /* Get VRSAVE into a GPR.  Note that ABI_V4 might be using r12
+ as frame_reg_rtx and r11 as the static chain pointer for
+ nested functions.  */
+ reg = gen_rtx_REG (SImode, 0);
+ vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
+ if (TARGET_MACHO)
+ emit_insn (gen_get_vrsave_internal (reg));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
+
+ if (!WORLD_SAVE_P (info))
+ {
+ /* Save VRSAVE. */
+ offset = info->vrsave_save_offset + sp_offset;
+ mem = gen_frame_mem (SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (offset)));
+ insn = emit_move_insn (mem, reg);
+ }
+
+ /* Include the registers in the mask. */
+ emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
+
+ insn = emit_insn (generate_set_vrsave (reg, info, 0));
+ }
+
+ /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
+ if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
+ || (DEFAULT_ABI == ABI_V4
+ && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
+ && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM)))
+ {
+ /* If emit_load_toc_table will use the link register, we need to save
+ it. We use R12 for this purpose because emit_load_toc_table
+ can use register 0. This allows us to use a plain 'blr' to return
+ from the procedure more often. */
+ int save_LR_around_toc_setup = (TARGET_ELF
+ && DEFAULT_ABI != ABI_AIX
+ && flag_pic
+ && ! info->lr_save_p
+ && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
+ if (save_LR_around_toc_setup)
+ {
+ rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
+
+ insn = emit_move_insn (frame_ptr_rtx, lr);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ rs6000_emit_load_toc_table (TRUE);
+
+ insn = emit_move_insn (lr, frame_ptr_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ rs6000_emit_load_toc_table (TRUE);
+ }
+
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && crtl->uses_pic_offset_table)
+ {
+ rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
+ rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
+
+ /* Save and restore LR locally around this call (in R0). */
+ if (!info->lr_save_p)
+ emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
+
+ emit_insn (gen_load_macho_picbase (src));
+
+ emit_move_insn (gen_rtx_REG (Pmode,
+ RS6000_PIC_OFFSET_TABLE_REGNUM),
+ lr);
+
+ if (!info->lr_save_p)
+ emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
+ }
+#endif
+}
+
+/* Write function prologue. */
+
+static void
+rs6000_output_function_prologue (FILE *file,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (TARGET_DEBUG_STACK)
+ debug_stack_info (info);
+
+ /* Write .extern for any function we will call to save and restore
+ fp values. */
+ if (info->first_fp_reg_save < 64
+ && !FP_SAVE_INLINE (info->first_fp_reg_save))
+ fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
+ SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
+ RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
+
+ /* Write .extern for AIX common mode routines, if needed. */
+ if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
+ {
+ fputs ("\t.extern __mulh\n", file);
+ fputs ("\t.extern __mull\n", file);
+ fputs ("\t.extern __divss\n", file);
+ fputs ("\t.extern __divus\n", file);
+ fputs ("\t.extern __quoss\n", file);
+ fputs ("\t.extern __quous\n", file);
+ common_mode_defined = 1;
+ }
+
+ if (! HAVE_prologue)
+ {
+ rtx prologue;
+
+ start_sequence ();
+
+ /* A NOTE_INSN_DELETED is supposed to be at the start and end of
+ the "toplevel" insn chain. */
+ emit_note (NOTE_INSN_DELETED);
+ rs6000_emit_prologue ();
+ emit_note (NOTE_INSN_DELETED);
+
+ /* Expand INSN_ADDRESSES so final() doesn't crash. */
+ {
+ rtx insn;
+ unsigned addr = 0;
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ {
+ INSN_ADDRESSES_NEW (insn, addr);
+ addr += 4;
+ }
+ }
+
+ prologue = get_insns ();
+ end_sequence ();
+
+ if (TARGET_DEBUG_STACK)
+ debug_rtx_list (prologue, 100);
+
+ emit_insn_before_noloc (prologue, BB_HEAD (ENTRY_BLOCK_PTR->next_bb),
+ ENTRY_BLOCK_PTR);
+ }
+
+ rs6000_pic_labelno++;
+}
+
+/* Non-zero if vmx regs are restored before the frame pop, zero if
+ we restore after the pop when possible. */
+#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
+
+/* Reload CR from REG. */
+
+static void
+rs6000_restore_saved_cr (rtx reg, int using_mfcr_multiple)
+{
+ int count = 0;
+ int i;
+
+ if (using_mfcr_multiple)
+ {
+ for (i = 0; i < 8; i++)
+ if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
+ count++;
+ gcc_assert (count);
+ }
+
+ if (using_mfcr_multiple && count > 1)
+ {
+ rtvec p;
+ int ndx;
+
+ p = rtvec_alloc (count);
+
+ ndx = 0;
+ for (i = 0; i < 8; i++)
+ if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
+ {
+ rtvec r = rtvec_alloc (2);
+ RTVEC_ELT (r, 0) = reg;
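+ /* The mtcrf field mask numbers CR fields from the high-order
+ bit, so CR field I corresponds to mask bit 7 - I.  */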
+ RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
+ RTVEC_ELT (p, ndx) =
+ gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
+ gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
+ ndx++;
+ }
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ gcc_assert (ndx == count);
+ }
+ else
+ for (i = 0; i < 8; i++)
+ if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
+ {
+ emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
+ CR0_REGNO+i),
+ reg));
+ }
+}
+
+/* Emit function epilogue as insns.
+
+ At present, dwarf2out_frame_debug_expr doesn't understand
+ register restores, so we don't bother setting RTX_FRAME_RELATED_P
+ anywhere in the epilogue. Most of the insns below would in any case
+ need special notes to explain where r11 is in relation to the stack. */
+
+void
+rs6000_emit_epilogue (int sibcall)
+{
+ rs6000_stack_t *info;
+ int restoring_GPRs_inline;
+ int restoring_FPRs_inline;
+ int using_load_multiple;
+ int using_mtcr_multiple;
+ int use_backchain_to_restore_sp;
+ int restore_lr;
+ int strategy;
+ int sp_offset = 0;
+ rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
+ rtx frame_reg_rtx = sp_reg_rtx;
+ enum machine_mode reg_mode = Pmode;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ int i;
+
+ info = rs6000_stack_info ();
+
+ if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
+ {
+ reg_mode = V2SImode;
+ reg_size = 8;
+ }
+
+ strategy = rs6000_savres_strategy (info, /*savep=*/false,
+ /*static_chain_p=*/0, sibcall);
+ using_load_multiple = strategy & SAVRES_MULTIPLE;
+ restoring_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
+ restoring_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
+ using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
+ || rs6000_cpu == PROCESSOR_PPC603
+ || rs6000_cpu == PROCESSOR_PPC750
+ || optimize_size);
+ /* Restore via the backchain when we have a large frame, since this
+ is more efficient than an addis, addi pair.  The second condition
+ here will not trigger at the moment; we don't actually need a
+ frame pointer for alloca, but the generic parts of the compiler
+ give us one anyway.  */
+ use_backchain_to_restore_sp = (info->total_size > 32767
+ || info->total_size
+ + (info->lr_save_p ? info->lr_save_offset : 0)
+ > 32767
+ || (cfun->calls_alloca
+ && !frame_pointer_needed));
+ restore_lr = (info->lr_save_p
+ && restoring_GPRs_inline
+ && restoring_FPRs_inline);
+
+ if (WORLD_SAVE_P (info))
+ {
+ int i, j;
+ char rname[30];
+ const char *alloc_rname;
+ rtvec p;
+
+ /* eh_rest_world_r10 will return to the location saved in the LR
+ stack slot (which is not likely to be our caller).
+ Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
+ rest_world is similar, except any R10 parameter is ignored.
+ The exception-handling stuff that was here in 2.95 is no
+ longer necessary. */
+
+ p = rtvec_alloc (9
+ + 1
+ + 32 - info->first_gp_reg_save
+ + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
+ + 63 + 1 - info->first_fp_reg_save);
+
+ strcpy (rname, ((crtl->calls_eh_return) ?
+ "*eh_rest_world_r10" : "*rest_world"));
+ alloc_rname = ggc_strdup (rname);
+
+ j = 0;
+ RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LR_REGNO));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
+ /* The instruction pattern requires a clobber here;
+ it is shared with the restVEC helper. */
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
+
+ {
+ /* CR register traditionally saved as CR2. */
+ rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
+ {
+ rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->altivec_save_offset
+ + 16 * i));
+ rtx mem = gen_frame_mem (V4SImode, addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ for (i = 0; info->first_fp_reg_save + i <= 63; i++)
+ {
+ rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode),
+ info->first_fp_reg_save + i);
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + 8 * i));
+ rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode), addr);
+
+ RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
+ }
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
+ RTVEC_ELT (p, j++)
+ = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+
+ return;
+ }
+
+ /* frame_reg_rtx + sp_offset points to the top of this stack frame. */
+ if (info->push_p)
+ sp_offset = info->total_size;
+
+ /* Restore AltiVec registers if we must do so before adjusting the
+ stack. */
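+ /* (A save slot more than 220 bytes (32-bit) or 288 bytes (64-bit)
+ below the frame top lies outside the red zone and would be
+ clobbered once the stack pointer is adjusted.)  */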
+ if (TARGET_ALTIVEC_ABI
+ && info->altivec_size != 0
+ && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
+ || (DEFAULT_ABI != ABI_V4
+ && info->altivec_save_offset < (TARGET_32BIT ? -220 : -288))))
+ {
+ int i;
+
+ if (use_backchain_to_restore_sp)
+ {
+ frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+ emit_move_insn (frame_reg_rtx,
+ gen_rtx_MEM (Pmode, sp_reg_rtx));
+ sp_offset = 0;
+ }
+ else if (frame_pointer_needed)
+ frame_reg_rtx = hard_frame_pointer_rtx;
+
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx addr, areg, mem;
+
+ areg = gen_rtx_REG (Pmode, 0);
+ emit_move_insn
+ (areg, GEN_INT (info->altivec_save_offset
+ + sp_offset
+ + 16 * (i - info->first_altivec_reg_save)));
+
+ /* AltiVec addressing mode is [reg+reg]. */
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
+ mem = gen_frame_mem (V4SImode, addr);
+
+ emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
+ }
+ }
+
+ /* Restore VRSAVE if we must do so before adjusting the stack. */
+ if (TARGET_ALTIVEC
+ && TARGET_ALTIVEC_VRSAVE
+ && info->vrsave_mask != 0
+ && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
+ || (DEFAULT_ABI != ABI_V4
+ && info->vrsave_save_offset < (TARGET_32BIT ? -220 : -288))))
+ {
+ rtx addr, mem, reg;
+
+ if (frame_reg_rtx == sp_reg_rtx)
+ {
+ if (use_backchain_to_restore_sp)
+ {
+ frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+ emit_move_insn (frame_reg_rtx,
+ gen_rtx_MEM (Pmode, sp_reg_rtx));
+ sp_offset = 0;
+ }
+ else if (frame_pointer_needed)
+ frame_reg_rtx = hard_frame_pointer_rtx;
+ }
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->vrsave_save_offset + sp_offset));
+ mem = gen_frame_mem (SImode, addr);
+ reg = gen_rtx_REG (SImode, 12);
+ emit_move_insn (reg, mem);
+
+ emit_insn (generate_set_vrsave (reg, info, 1));
+ }
+
+ /* If we have a large stack frame, restore the old stack pointer
+ using the backchain. */
+ if (use_backchain_to_restore_sp)
+ {
+ if (frame_reg_rtx == sp_reg_rtx)
+ {
+ /* Under V.4, don't reset the stack pointer until after we're done
+ loading the saved registers. */
+ if (DEFAULT_ABI == ABI_V4)
+ frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+
+ emit_move_insn (frame_reg_rtx,
+ gen_rtx_MEM (Pmode, sp_reg_rtx));
+ sp_offset = 0;
+ }
+ else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
+ && DEFAULT_ABI == ABI_V4)
+ /* frame_reg_rtx has been set up by the altivec restore. */
+ ;
+ else
+ {
+ emit_move_insn (sp_reg_rtx, frame_reg_rtx);
+ frame_reg_rtx = sp_reg_rtx;
+ }
+ }
+ /* If we have a frame pointer, we can restore the old stack pointer
+ from it. */
+ else if (frame_pointer_needed)
+ {
+ frame_reg_rtx = sp_reg_rtx;
+ if (DEFAULT_ABI == ABI_V4)
+ frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (frame_reg_rtx, hard_frame_pointer_rtx,
+ GEN_INT (info->total_size))
+ : gen_adddi3 (frame_reg_rtx, hard_frame_pointer_rtx,
+ GEN_INT (info->total_size)));
+ sp_offset = 0;
+ }
+ else if (info->push_p
+ && DEFAULT_ABI != ABI_V4
+ && !crtl->calls_eh_return)
+ {
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (info->total_size))
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+ GEN_INT (info->total_size)));
+ sp_offset = 0;
+ }
+
+ /* Restore AltiVec registers if we have not done so already. */
+ if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
+ && TARGET_ALTIVEC_ABI
+ && info->altivec_size != 0
+ && (DEFAULT_ABI == ABI_V4
+ || info->altivec_save_offset >= (TARGET_32BIT ? -220 : -288)))
+ {
+ int i;
+
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx addr, areg, mem;
+
+ areg = gen_rtx_REG (Pmode, 0);
+ emit_move_insn
+ (areg, GEN_INT (info->altivec_save_offset
+ + sp_offset
+ + 16 * (i - info->first_altivec_reg_save)));
+
+ /* AltiVec addressing mode is [reg+reg]. */
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
+ mem = gen_frame_mem (V4SImode, addr);
+
+ emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
+ }
+ }
+
+ /* Restore VRSAVE if we have not done so already. */
+ if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
+ && TARGET_ALTIVEC
+ && TARGET_ALTIVEC_VRSAVE
+ && info->vrsave_mask != 0
+ && (DEFAULT_ABI == ABI_V4
+ || info->vrsave_save_offset >= (TARGET_32BIT ? -220 : -288)))
+ {
+ rtx addr, mem, reg;
+
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->vrsave_save_offset + sp_offset));
+ mem = gen_frame_mem (SImode, addr);
+ reg = gen_rtx_REG (SImode, 12);
+ emit_move_insn (reg, mem);
+
+ emit_insn (generate_set_vrsave (reg, info, 1));
+ }
+
+ /* Get the old lr if we saved it. If we are restoring registers
+ out-of-line, then the out-of-line routines can do this for us. */
+ if (restore_lr)
+ {
+ rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
+ info->lr_save_offset + sp_offset);
+
+ emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
+ }
+
+ /* Get the old cr if we saved it. */
+ if (info->cr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset + sp_offset));
+ rtx mem = gen_frame_mem (SImode, addr);
+
+ emit_move_insn (gen_rtx_REG (SImode, 12), mem);
+ }
+
+ /* Set LR here to try to overlap restores below. */
+ if (restore_lr)
+ emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
+ gen_rtx_REG (Pmode, 0));
+
+ /* Load exception handler data registers, if needed. */
+ if (crtl->calls_eh_return)
+ {
+ unsigned int i, regno;
+
+ if (TARGET_AIX)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (sp_offset + 5 * reg_size));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
+ }
+
+ for (i = 0; ; ++i)
+ {
+ rtx mem;
+
+ regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+
+ mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
+ info->ehrd_offset + sp_offset
+ + reg_size * (int) i);
+
+ emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
+ }
+ }
+
+ /* Restore GPRs. This is done as a PARALLEL if we are using
+ the load-multiple instructions. */
+ if (TARGET_SPE_ABI
+ && info->spe_64bit_regs_used != 0
+ && info->first_gp_reg_save != 32)
+ {
+ /* Determine whether we can address all of the registers that need
+ to be saved with an offset from the stack pointer that fits in
+ the small const field for SPE memory instructions. */
+ int spe_regs_addressable_via_sp
+ = (SPE_CONST_OFFSET_OK(info->spe_gp_save_offset + sp_offset
+ + (32 - info->first_gp_reg_save - 1) * reg_size)
+ && restoring_GPRs_inline);
+ int spe_offset;
+
+ if (spe_regs_addressable_via_sp)
+ spe_offset = info->spe_gp_save_offset + sp_offset;
+ else
+ {
+ rtx old_frame_reg_rtx = frame_reg_rtx;
+ /* Make r11 point to the start of the SPE save area. We worried about
+ not clobbering it when we were saving registers in the prologue.
+ There's no need to worry here because the static chain is passed
+ anew to every function. */
+ int ool_adjust = (restoring_GPRs_inline
+ ? 0
+ : (info->first_gp_reg_save
+ - (FIRST_SAVRES_REGISTER+1))*8);
+
+ if (frame_reg_rtx == sp_reg_rtx)
+ frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+ emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
+ GEN_INT (info->spe_gp_save_offset
+ + sp_offset
+ - ool_adjust)));
+ /* Keep the invariant that frame_reg_rtx + sp_offset points
+ at the top of the stack frame. */
+ sp_offset = -info->spe_gp_save_offset;
+
+ spe_offset = 0;
+ }
+
+ if (restoring_GPRs_inline)
+ {
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
+ {
+ rtx offset, addr, mem;
+
+ /* We're doing all this to ensure that the offset fits into
+ the immediate field of 'evldd'.  */
+ gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
+
+ offset = GEN_INT (spe_offset + reg_size * i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
+ mem = gen_rtx_MEM (V2SImode, addr);
+
+ emit_move_insn (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ mem);
+ }
+ }
+ else
+ {
+ rtx par;
+
+ par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
+ 0, reg_mode,
+ /*savep=*/false, /*gpr=*/true,
+ /*exitp=*/true);
+ emit_jump_insn (par);
+
+ /* We don't want anybody else emitting insns after this jump;
+ the out-of-line routine returns directly to our caller.  */
+ return;
+ }
+ }
+ else if (!restoring_GPRs_inline)
+ {
+ /* We are jumping to an out-of-line function. */
+ bool can_use_exit = info->first_fp_reg_save == 64;
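+ /* The out-of-line GPR routine may also pop the frame and return
+ only if no FPRs remain to be restored afterwards.  */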
+ rtx par;
+
+ /* Emit stack reset code if we need it. */
+ if (can_use_exit)
+ rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
+ sp_offset, can_use_exit);
+ else
+ emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11),
+ sp_reg_rtx,
+ GEN_INT (sp_offset - info->fp_size)));
+
+ par = rs6000_make_savres_rtx (info, frame_reg_rtx,
+ info->gp_save_offset, reg_mode,
+ /*savep=*/false, /*gpr=*/true,
+ /*exitp=*/can_use_exit);
+
+ if (can_use_exit)
+ {
+ if (info->cr_save_p)
+ rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12),
+ using_mtcr_multiple);
+
+ emit_jump_insn (par);
+
+ /* We don't want anybody else emitting insns after this jump;
+ the out-of-line routine returns directly to our caller.  */
+ return;
+ }
+ else
+ emit_insn (par);
+ }
+ else if (using_load_multiple)
+ {
+ rtvec p;
+ p = rtvec_alloc (32 - info->first_gp_reg_save);
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ RTVEC_ELT (p, i) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+ mem);
+ }
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ }
+ else
+ {
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ rtx mem = gen_frame_mem (reg_mode, addr);
+
+ emit_move_insn (gen_rtx_REG (reg_mode,
+ info->first_gp_reg_save + i), mem);
+ }
+ }
+
+ /* Restore fpr's if we need to do it without calling a function. */
+ if (restoring_FPRs_inline)
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
+ && ! call_used_regs[info->first_fp_reg_save+i]))
+ {
+ rtx addr, mem;
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset
+ + 8 * i));
+ mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode), addr);
+
+ emit_move_insn (gen_rtx_REG (((TARGET_HARD_FLOAT
+ && TARGET_DOUBLE_FLOAT)
+ ? DFmode : SFmode),
+ info->first_fp_reg_save + i),
+ mem);
+ }
+
+ /* If we saved cr, restore it here. Just those that were used. */
+ if (info->cr_save_p)
+ rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple);
+
+ /* If this is V.4, unwind the stack pointer after all of the loads
+ have been done. */
+ rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
+ sp_offset, !restoring_FPRs_inline);
+
+ if (crtl->calls_eh_return)
+ {
+ rtx sa = EH_RETURN_STACKADJ_RTX;
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx, sa)
+ : gen_adddi3 (sp_reg_rtx, sp_reg_rtx, sa));
+ }
+
+ if (!sibcall)
+ {
+ rtvec p;
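+ /* Four fixed elements when branching to the out-of-line FP restore
+ routine (the RETURN, the LR clobber, the routine symbol USE and
+ the USE of R11) plus one SET per restored FPR.  */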
+ if (! restoring_FPRs_inline)
+ p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
+ else
+ p = rtvec_alloc (2);
+
+ RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
+ RTVEC_ELT (p, 1) = (restoring_FPRs_inline
+ ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
+ : gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 65)));
+
+ /* If we have to restore more than two FP registers, branch to the
+ restore function. It will return to our caller. */
+ if (! restoring_FPRs_inline)
+ {
+ int i;
+ rtx sym;
+
+ sym = rs6000_savres_routine_sym (info,
+ /*savep=*/false,
+ /*gpr=*/false,
+ /*exitp=*/true);
+ RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
+ RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (Pmode, 11));
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx addr, mem;
+ addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
+ GEN_INT (info->fp_save_offset + 8*i));
+ mem = gen_frame_mem (DFmode, addr);
+
+ RTVEC_ELT (p, i+4) =
+ gen_rtx_SET (VOIDmode,
+ gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
+ mem);
+ }
+ }
+
+ emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ }
+}
+
+/* Write function epilogue. */
+
+static void
+rs6000_output_function_epilogue (FILE *file,
+ HOST_WIDE_INT size ATTRIBUTE_UNUSED)
+{
+ if (! HAVE_epilogue)
+ {
+ rtx insn = get_last_insn ();
+ /* If the last insn was a BARRIER, we don't have to write anything except
+ the trace table. */
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn == 0 || GET_CODE (insn) != BARRIER)
+ {
+ /* This is slightly ugly, but at least we don't have two
+ copies of the epilogue-emitting code. */
+ start_sequence ();
+
+ /* A NOTE_INSN_DELETED is supposed to be at the start
+ and end of the "toplevel" insn chain. */
+ emit_note (NOTE_INSN_DELETED);
+ rs6000_emit_epilogue (FALSE);
+ emit_note (NOTE_INSN_DELETED);
+
+ /* Expand INSN_ADDRESSES so final() doesn't crash. */
+ {
+ rtx insn;
+ unsigned addr = 0;
+ for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
+ {
+ INSN_ADDRESSES_NEW (insn, addr);
+ addr += 4;
+ }
+ }
+
+ if (TARGET_DEBUG_STACK)
+ debug_rtx_list (get_insns (), 100);
+ final (get_insns (), file, FALSE);
+ end_sequence ();
+ }
+ }
+
+#if TARGET_MACHO
+ macho_branch_islands ();
+ /* Mach-O doesn't support labels at the end of objects, so if
+ it looks like we might want one, insert a NOP. */
+ {
+ rtx insn = get_last_insn ();
+ while (insn
+ && NOTE_P (insn)
+ && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
+ insn = PREV_INSN (insn);
+ if (insn
+ && (LABEL_P (insn)
+ || (NOTE_P (insn)
+ && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
+ fputs ("\tnop\n", file);
+ }
+#endif
+
+ /* Output a traceback table here. See /usr/include/sys/debug.h for info
+ on its format.
+
+ We don't output a traceback table if -finhibit-size-directive was
+ used. The documentation for -finhibit-size-directive reads
+ ``don't output a @code{.size} assembler directive, or anything
+ else that would cause trouble if the function is split in the
+ middle, and the two halves are placed at locations far apart in
+ memory.'' The traceback table has this property, since it
+ includes the offset from the start of the function to the
+ traceback table itself.
+
+ System V.4 PowerPC targets (and the embedded ABI derived from them)
+ use a different traceback table.  */
+ if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
+ && rs6000_traceback != traceback_none && !cfun->is_thunk)
+ {
+ const char *fname = NULL;
+ const char *language_string = lang_hooks.name;
+ int fixed_parms = 0, float_parms = 0, parm_info = 0;
+ int i;
+ int optional_tbtab;
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (rs6000_traceback == traceback_full)
+ optional_tbtab = 1;
+ else if (rs6000_traceback == traceback_part)
+ optional_tbtab = 0;
+ else
+ optional_tbtab = !optimize_size && !TARGET_ELF;
+
+ if (optional_tbtab)
+ {
+ fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+ while (*fname == '.') /* V.4 encodes . in the name */
+ fname++;
+
+ /* Need label immediately before tbtab, so we can compute
+ its offset from the function start. */
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
+ ASM_OUTPUT_LABEL (file, fname);
+ }
+
+ /* The .tbtab pseudo-op can only be used for the first eight
+ expressions, since it can't handle the possibly variable
+ length fields that follow.  However, if you omit the optional
+ fields, the assembler outputs zeros for all optional fields
+ anyway, giving each variable-length field its minimum length
+ (as defined in sys/debug.h).  Thus we cannot use the .tbtab
+ pseudo-op at all.  */
+
+ /* An all-zero word flags the start of the tbtab, for debuggers
+ that have to find it by searching forward from the entry
+ point or from the current pc. */
+ fputs ("\t.long 0\n", file);
+
+ /* Tbtab format type. Use format type 0. */
+ fputs ("\t.byte 0,", file);
+
+ /* Language type. Unfortunately, there does not seem to be any
+ official way to discover the language being compiled, so we
+ use language_string.
+ C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
+ Java is 13. Objective-C is 14. Objective-C++ isn't assigned
+ a number, so for now use 9. */
+ if (! strcmp (language_string, "GNU C"))
+ i = 0;
+ else if (! strcmp (language_string, "GNU F77")
+ || ! strcmp (language_string, "GNU Fortran"))
+ i = 1;
+ else if (! strcmp (language_string, "GNU Pascal"))
+ i = 2;
+ else if (! strcmp (language_string, "GNU Ada"))
+ i = 3;
+ else if (! strcmp (language_string, "GNU C++")
+ || ! strcmp (language_string, "GNU Objective-C++"))
+ i = 9;
+ else if (! strcmp (language_string, "GNU Java"))
+ i = 13;
+ else if (! strcmp (language_string, "GNU Objective-C"))
+ i = 14;
+ else
+ gcc_unreachable ();
+ fprintf (file, "%d,", i);
+
+ /* 8 single bit fields: global linkage (not set for C extern linkage,
+ apparently a PL/I convention?), out-of-line epilogue/prologue, offset
+ from start of procedure stored in tbtab, internal function, function
+ has controlled storage, function has no toc, function uses fp,
+ function logs/aborts fp operations. */
+ /* Assume that fp operations are used if any fp reg must be saved. */
+ fprintf (file, "%d,",
+ (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
+
+ /* 6 bitfields: function is interrupt handler, name present in
+ proc table, function calls alloca, on condition directives
+ (controls stack walks, 3 bits), saves condition reg, saves
+ link reg. */
+ /* The `function calls alloca' bit seems to be set whenever reg 31 is
+ set up as a frame pointer, even when there is no alloca call. */
+ fprintf (file, "%d,",
+ ((optional_tbtab << 6)
+ | ((optional_tbtab & frame_pointer_needed) << 5)
+ | (info->cr_save_p << 1)
+ | (info->lr_save_p)));
+
+ /* 3 bitfields: saves backchain, fixup code, number of fpr saved
+ (6 bits). */
+ fprintf (file, "%d,",
+ (info->push_p << 7) | (64 - info->first_fp_reg_save));
+
+ /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
+ fprintf (file, "%d,", (32 - first_reg_to_save ()));
+
+ if (optional_tbtab)
+ {
+ /* Compute the parameter info from the function decl argument
+ list. */
+ tree decl;
+ int next_parm_info_bit = 31;
+
+ for (decl = DECL_ARGUMENTS (current_function_decl);
+ decl; decl = TREE_CHAIN (decl))
+ {
+ rtx parameter = DECL_INCOMING_RTL (decl);
+ enum machine_mode mode = GET_MODE (parameter);
+
+ if (GET_CODE (parameter) == REG)
+ {
+ if (SCALAR_FLOAT_MODE_P (mode))
+ {
+ int bits;
+
+ float_parms++;
+
+ switch (mode)
+ {
+ case SFmode:
+ case SDmode:
+ bits = 0x2;
+ break;
+
+ case DFmode:
+ case DDmode:
+ case TFmode:
+ case TDmode:
+ bits = 0x3;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* If only one bit will fit, don't OR in this entry.  */
+ if (next_parm_info_bit > 0)
+ parm_info |= (bits << (next_parm_info_bit - 1));
+ next_parm_info_bit -= 2;
+ }
+ else
+ {
+ fixed_parms += ((GET_MODE_SIZE (mode)
+ + (UNITS_PER_WORD - 1))
+ / UNITS_PER_WORD);
+ next_parm_info_bit -= 1;
+ }
+ }
+ }
+ }
+
+ /* Number of fixed-point parameters.  */
+ /* This is actually the number of words of fixed-point parameters;
+ an 8-byte struct counts as 2, so the maximum value is 8.  */
+ fprintf (file, "%d,", fixed_parms);
+
+ /* 2 bitfields: number of floating point parameters (7 bits), parameters
+ all on stack. */
+ /* This is actually the number of fp registers that hold parameters,
+ so the maximum value is 13.  */
+ /* Set parameters on stack bit if parameters are not in their original
+ registers, regardless of whether they are on the stack? Xlc
+ seems to set the bit when not optimizing. */
+ fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
+
+ if (! optional_tbtab)
+ return;
+
+ /* Optional fields follow. Some are variable length. */
+
+ /* Parameter types, left-adjusted bit fields: 0 fixed, 10 single float,
+ 11 double float.  */
+ /* There is an entry for each parameter in a register, in the order that
+ they occur in the parameter list. Any intervening arguments on the
+ stack are ignored. If the list overflows a long (max possible length
+ 34 bits) then completely leave off all elements that don't fit. */
+ /* Only emit this long if there was at least one parameter. */
+ if (fixed_parms || float_parms)
+ fprintf (file, "\t.long %d\n", parm_info);
+
+ /* Offset from start of code to tb table. */
+ fputs ("\t.long ", file);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
+ if (TARGET_AIX)
+ RS6000_OUTPUT_BASENAME (file, fname);
+ else
+ assemble_name (file, fname);
+ putc ('-', file);
+ rs6000_output_function_entry (file, fname);
+ putc ('\n', file);
+
+ /* Interrupt handler mask. */
+ /* Omit this long, since we never set the interrupt handler bit
+ above. */
+
+ /* Number of CTL (controlled storage) anchors. */
+ /* Omit this long, since the has_ctl bit is never set above. */
+
+ /* Displacement into stack of each CTL anchor. */
+ /* Omit this list of longs, because there are no CTL anchors. */
+
+ /* Length of function name. */
+ if (*fname == '*')
+ ++fname;
+ fprintf (file, "\t.short %d\n", (int) strlen (fname));
+
+ /* Function name. */
+ assemble_string (fname, strlen (fname));
+
+ /* Register for alloca automatic storage; this is always reg 31.
+ Only emit this if the alloca bit was set above. */
+ if (frame_pointer_needed)
+ fputs ("\t.byte 31\n", file);
+
+ fputs ("\t.align 2\n", file);
+ }
+}
+
+/* A C compound statement that outputs the assembler code for a thunk
+ function, used to implement C++ virtual function calls with
+ multiple inheritance. The thunk acts as a wrapper around a virtual
+ function, adjusting the implicit object parameter before handing
+ control off to the real function.
+
+ First, emit code to add the integer DELTA to the location that
+ contains the incoming first argument. Assume that this argument
+ contains a pointer, and is the one used to pass the `this' pointer
+ in C++. This is the incoming argument *before* the function
+ prologue, e.g. `%o0' on a sparc. The addition must preserve the
+ values of all other incoming arguments.
+
+ After the addition, emit code to jump to FUNCTION, which is a
+ `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
+ not touch the return address. Hence returning from FUNCTION will
+ return to whoever called the current `thunk'.
+
+ The effect must be as if FUNCTION had been called directly with the
+ adjusted first argument. This macro is responsible for emitting
+ all of the code for a thunk function; output_function_prologue()
+ and output_function_epilogue() are not invoked.
+
+ The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
+ been extracted from it.) It might possibly be useful on some
+ targets, but probably not.
+
+ If you do not define this macro, the target-independent code in the
+ C++ frontend will generate a less efficient heavyweight thunk that
+ calls FUNCTION instead of jumping to it. The generic approach does
+ not support varargs. */
+
+static void
+rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
+ tree function)
+{
+ rtx this_rtx, insn, funexp;
+
+ reload_completed = 1;
+ epilogue_completed = 1;
+
+ /* Mark the end of the (empty) prologue. */
+ emit_note (NOTE_INSN_PROLOGUE_END);
+
+ /* Find the "this" pointer. If the function returns a structure,
+ the structure return pointer is in r3. */
+ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
+ this_rtx = gen_rtx_REG (Pmode, 4);
+ else
+ this_rtx = gen_rtx_REG (Pmode, 3);
+
+ /* Apply the constant offset, if required. */
+ if (delta)
+ {
+ rtx delta_rtx = GEN_INT (delta);
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (this_rtx, this_rtx, delta_rtx)
+ : gen_adddi3 (this_rtx, this_rtx, delta_rtx));
+ }
+
+ /* Apply the offset from the vtable, if required. */
+ if (vcall_offset)
+ {
+ rtx vcall_offset_rtx = GEN_INT (vcall_offset);
+ rtx tmp = gen_rtx_REG (Pmode, 12);
+
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
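+ /* If VCALL_OFFSET does not fit in a signed 16-bit displacement,
+ add it to TMP with a separate add before loading through TMP;
+ otherwise fold it into the load address below.  */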
+ if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
+ {
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (tmp, tmp, vcall_offset_rtx)
+ : gen_adddi3 (tmp, tmp, vcall_offset_rtx));
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
+ }
+ else
+ {
+ rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
+
+ emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
+ }
+ emit_insn (TARGET_32BIT
+ ? gen_addsi3 (this_rtx, this_rtx, tmp)
+ : gen_adddi3 (this_rtx, this_rtx, tmp));
+ }
+
+ /* Generate a tail call to the target function. */
+ if (!TREE_USED (function))
+ {
+ assemble_external (function);
+ TREE_USED (function) = 1;
+ }
+ funexp = XEXP (DECL_RTL (function), 0);
+ funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
+
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ funexp = machopic_indirect_call_target (funexp);
+#endif
+
+ /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
+ generate sibcall RTL explicitly. */
+ insn = emit_call_insn (
+ gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (4,
+ gen_rtx_CALL (VOIDmode,
+ funexp, const0_rtx),
+ gen_rtx_USE (VOIDmode, const0_rtx),
+ gen_rtx_USE (VOIDmode,
+ gen_rtx_REG (SImode,
+ LR_REGNO)),
+ gen_rtx_RETURN (VOIDmode))));
+ SIBLING_CALL_P (insn) = 1;
+ emit_barrier ();
+
+ /* Run just enough of rest_of_compilation to get the insns emitted.
+ There's not really enough bulk here to make other passes such as
+ instruction scheduling worthwhile.  Note that use_thunk calls
+ assemble_start_function and assemble_end_function.  */
+ insn = get_insns ();
+ insn_locators_alloc ();
+ shorten_branches (insn);
+ final_start_function (insn, file, 1);
+ final (insn, file, 1);
+ final_end_function ();
+ free_after_compilation (cfun);
+
+ reload_completed = 0;
+ epilogue_completed = 0;
+}
+
+/* A quick summary of the various types of 'constant-pool tables'
+ under PowerPC:
+
+ Target            Flags            Name             One table per
+ AIX               (none)           AIX TOC          object file
+ AIX               -mfull-toc       AIX TOC          object file
+ AIX               -mminimal-toc    AIX minimal TOC  translation unit
+ SVR4/EABI         (none)           SVR4 SDATA       object file
+ SVR4/EABI         -fpic            SVR4 pic         object file
+ SVR4/EABI         -fPIC            SVR4 PIC         translation unit
+ SVR4/EABI         -mrelocatable    EABI TOC         function
+ SVR4/EABI         -maix            AIX TOC          object file
+ SVR4/EABI         -maix -mminimal-toc
+                                    AIX minimal TOC  translation unit
+
+ Name              Reg.  Set by   entries   contains:
+                                  made by   addrs?   fp?      sum?
+
+ AIX TOC            2    crt0     as        Y        option   option
+ AIX minimal TOC   30    prolog   gcc       Y        Y        option
+ SVR4 SDATA        13    crt0     gcc       N        Y        N
+ SVR4 pic          30    prolog   ld        Y        not yet  N
+ SVR4 PIC          30    prolog   gcc       Y        option   option
+ EABI TOC          30    prolog   gcc       Y        option   option
+
+*/
+
+/* Hash functions for the hash table. */
+
+static unsigned
+rs6000_hash_constant (rtx k)
+{
+ enum rtx_code code = GET_CODE (k);
+ enum machine_mode mode = GET_MODE (k);
+ unsigned result = (code << 3) ^ mode;
+ const char *format;
+ int flen, fidx;
+
+ format = GET_RTX_FORMAT (code);
+ flen = strlen (format);
+ fidx = 0;
+
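+  /* The multipliers 613 and 1231 used below are primes chosen to
+     mix the hash.  */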
+ switch (code)
+ {
+ case LABEL_REF:
+ return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
+
+ case CONST_DOUBLE:
+ if (mode != VOIDmode)
+ return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
+ flen = 2;
+ break;
+
+ case CODE_LABEL:
+ fidx = 3;
+ break;
+
+ default:
+ break;
+ }
+
+ for (; fidx < flen; fidx++)
+ switch (format[fidx])
+ {
+ case 's':
+ {
+ unsigned i, len;
+ const char *str = XSTR (k, fidx);
+ len = strlen (str);
+ result = result * 613 + len;
+ for (i = 0; i < len; i++)
+ result = result * 613 + (unsigned) str[i];
+ break;
+ }
+ case 'u':
+ case 'e':
+ result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
+ break;
+ case 'i':
+ case 'n':
+ result = result * 613 + (unsigned) XINT (k, fidx);
+ break;
+ case 'w':
+ if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
+ result = result * 613 + (unsigned) XWINT (k, fidx);
+ else
+ {
+ size_t i;
+ for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
+ result = result * 613 + (unsigned) (XWINT (k, fidx)
+ >> CHAR_BIT * i);
+ }
+ break;
+ case '0':
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ return result;
+}
+
+static unsigned
+toc_hash_function (const void *hash_entry)
+{
+ const struct toc_hash_struct *thc =
+ (const struct toc_hash_struct *) hash_entry;
+ return rs6000_hash_constant (thc->key) ^ thc->key_mode;
+}
+
+/* Compare H1 and H2 for equivalence. */
+
+static int
+toc_hash_eq (const void *h1, const void *h2)
+{
+ rtx r1 = ((const struct toc_hash_struct *) h1)->key;
+ rtx r2 = ((const struct toc_hash_struct *) h2)->key;
+
+ if (((const struct toc_hash_struct *) h1)->key_mode
+ != ((const struct toc_hash_struct *) h2)->key_mode)
+ return 0;
+
+ return rtx_equal_p (r1, r2);
+}
+
+/* These are the names given by the C++ front end to vtables and
+ vtable-like objects.  Ideally, this logic should not be here;
+ instead, there should be some programmatic way of asking whether
+ an object is a vtable.  */
+
+#define VTABLE_NAME_P(NAME) \
+ (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
+ || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
+ || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
+ || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
+ || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
+
+#ifdef NO_DOLLAR_IN_LABEL
+/* Return a GGC-allocated character string translating dollar signs in
+ input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
+
+const char *
+rs6000_xcoff_strip_dollar (const char *name)
+{
+ char *strip, *p;
+ int len;
+
+ p = strchr (name, '$');
+
+ if (p == 0 || p == name)
+ return name;
+
+ len = strlen (name);
+ strip = (char *) alloca (len + 1);
+ strcpy (strip, name);
+ p = strchr (strip, '$');
+ while (p)
+ {
+ *p = '_';
+ p = strchr (p + 1, '$');
+ }
+
+ return ggc_alloc_string (strip, len);
+}
+#endif
+
+void
+rs6000_output_symbol_ref (FILE *file, rtx x)
+{
+ /* Currently C++ toc references to vtables can be emitted before it
+ is decided whether the vtable is public or private. If this is
+ the case, then the linker will eventually complain that there is
+ a reference to an unknown section. Thus, for vtables only,
+ we emit the TOC reference against the symbol and not the
+ section.  */
+ const char *name = XSTR (x, 0);
+
+ if (VTABLE_NAME_P (name))
+ {
+ RS6000_OUTPUT_BASENAME (file, name);
+ }
+ else
+ assemble_name (file, name);
+}
+
+/* Output a TOC entry. We derive the entry name from what is being
+ written. */
+
+void
+output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
+{
+ char buf[256];
+ const char *name = buf;
+ rtx base = x;
+ HOST_WIDE_INT offset = 0;
+
+ gcc_assert (!TARGET_NO_TOC);
+
+ /* When the linker won't eliminate them, don't output duplicate
+ TOC entries (this happens on AIX if there is any kind of TOC,
+ and on SVR4 under -fPIC or -mrelocatable). Don't do this for
+ CODE_LABELs. */
+ if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
+ {
+ struct toc_hash_struct *h;
+ void * * found;
+
+ /* Create toc_hash_table. This can't be done at OVERRIDE_OPTIONS
+ time because GGC is not initialized at that point. */
+ if (toc_hash_table == NULL)
+ toc_hash_table = htab_create_ggc (1021, toc_hash_function,
+ toc_hash_eq, NULL);
+
+ h = GGC_NEW (struct toc_hash_struct);
+ h->key = x;
+ h->key_mode = mode;
+ h->labelno = labelno;
+
+ found = htab_find_slot (toc_hash_table, h, 1);
+ if (*found == NULL)
+ *found = h;
+ else /* This is indeed a duplicate.
+ Set this label equal to that label. */
+ {
+ fputs ("\t.set ", file);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
+ fprintf (file, "%d,", labelno);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
+ fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
+ found)->labelno));
+ return;
+ }
+ }
+
+ /* If we're going to put a double constant in the TOC, make sure it's
+ aligned properly when strict alignment is on. */
+ if (GET_CODE (x) == CONST_DOUBLE
+ && STRICT_ALIGNMENT
+ && GET_MODE_BITSIZE (mode) >= 64
+ && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
+ ASM_OUTPUT_ALIGN (file, 3);
+ }
+
+ (*targetm.asm_out.internal_label) (file, "LC", labelno);
+
+ /* Handle FP constants specially. Note that if we have a minimal
+ TOC, things we put here aren't actually in the TOC, so we can allow
+ FP constants. */
+ if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
+ {
+ REAL_VALUE_TYPE rv;
+ long k[4];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff,
+ k[2] & 0xffffffff, k[3] & 0xffffffff);
+ return;
+ }
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
+ {
+ REAL_VALUE_TYPE rv;
+ long k[2];
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
+ else
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc FD_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ fprintf (file, "0x%lx%08lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc FD_%lx_%lx[TC],",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ fprintf (file, "0x%lx,0x%lx\n",
+ k[0] & 0xffffffff, k[1] & 0xffffffff);
+ return;
+ }
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
+ {
+ REAL_VALUE_TYPE rv;
+ long l;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
+ REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
+ else
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
+ fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
+ fprintf (file, "0x%lx\n", l & 0xffffffff);
+ return;
+ }
+ }
+ else if (GET_MODE (x) == VOIDmode
+ && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
+ {
+ unsigned HOST_WIDE_INT low;
+ HOST_WIDE_INT high;
+
+ if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ low = CONST_DOUBLE_LOW (x);
+ high = CONST_DOUBLE_HIGH (x);
+ }
+ else
+#if HOST_BITS_PER_WIDE_INT == 32
+ {
+ low = INTVAL (x);
+ high = (low & 0x80000000) ? ~0 : 0;
+ }
+#else
+ {
+ low = INTVAL (x) & 0xffffffff;
+ high = (HOST_WIDE_INT) INTVAL (x) >> 32;
+ }
+#endif
+
+ /* TOC entries are always Pmode-sized, but since this
+ is a big-endian machine, smaller integer constants put in
+ the TOC have to be padded.
+ (This is still a win over putting the constants in
+ a separate constant pool, because then we'd have
+ to have both a TOC entry _and_ the actual constant.)
+
+ For a 32-bit target, CONST_INT values are loaded and shifted
+ entirely within `low' and can be stored in one TOC entry. */
+
+ /* It would be easy to make this work, but it doesn't now. */
+ gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
+
+ if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
+ {
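+ /* Left-justify the value within the Pmode-sized TOC word, as the
+ big-endian padding comment above requires.  */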
+#if HOST_BITS_PER_WIDE_INT == 32
+ lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
+ POINTER_SIZE, &low, &high, 0);
+#else
+ low |= high << 32;
+ low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
+ high = (HOST_WIDE_INT) low >> 32;
+ low &= 0xffffffff;
+#endif
+ }
+
+ if (TARGET_64BIT)
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs (DOUBLE_INT_ASM_OP, file);
+ else
+ fprintf (file, "\t.tc ID_%lx_%lx[TC],",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ fprintf (file, "0x%lx%08lx\n",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ return;
+ }
+ else
+ {
+ if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc ID_%lx_%lx[TC],",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ fprintf (file, "0x%lx,0x%lx\n",
+ (long) high & 0xffffffff, (long) low & 0xffffffff);
+ }
+ else
+ {
+ if (TARGET_MINIMAL_TOC)
+ fputs ("\t.long ", file);
+ else
+ fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
+ fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
+ }
+ return;
+ }
+ }
+
+ if (GET_CODE (x) == CONST)
+ {
+ gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);
+
+ base = XEXP (XEXP (x, 0), 0);
+ offset = INTVAL (XEXP (XEXP (x, 0), 1));
+ }
+
+ switch (GET_CODE (base))
+ {
+ case SYMBOL_REF:
+ name = XSTR (base, 0);
+ break;
+
+ case LABEL_REF:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L",
+ CODE_LABEL_NUMBER (XEXP (base, 0)));
+ break;
+
+ case CODE_LABEL:
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (TARGET_MINIMAL_TOC)
+ fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
+ else
+ {
+ fputs ("\t.tc ", file);
+ RS6000_OUTPUT_BASENAME (file, name);
+
+ if (offset < 0)
+ fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
+ else if (offset)
+ fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
+
+ fputs ("[TC],", file);
+ }
+
+  /* Currently C++ TOC references to vtables can be emitted before it
+ is decided whether the vtable is public or private. If this is
+ the case, then the linker will eventually complain that there is
+ a TOC reference to an unknown section. Thus, for vtables only,
+ we emit the TOC reference to reference the symbol and not the
+ section. */
+ if (VTABLE_NAME_P (name))
+ {
+ RS6000_OUTPUT_BASENAME (file, name);
+ if (offset < 0)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
+ else if (offset > 0)
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
+ }
+ else
+ output_addr_const (file, x);
+ putc ('\n', file);
+}
+
+/* Output an assembler pseudo-op to write an ASCII string of N characters
+ starting at P to FILE.
+
+ On the RS/6000, we have to do this using the .byte operation and
+ write out special characters outside the quoted string.
+ Also, the assembler is broken; very long strings are truncated,
+ so we must artificially break them up early. */
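+/* For example, output_ascii (file, "ab\n", 3) emits
+	.byte "ab"
+	.byte 10
+   doubling any '"' characters inside the quoted form.  */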
+
+void
+output_ascii (FILE *file, const char *p, int n)
+{
+ char c;
+ int i, count_string;
+ const char *for_string = "\t.byte \"";
+ const char *for_decimal = "\t.byte ";
+ const char *to_close = NULL;
+
+ count_string = 0;
+ for (i = 0; i < n; i++)
+ {
+ c = *p++;
+ if (c >= ' ' && c < 0177)
+ {
+ if (for_string)
+ fputs (for_string, file);
+ putc (c, file);
+
+ /* Write two quotes to get one. */
+ if (c == '"')
+ {
+ putc (c, file);
+ ++count_string;
+ }
+
+ for_string = NULL;
+ for_decimal = "\"\n\t.byte ";
+ to_close = "\"\n";
+ ++count_string;
+
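+	  /* The assembler truncates very long strings, so close the
+	     quoted string well before that can happen.  */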
+ if (count_string >= 512)
+ {
+ fputs (to_close, file);
+
+ for_string = "\t.byte \"";
+ for_decimal = "\t.byte ";
+ to_close = NULL;
+ count_string = 0;
+ }
+ }
+ else
+ {
+ if (for_decimal)
+ fputs (for_decimal, file);
+ fprintf (file, "%d", c);
+
+ for_string = "\n\t.byte \"";
+ for_decimal = ", ";
+ to_close = "\n";
+ count_string = 0;
+ }
+ }
+
+ /* Now close the string if we have written one. Then end the line. */
+ if (to_close)
+ fputs (to_close, file);
+}
+
+/* Generate a unique section name for FILENAME for a section type
+ represented by SECTION_DESC. Output goes into BUF.
+
+ SECTION_DESC can be any string, as long as it is different for each
+ possible section type.
+
+ We name the section in the same manner as xlc. The name begins with an
+ underscore followed by the filename (after stripping any leading directory
+ names) with the last period replaced by the string SECTION_DESC. If
+ FILENAME does not contain a period, SECTION_DESC is appended to the end of
+ the name. */
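+/* For example, FILENAME "dir/foo.c" with a hypothetical SECTION_DESC
+   of "SD_" produces "_fooSD_"; non-alphanumeric characters in the
+   filename are dropped.  */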
+
+void
+rs6000_gen_section_name (char **buf, const char *filename,
+ const char *section_desc)
+{
+ const char *q, *after_last_slash, *last_period = 0;
+ char *p;
+ int len;
+
+ after_last_slash = filename;
+ for (q = filename; *q; q++)
+ {
+ if (*q == '/')
+ after_last_slash = q + 1;
+ else if (*q == '.')
+ last_period = q;
+ }
+
+ len = strlen (after_last_slash) + strlen (section_desc) + 2;
+ *buf = (char *) xmalloc (len);
+
+ p = *buf;
+ *p++ = '_';
+
+ for (q = after_last_slash; *q; q++)
+ {
+ if (q == last_period)
+ {
+ strcpy (p, section_desc);
+ p += strlen (section_desc);
+ break;
+ }
+
+ else if (ISALNUM (*q))
+ *p++ = *q;
+ }
+
+ if (last_period == 0)
+ strcpy (p, section_desc);
+ else
+ *p = '\0';
+}
+
+/* Emit profile function. */
+
+void
+output_profile_hook (int labelno ATTRIBUTE_UNUSED)
+{
+ /* Non-standard profiling for kernels, which just saves LR then calls
+ _mcount without worrying about arg saves. The idea is to change
+ the function prologue as little as possible as it isn't easy to
+ account for arg save/restore code added just for _mcount. */
+ if (TARGET_PROFILE_KERNEL)
+ return;
+
+ if (DEFAULT_ABI == ABI_AIX)
+ {
+#ifndef NO_PROFILE_COUNTERS
+# define NO_PROFILE_COUNTERS 0
+#endif
+ if (NO_PROFILE_COUNTERS)
+ emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 0);
+ else
+ {
+ char buf[30];
+ const char *label_name;
+ rtx fun;
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+ label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
+ fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
+
+ emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 1,
+ fun, Pmode);
+ }
+ }
+ else if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ const char *mcount_name = RS6000_MCOUNT;
+ int caller_addr_regno = LR_REGNO;
+
+ /* Be conservative and always set this, at least for now. */
+ crtl->uses_pic_offset_table = 1;
+
+#if TARGET_MACHO
+ /* For PIC code, set up a stub and collect the caller's address
+ from r0, which is where the prologue puts it. */
+ if (MACHOPIC_INDIRECT
+ && crtl->uses_pic_offset_table)
+ caller_addr_regno = 0;
+#endif
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
+ 0, VOIDmode, 1,
+ gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
+ }
+}
+
+/* Write function profiler code. */
+
+void
+output_function_profiler (FILE *file, int labelno)
+{
+ char buf[100];
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_V4:
+ if (!TARGET_32BIT)
+ {
+ warning (0, "no profiling of 64-bit code for this ABI");
+ return;
+ }
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
+ fprintf (file, "\tmflr %s\n", reg_names[0]);
+ if (NO_PROFILE_COUNTERS)
+ {
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ }
+ else if (TARGET_SECURE_PLT && flag_pic)
+ {
+ asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
+ asm_fprintf (file, "\t{cau|addis} %s,%s,",
+ reg_names[12], reg_names[12]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
+ }
+ else if (flag_pic == 1)
+ {
+ fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
+ asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "@got(%s)\n", reg_names[12]);
+ }
+ else if (flag_pic > 1)
+ {
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ /* Now, we need to get the address of the label. */
+ fputs ("\tbcl 20,31,1f\n\t.long ", file);
+ assemble_name (file, buf);
+ fputs ("-.\n1:", file);
+ asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
+ asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
+ reg_names[0], reg_names[11]);
+ asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
+ reg_names[0], reg_names[0], reg_names[11]);
+ }
+ else
+ {
+ asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
+ assemble_name (file, buf);
+ fputs ("@ha\n", file);
+ asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
+ reg_names[0], reg_names[1]);
+ asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
+ assemble_name (file, buf);
+ asm_fprintf (file, "@l(%s)\n", reg_names[12]);
+ }
+
+ /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
+ fprintf (file, "\tbl %s%s\n",
+ RS6000_MCOUNT, flag_pic ? "@plt" : "");
+ break;
+
+ case ABI_AIX:
+ case ABI_DARWIN:
+ if (!TARGET_PROFILE_KERNEL)
+ {
+ /* Don't do anything, done in output_profile_hook (). */
+ }
+ else
+ {
+ gcc_assert (!TARGET_32BIT);
+
+ asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
+ asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
+
+ if (cfun->static_chain_decl != NULL)
+ {
+ asm_fprintf (file, "\tstd %s,24(%s)\n",
+ reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
+ fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
+ asm_fprintf (file, "\tld %s,24(%s)\n",
+ reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
+ }
+ else
+ fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
+ }
+ break;
+ }
+}
+
+
+
+/* The following variable value is the last issued insn. */
+
+static rtx last_scheduled_insn;
+
+/* The following variable helps to balance issuing of load and
+   store instructions: it swings negative as stores are issued and
+   positive as loads are issued in the current cycle (see
+   rs6000_sched_reorder2).  */
+
+static int load_store_pendulum;
+
+/* Power4 load update and store update instructions are cracked into a
+ load or store and an integer insn which are executed in the same cycle.
+ Branches have their own dispatch slot which does not count against the
+ GCC issue rate, but it changes the program flow so there are no other
+ instructions to issue in this cycle. */
+
+static int
+rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
+ int verbose ATTRIBUTE_UNUSED,
+ rtx insn, int more)
+{
+ last_scheduled_insn = insn;
+ if (GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ {
+ cached_can_issue_more = more;
+ return cached_can_issue_more;
+ }
+
+ if (insn_terminates_group_p (insn, current_group))
+ {
+ cached_can_issue_more = 0;
+ return cached_can_issue_more;
+ }
+
+  /* If the insn has no reservation but we got here anyway, do not
+     charge it against the issue rate.  */
+ if (recog_memoized (insn) < 0)
+ return more;
+
+ if (rs6000_sched_groups)
+ {
+ if (is_microcoded_insn (insn))
+ cached_can_issue_more = 0;
+ else if (is_cracked_insn (insn))
+ cached_can_issue_more = more > 2 ? more - 2 : 0;
+ else
+ cached_can_issue_more = more - 1;
+
+ return cached_can_issue_more;
+ }
+
+ if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
+ return 0;
+
+ cached_can_issue_more = more - 1;
+ return cached_can_issue_more;
+}
+
+/* Adjust the cost of a scheduling dependency. Return the new cost of
+ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
+
+static int
+rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+ enum attr_type attr_type;
+
+ if (! recog_memoized (insn))
+ return 0;
+
+ switch (REG_NOTE_KIND (link))
+ {
+ case REG_DEP_TRUE:
+ {
+ /* Data dependency; DEP_INSN writes a register that INSN reads
+ some cycles later. */
+
+ /* Separate a load from a narrower, dependent store. */
+ if (rs6000_sched_groups
+ && GET_CODE (PATTERN (insn)) == SET
+ && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
+ && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
+ && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
+ > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
+ return cost + 14;
+
+ attr_type = get_attr_type (insn);
+
+ switch (attr_type)
+ {
+ case TYPE_JMPREG:
+ /* Tell the first scheduling pass about the latency between
+ a mtctr and bctr (and mtlr and br/blr). The first
+ scheduling pass will not know about this latency since
+ the mtctr instruction, which has the latency associated
+	       with it, will be generated by reload.  */
+ return TARGET_POWER ? 5 : 4;
+ case TYPE_BRANCH:
+ /* Leave some extra cycles between a compare and its
+ dependent branch, to inhibit expensive mispredicts. */
+ if ((rs6000_cpu_attr == CPU_PPC603
+ || rs6000_cpu_attr == CPU_PPC604
+ || rs6000_cpu_attr == CPU_PPC604E
+ || rs6000_cpu_attr == CPU_PPC620
+ || rs6000_cpu_attr == CPU_PPC630
+ || rs6000_cpu_attr == CPU_PPC750
+ || rs6000_cpu_attr == CPU_PPC7400
+ || rs6000_cpu_attr == CPU_PPC7450
+ || rs6000_cpu_attr == CPU_POWER4
+ || rs6000_cpu_attr == CPU_POWER5
+ || rs6000_cpu_attr == CPU_CELL)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0))
+
+ switch (get_attr_type (dep_insn))
+ {
+ case TYPE_CMP:
+ case TYPE_COMPARE:
+ case TYPE_DELAYED_COMPARE:
+ case TYPE_IMUL_COMPARE:
+ case TYPE_LMUL_COMPARE:
+ case TYPE_FPCOMPARE:
+ case TYPE_CR_LOGICAL:
+ case TYPE_DELAYED_CR:
+ return cost + 2;
+ default:
+ break;
+ }
+ break;
+
+ case TYPE_STORE:
+ case TYPE_STORE_U:
+ case TYPE_STORE_UX:
+ case TYPE_FPSTORE:
+ case TYPE_FPSTORE_U:
+ case TYPE_FPSTORE_UX:
+ if ((rs6000_cpu == PROCESSOR_POWER6)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0))
+ {
+
+ if (GET_CODE (PATTERN (insn)) != SET)
+ /* If this happens, we have to extend this to schedule
+ optimally. Return default for now. */
+ return cost;
+
+ /* Adjust the cost for the case where the value written
+ by a fixed point operation is used as the address
+ gen value on a store. */
+ switch (get_attr_type (dep_insn))
+ {
+ case TYPE_LOAD:
+ case TYPE_LOAD_U:
+ case TYPE_LOAD_UX:
+ case TYPE_CNTLZ:
+ {
+ if (! store_data_bypass_p (dep_insn, insn))
+ return 4;
+ break;
+ }
+ case TYPE_LOAD_EXT:
+ case TYPE_LOAD_EXT_U:
+ case TYPE_LOAD_EXT_UX:
+ case TYPE_VAR_SHIFT_ROTATE:
+ case TYPE_VAR_DELAYED_COMPARE:
+ {
+ if (! store_data_bypass_p (dep_insn, insn))
+ return 6;
+ break;
+ }
+ case TYPE_INTEGER:
+ case TYPE_COMPARE:
+ case TYPE_FAST_COMPARE:
+ case TYPE_EXTS:
+ case TYPE_SHIFT:
+ case TYPE_INSERT_WORD:
+ case TYPE_INSERT_DWORD:
+ case TYPE_FPLOAD_U:
+ case TYPE_FPLOAD_UX:
+ case TYPE_STORE_U:
+ case TYPE_STORE_UX:
+ case TYPE_FPSTORE_U:
+ case TYPE_FPSTORE_UX:
+ {
+ if (! store_data_bypass_p (dep_insn, insn))
+ return 3;
+ break;
+ }
+ case TYPE_IMUL:
+ case TYPE_IMUL2:
+ case TYPE_IMUL3:
+ case TYPE_LMUL:
+ case TYPE_IMUL_COMPARE:
+ case TYPE_LMUL_COMPARE:
+ {
+ if (! store_data_bypass_p (dep_insn, insn))
+ return 17;
+ break;
+ }
+ case TYPE_IDIV:
+ {
+ if (! store_data_bypass_p (dep_insn, insn))
+ return 45;
+ break;
+ }
+ case TYPE_LDIV:
+ {
+ if (! store_data_bypass_p (dep_insn, insn))
+ return 57;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ break;
+
+ case TYPE_LOAD:
+ case TYPE_LOAD_U:
+ case TYPE_LOAD_UX:
+ case TYPE_LOAD_EXT:
+ case TYPE_LOAD_EXT_U:
+ case TYPE_LOAD_EXT_UX:
+ if ((rs6000_cpu == PROCESSOR_POWER6)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0))
+ {
+
+ /* Adjust the cost for the case where the value written
+ by a fixed point instruction is used within the address
+	       gen portion of a subsequent load(u)(x).  */
+ switch (get_attr_type (dep_insn))
+ {
+ case TYPE_LOAD:
+ case TYPE_LOAD_U:
+ case TYPE_LOAD_UX:
+ case TYPE_CNTLZ:
+ {
+ if (set_to_load_agen (dep_insn, insn))
+ return 4;
+ break;
+ }
+ case TYPE_LOAD_EXT:
+ case TYPE_LOAD_EXT_U:
+ case TYPE_LOAD_EXT_UX:
+ case TYPE_VAR_SHIFT_ROTATE:
+ case TYPE_VAR_DELAYED_COMPARE:
+ {
+ if (set_to_load_agen (dep_insn, insn))
+ return 6;
+ break;
+ }
+ case TYPE_INTEGER:
+ case TYPE_COMPARE:
+ case TYPE_FAST_COMPARE:
+ case TYPE_EXTS:
+ case TYPE_SHIFT:
+ case TYPE_INSERT_WORD:
+ case TYPE_INSERT_DWORD:
+ case TYPE_FPLOAD_U:
+ case TYPE_FPLOAD_UX:
+ case TYPE_STORE_U:
+ case TYPE_STORE_UX:
+ case TYPE_FPSTORE_U:
+ case TYPE_FPSTORE_UX:
+ {
+ if (set_to_load_agen (dep_insn, insn))
+ return 3;
+ break;
+ }
+ case TYPE_IMUL:
+ case TYPE_IMUL2:
+ case TYPE_IMUL3:
+ case TYPE_LMUL:
+ case TYPE_IMUL_COMPARE:
+ case TYPE_LMUL_COMPARE:
+ {
+ if (set_to_load_agen (dep_insn, insn))
+ return 17;
+ break;
+ }
+ case TYPE_IDIV:
+ {
+ if (set_to_load_agen (dep_insn, insn))
+ return 45;
+ break;
+ }
+ case TYPE_LDIV:
+ {
+ if (set_to_load_agen (dep_insn, insn))
+ return 57;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ break;
+
+ case TYPE_FPLOAD:
+ if ((rs6000_cpu == PROCESSOR_POWER6)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0)
+ && (get_attr_type (dep_insn) == TYPE_MFFGPR))
+ return 2;
+
+ default:
+ break;
+ }
+
+ /* Fall out to return default cost. */
+ }
+ break;
+
+ case REG_DEP_OUTPUT:
+ /* Output dependency; DEP_INSN writes a register that INSN writes some
+ cycles later. */
+ if ((rs6000_cpu == PROCESSOR_POWER6)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0))
+ {
+ attr_type = get_attr_type (insn);
+
+ switch (attr_type)
+ {
+ case TYPE_FP:
+ if (get_attr_type (dep_insn) == TYPE_FP)
+ return 1;
+ break;
+ case TYPE_FPLOAD:
+ if (get_attr_type (dep_insn) == TYPE_MFFGPR)
+ return 2;
+ break;
+ default:
+ break;
+ }
+ }
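+      /* Fall through: other output dependencies are treated like anti
+	 dependencies, with zero cost.  */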
+ case REG_DEP_ANTI:
+ /* Anti dependency; DEP_INSN reads a register that INSN writes some
+ cycles later. */
+ return 0;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ return cost;
+}
+
+/* Return true if INSN is microcoded, false otherwise.  */
+
+static bool
+is_microcoded_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ if (rs6000_cpu_attr == CPU_CELL)
+ return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
+
+ if (rs6000_sched_groups)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_LOAD_EXT_U
+ || type == TYPE_LOAD_EXT_UX
+ || type == TYPE_LOAD_UX
+ || type == TYPE_STORE_UX
+ || type == TYPE_MFCR)
+ return true;
+ }
+
+ return false;
+}
+
+/* The function returns true if INSN is cracked into 2 instructions
+ by the processor (and therefore occupies 2 issue slots). */
+
+static bool
+is_cracked_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ if (rs6000_sched_groups)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_LOAD_U || type == TYPE_STORE_U
+ || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
+ || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
+ || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
+ || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
+ || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
+ || type == TYPE_IDIV || type == TYPE_LDIV
+ || type == TYPE_INSERT_WORD)
+ return true;
+ }
+
+ return false;
+}
+
+/* The function returns true if INSN can be issued only from
+ the branch slot. */
+
+static bool
+is_branch_slot_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ if (rs6000_sched_groups)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_BRANCH || type == TYPE_JMPREG)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
+/* Return true if OUT_INSN sets a value that is used in the address
+   generation computation of IN_INSN.  */
+static bool
+set_to_load_agen (rtx out_insn, rtx in_insn)
+{
+ rtx out_set, in_set;
+
+ /* For performance reasons, only handle the simple case where
+ both loads are a single_set. */
+ out_set = single_set (out_insn);
+ if (out_set)
+ {
+ in_set = single_set (in_insn);
+ if (in_set)
+ return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
+ }
+
+ return false;
+}
+
+/* Return true if the target storage location of INSN1 (a store) is
+   adjacent to the target storage location of INSN2 (also a store).  */
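+/* For example, a 4-byte store to 0(r9) immediately followed in memory
+   by a store to 4(r9) is adjacent; the test relies on the MEM_SIZE of
+   either access.  */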
+
+static bool
+adjacent_mem_locations (rtx insn1, rtx insn2)
+{
+
+ rtx a = get_store_dest (PATTERN (insn1));
+ rtx b = get_store_dest (PATTERN (insn2));
+
+ if ((GET_CODE (XEXP (a, 0)) == REG
+ || (GET_CODE (XEXP (a, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
+ && (GET_CODE (XEXP (b, 0)) == REG
+ || (GET_CODE (XEXP (b, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
+ {
+ HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
+ rtx reg0, reg1;
+
+ if (GET_CODE (XEXP (a, 0)) == PLUS)
+ {
+ reg0 = XEXP (XEXP (a, 0), 0);
+ val0 = INTVAL (XEXP (XEXP (a, 0), 1));
+ }
+ else
+ reg0 = XEXP (a, 0);
+
+ if (GET_CODE (XEXP (b, 0)) == PLUS)
+ {
+ reg1 = XEXP (XEXP (b, 0), 0);
+ val1 = INTVAL (XEXP (XEXP (b, 0), 1));
+ }
+ else
+ reg1 = XEXP (b, 0);
+
+ val_diff = val1 - val0;
+
+ return ((REGNO (reg0) == REGNO (reg1))
+ && ((MEM_SIZE (a) && val_diff == INTVAL (MEM_SIZE (a)))
+ || (MEM_SIZE (b) && val_diff == -INTVAL (MEM_SIZE (b)))));
+ }
+
+ return false;
+}
+
+/* Update the integer scheduling priority INSN_PRIORITY (INSN).
+   Increase the priority to execute INSN earlier, reduce it to execute
+   INSN later.  Do not define this hook if you do not need to adjust
+   the scheduling priorities of insns.  */
+
+static int
+rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
+{
+ /* On machines (like the 750) which have asymmetric integer units,
+ where one integer unit can do multiply and divides and the other
+ can't, reduce the priority of multiply/divide so it is scheduled
+ before other integer operations. */
+
+#if 0
+ if (! INSN_P (insn))
+ return priority;
+
+ if (GET_CODE (PATTERN (insn)) == USE)
+ return priority;
+
+ switch (rs6000_cpu_attr) {
+ case CPU_PPC750:
+ switch (get_attr_type (insn))
+ {
+ default:
+ break;
+
+ case TYPE_IMUL:
+ case TYPE_IDIV:
+ fprintf (stderr, "priority was %#x (%d) before adjustment\n",
+ priority, priority);
+ if (priority >= 0 && priority < 0x01000000)
+ priority >>= 3;
+ break;
+ }
+ }
+#endif
+
+ if (insn_must_be_first_in_group (insn)
+ && reload_completed
+ && current_sched_info->sched_max_insns_priority
+ && rs6000_sched_restricted_insns_priority)
+ {
+
+ /* Prioritize insns that can be dispatched only in the first
+ dispatch slot. */
+ if (rs6000_sched_restricted_insns_priority == 1)
+ /* Attach highest priority to insn. This means that in
+ haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
+ precede 'priority' (critical path) considerations. */
+ return current_sched_info->sched_max_insns_priority;
+ else if (rs6000_sched_restricted_insns_priority == 2)
+ /* Increase priority of insn by a minimal amount. This means that in
+ haifa-sched.c:ready_sort(), only 'priority' (critical path)
+ considerations precede dispatch-slot restriction considerations. */
+ return (priority + 1);
+ }
+
+ if (rs6000_cpu == PROCESSOR_POWER6
+ && ((load_store_pendulum == -2 && is_load_insn (insn))
+ || (load_store_pendulum == 2 && is_store_insn (insn))))
+ /* Attach highest priority to insn if the scheduler has just issued two
+ stores and this instruction is a load, or two loads and this instruction
+ is a store. Power6 wants loads and stores scheduled alternately
+ when possible */
+ return current_sched_info->sched_max_insns_priority;
+
+ return priority;
+}
+
+/* Return true if the instruction is nonpipelined on the Cell. */
+static bool
+is_nonpipeline_insn (rtx insn)
+{
+ enum attr_type type;
+ if (!insn || !INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ type = get_attr_type (insn);
+ if (type == TYPE_IMUL
+ || type == TYPE_IMUL2
+ || type == TYPE_IMUL3
+ || type == TYPE_LMUL
+ || type == TYPE_IDIV
+ || type == TYPE_LDIV
+ || type == TYPE_SDIV
+ || type == TYPE_DDIV
+ || type == TYPE_SSQRT
+ || type == TYPE_DSQRT
+ || type == TYPE_MFCR
+ || type == TYPE_MFCRF
+ || type == TYPE_MFJMPR)
+ {
+ return true;
+ }
+ return false;
+}
+
+
+/* Return how many instructions the machine can issue per cycle. */
+
+static int
+rs6000_issue_rate (void)
+{
+ /* Use issue rate of 1 for first scheduling pass to decrease degradation. */
+ if (!reload_completed)
+ return 1;
+
+  switch (rs6000_cpu_attr)
+    {
+ case CPU_RIOS1: /* ? */
+ case CPU_RS64A:
+ case CPU_PPC601: /* ? */
+ case CPU_PPC7450:
+ return 3;
+ case CPU_PPC440:
+ case CPU_PPC603:
+ case CPU_PPC750:
+ case CPU_PPC7400:
+ case CPU_PPC8540:
+ case CPU_CELL:
+ case CPU_PPCE300C2:
+ case CPU_PPCE300C3:
+ case CPU_PPCE500MC:
+ return 2;
+ case CPU_RIOS2:
+ case CPU_PPC604:
+ case CPU_PPC604E:
+ case CPU_PPC620:
+ case CPU_PPC630:
+ return 4;
+ case CPU_POWER4:
+ case CPU_POWER5:
+ case CPU_POWER6:
+ return 5;
+ default:
+ return 1;
+ }
+}
+
+/* Return how many instructions to look ahead for better insn
+ scheduling. */
+
+static int
+rs6000_use_sched_lookahead (void)
+{
+ if (rs6000_cpu_attr == CPU_PPC8540)
+ return 4;
+ if (rs6000_cpu_attr == CPU_CELL)
+ return (reload_completed ? 8 : 0);
+ return 0;
+}
+
+/* We are choosing an insn from the ready queue.  Return nonzero if
+   INSN can be chosen.  */
+static int
+rs6000_use_sched_lookahead_guard (rtx insn)
+{
+ if (rs6000_cpu_attr != CPU_CELL)
+ return 1;
+
+  gcc_assert (insn != NULL_RTX && INSN_P (insn));
+
+ if (!reload_completed
+ || is_nonpipeline_insn (insn)
+ || is_microcoded_insn (insn))
+ return 0;
+
+ return 1;
+}
+
+/* Determine if PAT refers to memory.  */
+
+static bool
+is_mem_ref (rtx pat)
+{
+ const char * fmt;
+ int i, j;
+ bool ret = false;
+
+ /* stack_tie does not produce any real memory traffic. */
+ if (GET_CODE (pat) == UNSPEC
+ && XINT (pat, 1) == UNSPEC_TIE)
+ return false;
+
+ if (GET_CODE (pat) == MEM)
+ return true;
+
+ /* Recursively process the pattern. */
+ fmt = GET_RTX_FORMAT (GET_CODE (pat));
+
+ for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
+ {
+ if (fmt[i] == 'e')
+ ret |= is_mem_ref (XEXP (pat, i));
+ else if (fmt[i] == 'E')
+ for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
+ ret |= is_mem_ref (XVECEXP (pat, i, j));
+ }
+
+ return ret;
+}
+
+/* Determine if PAT is a PATTERN of a load insn. */
+
+static bool
+is_load_insn1 (rtx pat)
+{
+  if (!pat)
+ return false;
+
+ if (GET_CODE (pat) == SET)
+ return is_mem_ref (SET_SRC (pat));
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ if (is_load_insn1 (XVECEXP (pat, 0, i)))
+ return true;
+ }
+
+ return false;
+}
+
+/* Determine if INSN loads from memory. */
+
+static bool
+is_load_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn))
+ return false;
+
+ if (GET_CODE (insn) == CALL_INSN)
+ return false;
+
+ return is_load_insn1 (PATTERN (insn));
+}
+
+/* Determine if PAT is a PATTERN of a store insn. */
+
+static bool
+is_store_insn1 (rtx pat)
+{
+  if (!pat)
+ return false;
+
+ if (GET_CODE (pat) == SET)
+ return is_mem_ref (SET_DEST (pat));
+
+ if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ if (is_store_insn1 (XVECEXP (pat, 0, i)))
+ return true;
+ }
+
+ return false;
+}
+
+/* Determine if INSN stores to memory. */
+
+static bool
+is_store_insn (rtx insn)
+{
+ if (!insn || !INSN_P (insn))
+ return false;
+
+ return is_store_insn1 (PATTERN (insn));
+}
+
+/* Return the dest of a store insn. */
+
+static rtx
+get_store_dest (rtx pat)
+{
+ gcc_assert (is_store_insn1 (pat));
+
+ if (GET_CODE (pat) == SET)
+ return SET_DEST (pat);
+ else if (GET_CODE (pat) == PARALLEL)
+ {
+ int i;
+
+ for (i = 0; i < XVECLEN (pat, 0); i++)
+ {
+ rtx inner_pat = XVECEXP (pat, 0, i);
+ if (GET_CODE (inner_pat) == SET
+ && is_mem_ref (SET_DEST (inner_pat)))
+ return inner_pat;
+ }
+ }
+ /* We shouldn't get here, because we should have either a simple
+     store insn or a store with update, both of which are covered above.  */
+  gcc_unreachable ();
+}
+
+/* Returns whether the dependence between INSN and NEXT is considered
+ costly by the given target. */
+
+static bool
+rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
+{
+ rtx insn;
+ rtx next;
+
+ /* If the flag is not enabled - no dependence is considered costly;
+ allow all dependent insns in the same group.
+ This is the most aggressive option. */
+ if (rs6000_sched_costly_dep == no_dep_costly)
+ return false;
+
+ /* If the flag is set to 1 - a dependence is always considered costly;
+ do not allow dependent instructions in the same group.
+ This is the most conservative option. */
+ if (rs6000_sched_costly_dep == all_deps_costly)
+ return true;
+
+ insn = DEP_PRO (dep);
+ next = DEP_CON (dep);
+
+ if (rs6000_sched_costly_dep == store_to_load_dep_costly
+ && is_load_insn (next)
+ && is_store_insn (insn))
+ /* Prevent load after store in the same group. */
+ return true;
+
+ if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
+ && is_load_insn (next)
+ && is_store_insn (insn)
+ && DEP_TYPE (dep) == REG_DEP_TRUE)
+ /* Prevent load after store in the same group if it is a true
+ dependence. */
+ return true;
+
+ /* The flag is set to X; dependences with latency >= X are considered costly,
+ and will not be scheduled in the same group. */
+ if (rs6000_sched_costly_dep <= max_dep_latency
+ && ((cost - distance) >= (int)rs6000_sched_costly_dep))
+ return true;
+
+ return false;
+}
+
+/* Return the next insn after INSN that is found before TAIL is reached,
+ skipping any "non-active" insns - insns that will not actually occupy
+ an issue slot. Return NULL_RTX if such an insn is not found. */
+
+static rtx
+get_next_active_insn (rtx insn, rtx tail)
+{
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
+
+ while (1)
+ {
+ insn = NEXT_INSN (insn);
+ if (insn == NULL_RTX || insn == tail)
+ return NULL_RTX;
+
+ if (CALL_P (insn)
+ || JUMP_P (insn)
+ || (NONJUMP_INSN_P (insn)
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER
+ && INSN_CODE (insn) != CODE_FOR_stack_tie))
+ break;
+ }
+ return insn;
+}
+
+/* We are about to begin issuing insns for this clock cycle. */
+
+static int
+rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
+ rtx *ready ATTRIBUTE_UNUSED,
+ int *pn_ready ATTRIBUTE_UNUSED,
+ int clock_var ATTRIBUTE_UNUSED)
+{
+ int n_ready = *pn_ready;
+
+ if (sched_verbose)
+ fprintf (dump, "// rs6000_sched_reorder :\n");
+
+  /* Reorder the ready list, if the last ready insn (the next one to
+     be issued) is a nonpipelined insn.  */
+ if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
+ {
+ if (is_nonpipeline_insn (ready[n_ready - 1])
+ && (recog_memoized (ready[n_ready - 2]) > 0))
+ /* Simply swap first two insns. */
+ {
+ rtx tmp = ready[n_ready - 1];
+ ready[n_ready - 1] = ready[n_ready - 2];
+ ready[n_ready - 2] = tmp;
+ }
+ }
+
+ if (rs6000_cpu == PROCESSOR_POWER6)
+ load_store_pendulum = 0;
+
+ return rs6000_issue_rate ();
+}
+
+/* Like rs6000_sched_reorder, but called after issuing each insn. */
+
+static int
+rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
+ int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
+{
+ if (sched_verbose)
+ fprintf (dump, "// rs6000_sched_reorder2 :\n");
+
+ /* For Power6, we need to handle some special cases to try and keep the
+ store queue from overflowing and triggering expensive flushes.
+
+ This code monitors how load and store instructions are being issued
+ and skews the ready list one way or the other to increase the likelihood
+ that a desired instruction is issued at the proper time.
+
+ A couple of things are done. First, we maintain a "load_store_pendulum"
+ to track the current state of load/store issue.
+
+ - If the pendulum is at zero, then no loads or stores have been
+ issued in the current cycle so we do nothing.
+
+ - If the pendulum is 1, then a single load has been issued in this
+ cycle and we attempt to locate another load in the ready list to
+ issue with it.
+
+ - If the pendulum is -2, then two stores have already been
+ issued in this cycle, so we increase the priority of the first load
+     in the ready list to increase its likelihood of being chosen first
+ in the next cycle.
+
+ - If the pendulum is -1, then a single store has been issued in this
+ cycle and we attempt to locate another store in the ready list to
+ issue with it, preferring a store to an adjacent memory location to
+ facilitate store pairing in the store queue.
+
+ - If the pendulum is 2, then two loads have already been
+ issued in this cycle, so we increase the priority of the first store
+     in the ready list to increase its likelihood of being chosen first
+ in the next cycle.
+
+ - If the pendulum < -2 or > 2, then do nothing.
+
+ Note: This code covers the most common scenarios. There exist non
+ load/store instructions which make use of the LSU and which
+ would need to be accounted for to strictly model the behavior
+ of the machine. Those instructions are currently unaccounted
+ for to help minimize compile time overhead of this code.
+ */
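+  /* For example: after one store has been issued (pendulum -1), issuing
+     a second store drives the pendulum to -2, and the first load found
+     on the ready list is then given a priority boost for the next
+     cycle.  */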
+ if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
+ {
+ int pos;
+ int i;
+ rtx tmp;
+
+ if (is_store_insn (last_scheduled_insn))
+ /* Issuing a store, swing the load_store_pendulum to the left */
+ load_store_pendulum--;
+ else if (is_load_insn (last_scheduled_insn))
+ /* Issuing a load, swing the load_store_pendulum to the right */
+ load_store_pendulum++;
+ else
+ return cached_can_issue_more;
+
+ /* If the pendulum is balanced, or there is only one instruction on
+ the ready list, then all is well, so return. */
+ if ((load_store_pendulum == 0) || (*pn_ready <= 1))
+ return cached_can_issue_more;
+
+ if (load_store_pendulum == 1)
+ {
+ /* A load has been issued in this cycle. Scan the ready list
+ for another load to issue with it */
+          pos = *pn_ready - 1;
+
+ while (pos >= 0)
+ {
+ if (is_load_insn (ready[pos]))
+ {
+ /* Found a load. Move it to the head of the ready list,
+                     and adjust its priority so that it is more likely to
+ stay there */
+ tmp = ready[pos];
+                  for (i = pos; i < *pn_ready - 1; i++)
+ ready[i] = ready[i + 1];
+ ready[*pn_ready-1] = tmp;
+
+ if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
+ INSN_PRIORITY (tmp)++;
+ break;
+ }
+ pos--;
+ }
+ }
+ else if (load_store_pendulum == -2)
+ {
+ /* Two stores have been issued in this cycle. Increase the
+ priority of the first load in the ready list to favor it for
+ issuing in the next cycle. */
+          pos = *pn_ready - 1;
+
+ while (pos >= 0)
+ {
+ if (is_load_insn (ready[pos])
+ && !sel_sched_p ()
+ && INSN_PRIORITY_KNOWN (ready[pos]))
+ {
+ INSN_PRIORITY (ready[pos])++;
+
+ /* Adjust the pendulum to account for the fact that a load
+ was found and increased in priority. This is to prevent
+ increasing the priority of multiple loads */
+ load_store_pendulum--;
+
+ break;
+ }
+ pos--;
+ }
+ }
+ else if (load_store_pendulum == -1)
+ {
+ /* A store has been issued in this cycle. Scan the ready list for
+ another store to issue with it, preferring a store to an adjacent
+ memory location */
+ int first_store_pos = -1;
+
+          pos = *pn_ready - 1;
+
+ while (pos >= 0)
+ {
+ if (is_store_insn (ready[pos]))
+ {
+ /* Maintain the index of the first store found on the
+ list */
+ if (first_store_pos == -1)
+ first_store_pos = pos;
+
+ if (is_store_insn (last_scheduled_insn)
+                      && adjacent_mem_locations (last_scheduled_insn, ready[pos]))
+ {
+ /* Found an adjacent store. Move it to the head of the
+                         ready list, and adjust its priority so that it is
+ more likely to stay there */
+ tmp = ready[pos];
+                      for (i = pos; i < *pn_ready - 1; i++)
+ ready[i] = ready[i + 1];
+ ready[*pn_ready-1] = tmp;
+
+ if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
+ INSN_PRIORITY (tmp)++;
+
+ first_store_pos = -1;
+
+ break;
+                    }
+ }
+ pos--;
+ }
+
+ if (first_store_pos >= 0)
+ {
+ /* An adjacent store wasn't found, but a non-adjacent store was,
+ so move the non-adjacent store to the front of the ready
+ list, and adjust its priority so that it is more likely to
+ stay there. */
+ tmp = ready[first_store_pos];
+              for (i = first_store_pos; i < *pn_ready - 1; i++)
+ ready[i] = ready[i + 1];
+ ready[*pn_ready-1] = tmp;
+ if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
+ INSN_PRIORITY (tmp)++;
+ }
+ }
+ else if (load_store_pendulum == 2)
+ {
+ /* Two loads have been issued in this cycle. Increase the priority
+ of the first store in the ready list to favor it for issuing in
+ the next cycle. */
+          pos = *pn_ready - 1;
+
+ while (pos >= 0)
+ {
+ if (is_store_insn (ready[pos])
+ && !sel_sched_p ()
+ && INSN_PRIORITY_KNOWN (ready[pos]))
+ {
+ INSN_PRIORITY (ready[pos])++;
+
+ /* Adjust the pendulum to account for the fact that a store
+ was found and increased in priority. This is to prevent
+ increasing the priority of multiple stores */
+ load_store_pendulum++;
+
+ break;
+ }
+ pos--;
+ }
+ }
+ }
+
+ return cached_can_issue_more;
+}
+
+/* Return whether the presence of INSN causes a dispatch group termination
+ of group WHICH_GROUP.
+
+ If WHICH_GROUP == current_group, this function will return true if INSN
+   causes the termination of the current group (i.e., the dispatch group
+   to which INSN belongs).  This means that INSN will be the last insn in
+   the group it belongs to.
+
+   If WHICH_GROUP == previous_group, this function will return true if INSN
+   causes the termination of the previous group (i.e., the dispatch group
+   that precedes the group to which INSN belongs).  This means that INSN
+   will be the first insn in the group to which it belongs.  */
+
+static bool
+insn_terminates_group_p (rtx insn, enum group_termination which_group)
+{
+ bool first, last;
+
+ if (! insn)
+ return false;
+
+ first = insn_must_be_first_in_group (insn);
+ last = insn_must_be_last_in_group (insn);
+
+ if (first && last)
+ return true;
+
+ if (which_group == current_group)
+ return last;
+ else if (which_group == previous_group)
+ return first;
+
+ return false;
+}
+
+
+static bool
+insn_must_be_first_in_group (rtx insn)
+{
+ enum attr_type type;
+
+  if (!insn
+ || GET_CODE (insn) == NOTE
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+ switch (rs6000_cpu)
+ {
+ case PROCESSOR_POWER5:
+ if (is_cracked_insn (insn))
+ return true;
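+      /* Fall through.  */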
+ case PROCESSOR_POWER4:
+ if (is_microcoded_insn (insn))
+ return true;
+
+ if (!rs6000_sched_groups)
+ return false;
+
+ type = get_attr_type (insn);
+
+ switch (type)
+ {
+ case TYPE_MFCR:
+ case TYPE_MFCRF:
+ case TYPE_MTCR:
+ case TYPE_DELAYED_CR:
+ case TYPE_CR_LOGICAL:
+ case TYPE_MTJMPR:
+ case TYPE_MFJMPR:
+ case TYPE_IDIV:
+ case TYPE_LDIV:
+ case TYPE_LOAD_L:
+ case TYPE_STORE_C:
+ case TYPE_ISYNC:
+ case TYPE_SYNC:
+ return true;
+ default:
+ break;
+ }
+ break;
+ case PROCESSOR_POWER6:
+ type = get_attr_type (insn);
+
+ switch (type)
+ {
+ case TYPE_INSERT_DWORD:
+ case TYPE_EXTS:
+ case TYPE_CNTLZ:
+ case TYPE_SHIFT:
+ case TYPE_VAR_SHIFT_ROTATE:
+ case TYPE_TRAP:
+ case TYPE_IMUL:
+ case TYPE_IMUL2:
+ case TYPE_IMUL3:
+ case TYPE_LMUL:
+ case TYPE_IDIV:
+ case TYPE_INSERT_WORD:
+ case TYPE_DELAYED_COMPARE:
+ case TYPE_IMUL_COMPARE:
+ case TYPE_LMUL_COMPARE:
+ case TYPE_FPCOMPARE:
+ case TYPE_MFCR:
+ case TYPE_MTCR:
+ case TYPE_MFJMPR:
+ case TYPE_MTJMPR:
+ case TYPE_ISYNC:
+ case TYPE_SYNC:
+ case TYPE_LOAD_L:
+ case TYPE_STORE_C:
+ case TYPE_LOAD_U:
+ case TYPE_LOAD_UX:
+ case TYPE_LOAD_EXT_UX:
+ case TYPE_STORE_U:
+ case TYPE_STORE_UX:
+ case TYPE_FPLOAD_U:
+ case TYPE_FPLOAD_UX:
+ case TYPE_FPSTORE_U:
+ case TYPE_FPSTORE_UX:
+ return true;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool
+insn_must_be_last_in_group (rtx insn)
+{
+ enum attr_type type;
+
+  if (!insn
+ || GET_CODE (insn) == NOTE
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER)
+ return false;
+
+  switch (rs6000_cpu)
+    {
+ case PROCESSOR_POWER4:
+ case PROCESSOR_POWER5:
+ if (is_microcoded_insn (insn))
+ return true;
+
+ if (is_branch_slot_insn (insn))
+ return true;
+
+ break;
+ case PROCESSOR_POWER6:
+ type = get_attr_type (insn);
+
+ switch (type)
+ {
+ case TYPE_EXTS:
+ case TYPE_CNTLZ:
+ case TYPE_SHIFT:
+ case TYPE_VAR_SHIFT_ROTATE:
+ case TYPE_TRAP:
+ case TYPE_IMUL:
+ case TYPE_IMUL2:
+ case TYPE_IMUL3:
+ case TYPE_LMUL:
+ case TYPE_IDIV:
+ case TYPE_DELAYED_COMPARE:
+ case TYPE_IMUL_COMPARE:
+ case TYPE_LMUL_COMPARE:
+ case TYPE_FPCOMPARE:
+ case TYPE_MFCR:
+ case TYPE_MTCR:
+ case TYPE_MFJMPR:
+ case TYPE_MTJMPR:
+ case TYPE_ISYNC:
+ case TYPE_SYNC:
+ case TYPE_LOAD_L:
+ case TYPE_STORE_C:
+ return true;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
+ dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
+
+static bool
+is_costly_group (rtx *group_insns, rtx next_insn)
+{
+ int i;
+ int issue_rate = rs6000_issue_rate ();
+
+ for (i = 0; i < issue_rate; i++)
+ {
+ sd_iterator_def sd_it;
+ dep_t dep;
+ rtx insn = group_insns[i];
+
+ if (!insn)
+ continue;
+
+ FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
+ {
+ rtx next = DEP_CON (dep);
+
+ if (next == next_insn
+ && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/* Utility function for redefine_groups.
+ Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
+ in the same dispatch group. If so, insert nops before NEXT_INSN, in order
+ to keep it "far" (in a separate group) from GROUP_INSNS, following
+ one of the following schemes, depending on the value of the flag
+   -minsert-sched-nops=X:
+ (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
+ in order to force NEXT_INSN into a separate group.
+ (2) X < sched_finish_regroup_exact: insert exactly X nops.
+ GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
+ insertion (has a group just ended, how many vacant issue slots remain in the
+ last group, and how many dispatch groups were encountered so far). */
+
+static int
+force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
+ rtx next_insn, bool *group_end, int can_issue_more,
+ int *group_count)
+{
+ rtx nop;
+ bool force;
+ int issue_rate = rs6000_issue_rate ();
+ bool end = *group_end;
+ int i;
+
+ if (next_insn == NULL_RTX)
+ return can_issue_more;
+
+ if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
+ return can_issue_more;
+
+ force = is_costly_group (group_insns, next_insn);
+ if (!force)
+ return can_issue_more;
+
+ if (sched_verbose > 6)
+ fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
+ *group_count ,can_issue_more);
+
+ if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
+ {
+ if (*group_end)
+ can_issue_more = 0;
+
+ /* Since only a branch can be issued in the last issue_slot, it is
+ sufficient to insert 'can_issue_more - 1' nops if next_insn is not
+ a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
+ in this case the last nop will start a new group and the branch
+ will be forced to the new group. */
+ if (can_issue_more && !is_branch_slot_insn (next_insn))
+ can_issue_more--;
+
+ while (can_issue_more > 0)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
+
+ *group_end = true;
+ return 0;
+ }
+
+ if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
+ {
+ int n_nops = rs6000_sched_insert_nops;
+
+ /* Nops can't be issued from the branch slot, so the effective
+ issue_rate for nops is 'issue_rate - 1'. */
+ if (can_issue_more == 0)
+ can_issue_more = issue_rate;
+ can_issue_more--;
+ if (can_issue_more == 0)
+ {
+ can_issue_more = issue_rate - 1;
+ (*group_count)++;
+ end = true;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+
+ while (n_nops > 0)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ if (can_issue_more == issue_rate - 1) /* new group begins */
+ end = false;
+ can_issue_more--;
+ if (can_issue_more == 0)
+ {
+ can_issue_more = issue_rate - 1;
+ (*group_count)++;
+ end = true;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+ n_nops--;
+ }
+
+ /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
+ can_issue_more++;
+
+ /* Is next_insn going to start a new group? */
+ *group_end
+ = (end
+ || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
+ || (can_issue_more <= 2 && is_cracked_insn (next_insn))
+ || (can_issue_more < issue_rate &&
+ insn_terminates_group_p (next_insn, previous_group)));
+ if (*group_end && end)
+ (*group_count)--;
+
+ if (sched_verbose > 6)
+ fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
+ *group_count, can_issue_more);
+ return can_issue_more;
+ }
+
+ return can_issue_more;
+}
+
+/* This function tries to synch the dispatch groups that the compiler "sees"
+ with the dispatch groups that the processor dispatcher is expected to
+ form in practice. It tries to achieve this synchronization by forcing the
+ estimated processor grouping on the compiler (as opposed to the function
+   'pad_groups' which tries to force the scheduler's grouping on the processor).
+
+ The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
+ examines the (estimated) dispatch groups that will be formed by the processor
+ dispatcher. It marks these group boundaries to reflect the estimated
+ processor grouping, overriding the grouping that the scheduler had marked.
+ Depending on the value of the flag '-minsert-sched-nops' this function can
+ force certain insns into separate groups or force a certain distance between
+ them by inserting nops, for example, if there exists a "costly dependence"
+ between the insns.
+
+ The function estimates the group boundaries that the processor will form as
+ follows: It keeps track of how many vacant issue slots are available after
+ each insn. A subsequent insn will start a new group if one of the following
+ 4 cases applies:
+ - no more vacant issue slots remain in the current dispatch group.
+ - only the last issue slot, which is the branch slot, is vacant, but the next
+ insn is not a branch.
+   - only the last 2 or fewer issue slots, including the branch slot, are vacant,
+ which means that a cracked insn (which occupies two issue slots) can't be
+ issued in this group.
+   - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
+ start a new group. */
+
+static int
+redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
+{
+ rtx insn, next_insn;
+ int issue_rate;
+ int can_issue_more;
+ int slot, i;
+ bool group_end;
+ int group_count = 0;
+ rtx *group_insns;
+
+ /* Initialize. */
+ issue_rate = rs6000_issue_rate ();
+ group_insns = XALLOCAVEC (rtx, issue_rate);
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ can_issue_more = issue_rate;
+ slot = 0;
+ insn = get_next_active_insn (prev_head_insn, tail);
+ group_end = false;
+
+ while (insn != NULL_RTX)
+ {
+ slot = (issue_rate - can_issue_more);
+ group_insns[slot] = insn;
+ can_issue_more =
+ rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
+ if (insn_terminates_group_p (insn, current_group))
+ can_issue_more = 0;
+
+ next_insn = get_next_active_insn (insn, tail);
+ if (next_insn == NULL_RTX)
+ return group_count + 1;
+
+ /* Is next_insn going to start a new group? */
+ group_end
+ = (can_issue_more == 0
+ || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
+ || (can_issue_more <= 2 && is_cracked_insn (next_insn))
+ || (can_issue_more < issue_rate &&
+ insn_terminates_group_p (next_insn, previous_group)));
+
+ can_issue_more = force_new_group (sched_verbose, dump, group_insns,
+ next_insn, &group_end, can_issue_more,
+ &group_count);
+
+ if (group_end)
+ {
+ group_count++;
+ can_issue_more = 0;
+ for (i = 0; i < issue_rate; i++)
+ {
+ group_insns[i] = 0;
+ }
+ }
+
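+      /* The scheduler marks the first insn of each dispatch group with
+	 TImode; update that marking to match the estimated grouping.  */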
+ if (GET_MODE (next_insn) == TImode && can_issue_more)
+ PUT_MODE (next_insn, VOIDmode);
+ else if (!can_issue_more && GET_MODE (next_insn) != TImode)
+ PUT_MODE (next_insn, TImode);
+
+ insn = next_insn;
+ if (can_issue_more == 0)
+ can_issue_more = issue_rate;
+ } /* while */
+
+ return group_count;
+}
+
+/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
+ dispatch group boundaries that the scheduler had marked. Pad with nops
+ any dispatch groups which have vacant issue slots, in order to force the
+ scheduler's grouping on the processor dispatcher. The function
+ returns the number of dispatch groups found. */
+
+static int
+pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
+{
+ rtx insn, next_insn;
+ rtx nop;
+ int issue_rate;
+ int can_issue_more;
+ int group_end;
+ int group_count = 0;
+
+ /* Initialize issue_rate. */
+ issue_rate = rs6000_issue_rate ();
+ can_issue_more = issue_rate;
+
+ insn = get_next_active_insn (prev_head_insn, tail);
+ next_insn = get_next_active_insn (insn, tail);
+
+ while (insn != NULL_RTX)
+ {
+ can_issue_more =
+ rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
+
+ group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
+
+ if (next_insn == NULL_RTX)
+ break;
+
+ if (group_end)
+ {
+ /* If the scheduler had marked group termination at this location
+ (between insn and next_insn), and neither insn nor next_insn will
+ force group termination, pad the group with nops to force group
+ termination. */
+ if (can_issue_more
+ && (rs6000_sched_insert_nops == sched_finish_pad_groups)
+ && !insn_terminates_group_p (insn, current_group)
+ && !insn_terminates_group_p (next_insn, previous_group))
+ {
+ if (!is_branch_slot_insn (next_insn))
+ can_issue_more--;
+
+ while (can_issue_more)
+ {
+ nop = gen_nop ();
+ emit_insn_before (nop, next_insn);
+ can_issue_more--;
+ }
+ }
+
+ can_issue_more = issue_rate;
+ group_count++;
+ }
+
+ insn = next_insn;
+ next_insn = get_next_active_insn (insn, tail);
+ }
+
+ return group_count;
+}
+
+/* We're beginning a new block. Initialize data structures as necessary. */
+
+static void
+rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
+ int sched_verbose ATTRIBUTE_UNUSED,
+ int max_ready ATTRIBUTE_UNUSED)
+{
+ last_scheduled_insn = NULL_RTX;
+ load_store_pendulum = 0;
+}
+
+/* The following function is called at the end of scheduling BB.
+   After reload, it inserts nops to enforce the insn group bundling.  */
+
+static void
+rs6000_sched_finish (FILE *dump, int sched_verbose)
+{
+ int n_groups;
+
+ if (sched_verbose)
+ fprintf (dump, "=== Finishing schedule.\n");
+
+ if (reload_completed && rs6000_sched_groups)
+ {
+      /* Do not run the sched_finish hook when selective scheduling is
+	 enabled.  */
+ if (sel_sched_p ())
+ return;
+
+ if (rs6000_sched_insert_nops == sched_finish_none)
+ return;
+
+ if (rs6000_sched_insert_nops == sched_finish_pad_groups)
+ n_groups = pad_groups (dump, sched_verbose,
+ current_sched_info->prev_head,
+ current_sched_info->next_tail);
+ else
+ n_groups = redefine_groups (dump, sched_verbose,
+ current_sched_info->prev_head,
+ current_sched_info->next_tail);
+
+ if (sched_verbose >= 6)
+ {
+ fprintf (dump, "ngroups = %d\n", n_groups);
+ print_rtl (dump, current_sched_info->prev_head);
+ fprintf (dump, "Done finish_sched\n");
+ }
+ }
+}
+
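+/* Scheduling context, used by the selective scheduler to save and
+   restore the file-scope scheduling state above.  */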
+struct _rs6000_sched_context
+{
+ short cached_can_issue_more;
+ rtx last_scheduled_insn;
+ int load_store_pendulum;
+};
+
+typedef struct _rs6000_sched_context rs6000_sched_context_def;
+typedef rs6000_sched_context_def *rs6000_sched_context_t;
+
+/* Allocate storage for a new scheduling context.  */
+static void *
+rs6000_alloc_sched_context (void)
+{
+ return xmalloc (sizeof (rs6000_sched_context_def));
+}
+
+/* If CLEAN_P is true, initialize _SC with clean data; otherwise
+   initialize it from the global context.  */
+static void
+rs6000_init_sched_context (void *_sc, bool clean_p)
+{
+ rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
+
+ if (clean_p)
+ {
+ sc->cached_can_issue_more = 0;
+ sc->last_scheduled_insn = NULL_RTX;
+ sc->load_store_pendulum = 0;
+ }
+ else
+ {
+ sc->cached_can_issue_more = cached_can_issue_more;
+ sc->last_scheduled_insn = last_scheduled_insn;
+ sc->load_store_pendulum = load_store_pendulum;
+ }
+}
+
+/* Sets the global scheduling context to the one pointed to by _SC. */
+static void
+rs6000_set_sched_context (void *_sc)
+{
+ rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
+
+ gcc_assert (sc != NULL);
+
+ cached_can_issue_more = sc->cached_can_issue_more;
+ last_scheduled_insn = sc->last_scheduled_insn;
+ load_store_pendulum = sc->load_store_pendulum;
+}
+
+/* Free _SC. */
+static void
+rs6000_free_sched_context (void *_sc)
+{
+ gcc_assert (_sc != NULL);
+
+ free (_sc);
+}
+
+
+/* Length in units of the trampoline for entering a nested function. */
+
+int
+rs6000_trampoline_size (void)
+{
+ int ret = 0;
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+ case ABI_AIX:
+ ret = (TARGET_32BIT) ? 12 : 24;
+ break;
+
+ case ABI_DARWIN:
+ case ABI_V4:
+ ret = (TARGET_32BIT) ? 40 : 48;
+ break;
+ }
+
+ return ret;
+}
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+void
+rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
+{
+ int regsize = (TARGET_32BIT) ? 4 : 8;
+ rtx ctx_reg = force_reg (Pmode, cxt);
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ gcc_unreachable ();
+
+/* Macros to shorten the code expansions below. */
+#define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
+#define MEM_PLUS(addr,offset) \
+ gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))
+
+      /* Under AIX, just build the 3-word function descriptor: the code
+	 address, the TOC pointer, and the static chain value.  */
+ case ABI_AIX:
+ {
+ rtx fn_reg = gen_reg_rtx (Pmode);
+ rtx toc_reg = gen_reg_rtx (Pmode);
+ emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
+ emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
+ emit_move_insn (MEM_DEREF (addr), fn_reg);
+ emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
+ emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
+ }
+ break;
+
+ /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
+ case ABI_DARWIN:
+ case ABI_V4:
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
+ FALSE, VOIDmode, 4,
+ addr, Pmode,
+ GEN_INT (rs6000_trampoline_size ()), SImode,
+ fnaddr, Pmode,
+ ctx_reg, Pmode);
+ break;
+ }
+
+ return;
+}
+
+
+/* Table of valid machine attributes. */
+
+const struct attribute_spec rs6000_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
+ { "altivec", 1, 1, false, true, false, rs6000_handle_altivec_attribute },
+ { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
+ { "shortcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
+ { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+ { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
+#ifdef SUBTARGET_ATTRIBUTE_TABLE
+ SUBTARGET_ATTRIBUTE_TABLE,
+#endif
+ { NULL, 0, 0, false, false, false, NULL }
+};
+
+/* Handle the "altivec" attribute. The attribute may have
+ arguments as follows:
+
+ __attribute__((altivec(vector__)))
+ __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
+ __attribute__((altivec(bool__))) (always followed by 'unsigned')
+
+ and may appear more than once (e.g., 'vector bool char') in a
+ given declaration. */
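+/* For example, 'vector unsigned int' reaches this handler as
+   altivec(vector__) applied to 'unsigned int' (SImode), and yields
+   unsigned_V4SI_type_node in the 'v' case below.  */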
+
+static tree
+rs6000_handle_altivec_attribute (tree *node,
+ tree name ATTRIBUTE_UNUSED,
+ tree args,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ tree type = *node, result = NULL_TREE;
+ enum machine_mode mode;
+ int unsigned_p;
+ char altivec_type
+ = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
+ && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
+ ? *IDENTIFIER_POINTER (TREE_VALUE (args))
+ : '?');
+
+ while (POINTER_TYPE_P (type)
+ || TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE
+ || TREE_CODE (type) == ARRAY_TYPE)
+ type = TREE_TYPE (type);
+
+ mode = TYPE_MODE (type);
+
+ /* Check for invalid AltiVec type qualifiers. */
+ if (type == long_unsigned_type_node || type == long_integer_type_node)
+ {
+ if (TARGET_64BIT)
+ error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
+ else if (rs6000_warn_altivec_long)
+ warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
+ }
+ else if (type == long_long_unsigned_type_node
+ || type == long_long_integer_type_node)
+ error ("use of %<long long%> in AltiVec types is invalid");
+ else if (type == double_type_node)
+ error ("use of %<double%> in AltiVec types is invalid");
+ else if (type == long_double_type_node)
+ error ("use of %<long double%> in AltiVec types is invalid");
+ else if (type == boolean_type_node)
+ error ("use of boolean types in AltiVec types is invalid");
+ else if (TREE_CODE (type) == COMPLEX_TYPE)
+ error ("use of %<complex%> in AltiVec types is invalid");
+ else if (DECIMAL_FLOAT_MODE_P (mode))
+ error ("use of decimal floating point types in AltiVec types is invalid");
+
+ switch (altivec_type)
+ {
+ case 'v':
+ unsigned_p = TYPE_UNSIGNED (type);
+ switch (mode)
+ {
+ case SImode:
+ result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
+ break;
+ case HImode:
+ result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
+ break;
+ case QImode:
+ result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
+ break;
+ case SFmode: result = V4SF_type_node; break;
+ /* If the user says 'vector int bool', we may be handed the 'bool'
+ attribute _before_ the 'vector' attribute, and so select the
+ proper type in the 'b' case below. */
+ case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
+ result = type;
+ default: break;
+ }
+ break;
+ case 'b':
+ switch (mode)
+ {
+ case SImode: case V4SImode: result = bool_V4SI_type_node; break;
+ case HImode: case V8HImode: result = bool_V8HI_type_node; break;
+ case QImode: case V16QImode: result = bool_V16QI_type_node;
+ default: break;
+ }
+ break;
+ case 'p':
+ switch (mode)
+ {
+ case V8HImode: result = pixel_V8HI_type_node;
+ default: break;
+ }
+ default: break;
+ }
+
+ /* Propagate qualifiers attached to the element type
+ onto the vector type. */
+ if (result && result != type && TYPE_QUALS (type))
+ result = build_qualified_type (result, TYPE_QUALS (type));
+
+ *no_add_attrs = true; /* No need to hang on to the attribute. */
+
+ if (result)
+ *node = lang_hooks.types.reconstruct_complex_type (*node, result);
+
+ return NULL_TREE;
+}
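+
+/* Illustrative usage (assumed, not from this file): the attribute
+   forms above are normally produced by the AltiVec keywords, so
+
+     typedef __attribute__ ((altivec (vector__))) int v4si;
+
+   is the internal spelling of 'typedef vector int v4si;' and is
+   rewritten by this handler to V4SI_type_node.  'vector pixel'
+   composes pixel__ on top of 'unsigned short', and 'vector bool int'
+   composes bool__ on top of 'unsigned int'.  */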
+
+/* AltiVec defines four built-in scalar types that serve as vector
+ elements; we must teach the compiler how to mangle them. */
+
+static const char *
+rs6000_mangle_type (const_tree type)
+{
+ type = TYPE_MAIN_VARIANT (type);
+
+ if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
+ && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
+ return NULL;
+
+ if (type == bool_char_type_node) return "U6__boolc";
+ if (type == bool_short_type_node) return "U6__bools";
+ if (type == pixel_type_node) return "u7__pixel";
+ if (type == bool_int_type_node) return "U6__booli";
+
+ /* Mangle IBM extended float long double as `g' (__float128) on
+ powerpc*-linux where long-double-64 previously was the default. */
+ if (TYPE_MAIN_VARIANT (type) == long_double_type_node
+ && TARGET_ELF
+ && TARGET_LONG_DOUBLE_128
+ && !TARGET_IEEEQUAD)
+ return "g";
+
+ /* For all other types, use normal C++ mangling. */
+ return NULL;
+}
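+
+/* For instance (illustrative, assuming powerpc-linux with
+   -mlong-double-128 and the IBM extended format): a function
+   'void f (long double)' mangles as _Z1fg under the rule above,
+   rather than the usual _Z1fe used for a 64-bit long double.  */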
+
+/* Handle a "longcall" or "shortcall" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+rs6000_handle_longcall_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_TYPE
+ && TREE_CODE (*node) != FIELD_DECL
+ && TREE_CODE (*node) != TYPE_DECL)
+ {
+ warning (OPT_Wattributes, "%qs attribute only applies to functions",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
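+
+/* Illustrative usage (assumed, not from this file):
+
+     void far_away (void) __attribute__ ((longcall));
+
+   forces calls to far_away to go through a pointer, as produced by
+   rs6000_longcall_ref below; "shortcall" conversely overrides
+   -mlongcall for an individual function.  */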
+
+/* Set longcall attributes on all functions declared when
+ rs6000_default_long_calls is true. */
+static void
+rs6000_set_default_type_attributes (tree type)
+{
+ if (rs6000_default_long_calls
+ && (TREE_CODE (type) == FUNCTION_TYPE
+ || TREE_CODE (type) == METHOD_TYPE))
+ TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
+ NULL_TREE,
+ TYPE_ATTRIBUTES (type));
+
+#if TARGET_MACHO
+ darwin_set_default_type_attributes (type);
+#endif
+}
+
+/* Return a reference suitable for calling a function with the
+ longcall attribute. */
+
+rtx
+rs6000_longcall_ref (rtx call_ref)
+{
+ const char *call_name;
+ tree node;
+
+ if (GET_CODE (call_ref) != SYMBOL_REF)
+ return call_ref;
+
+ /* System V adds '.' to the internal name, so skip any leading dots.  */
+ call_name = XSTR (call_ref, 0);
+ if (*call_name == '.')
+ {
+ while (*call_name == '.')
+ call_name++;
+
+ node = get_identifier (call_name);
+ call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
+ }
+
+ return force_reg (Pmode, call_ref);
+}
+
+#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
+#define TARGET_USE_MS_BITFIELD_LAYOUT 0
+#endif
+
+/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
+ struct attribute_spec.handler. */
+static tree
+rs6000_handle_struct_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
+{
+ tree *type = NULL;
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ type = &TREE_TYPE (*node);
+ }
+ else
+ type = node;
+
+ if (!(type && (TREE_CODE (*type) == RECORD_TYPE
+ || TREE_CODE (*type) == UNION_TYPE)))
+ {
+ warning (OPT_Wattributes, "%qs attribute ignored", IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ else if ((is_attribute_p ("ms_struct", name)
+ && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
+ || ((is_attribute_p ("gcc_struct", name)
+ && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
+ {
+ warning (OPT_Wattributes, "%qs incompatible attribute ignored",
+ IDENTIFIER_POINTER (name));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
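+
+/* Illustrative usage (assumed, not from this file):
+
+     struct __attribute__ ((ms_struct)) S { char c; int i : 8; };
+
+   requests the Microsoft bit-field layout for S, while "gcc_struct"
+   requests the native layout; as checked above, the two attributes
+   cannot be combined on the same type.  */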
+
+static bool
+rs6000_ms_bitfield_layout_p (const_tree record_type)
+{
+ return (TARGET_USE_MS_BITFIELD_LAYOUT
+ && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
+ || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
+}
+
+#ifdef USING_ELFOS_H
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
+
+static void
+rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (DEFAULT_ABI == ABI_AIX
+ && TARGET_MINIMAL_TOC
+ && !TARGET_RELOCATABLE)
+ {
+ if (!toc_initialized)
+ {
+ toc_initialized = 1;
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
+ fprintf (asm_out_file, "\t.tc ");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, "\n");
+
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ }
+ else
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ }
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
+ else
+ {
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ if (!toc_initialized)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
+ fprintf (asm_out_file, " = .+32768\n");
+ toc_initialized = 1;
+ }
+ }
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+rs6000_elf_asm_init_sections (void)
+{
+ toc_section
+ = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
+
+ sdata2_section
+ = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
+ SDATA2_SECTION_ASM_OP);
+}
+
+/* Implement TARGET_SELECT_RTX_SECTION. */
+
+static section *
+rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
+ return toc_section;
+ else
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+/* For a SYMBOL_REF, set generic flags and then perform some
+ target-specific processing.
+
+ When the AIX ABI is requested on a non-AIX system, replace the
+ function name with the real name (with a leading .) rather than the
+ function descriptor name. This saves a lot of overriding code to
+ read the prefixes. */
+
+static void
+rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
+{
+ default_encode_section_info (decl, rtl, first);
+
+ if (first
+ && TREE_CODE (decl) == FUNCTION_DECL
+ && !TARGET_AIX
+ && DEFAULT_ABI == ABI_AIX)
+ {
+ rtx sym_ref = XEXP (rtl, 0);
+ size_t len = strlen (XSTR (sym_ref, 0));
+ char *str = XALLOCAVEC (char, len + 2);
+ str[0] = '.';
+ memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
+ XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
+ }
+}
+
+static inline bool
+compare_section_name (const char *section, const char *templ)
+{
+ int len;
+
+ len = strlen (templ);
+ return (strncmp (section, templ, len) == 0
+ && (section[len] == 0 || section[len] == '.'));
+}
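+
+/* For example, compare_section_name (".sdata.foo", ".sdata") and
+   compare_section_name (".sdata", ".sdata") both return true, while
+   compare_section_name (".sdata2", ".sdata") returns false because
+   the character after the matched prefix is neither NUL nor '.'.  */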
+
+bool
+rs6000_elf_in_small_data_p (const_tree decl)
+{
+ if (rs6000_sdata == SDATA_NONE)
+ return false;
+
+ /* We want to merge strings, so we never consider them small data. */
+ if (TREE_CODE (decl) == STRING_CST)
+ return false;
+
+ /* Functions are never in the small data area. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ return false;
+
+ if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
+ if (compare_section_name (section, ".sdata")
+ || compare_section_name (section, ".sdata2")
+ || compare_section_name (section, ".gnu.linkonce.s")
+ || compare_section_name (section, ".sbss")
+ || compare_section_name (section, ".sbss2")
+ || compare_section_name (section, ".gnu.linkonce.sb")
+ || strcmp (section, ".PPC.EMB.sdata0") == 0
+ || strcmp (section, ".PPC.EMB.sbss0") == 0)
+ return true;
+ }
+ else
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
+
+ if (size > 0
+ && (unsigned HOST_WIDE_INT) size <= g_switch_value
+ /* If it's not public, and we're not going to reference it there,
+ there's no need to put it in the small data section. */
+ && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
+ return true;
+ }
+
+ return false;
+}
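+
+/* For example (illustrative), with small data enabled and -G 8
+   (g_switch_value == 8), a public global 'int counter;' is 4 bytes,
+   passes the size test above and is placed in the small data area,
+   while a 16-byte array is not.  */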
+
+#endif /* USING_ELFOS_H */
+
+/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
+
+static bool
+rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
+{
+ return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
+}
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG.
+
+ r0 is special and we must not select it as an address
+ register by this routine since our caller will try to
+ increment the returned register via an "la" instruction. */
+
+rtx
+find_addr_reg (rtx addr)
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) != 0)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG
+ && REGNO (XEXP (addr, 1)) != 0)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ gcc_unreachable ();
+ }
+ gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
+ return addr;
+}
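+
+/* For example (illustrative), given ADDR of the form
+   (plus (reg 9) (const_int 16)) this returns (reg 9), and given
+   (plus (reg 0) (reg 5)) it returns (reg 5), since r0 cannot serve
+   as the base register of the "la" increment.  */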
+
+void
+rs6000_fatal_bad_address (rtx op)
+{
+ fatal_insn ("bad address", op);
+}
+
+#if TARGET_MACHO
+
+static tree branch_island_list = 0;
+
+/* Remember to generate a branch island for far calls to the given
+ function. */
+
+static void
+add_compiler_branch_island (tree label_name, tree function_name,
+ int line_number)
+{
+ tree branch_island = build_tree_list (function_name, label_name);
+ TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
+ TREE_CHAIN (branch_island) = branch_island_list;
+ branch_island_list = branch_island;
+}
+
+#define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND) TREE_VALUE (BRANCH_ISLAND)
+#define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND) TREE_PURPOSE (BRANCH_ISLAND)
+#define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND) \
+ TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
+
+/* Generate far-jump branch islands for everything on the
+ branch_island_list. Invoked immediately after the last instruction
+ of the epilogue has been emitted; the branch-islands must be
+ appended to, and contiguous with, the function body. Mach-O stubs
+ are generated in machopic_output_stub(). */
+
+static void
+macho_branch_islands (void)
+{
+ char tmp_buf[512];
+ tree branch_island;
+
+ for (branch_island = branch_island_list;
+ branch_island;
+ branch_island = TREE_CHAIN (branch_island))
+ {
+ const char *label =
+ IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
+ const char *name =
+ IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
+ char name_buf[512];
+ /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
+ if (name[0] == '*' || name[0] == '&')
+ strcpy (name_buf, name+1);
+ else
+ {
+ name_buf[0] = '_';
+ strcpy (name_buf+1, name);
+ }
+ strcpy (tmp_buf, "\n");
+ strcat (tmp_buf, label);
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
+#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+ if (flag_pic)
+ {
+ strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
+ strcat (tmp_buf, label);
+ strcat (tmp_buf, "_pic\n");
+ strcat (tmp_buf, label);
+ strcat (tmp_buf, "_pic:\n\tmflr r11\n");
+
+ strcat (tmp_buf, "\taddis r11,r11,ha16(");
+ strcat (tmp_buf, name_buf);
+ strcat (tmp_buf, " - ");
+ strcat (tmp_buf, label);
+ strcat (tmp_buf, "_pic)\n");
+
+ strcat (tmp_buf, "\tmtlr r0\n");
+
+ strcat (tmp_buf, "\taddi r12,r11,lo16(");
+ strcat (tmp_buf, name_buf);
+ strcat (tmp_buf, " - ");
+ strcat (tmp_buf, label);
+ strcat (tmp_buf, "_pic)\n");
+
+ strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
+ }
+ else
+ {
+ strcat (tmp_buf, ":\nlis r12,hi16(");
+ strcat (tmp_buf, name_buf);
+ strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
+ strcat (tmp_buf, name_buf);
+ strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
+ }
+ output_asm_insn (tmp_buf, 0);
+#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+ if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+ dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
+#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+ }
+
+ branch_island_list = 0;
+}
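+
+/* For reference (illustrative), the non-PIC island emitted above for
+   a function foo with island label L42 has the shape:
+
+   L42:
+	lis r12,hi16(_foo)
+	ori r12,r12,lo16(_foo)
+	mtctr r12
+	bctr
+
+   while the PIC variant materializes _foo - L42_pic relative to the
+   address obtained by the bcl 20,31 / mflr r11 sequence.  */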
+
+/* no_previous_def returns nonzero if FUNCTION_NAME is not already on
+ the branch-island list. */
+
+static int
+no_previous_def (tree function_name)
+{
+ tree branch_island;
+ for (branch_island = branch_island_list;
+ branch_island;
+ branch_island = TREE_CHAIN (branch_island))
+ if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
+ return 0;
+ return 1;
+}
+
+/* get_prev_label returns the branch-island label recorded for a
+ previous use of FUNCTION_NAME, or 0 if there is none. */
+
+static tree
+get_prev_label (tree function_name)
+{
+ tree branch_island;
+ for (branch_island = branch_island_list;
+ branch_island;
+ branch_island = TREE_CHAIN (branch_island))
+ if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
+ return BRANCH_ISLAND_LABEL_NAME (branch_island);
+ return 0;
+}
+
+#ifndef DARWIN_LINKER_GENERATES_ISLANDS
+#define DARWIN_LINKER_GENERATES_ISLANDS 0
+#endif
+
+/* KEXTs still need branch islands. */
+#define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
+ || flag_mkernel || flag_apple_kext)
+
+/* INSN is either a function call or a millicode call. It may have an
+ unconditional jump in its delay slot.
+
+ CALL_DEST is the routine we are calling. */
+
+char *
+output_call (rtx insn, rtx *operands, int dest_operand_number,
+ int cookie_operand_number)
+{
+ static char buf[256];
+ if (DARWIN_GENERATE_ISLANDS
+ && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
+ && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
+ {
+ tree labelname;
+ tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
+
+ if (no_previous_def (funname))
+ {
+ rtx label_rtx = gen_label_rtx ();
+ char *label_buf, temp_buf[256];
+ ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
+ CODE_LABEL_NUMBER (label_rtx));
+ label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
+ labelname = get_identifier (label_buf);
+ add_compiler_branch_island (labelname, funname, insn_line (insn));
+ }
+ else
+ labelname = get_prev_label (funname);
+
+ /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
+ instruction will reach 'foo', otherwise link as 'bl L42'".
+ "L42" should be a 'branch island', that will do a far jump to
+ 'foo'. Branch islands are generated in
+ macho_branch_islands(). */
+ sprintf (buf, "jbsr %%z%d,%.246s",
+ dest_operand_number, IDENTIFIER_POINTER (labelname));
+ }
+ else
+ sprintf (buf, "bl %%z%d", dest_operand_number);
+ return buf;
+}
+
+/* Generate PIC and indirect symbol stubs. */
+
+void
+machopic_output_stub (FILE *file, const char *symb, const char *stub)
+{
+ unsigned int length;
+ char *symbol_name, *lazy_ptr_name;
+ char *local_label_0;
+ static int label = 0;
+
+ /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
+ symb = (*targetm.strip_name_encoding) (symb);
+
+ length = strlen (symb);
+ symbol_name = XALLOCAVEC (char, length + 32);
+ GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
+
+ lazy_ptr_name = XALLOCAVEC (char, length + 32);
+ GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
+
+ if (flag_pic == 2)
+ switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
+ else
+ switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
+
+ if (flag_pic == 2)
+ {
+ fprintf (file, "\t.align 5\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ label++;
+ local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
+ sprintf (local_label_0, "\"L%011d$spb\"", label);
+
+ fprintf (file, "\tmflr r0\n");
+ fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
+ fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
+ fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
+ lazy_ptr_name, local_label_0);
+ fprintf (file, "\tmtlr r0\n");
+ fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
+ (TARGET_64BIT ? "ldu" : "lwzu"),
+ lazy_ptr_name, local_label_0);
+ fprintf (file, "\tmtctr r12\n");
+ fprintf (file, "\tbctr\n");
+ }
+ else
+ {
+ fprintf (file, "\t.align 4\n");
+
+ fprintf (file, "%s:\n", stub);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+ fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
+ fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
+ (TARGET_64BIT ? "ldu" : "lwzu"),
+ lazy_ptr_name);
+ fprintf (file, "\tmtctr r12\n");
+ fprintf (file, "\tbctr\n");
+ }
+
+ switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
+ fprintf (file, "%s:\n", lazy_ptr_name);
+ fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+ fprintf (file, "%sdyld_stub_binding_helper\n",
+ (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
+}
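+
+/* For reference (illustrative; the exact label spellings come from the
+   Darwin GEN_SYMBOL_NAME_FOR_SYMBOL and GEN_LAZY_PTR_NAME_FOR_SYMBOL
+   macros), the 32-bit non-PIC stub printed above has the shape:
+
+   L_foo$stub:
+	.indirect_symbol _foo
+	lis r11,ha16(L_foo$lazy_ptr)
+	lwzu r12,lo16(L_foo$lazy_ptr)(r11)
+	mtctr r12
+	bctr
+   L_foo$lazy_ptr:
+	.indirect_symbol _foo
+	.long dyld_stub_binding_helper  */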
+
+/* Legitimize PIC addresses. If the address is already
+ position-independent, we return ORIG. Newly generated
+ position-independent addresses go into a reg. This is REG if
+ nonzero, otherwise we allocate register(s) as necessary. */
+
+#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
+
+rtx
+rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
+ rtx reg)
+{
+ rtx base, offset;
+
+ if (reg == NULL && ! reload_in_progress && ! reload_completed)
+ reg = gen_reg_rtx (Pmode);
+
+ if (GET_CODE (orig) == CONST)
+ {
+ rtx reg_temp;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
+
+ /* Use a different reg for the intermediate value, as
+ it will be marked UNCHANGING. */
+ reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
+ base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
+ Pmode, reg_temp);
+ offset =
+ rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
+ Pmode, reg);
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ if (SMALL_INT (offset))
+ return plus_constant (base, INTVAL (offset));
+ else if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ {
+ rtx mem = force_const_mem (Pmode, orig);
+ return machopic_legitimize_pic_address (mem, Pmode, reg);
+ }
+ }
+ return gen_rtx_PLUS (Pmode, base, offset);
+ }
+
+ /* Fall back on generic machopic code. */
+ return machopic_legitimize_pic_address (orig, mode, reg);
+}
+
+/* Output a .machine directive for the Darwin assembler, and call
+ the generic start_file routine. */
+
+static void
+rs6000_darwin_file_start (void)
+{
+ static const struct
+ {
+ const char *arg;
+ const char *name;
+ int if_set;
+ } mapping[] = {
+ { "ppc64", "ppc64", MASK_64BIT },
+ { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
+ { "power4", "ppc970", 0 },
+ { "G5", "ppc970", 0 },
+ { "7450", "ppc7450", 0 },
+ { "7400", "ppc7400", MASK_ALTIVEC },
+ { "G4", "ppc7400", 0 },
+ { "750", "ppc750", 0 },
+ { "740", "ppc750", 0 },
+ { "G3", "ppc750", 0 },
+ { "604e", "ppc604e", 0 },
+ { "604", "ppc604", 0 },
+ { "603e", "ppc603", 0 },
+ { "603", "ppc603", 0 },
+ { "601", "ppc601", 0 },
+ { NULL, "ppc", 0 } };
+ const char *cpu_id = "";
+ size_t i;
+
+ rs6000_file_start ();
+ darwin_file_start ();
+
+ /* Determine the argument to -mcpu=. Default to G3 if not specified. */
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ if (rs6000_select[i].set_arch_p && rs6000_select[i].string
+ && rs6000_select[i].string[0] != '\0')
+ cpu_id = rs6000_select[i].string;
+
+ /* Look through the mapping array. Pick the first name that either
+ matches the argument, has a bit set in IF_SET that is also set
+ in the target flags, or has a NULL name. */
+
+ i = 0;
+ while (mapping[i].arg != NULL
+ && strcmp (mapping[i].arg, cpu_id) != 0
+ && (mapping[i].if_set & target_flags) == 0)
+ i++;
+
+ fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
+}
+
+#endif /* TARGET_MACHO */
+
+#if TARGET_ELF
+static int
+rs6000_elf_reloc_rw_mask (void)
+{
+ if (flag_pic)
+ return 3;
+ else if (DEFAULT_ABI == ABI_AIX)
+ return 2;
+ else
+ return 0;
+}
+
+/* Record an element in the table of global constructors. SYMBOL is
+ a SYMBOL_REF of the function to be called; PRIORITY is a number
+ between 0 and MAX_INIT_PRIORITY.
+
+ This differs from default_named_section_asm_out_constructor in
+ that we have special handling for -mrelocatable. */
+
+static void
+rs6000_elf_asm_out_constructor (rtx symbol, int priority)
+{
+ const char *section = ".ctors";
+ char buf[16];
+
+ if (priority != DEFAULT_INIT_PRIORITY)
+ {
+ sprintf (buf, ".ctors.%.5u",
+ /* Invert the numbering so the linker puts us in the proper
+ order; constructors are run from right to left, and the
+ linker sorts in increasing order. */
+ MAX_INIT_PRIORITY - priority);
+ section = buf;
+ }
+
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
+ assemble_align (POINTER_SIZE);
+
+ if (TARGET_RELOCATABLE)
+ {
+ fputs ("\t.long (", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputs (")@fixup\n", asm_out_file);
+ }
+ else
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+
+static void
+rs6000_elf_asm_out_destructor (rtx symbol, int priority)
+{
+ const char *section = ".dtors";
+ char buf[16];
+
+ if (priority != DEFAULT_INIT_PRIORITY)
+ {
+ sprintf (buf, ".dtors.%.5u",
+ /* Invert the numbering so the linker puts us in the proper
+ order; constructors are run from right to left, and the
+ linker sorts in increasing order. */
+ MAX_INIT_PRIORITY - priority);
+ section = buf;
+ }
+
+ switch_to_section (get_section (section, SECTION_WRITE, NULL));
+ assemble_align (POINTER_SIZE);
+
+ if (TARGET_RELOCATABLE)
+ {
+ fputs ("\t.long (", asm_out_file);
+ output_addr_const (asm_out_file, symbol);
+ fputs (")@fixup\n", asm_out_file);
+ }
+ else
+ assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+
+void
+rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
+{
+ if (TARGET_64BIT)
+ {
+ fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
+ ASM_OUTPUT_LABEL (file, name);
+ fputs (DOUBLE_INT_ASM_OP, file);
+ rs6000_output_function_entry (file, name);
+ fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
+ if (DOT_SYMBOLS)
+ {
+ fputs ("\t.size\t", file);
+ assemble_name (file, name);
+ fputs (",24\n\t.type\t.", file);
+ assemble_name (file, name);
+ fputs (",@function\n", file);
+ if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
+ {
+ fputs ("\t.globl\t.", file);
+ assemble_name (file, name);
+ putc ('\n', file);
+ }
+ }
+ else
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
+ rs6000_output_function_entry (file, name);
+ fputs (":\n", file);
+ return;
+ }
+
+ if (TARGET_RELOCATABLE
+ && !TARGET_SECURE_PLT
+ && (get_pool_size () != 0 || crtl->profile)
+ && uses_TOC ())
+ {
+ char buf[256];
+
+ (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
+
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
+ fprintf (file, "\t.long ");
+ assemble_name (file, buf);
+ putc ('-', file);
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+ assemble_name (file, buf);
+ putc ('\n', file);
+ }
+
+ ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
+ ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
+
+ if (DEFAULT_ABI == ABI_AIX)
+ {
+ const char *desc_name, *orig_name;
+
+ orig_name = (*targetm.strip_name_encoding) (name);
+ desc_name = orig_name;
+ while (*desc_name == '.')
+ desc_name++;
+
+ if (TREE_PUBLIC (decl))
+ fprintf (file, "\t.globl %s\n", desc_name);
+
+ fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
+ fprintf (file, "%s:\n", desc_name);
+ fprintf (file, "\t.long %s\n", orig_name);
+ fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
+ fputs ("\t.long 0\n", file);
+ fprintf (file, "\t.previous\n");
+ }
+ ASM_OUTPUT_LABEL (file, name);
+}
+
+static void
+rs6000_elf_end_indicate_exec_stack (void)
+{
+ if (TARGET_32BIT)
+ file_end_indicate_exec_stack ();
+}
+#endif
+
+#if TARGET_XCOFF
+static void
+rs6000_xcoff_asm_output_anchor (rtx symbol)
+{
+ char buffer[100];
+
+ sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
+ SYMBOL_REF_BLOCK_OFFSET (symbol));
+ ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
+}
+
+static void
+rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
+{
+ fputs (GLOBAL_ASM_OP, stream);
+ RS6000_OUTPUT_BASENAME (stream, name);
+ putc ('\n', stream);
+}
+
+/* A get_unnamed_section callback, used for read-only sections.
+ DIRECTIVE points to the section string variable. */
+
+static void
+rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
+ *(const char *const *) directive,
+ XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
+}
+
+/* Likewise for read-write sections. */
+
+static void
+rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
+{
+ fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
+ *(const char *const *) directive,
+ XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
+}
+
+/* A get_unnamed_section callback, used for switching to toc_section. */
+
+static void
+rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
+{
+ if (TARGET_MINIMAL_TOC)
+ {
+ /* toc_section is always selected at least once from
+ rs6000_xcoff_file_start, so this is guaranteed to
+ always be defined once and only once in each file. */
+ if (!toc_initialized)
+ {
+ fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
+ fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
+ toc_initialized = 1;
+ }
+ fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
+ (TARGET_32BIT ? "" : ",3"));
+ }
+ else
+ fputs ("\t.toc\n", asm_out_file);
+}
+
+/* Implement TARGET_ASM_INIT_SECTIONS. */
+
+static void
+rs6000_xcoff_asm_init_sections (void)
+{
+ read_only_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_read_only_section_name);
+
+ private_data_section
+ = get_unnamed_section (SECTION_WRITE,
+ rs6000_xcoff_output_readwrite_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ read_only_private_data_section
+ = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
+ &xcoff_private_data_section_name);
+
+ toc_section
+ = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
+
+ readonly_data_section = read_only_data_section;
+ exception_section = data_section;
+}
+
+static int
+rs6000_xcoff_reloc_rw_mask (void)
+{
+ return 3;
+}
+
+static void
+rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
+ tree decl ATTRIBUTE_UNUSED)
+{
+ int smclass;
+ static const char * const suffix[3] = { "PR", "RO", "RW" };
+
+ if (flags & SECTION_CODE)
+ smclass = 0;
+ else if (flags & SECTION_WRITE)
+ smclass = 2;
+ else
+ smclass = 1;
+
+ fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
+ (flags & SECTION_CODE) ? "." : "",
+ name, suffix[smclass], flags & SECTION_ENTSIZE);
+}
+
+static section *
+rs6000_xcoff_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ if (decl_readonly_section (decl, reloc))
+ {
+ if (TREE_PUBLIC (decl))
+ return read_only_data_section;
+ else
+ return read_only_private_data_section;
+ }
+ else
+ {
+ if (TREE_PUBLIC (decl))
+ return data_section;
+ else
+ return private_data_section;
+ }
+}
+
+static void
+rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
+{
+ const char *name;
+
+ /* Use select_section for private and uninitialized data. */
+ if (!TREE_PUBLIC (decl)
+ || DECL_COMMON (decl)
+ || DECL_INITIAL (decl) == NULL_TREE
+ || DECL_INITIAL (decl) == error_mark_node
+ || (flag_zero_initialized_in_bss
+ && initializer_zerop (DECL_INITIAL (decl))))
+ return;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ name = (*targetm.strip_name_encoding) (name);
+ DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
+}
+
+/* Select section for constant in constant pool.
+
+ On RS/6000, all constants are in the private read-only data area.
+ However, if this is being placed in the TOC it must be output as a
+ toc entry. */
+
+static section *
+rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
+{
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
+ return toc_section;
+ else
+ return read_only_private_data_section;
+}
+
+/* Remove any trailing [DS] or the like from the symbol name. */
+
+static const char *
+rs6000_xcoff_strip_name_encoding (const char *name)
+{
+ size_t len;
+ if (*name == '*')
+ name++;
+ len = strlen (name);
+ if (name[len - 1] == ']')
+ return ggc_alloc_string (name, len - 4);
+ else
+ return name;
+}
+
+/* Section attributes. AIX is always PIC. */
+
+static unsigned int
+rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int align;
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ /* Align to at least UNIT size. */
+ if (flags & SECTION_CODE)
+ align = MIN_UNITS_PER_WORD;
+ else
+ /* Increase alignment of large objects if not already stricter. */
+ align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
+ int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
+ ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
+
+ return flags | (exact_log2 (align) & SECTION_ENTSIZE);
+}
+
+/* Output at beginning of assembler file.
+
+ Initialize the section names for the RS/6000 at this point.
+
+ Specify filename, including full path, to assembler.
+
+ We want to go into the TOC section so at least one .toc will be emitted.
+ Also, in order to output proper .bs/.es pairs, we need at least one static
+ [RW] section emitted.
+
+ Finally, declare mcount when profiling to make the assembler happy. */
+
+static void
+rs6000_xcoff_file_start (void)
+{
+ rs6000_gen_section_name (&xcoff_bss_section_name,
+ main_input_filename, ".bss_");
+ rs6000_gen_section_name (&xcoff_private_data_section_name,
+ main_input_filename, ".rw_");
+ rs6000_gen_section_name (&xcoff_read_only_section_name,
+ main_input_filename, ".ro_");
+
+ fputs ("\t.file\t", asm_out_file);
+ output_quoted_string (asm_out_file, main_input_filename);
+ fputc ('\n', asm_out_file);
+ if (write_symbols != NO_DEBUG)
+ switch_to_section (private_data_section);
+ switch_to_section (text_section);
+ if (profile_flag)
+ fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
+ rs6000_file_start ();
+}
+
+/* Output at end of assembler file.
+ On the RS/6000, referencing data should automatically pull in text. */
+
+static void
+rs6000_xcoff_file_end (void)
+{
+ switch_to_section (text_section);
+ fputs ("_section_.text:\n", asm_out_file);
+ switch_to_section (data_section);
+ fputs (TARGET_32BIT
+ ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
+ asm_out_file);
+}
+#endif /* TARGET_XCOFF */
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+rs6000_rtx_costs (rtx x, int code, int outer_code, int *total,
+ bool speed)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ switch (code)
+ {
+ /* On the RS/6000, if it is valid in the insn, it is free. */
+ case CONST_INT:
+ if (((outer_code == SET
+ || outer_code == PLUS
+ || outer_code == MINUS)
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_L (x)))
+ || (outer_code == AND
+ && (satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))
+ || mask_operand (x, mode)
+ || (mode == DImode
+ && mask64_operand (x, DImode))))
+ || ((outer_code == IOR || outer_code == XOR)
+ && (satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))))
+ || outer_code == ASHIFT
+ || outer_code == ASHIFTRT
+ || outer_code == LSHIFTRT
+ || outer_code == ROTATE
+ || outer_code == ROTATERT
+ || outer_code == ZERO_EXTRACT
+ || (outer_code == MULT
+ && satisfies_constraint_I (x))
+ || ((outer_code == DIV || outer_code == UDIV
+ || outer_code == MOD || outer_code == UMOD)
+ && exact_log2 (INTVAL (x)) >= 0)
+ || (outer_code == COMPARE
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_K (x)))
+ || (outer_code == EQ
+ && (satisfies_constraint_I (x)
+ || satisfies_constraint_K (x)
+ || (mode == SImode
+ ? satisfies_constraint_L (x)
+ : satisfies_constraint_J (x))))
+ || (outer_code == GTU
+ && satisfies_constraint_I (x))
+ || (outer_code == LTU
+ && satisfies_constraint_P (x)))
+ {
+ *total = 0;
+ return true;
+ }
+ else if ((outer_code == PLUS
+ && reg_or_add_cint_operand (x, VOIDmode))
+ || (outer_code == MINUS
+ && reg_or_sub_cint_operand (x, VOIDmode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && (INTVAL (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ /* FALLTHRU */
+
+ case CONST_DOUBLE:
+ if (mode == DImode && code == CONST_DOUBLE)
+ {
+ if ((outer_code == IOR || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0
+ && (CONST_DOUBLE_LOW (x)
+ & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
+ {
+ *total = 0;
+ return true;
+ }
+ else if ((outer_code == AND && and64_2_operand (x, DImode))
+ || ((outer_code == SET
+ || outer_code == IOR
+ || outer_code == XOR)
+ && CONST_DOUBLE_HIGH (x) == 0))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ }
+ /* FALLTHRU */
+
+ case CONST:
+ case HIGH:
+ case SYMBOL_REF:
+ case MEM:
+ /* When optimizing for size, MEM should be slightly more expensive
+ than generating address, e.g., (plus (reg) (const)).
+ L1 cache latency is about two instructions. */
+ *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
+ return true;
+
+ case LABEL_REF:
+ *total = 0;
+ return true;
+
+ case PLUS:
+ if (mode == DFmode)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = rs6000_cost->dmul - rs6000_cost->fp;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
+ else if (mode == SFmode)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case MINUS:
+ if (mode == DFmode)
+ {
+ if (GET_CODE (XEXP (x, 0)) == MULT
+ || GET_CODE (XEXP (x, 1)) == MULT)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG)
+ *total = rs6000_cost->dmul - rs6000_cost->fp;
+ else
+ *total = rs6000_cost->dmul;
+ }
+ else
+ *total = rs6000_cost->fp;
+ }
+ else if (mode == SFmode)
+ {
+ /* FNMA accounted in outer NEG. */
+ if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ }
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case MULT:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && satisfies_constraint_I (XEXP (x, 1)))
+ {
+ if (INTVAL (XEXP (x, 1)) >= -256
+ && INTVAL (XEXP (x, 1)) <= 255)
+ *total = rs6000_cost->mulsi_const9;
+ else
+ *total = rs6000_cost->mulsi_const;
+ }
+ /* FMA accounted in outer PLUS/MINUS. */
+ else if ((mode == DFmode || mode == SFmode)
+ && (outer_code == PLUS || outer_code == MINUS))
+ *total = 0;
+ else if (mode == DFmode)
+ *total = rs6000_cost->dmul;
+ else if (mode == SFmode)
+ *total = rs6000_cost->fp;
+ else if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
+ return false;
+
+ case DIV:
+ case MOD:
+ if (FLOAT_MODE_P (mode))
+ {
+ *total = mode == DFmode ? rs6000_cost->ddiv
+ : rs6000_cost->sdiv;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case UDIV:
+ case UMOD:
+ if (GET_CODE (XEXP (x, 1)) == CONST_INT
+ && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
+ {
+ if (code == DIV || code == MOD)
+ /* Shift, addze */
+ *total = COSTS_N_INSNS (2);
+ else
+ /* Shift */
+ *total = COSTS_N_INSNS (1);
+ }
+ else
+ {
+ if (GET_MODE (XEXP (x, 1)) == DImode)
+ *total = rs6000_cost->divdi;
+ else
+ *total = rs6000_cost->divsi;
+ }
+ /* Add in shift and subtract for MOD. */
+ if (code == MOD || code == UMOD)
+ *total += COSTS_N_INSNS (2);
+ return false;
+
+ case CTZ:
+ case FFS:
+ *total = COSTS_N_INSNS (4);
+ return false;
+
+ case POPCOUNT:
+ *total = COSTS_N_INSNS (6);
+ return false;
+
+ case NOT:
+ if (outer_code == AND || outer_code == IOR || outer_code == XOR)
+ {
+ *total = 0;
+ return false;
+ }
+ /* FALLTHRU */
+
+ case AND:
+ case CLZ:
+ case IOR:
+ case XOR:
+ case ZERO_EXTRACT:
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ case ROTATE:
+ case ROTATERT:
+ /* Handle mul_highpart. */
+ if (outer_code == TRUNCATE
+ && GET_CODE (XEXP (x, 0)) == MULT)
+ {
+ if (mode == DImode)
+ *total = rs6000_cost->muldi;
+ else
+ *total = rs6000_cost->mulsi;
+ return true;
+ }
+ else if (outer_code == AND)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case SIGN_EXTEND:
+ case ZERO_EXTEND:
+ if (GET_CODE (XEXP (x, 0)) == MEM)
+ *total = 0;
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case COMPARE:
+ case NEG:
+ case ABS:
+ if (!FLOAT_MODE_P (mode))
+ {
+ *total = COSTS_N_INSNS (1);
+ return false;
+ }
+ /* FALLTHRU */
+
+ case FLOAT:
+ case UNSIGNED_FLOAT:
+ case FIX:
+ case UNSIGNED_FIX:
+ case FLOAT_TRUNCATE:
+ *total = rs6000_cost->fp;
+ return false;
+
+ case FLOAT_EXTEND:
+ if (mode == DFmode)
+ *total = 0;
+ else
+ *total = rs6000_cost->fp;
+ return false;
+
+ case UNSPEC:
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_FRSP:
+ *total = rs6000_cost->fp;
+ return true;
+
+ default:
+ break;
+ }
+ break;
+
+ case CALL:
+ case IF_THEN_ELSE:
+ if (!speed)
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ else if (FLOAT_MODE_P (mode)
+ && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
+ {
+ *total = rs6000_cost->fp;
+ return false;
+ }
+ break;
+
+ case EQ:
+ case GTU:
+ case LTU:
+ /* Carry bit requires mode == Pmode.
+ NEG or PLUS already counted so only add one. */
+ if (mode == Pmode
+ && (outer_code == NEG || outer_code == PLUS))
+ {
+ *total = COSTS_N_INSNS (1);
+ return true;
+ }
+ if (outer_code == SET)
+ {
+ if (XEXP (x, 1) == const0_rtx)
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ else if (mode == Pmode)
+ {
+ *total = COSTS_N_INSNS (3);
+ return false;
+ }
+ }
+ /* FALLTHRU */
+
+ case GT:
+ case LT:
+ case UNORDERED:
+ if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ /* CC COMPARE. */
+ if (outer_code == COMPARE)
+ {
+ *total = 0;
+ return true;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/* A C expression returning the cost of moving data from a register of class
+ CLASS1 to one of CLASS2. */
+
+int
+rs6000_register_move_cost (enum machine_mode mode,
+ enum reg_class from, enum reg_class to)
+{
+ /* Moves from/to GENERAL_REGS. */
+ if (reg_classes_intersect_p (to, GENERAL_REGS)
+ || reg_classes_intersect_p (from, GENERAL_REGS))
+ {
+ if (! reg_classes_intersect_p (to, GENERAL_REGS))
+ from = to;
+
+ if (from == FLOAT_REGS || from == ALTIVEC_REGS)
+ return (rs6000_memory_move_cost (mode, from, 0)
+ + rs6000_memory_move_cost (mode, GENERAL_REGS, 0));
+
+ /* It's more expensive to move CR_REGS than CR0_REGS because of the
+ shift. */
+ else if (from == CR_REGS)
+ return 4;
+
+ /* Power6 has slower LR/CTR moves so make them more expensive than
+ memory in order to bias spills to memory.  */
+ else if (rs6000_cpu == PROCESSOR_POWER6
+ && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
+ return 6 * hard_regno_nregs[0][mode];
+
+ else
+ /* A move will cost one instruction per GPR moved. */
+ return 2 * hard_regno_nregs[0][mode];
+ }
+
+ /* Moving between two similar registers is just one instruction. */
+ else if (reg_classes_intersect_p (to, from))
+ return (mode == TFmode || mode == TDmode) ? 4 : 2;
+
+ /* Everything else has to go through GENERAL_REGS. */
+ else
+ return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
+ + rs6000_register_move_cost (mode, from, GENERAL_REGS));
+}
+
+/* A C expression returning the cost of moving data of MODE from a
+ register to or from memory. */
+
+int
+rs6000_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
+ int in ATTRIBUTE_UNUSED)
+{
+ if (reg_classes_intersect_p (rclass, GENERAL_REGS))
+ return 4 * hard_regno_nregs[0][mode];
+ else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
+ return 4 * hard_regno_nregs[32][mode];
+ else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
+ return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
+ else
+ return 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
+}
+
+/* Return the decl of a target-specific builtin that implements the
+ reciprocal of built-in function FN, or NULL_TREE if none is available. */
+
+static tree
+rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
+ bool sqrt ATTRIBUTE_UNUSED)
+{
+ if (! (TARGET_RECIP && TARGET_PPC_GFXOPT && !optimize_size
+ && flag_finite_math_only && !flag_trapping_math
+ && flag_unsafe_math_optimizations))
+ return NULL_TREE;
+
+ if (md_fn)
+ return NULL_TREE;
+ else
+ switch (fn)
+ {
+ case BUILT_IN_SQRTF:
+ return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
+
+ default:
+ return NULL_TREE;
+ }
+}
+
+/* Newton-Raphson approximation of single-precision floating point divide n/d.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swdivsf (rtx dst, rtx n, rtx d)
+{
+ rtx x0, e0, e1, y1, u0, v0, one;
+
+ x0 = gen_reg_rtx (SFmode);
+ e0 = gen_reg_rtx (SFmode);
+ e1 = gen_reg_rtx (SFmode);
+ y1 = gen_reg_rtx (SFmode);
+ u0 = gen_reg_rtx (SFmode);
+ v0 = gen_reg_rtx (SFmode);
+ one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
+
+ /* x0 = 1./d estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
+ UNSPEC_FRES)));
+ /* e0 = 1. - d * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e0,
+ gen_rtx_MINUS (SFmode, one,
+ gen_rtx_MULT (SFmode, d, x0))));
+ /* e1 = e0 + e0 * e0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e1,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, e0, e0), e0)));
+ /* y1 = x0 + e1 * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, e1, x0), x0)));
+ /* u0 = n * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (SFmode, n, y1)));
+ /* v0 = n - d * u0 */
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (SFmode, n,
+ gen_rtx_MULT (SFmode, d, u0))));
+ /* dst = u0 + v0 * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, dst,
+ gen_rtx_PLUS (SFmode,
+ gen_rtx_MULT (SFmode, v0, y1), u0)));
+}
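+
+/* A note on the algebra above (illustrative, following the code): with
+   e0 = 1 - d*x0 we have exactly 1/d = x0 / (1 - e0)
+   = x0 * (1 + e0 + e0^2 + ...), so y1 = x0 * (1 + e0 + e0^2)
+   approximately cubes the relative error of the initial fres estimate.
+   The tail u0 = n*y1; v0 = n - d*u0; dst = u0 + v0*y1 then corrects
+   the quotient with one residual step.  */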
+
+/* Newton-Raphson approximation of double-precision floating point divide n/d.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swdivdf (rtx dst, rtx n, rtx d)
+{
+ rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;
+
+ x0 = gen_reg_rtx (DFmode);
+ e0 = gen_reg_rtx (DFmode);
+ e1 = gen_reg_rtx (DFmode);
+ e2 = gen_reg_rtx (DFmode);
+ y1 = gen_reg_rtx (DFmode);
+ y2 = gen_reg_rtx (DFmode);
+ y3 = gen_reg_rtx (DFmode);
+ u0 = gen_reg_rtx (DFmode);
+ v0 = gen_reg_rtx (DFmode);
+ one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));
+
+ /* x0 = 1./d estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
+ UNSPEC_FRES)));
+ /* e0 = 1. - d * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e0,
+ gen_rtx_MINUS (DFmode, one,
+ gen_rtx_MULT (DFmode, d, x0))));
+ /* y1 = x0 + e0 * x0 */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e0, x0), x0)));
+ /* e1 = e0 * e0 */
+ emit_insn (gen_rtx_SET (VOIDmode, e1,
+ gen_rtx_MULT (DFmode, e0, e0)));
+ /* y2 = y1 + e1 * y1 */
+ emit_insn (gen_rtx_SET (VOIDmode, y2,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e1, y1), y1)));
+ /* e2 = e1 * e1 */
+ emit_insn (gen_rtx_SET (VOIDmode, e2,
+ gen_rtx_MULT (DFmode, e1, e1)));
+ /* y3 = y2 + e2 * y2 */
+ emit_insn (gen_rtx_SET (VOIDmode, y3,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, e2, y2), y2)));
+ /* u0 = n * y3 */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (DFmode, n, y3)));
+ /* v0 = n - d * u0 */
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (DFmode, n,
+ gen_rtx_MULT (DFmode, d, u0))));
+ /* dst = u0 + v0 * y3 */
+ emit_insn (gen_rtx_SET (VOIDmode, dst,
+ gen_rtx_PLUS (DFmode,
+ gen_rtx_MULT (DFmode, v0, y3), u0)));
+}
+
+
+/* Newton-Raphson approximation of single-precision floating point rsqrt.
+ Assumes no trapping math and finite arguments. */
+
+void
+rs6000_emit_swrsqrtsf (rtx dst, rtx src)
+{
+ rtx x0, x1, x2, y1, u0, u1, u2, v0, v1, v2, t0,
+ half, one, halfthree, c1, cond, label;
+
+ x0 = gen_reg_rtx (SFmode);
+ x1 = gen_reg_rtx (SFmode);
+ x2 = gen_reg_rtx (SFmode);
+ y1 = gen_reg_rtx (SFmode);
+ u0 = gen_reg_rtx (SFmode);
+ u1 = gen_reg_rtx (SFmode);
+ u2 = gen_reg_rtx (SFmode);
+ v0 = gen_reg_rtx (SFmode);
+ v1 = gen_reg_rtx (SFmode);
+ v2 = gen_reg_rtx (SFmode);
+ t0 = gen_reg_rtx (SFmode);
+ halfthree = gen_reg_rtx (SFmode);
+ cond = gen_rtx_REG (CCFPmode, CR1_REGNO);
+ label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
+
+ /* Check for 0.0, 1.0, NaN and Inf by testing whether src * src == src. */
+ emit_insn (gen_rtx_SET (VOIDmode, t0,
+ gen_rtx_MULT (SFmode, src, src)));
+
+ emit_insn (gen_rtx_SET (VOIDmode, cond,
+ gen_rtx_COMPARE (CCFPmode, t0, src)));
+ c1 = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
+ emit_unlikely_jump (c1, label);
+
+ half = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, SFmode));
+ one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));
+
+ /* halfthree = 1.5 = 1.0 + 0.5 */
+ emit_insn (gen_rtx_SET (VOIDmode, halfthree,
+ gen_rtx_PLUS (SFmode, one, half)));
+
+ /* x0 = rsqrt estimate */
+ emit_insn (gen_rtx_SET (VOIDmode, x0,
+ gen_rtx_UNSPEC (SFmode, gen_rtvec (1, src),
+ UNSPEC_RSQRT)));
+
+ /* y1 = 0.5 * src = 1.5 * src - src -> fewer constants */
+ emit_insn (gen_rtx_SET (VOIDmode, y1,
+ gen_rtx_MINUS (SFmode,
+ gen_rtx_MULT (SFmode, src, halfthree),
+ src)));
+
+ /* x1 = x0 * (1.5 - y1 * (x0 * x0)) */
+ emit_insn (gen_rtx_SET (VOIDmode, u0,
+ gen_rtx_MULT (SFmode, x0, x0)));
+ emit_insn (gen_rtx_SET (VOIDmode, v0,
+ gen_rtx_MINUS (SFmode,
+ halfthree,
+ gen_rtx_MULT (SFmode, y1, u0))));
+ emit_insn (gen_rtx_SET (VOIDmode, x1,
+ gen_rtx_MULT (SFmode, x0, v0)));
+
+ /* x2 = x1 * (1.5 - y1 * (x1 * x1)) */
+ emit_insn (gen_rtx_SET (VOIDmode, u1,
+ gen_rtx_MULT (SFmode, x1, x1)));
+ emit_insn (gen_rtx_SET (VOIDmode, v1,
+ gen_rtx_MINUS (SFmode,
+ halfthree,
+ gen_rtx_MULT (SFmode, y1, u1))));
+ emit_insn (gen_rtx_SET (VOIDmode, x2,
+ gen_rtx_MULT (SFmode, x1, v1)));
+
+ /* dst = x2 * (1.5 - y1 * (x2 * x2)) */
+ emit_insn (gen_rtx_SET (VOIDmode, u2,
+ gen_rtx_MULT (SFmode, x2, x2)));
+ emit_insn (gen_rtx_SET (VOIDmode, v2,
+ gen_rtx_MINUS (SFmode,
+ halfthree,
+ gen_rtx_MULT (SFmode, y1, u2))));
+ emit_insn (gen_rtx_SET (VOIDmode, dst,
+ gen_rtx_MULT (SFmode, x2, v2)));
+
+ emit_label (XEXP (label, 0));
+}
+
+/* Emit popcount intrinsic on TARGET_POPCNTB targets. DST is the
+ target, and SRC is the argument operand. */
+
+void
+rs6000_emit_popcount (rtx dst, rtx src)
+{
+ enum machine_mode mode = GET_MODE (dst);
+ rtx tmp1, tmp2;
+
+ tmp1 = gen_reg_rtx (mode);
+
+ if (mode == SImode)
+ {
+ emit_insn (gen_popcntbsi2 (tmp1, src));
+ tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
+ NULL_RTX, 0);
+ tmp2 = force_reg (SImode, tmp2);
+ emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
+ }
+ else
+ {
+ emit_insn (gen_popcntbdi2 (tmp1, src));
+ tmp2 = expand_mult (DImode, tmp1,
+ GEN_INT ((HOST_WIDE_INT)
+ 0x01010101 << 32 | 0x01010101),
+ NULL_RTX, 0);
+ tmp2 = force_reg (DImode, tmp2);
+ emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
+ }
+}
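+
+/* Worked example for SImode (illustrative): for src = 0x10000003,
+   popcntb produces the per-byte counts 0x01000002; multiplying by
+   0x01010101 sums all byte counts into the most significant byte,
+   and the right shift by 24 leaves the final count of 3.  The DImode
+   path uses the 0x0101010101010101 multiplier and a shift of 56.  */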
+
+
+/* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
+ target, and SRC is the argument operand. */
+
+void
+rs6000_emit_parity (rtx dst, rtx src)
+{
+ enum machine_mode mode = GET_MODE (dst);
+ rtx tmp;
+
+ tmp = gen_reg_rtx (mode);
+ if (mode == SImode)
+ {
+ /* Is mult+shift >= shift+xor+shift+xor? */
+ if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
+ {
+ rtx tmp1, tmp2, tmp3, tmp4;
+
+ tmp1 = gen_reg_rtx (SImode);
+ emit_insn (gen_popcntbsi2 (tmp1, src));
+
+ tmp2 = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
+ tmp3 = gen_reg_rtx (SImode);
+ emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
+
+ tmp4 = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
+ emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
+ }
+ else
+ rs6000_emit_popcount (tmp, src);
+ emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
+ }
+ else
+ {
+ /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
+ if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
+ {
+ rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
+
+ tmp1 = gen_reg_rtx (DImode);
+ emit_insn (gen_popcntbdi2 (tmp1, src));
+
+ tmp2 = gen_reg_rtx (DImode);
+ emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
+ tmp3 = gen_reg_rtx (DImode);
+ emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
+
+ tmp4 = gen_reg_rtx (DImode);
+ emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
+ tmp5 = gen_reg_rtx (DImode);
+ emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
+
+ tmp6 = gen_reg_rtx (DImode);
+ emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
+ emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
+ }
+ else
+ rs6000_emit_popcount (tmp, src);
+ emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
+ }
+}
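+
+/* In the shift/xor variant above the popcntb byte counts are folded
+   pairwise: for SImode, tmp1 ^ (tmp1 >> 16) followed by xoring that
+   result with itself shifted right by 8 accumulates the parity of
+   the total bit count in the low byte, which the final AND with 1
+   extracts.  */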
+
+/* Return an RTX representing where to find the function value of a
+ function returning MODE. */
+static rtx
+rs6000_complex_function_value (enum machine_mode mode)
+{
+ unsigned int regno;
+ rtx r1, r2;
+ enum machine_mode inner = GET_MODE_INNER (mode);
+ unsigned int inner_bytes = GET_MODE_SIZE (inner);
+
+ if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ regno = FP_ARG_RETURN;
+ else
+ {
+ regno = GP_ARG_RETURN;
+
+ /* 32-bit is OK since it'll go in r3/r4. */
+ if (TARGET_32BIT && inner_bytes >= 4)
+ return gen_rtx_REG (mode, regno);
+ }
+
+ if (inner_bytes >= 8)
+ return gen_rtx_REG (mode, regno);
+
+ r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
+ const0_rtx);
+ r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
+ GEN_INT (inner_bytes));
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
+}
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0.
+
+ On the SPE, both FPs and vectors are returned in r3.
+
+ On RS/6000 an integer value is in r3 and a floating-point value is in
+ fp1, unless -msoft-float. */
+
+rtx
+rs6000_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
+{
+ enum machine_mode mode;
+ unsigned int regno;
+
+ /* Special handling for structs in darwin64. */
+ if (rs6000_darwin64_abi
+ && TYPE_MODE (valtype) == BLKmode
+ && TREE_CODE (valtype) == RECORD_TYPE
+ && int_size_in_bytes (valtype) > 0)
+ {
+ CUMULATIVE_ARGS valcum;
+ rtx valret;
+
+ valcum.words = 0;
+ valcum.fregno = FP_ARG_MIN_REG;
+ valcum.vregno = ALTIVEC_ARG_MIN_REG;
+ /* Do a trial code generation as if this were going to be passed as
+ an argument; if any part goes in memory, we return NULL. */
+ valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
+ if (valret)
+ return valret;
+ /* Otherwise fall through to standard ABI rules. */
+ }
+
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
+ {
+ /* A long long return value needs to be split under -mpowerpc64 with the 32-bit ABI. */
+ return gen_rtx_PARALLEL (DImode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4))));
+ }
+ if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
+ {
+ return gen_rtx_PARALLEL (DCmode,
+ gen_rtvec (4,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 2),
+ GEN_INT (8)),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 3),
+ GEN_INT (12))));
+ }
+
+ mode = TYPE_MODE (valtype);
+ if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
+ || POINTER_TYPE_P (valtype))
+ mode = TARGET_32BIT ? SImode : DImode;
+
+ if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ /* _Decimal128 must use an even/odd register pair. */
+ regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
+ else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ regno = FP_ARG_RETURN;
+ else if (TREE_CODE (valtype) == COMPLEX_TYPE
+ && targetm.calls.split_complex_arg)
+ return rs6000_complex_function_value (mode);
+ else if (TREE_CODE (valtype) == VECTOR_TYPE
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
+ && ALTIVEC_VECTOR_MODE (mode))
+ regno = ALTIVEC_ARG_RETURN;
+ else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
+ && (mode == DFmode || mode == DCmode
+ || mode == TFmode || mode == TCmode))
+ return spe_build_register_parallel (mode, GP_ARG_RETURN);
+ else
+ regno = GP_ARG_RETURN;
+
+ return gen_rtx_REG (mode, regno);
+}
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+rtx
+rs6000_libcall_value (enum machine_mode mode)
+{
+ unsigned int regno;
+
+ if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
+ {
+ /* A long long return value needs to be split under -mpowerpc64 with the 32-bit ABI. */
+ return gen_rtx_PARALLEL (DImode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode, GP_ARG_RETURN),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (SImode,
+ GP_ARG_RETURN + 1),
+ GEN_INT (4))));
+ }
+
+ if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
+ /* _Decimal128 must use an even/odd register pair. */
+ regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
+ else if (SCALAR_FLOAT_MODE_P (mode)
+ && TARGET_HARD_FLOAT && TARGET_FPRS)
+ regno = FP_ARG_RETURN;
+ else if (ALTIVEC_VECTOR_MODE (mode)
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
+ regno = ALTIVEC_ARG_RETURN;
+ else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
+ return rs6000_complex_function_value (mode);
+ else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
+ && (mode == DFmode || mode == DCmode
+ || mode == TFmode || mode == TCmode))
+ return spe_build_register_parallel (mode, GP_ARG_RETURN);
+ else
+ regno = GP_ARG_RETURN;
+
+ return gen_rtx_REG (mode, regno);
+}
+
+/* Define the offset between two registers, FROM to be eliminated and its
+ replacement TO, at the start of a routine. */
+HOST_WIDE_INT
+rs6000_initial_elimination_offset (int from, int to)
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+ HOST_WIDE_INT offset;
+
+ if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ offset = info->push_p ? 0 : -info->total_size;
+ else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ {
+ offset = info->push_p ? 0 : -info->total_size;
+ if (FRAME_GROWS_DOWNWARD)
+ offset += info->fixed_size + info->vars_size + info->parm_size;
+ }
+ else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ offset = FRAME_GROWS_DOWNWARD
+ ? info->fixed_size + info->vars_size + info->parm_size
+ : 0;
+ else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ offset = info->total_size;
+ else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ offset = info->push_p ? info->total_size : 0;
+ else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
+ offset = 0;
+ else
+ gcc_unreachable ();
+
+ return offset;
+}
+
+/* Return true if TYPE is a SPE or AltiVec opaque type. */
+
+static bool
+rs6000_is_opaque_type (const_tree type)
+{
+ return (type == opaque_V2SI_type_node
+ || type == opaque_V2SF_type_node
+ || type == opaque_V4SI_type_node);
+}
+
+static rtx
+rs6000_dwarf_register_span (rtx reg)
+{
+ unsigned regno;
+
+ if (TARGET_SPE
+ && (SPE_VECTOR_MODE (GET_MODE (reg))
+ || (TARGET_E500_DOUBLE
+ && (GET_MODE (reg) == DFmode || GET_MODE (reg) == DDmode))))
+ ;
+ else
+ return NULL_RTX;
+
+ regno = REGNO (reg);
+
+ /* The duality of the SPE register size wreaks all kinds of havoc.
+ This is a way of distinguishing r0 in 32-bits from r0 in
+ 64-bits. */
+ return
+ gen_rtx_PARALLEL (VOIDmode,
+ BYTES_BIG_ENDIAN
+ ? gen_rtvec (2,
+ gen_rtx_REG (SImode, regno + 1200),
+ gen_rtx_REG (SImode, regno))
+ : gen_rtvec (2,
+ gen_rtx_REG (SImode, regno),
+ gen_rtx_REG (SImode, regno + 1200)));
+}
+
+/* Fill in sizes for SPE register high parts in table used by unwinder. */
+
+static void
+rs6000_init_dwarf_reg_sizes_extra (tree address)
+{
+ if (TARGET_SPE)
+ {
+ int i;
+ enum machine_mode mode = TYPE_MODE (char_type_node);
+ rtx addr = expand_expr (address, NULL_RTX, VOIDmode, 0);
+ rtx mem = gen_rtx_MEM (BLKmode, addr);
+ rtx value = gen_int_mode (4, mode);
+
+ for (i = 1201; i < 1232; i++)
+ {
+ int column = DWARF_REG_TO_UNWIND_COLUMN (i);
+ HOST_WIDE_INT offset
+ = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);
+
+ emit_move_insn (adjust_address (mem, mode, offset), value);
+ }
+ }
+}
+
+/* Map internal gcc register numbers to DWARF2 register numbers. */
+
+unsigned int
+rs6000_dbx_register_number (unsigned int regno)
+{
+ if (regno <= 63 || write_symbols != DWARF2_DEBUG)
+ return regno;
+ if (regno == MQ_REGNO)
+ return 100;
+ if (regno == LR_REGNO)
+ return 108;
+ if (regno == CTR_REGNO)
+ return 109;
+ if (CR_REGNO_P (regno))
+ return regno - CR0_REGNO + 86;
+ if (regno == XER_REGNO)
+ return 101;
+ if (ALTIVEC_REGNO_P (regno))
+ return regno - FIRST_ALTIVEC_REGNO + 1124;
+ if (regno == VRSAVE_REGNO)
+ return 356;
+ if (regno == VSCR_REGNO)
+ return 67;
+ if (regno == SPE_ACC_REGNO)
+ return 99;
+ if (regno == SPEFSCR_REGNO)
+ return 612;
+ /* SPE high reg number. We get these values of regno from
+ rs6000_dwarf_register_span. */
+ gcc_assert (regno >= 1200 && regno < 1232);
+ return regno;
+}
+
+/* Target hook for eh_return_filter_mode.  */
+static enum machine_mode
+rs6000_eh_return_filter_mode (void)
+{
+ return TARGET_32BIT ? SImode : word_mode;
+}
+
+/* Target hook for scalar_mode_supported_p. */
+static bool
+rs6000_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (DECIMAL_FLOAT_MODE_P (mode))
+ return true;
+ else
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Target hook for vector_mode_supported_p. */
+static bool
+rs6000_vector_mode_supported_p (enum machine_mode mode)
+{
+  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
+    return true;
+
+  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
+    return true;
+
+  if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
+    return true;
+
+  return false;
+}
+
+/* Target hook for invalid_arg_for_unprototyped_fn. */
+static const char *
+invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
+{
+ return (!rs6000_darwin64_abi
+ && typelist == 0
+ && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
+ && (funcdecl == NULL_TREE
+ || (TREE_CODE (funcdecl) == FUNCTION_DECL
+ && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
+ ? N_("AltiVec argument passed to unprototyped function")
+ : NULL;
+}
+
+/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
+ setup by using __stack_chk_fail_local hidden function instead of
+ calling __stack_chk_fail directly. Otherwise it is better to call
+ __stack_chk_fail directly. */
+
+static tree
+rs6000_stack_protect_fail (void)
+{
+ return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
+ ? default_hidden_stack_protect_fail ()
+ : default_external_stack_protect_fail ();
+}
+
+void
+rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
+ int num_operands ATTRIBUTE_UNUSED)
+{
+ if (rs6000_warn_cell_microcode)
+ {
+ const char *temp;
+ int insn_code_number = recog_memoized (insn);
+ location_t location = locator_location (INSN_LOCATOR (insn));
+
+ /* Punt on insns we cannot recognize. */
+ if (insn_code_number < 0)
+ return;
+
+ temp = get_insn_template (insn_code_number, insn);
+
+ if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
+ warning_at (location, OPT_mwarn_cell_microcode,
+ "emitting microcode insn %s\t[%s] #%d",
+ temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
+ else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
+ warning_at (location, OPT_mwarn_cell_microcode,
+ "emitting conditional microcode insn %s\t[%s] #%d",
+ temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
+ }
+}
+
+#include "gt-rs6000.h"
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000.h b/gcc-4.4.3/gcc/config/rs6000/rs6000.h
new file mode 100644
index 000000000..5754f0376
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000.h
@@ -0,0 +1,3196 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+ Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Note that some other tm.h files include this one and then override
+ many of the definitions. */
+
+/* Definitions for the object file format. These are set at
+ compile-time. */
+
+#define OBJECT_XCOFF 1
+#define OBJECT_ELF 2
+#define OBJECT_PEF 3
+#define OBJECT_MACHO 4
+
+#define TARGET_ELF (TARGET_OBJECT_FORMAT == OBJECT_ELF)
+#define TARGET_XCOFF (TARGET_OBJECT_FORMAT == OBJECT_XCOFF)
+#define TARGET_MACOS (TARGET_OBJECT_FORMAT == OBJECT_PEF)
+#define TARGET_MACHO (TARGET_OBJECT_FORMAT == OBJECT_MACHO)
+
+#ifndef TARGET_AIX
+#define TARGET_AIX 0
+#endif
+
+/* Control whether function entry points use a "dot" symbol when
+ ABI_AIX. */
+#define DOT_SYMBOLS 1
+
+/* Default string to use for cpu if not specified. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT ((char *)0)
+#endif
+
+/* If configured for PPC405, support PPC405CR Erratum77. */
+#ifdef CONFIG_PPC405CR
+#define PPC405_ERRATUM77 (rs6000_cpu == PROCESSOR_PPC405)
+#else
+#define PPC405_ERRATUM77 0
+#endif
+
+#ifndef TARGET_PAIRED_FLOAT
+#define TARGET_PAIRED_FLOAT 0
+#endif
+
+#ifdef HAVE_AS_POPCNTB
+#define ASM_CPU_POWER5_SPEC "-mpower5"
+#else
+#define ASM_CPU_POWER5_SPEC "-mpower4"
+#endif
+
+#ifdef HAVE_AS_DFP
+#define ASM_CPU_POWER6_SPEC "-mpower6 -maltivec"
+#else
+#define ASM_CPU_POWER6_SPEC "-mpower4 -maltivec"
+#endif
+
+#ifdef HAVE_AS_VSX
+#define ASM_CPU_POWER7_SPEC "-mpower7"
+#else
+#define ASM_CPU_POWER7_SPEC "-mpower4 -maltivec"
+#endif
+
+/* Common ASM definitions used by ASM_SPEC among the various targets
+ for handling -mcpu=xxx switches. */
+#define ASM_CPU_SPEC \
+"%{!mcpu*: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwrx} \
+ %{mpowerpc64*: -mppc64} \
+ %{!mpowerpc64*: %{mpowerpc*: -mppc}} \
+ %{mno-power: %{!mpowerpc*: -mcom}} \
+ %{!mno-power: %{!mpower*: %(asm_default)}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=cell: -mcell} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwrx} \
+%{mcpu=power3: -mppc64} \
+%{mcpu=power4: -mpower4} \
+%{mcpu=power5: %(asm_cpu_power5)} \
+%{mcpu=power5+: %(asm_cpu_power5)} \
+%{mcpu=power6: %(asm_cpu_power6) -maltivec} \
+%{mcpu=power6x: %(asm_cpu_power6) -maltivec} \
+%{mcpu=power7: %(asm_cpu_power7)} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwrx} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=rs64a: -mppc64} \
+%{mcpu=401: -mppc} \
+%{mcpu=403: -m403} \
+%{mcpu=405: -m405} \
+%{mcpu=405fp: -m405} \
+%{mcpu=440: -m440} \
+%{mcpu=440fp: -m440} \
+%{mcpu=464: -m440} \
+%{mcpu=464fp: -m440} \
+%{mcpu=505: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -mppc} \
+%{mcpu=603e: -mppc} \
+%{mcpu=ec603e: -mppc} \
+%{mcpu=604: -mppc} \
+%{mcpu=604e: -mppc} \
+%{mcpu=620: -mppc64} \
+%{mcpu=630: -mppc64} \
+%{mcpu=740: -mppc} \
+%{mcpu=750: -mppc} \
+%{mcpu=G3: -mppc} \
+%{mcpu=7400: -mppc -maltivec} \
+%{mcpu=7450: -mppc -maltivec} \
+%{mcpu=G4: -mppc -maltivec} \
+%{mcpu=801: -mppc} \
+%{mcpu=821: -mppc} \
+%{mcpu=823: -mppc} \
+%{mcpu=860: -mppc} \
+%{mcpu=970: -mpower4 -maltivec} \
+%{mcpu=G5: -mpower4 -maltivec} \
+%{mcpu=8540: -me500} \
+%{mcpu=8548: -me500} \
+%{mcpu=e300c2: -me300} \
+%{mcpu=e300c3: -me300} \
+%{mcpu=e500mc: -me500mc} \
+%{maltivec: -maltivec} \
+-many"
+
+#define CPP_DEFAULT_SPEC ""
+
+#define ASM_DEFAULT_SPEC ""
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GCC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#define SUBTARGET_EXTRA_SPECS
+
+#define EXTRA_SPECS \
+ { "cpp_default", CPP_DEFAULT_SPEC }, \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "asm_default", ASM_DEFAULT_SPEC }, \
+ { "cc1_cpu", CC1_CPU_SPEC }, \
+ { "asm_cpu_power5", ASM_CPU_POWER5_SPEC }, \
+ { "asm_cpu_power6", ASM_CPU_POWER6_SPEC }, \
+ { "asm_cpu_power7", ASM_CPU_POWER7_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+/* -mcpu=native handling only makes sense with the compiler running on
+   a PowerPC chip.  If changing this condition, also change
+   the condition in driver-rs6000.c. */
+#if defined(__powerpc__) || defined(__POWERPC__) || defined(_AIX)
+/* In driver-rs6000.c. */
+extern const char *host_detect_local_cpu (int argc, const char **argv);
+#define EXTRA_SPEC_FUNCTIONS \
+ { "local_cpu_detect", host_detect_local_cpu },
+#define HAVE_LOCAL_CPU_DETECT
+#endif
+
+#ifndef CC1_CPU_SPEC
+#ifdef HAVE_LOCAL_CPU_DETECT
+#define CC1_CPU_SPEC \
+"%{mcpu=native:%<mcpu=native %:local_cpu_detect(cpu)} \
+ %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"
+#else
+#define CC1_CPU_SPEC ""
+#endif
+#endif
+
+/* Architecture type. */
+
+/* Define TARGET_MFCRF to 0 if the target assembler does not support the
+   optional field operand for mfcr.  */
+
+#ifndef HAVE_AS_MFCRF
+#undef TARGET_MFCRF
+#define TARGET_MFCRF 0
+#endif
+
+/* Define TARGET_POPCNTB to 0 if the target assembler does not support the
+   popcount byte instruction.  */
+
+#ifndef HAVE_AS_POPCNTB
+#undef TARGET_POPCNTB
+#define TARGET_POPCNTB 0
+#endif
+
+/* Define TARGET_FPRND to 0 if the target assembler does not support the
+   fp rounding instructions.  */
+
+#ifndef HAVE_AS_FPRND
+#undef TARGET_FPRND
+#define TARGET_FPRND 0
+#endif
+
+/* Define TARGET_CMPB to 0 if the target assembler does not support the
+   cmpb instruction.  */
+
+#ifndef HAVE_AS_CMPB
+#undef TARGET_CMPB
+#define TARGET_CMPB 0
+#endif
+
+/* Define TARGET_MFPGPR to 0 if the target assembler does not support the
+   mffgpr and mftgpr instructions.  */
+
+#ifndef HAVE_AS_MFPGPR
+#undef TARGET_MFPGPR
+#define TARGET_MFPGPR 0
+#endif
+
+/* Define TARGET_DFP to 0 if the target assembler does not support decimal
+   floating point instructions.  */
+#ifndef HAVE_AS_DFP
+#undef TARGET_DFP
+#define TARGET_DFP 0
+#endif
+
+#ifndef TARGET_SECURE_PLT
+#define TARGET_SECURE_PLT 0
+#endif
+
+#define TARGET_32BIT (! TARGET_64BIT)
+
+#ifndef HAVE_AS_TLS
+#define HAVE_AS_TLS 0
+#endif
+
+/* Return 1 for a symbol ref for a thread-local storage symbol. */
+#define RS6000_SYMBOL_REF_TLS_P(RTX) \
+ (GET_CODE (RTX) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (RTX) != 0)
+
+#ifdef IN_LIBGCC2
+/* For libgcc2 we make sure this is a compile-time constant.  */
+#if defined (__64BIT__) || defined (__powerpc64__) || defined (__ppc64__)
+#undef TARGET_POWERPC64
+#define TARGET_POWERPC64 1
+#else
+#undef TARGET_POWERPC64
+#define TARGET_POWERPC64 0
+#endif
+#else
+ /* The option machinery will define this. */
+#endif
+
+#define TARGET_DEFAULT (MASK_POWER | MASK_MULTIPLE | MASK_STRING)
+
+/* Processor type. Order must match cpu attribute in MD file. */
+enum processor_type
+ {
+ PROCESSOR_RIOS1,
+ PROCESSOR_RIOS2,
+ PROCESSOR_RS64A,
+ PROCESSOR_MPCCORE,
+ PROCESSOR_PPC403,
+ PROCESSOR_PPC405,
+ PROCESSOR_PPC440,
+ PROCESSOR_PPC601,
+ PROCESSOR_PPC603,
+ PROCESSOR_PPC604,
+ PROCESSOR_PPC604e,
+ PROCESSOR_PPC620,
+ PROCESSOR_PPC630,
+ PROCESSOR_PPC750,
+ PROCESSOR_PPC7400,
+ PROCESSOR_PPC7450,
+ PROCESSOR_PPC8540,
+ PROCESSOR_PPCE300C2,
+ PROCESSOR_PPCE300C3,
+ PROCESSOR_PPCE500MC,
+ PROCESSOR_POWER4,
+ PROCESSOR_POWER5,
+ PROCESSOR_POWER6,
+ PROCESSOR_CELL
+};
+
+/* FPU operations supported.
+ Each use of TARGET_SINGLE_FLOAT or TARGET_DOUBLE_FLOAT must
+ also test TARGET_HARD_FLOAT. */
+#define TARGET_SINGLE_FLOAT 1
+#define TARGET_DOUBLE_FLOAT 1
+#define TARGET_SINGLE_FPU 0
+#define TARGET_SIMPLE_FPU 0
+#define TARGET_XILINX_FPU 0
+
+extern enum processor_type rs6000_cpu;
+
+/* Recast the processor type to the cpu attribute. */
+#define rs6000_cpu_attr ((enum attr_cpu)rs6000_cpu)
+
+/* Define generic processor types based upon current deployment. */
+#define PROCESSOR_COMMON PROCESSOR_PPC601
+#define PROCESSOR_POWER PROCESSOR_RIOS1
+#define PROCESSOR_POWERPC PROCESSOR_PPC604
+#define PROCESSOR_POWERPC64 PROCESSOR_RS64A
+
+/* Define the default processor. This is overridden by other tm.h files. */
+#define PROCESSOR_DEFAULT PROCESSOR_RIOS1
+#define PROCESSOR_DEFAULT64 PROCESSOR_RS64A
+
+/* FP processor type. */
+enum fpu_type_t
+{
+ FPU_NONE, /* No FPU */
+ FPU_SF_LITE, /* Limited Single Precision FPU */
+ FPU_DF_LITE, /* Limited Double Precision FPU */
+ FPU_SF_FULL, /* Full Single Precision FPU */
+ FPU_DF_FULL /* Full Double Precision FPU */
+};
+
+extern enum fpu_type_t fpu_type;
+
+/* Specify the dialect of assembler to use.  The new mnemonics are dialect
+   one and the old mnemonics are dialect zero.  */
+#define ASSEMBLER_DIALECT (TARGET_NEW_MNEMONICS ? 1 : 0)
+
+/* Types of costly dependences. */
+enum rs6000_dependence_cost
+ {
+ max_dep_latency = 1000,
+ no_dep_costly,
+ all_deps_costly,
+ true_store_to_load_dep_costly,
+ store_to_load_dep_costly
+ };
+
+/* Types of nop insertion schemes in sched target hook sched_finish. */
+enum rs6000_nop_insertion
+ {
+ sched_finish_regroup_exact = 1000,
+ sched_finish_pad_groups,
+ sched_finish_none
+ };
+
+/* Dispatch group termination caused by an insn. */
+enum group_termination
+ {
+ current_group,
+ previous_group
+ };
+
+/* Support for a compile-time default CPU, et cetera. The rules are:
+ --with-cpu is ignored if -mcpu is specified.
+ --with-tune is ignored if -mtune is specified.
+ --with-float is ignored if -mhard-float or -msoft-float are
+ specified. */
+#define OPTION_DEFAULT_SPECS \
+ {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}" }, \
+ {"tune", "%{!mtune=*:-mtune=%(VALUE)}" }, \
+ {"float", "%{!msoft-float:%{!mhard-float:-m%(VALUE)-float}}" }
+
+/* rs6000_select[0] is reserved for the default cpu defined via --with-cpu */
+struct rs6000_cpu_select
+{
+ const char *string;
+ const char *name;
+ int set_tune_p;
+ int set_arch_p;
+};
+
+extern struct rs6000_cpu_select rs6000_select[];
+
+/* Debug support */
+extern const char *rs6000_debug_name; /* Name for -mdebug-xxxx option */
+extern int rs6000_debug_stack; /* debug stack applications */
+extern int rs6000_debug_arg; /* debug argument handling */
+
+#define TARGET_DEBUG_STACK rs6000_debug_stack
+#define TARGET_DEBUG_ARG rs6000_debug_arg
+
+extern const char *rs6000_traceback_name; /* Type of traceback table. */
+
+/* These are separate from target_flags because we've run out of bits
+ there. */
+extern int rs6000_long_double_type_size;
+extern int rs6000_ieeequad;
+extern int rs6000_altivec_abi;
+extern int rs6000_spe_abi;
+extern int rs6000_spe;
+extern int rs6000_isel;
+extern int rs6000_float_gprs;
+extern int rs6000_alignment_flags;
+extern const char *rs6000_sched_insert_nops_str;
+extern enum rs6000_nop_insertion rs6000_sched_insert_nops;
+extern int rs6000_xilinx_fpu;
+
+/* Alignment options for fields in structures for sub-targets following
+ AIX-like ABI.
+ ALIGN_POWER word-aligns FP doubles (default AIX ABI).
+ ALIGN_NATURAL doubleword-aligns FP doubles (align to object size).
+
+   Override the macro definitions when compiling libobjc to avoid an undefined
+   reference to rs6000_alignment_flags due to the library's use of GCC
+   alignment macros, which use the macros below. */
+
+#ifndef IN_TARGET_LIBS
+#define MASK_ALIGN_POWER 0x00000000
+#define MASK_ALIGN_NATURAL 0x00000001
+#define TARGET_ALIGN_NATURAL (rs6000_alignment_flags & MASK_ALIGN_NATURAL)
+#else
+#define TARGET_ALIGN_NATURAL 0
+#endif
+
+#define TARGET_LONG_DOUBLE_128 (rs6000_long_double_type_size == 128)
+#define TARGET_IEEEQUAD rs6000_ieeequad
+#define TARGET_ALTIVEC_ABI rs6000_altivec_abi
+
+#define TARGET_SPE_ABI 0
+#define TARGET_SPE 0
+#define TARGET_E500 0
+#define TARGET_ISEL rs6000_isel
+#define TARGET_FPRS 1
+#define TARGET_E500_SINGLE 0
+#define TARGET_E500_DOUBLE 0
+#define CHECK_E500_OPTIONS do { } while (0)
+
+/* E500 processors only support plain "sync", not lwsync. */
+#define TARGET_NO_LWSYNC TARGET_E500
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Do not use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for.
+
+ On the RS/6000 this is used to define the target cpu type. */
+
+#define OVERRIDE_OPTIONS rs6000_override_options (TARGET_CPU_DEFAULT)
+
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) optimization_options(LEVEL,SIZE)
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* Target pragma. */
+#define REGISTER_TARGET_PRAGMAS() do { \
+ c_register_pragma (0, "longcall", rs6000_pragma_longcall); \
+ targetm.resolve_overloaded_builtin = altivec_resolve_overloaded_builtin; \
+} while (0)
+
+/* Target #defines. */
+#define TARGET_CPU_CPP_BUILTINS() \
+ rs6000_cpu_cpp_builtins (pfile)
+
+/* This is used by rs6000_cpu_cpp_builtins to indicate the byte order
+ we're compiling for. Some configurations may need to override it. */
+#define RS6000_CPU_CPP_ENDIAN_BUILTINS() \
+ do \
+ { \
+ if (BYTES_BIG_ENDIAN) \
+ { \
+ builtin_define ("__BIG_ENDIAN__"); \
+ builtin_define ("_BIG_ENDIAN"); \
+ builtin_assert ("machine=bigendian"); \
+ } \
+ else \
+ { \
+ builtin_define ("__LITTLE_ENDIAN__"); \
+ builtin_define ("_LITTLE_ENDIAN"); \
+ builtin_assert ("machine=littleendian"); \
+ } \
+ } \
+ while (0)
+
+/* Target machine storage layout. */
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ (MODE) = TARGET_32BIT ? SImode : DImode;
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+/* That is true on RS/6000. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is true on RS/6000. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+
+ For RS/6000 we can decide arbitrarily since there are no machine
+ instructions for them. Might as well be consistent with bits and bytes. */
+#define WORDS_BIG_ENDIAN 1
+
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (! TARGET_POWERPC64 ? 4 : 8)
+#ifdef IN_LIBGCC2
+#define MIN_UNITS_PER_WORD UNITS_PER_WORD
+#else
+#define MIN_UNITS_PER_WORD 4
+#endif
+#define UNITS_PER_FP_WORD 8
+#define UNITS_PER_ALTIVEC_WORD 16
+#define UNITS_PER_SPE_WORD 8
+#define UNITS_PER_PAIRED_WORD 8
+
+/* Type used for ptrdiff_t, as a string used in a declaration. */
+#define PTRDIFF_TYPE "int"
+
+/* Type used for size_t, as a string used in a declaration. */
+#define SIZE_TYPE "long unsigned int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#define WCHAR_TYPE "short unsigned int"
+
+/* Width of wchar_t in bits. */
+#define WCHAR_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `short' on the
+ target machine. If you don't define this, the default is half a
+ word. (If this would be less than one storage unit, it is
+ rounded up to one unit.) */
+#define SHORT_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `int' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define INT_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `long' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define LONG_TYPE_SIZE (TARGET_32BIT ? 32 : 64)
+
+/* A C expression for the size in bits of the type `long long' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define LONG_LONG_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `float' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define FLOAT_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `double' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define DOUBLE_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `long double' on
+ the target machine. If you don't define this, the default is two
+ words. */
+#define LONG_DOUBLE_TYPE_SIZE rs6000_long_double_type_size
+
+/* Define this to set the long double type size to use in libgcc2.c, which
+   cannot depend on target_flags. */
+#ifdef __LONG_DOUBLE_128__
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+/* Work around rs6000_long_double_type_size dependency in ada/targtyps.c. */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE (TARGET_32BIT ? 32 : 64)
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY (TARGET_32BIT ? 32 : 64)
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY \
+ ((TARGET_32BIT && !TARGET_ALTIVEC && !TARGET_ALTIVEC_ABI) ? 64 : 128)
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 128
+
+/* A C expression to compute the alignment for a variable in the
+   local store.  TYPE is the data type, and ALIGN is the alignment
+   that the object would ordinarily have. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : \
+ (TARGET_E500_DOUBLE \
+ && TYPE_MODE (TYPE) == DFmode) ? 64 : \
+ ((TARGET_SPE && TREE_CODE (TYPE) == VECTOR_TYPE \
+ && SPE_VECTOR_MODE (TYPE_MODE (TYPE))) || (TARGET_PAIRED_FLOAT \
+ && TREE_CODE (TYPE) == VECTOR_TYPE \
+ && PAIRED_VECTOR_MODE (TYPE_MODE (TYPE)))) ? 64 : ALIGN)
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Return 1 if a structure or array containing FIELD should be
+ accessed using `BLKMODE'.
+
+ For the SPE, simd types are V2SI, and gcc can be tempted to put the
+ entire thing in a DI and use subregs to access the internals.
+ store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
+ back-end. Because a single GPR can hold a V2SI, but not a DI, the
+ best thing to do is set structs to BLKmode and avoid Severe Tire
+ Damage.
+
+ On e500 v2, DF and DI modes suffer from the same anomaly. DF can
+ fit into 1, whereas DI still needs two. */
+#define MEMBER_TYPE_FORCES_BLK(FIELD, MODE) \
+ ((TARGET_SPE && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
+ || (TARGET_E500_DOUBLE && (MODE) == DFmode))
+
+/* A bit-field declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Make strings word-aligned so strcpy from constants will be faster.
+ Make vector constants quadword aligned. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (STRICT_ALIGNMENT || !optimize_size) \
+ && (ALIGN) < BITS_PER_WORD \
+ ? BITS_PER_WORD \
+ : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons.
+ Align vectors to 128 bits. Align SPE vectors and E500 v2 doubles to
+ 64 bits. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == VECTOR_TYPE ? ((TARGET_SPE_ABI \
+ || TARGET_PAIRED_FLOAT) ? 64 : 128) \
+ : (TARGET_E500_DOUBLE \
+ && TYPE_MODE (TYPE) == DFmode) ? 64 \
+ : TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* Define this macro to be the value 1 if unaligned accesses have a cost
+ many times greater than aligned accesses, for example if they are
+ emulated in a trap handler. */
+/* Altivec vector memory instructions simply ignore the low bits; SPE
+ vector memory instructions trap on unaligned accesses. */
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) \
+ (STRICT_ALIGNMENT \
+ || (((MODE) == SFmode || (MODE) == DFmode || (MODE) == TFmode \
+ || (MODE) == SDmode || (MODE) == DDmode || (MODE) == TDmode \
+ || (MODE) == DImode) \
+ && (ALIGN) < 32) \
+ || (VECTOR_MODE_P ((MODE)) && (ALIGN) < GET_MODE_BITSIZE ((MODE))))
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ RS/6000 has 32 fixed-point registers, 32 floating-point registers,
+ an MQ register, a count register, a link register, and 8 condition
+ register fields, which we view here as separate registers. AltiVec
+ adds 32 vector registers and a VRsave register.
+
+ In addition, the difference between the frame and argument pointers is
+ a function of the number of registers saved, so we need to have a
+ register for AP that will later be eliminated in favor of SP or FP.
+ This is a normal register, but it is fixed.
+
+ We also create a pseudo register for float/int conversions, that will
+ really represent the memory location used. It is represented here as
+ a register, in order to work around problems in allocating stack storage
+ in inline functions.
+
+ Another pseudo (not included in DWARF_FRAME_REGISTERS) is soft frame
+ pointer, which is eventually eliminated in favor of SP or FP. */
+
+#define FIRST_PSEUDO_REGISTER 114
+
+/* This must be included for pre gcc 3.0 glibc compatibility. */
+#define PRE_GCC3_DWARF_FRAME_REGISTERS 77
+
+/* Add 32 dwarf columns for synthetic SPE registers. */
+#define DWARF_FRAME_REGISTERS ((FIRST_PSEUDO_REGISTER - 1) + 32)
+
+/* The SPE has an additional 32 synthetic registers, with DWARF debug
+ info numbering for these registers starting at 1200. While eh_frame
+ register numbering need not be the same as the debug info numbering,
+ we choose to number these regs for eh_frame at 1200 too. This allows
+ future versions of the rs6000 backend to add hard registers and
+ continue to use the gcc hard register numbering for eh_frame. If the
+ extra SPE registers in eh_frame were numbered starting from the
+ current value of FIRST_PSEUDO_REGISTER, then if FIRST_PSEUDO_REGISTER
+ changed we'd need to introduce a mapping in DWARF_FRAME_REGNUM to
+ avoid invalidating older SPE eh_frame info.
+
+ We must map them here to avoid huge unwinder tables mostly consisting
+ of unused space. */
+#define DWARF_REG_TO_UNWIND_COLUMN(r) \
+ ((r) > 1200 ? ((r) - 1200 + FIRST_PSEUDO_REGISTER - 1) : (r))
+
+/* Use standard DWARF numbering for DWARF debugging information. */
+#define DBX_REGISTER_NUMBER(REGNO) rs6000_dbx_register_number (REGNO)
+
+/* Use gcc hard register numbering for eh_frame. */
+#define DWARF_FRAME_REGNUM(REGNO) (REGNO)
+
+/* Map register numbers held in the call frame info that gcc has
+ collected using DWARF_FRAME_REGNUM to those that should be output in
+ .debug_frame and .eh_frame. We continue to use gcc hard reg numbers
+ for .eh_frame, but use the numbers mandated by the various ABIs for
+ .debug_frame. rs6000_emit_prologue has translated any combination of
+ CR2, CR3, CR4 saves to a save of CR2. The actual code emitted saves
+ the whole of CR, so we map CR2_REGNO to the DWARF reg for CR. */
+#define DWARF2_FRAME_REG_OUT(REGNO, FOR_EH) \
+ ((FOR_EH) ? (REGNO) \
+ : (REGNO) == CR2_REGNO ? 64 \
+ : DBX_REGISTER_NUMBER (REGNO))
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ On RS/6000, r1 is used for the stack. On Darwin, r2 is available
+   as a local register; on all other systems r2 is the TOC pointer.
+
+ cr5 is not supposed to be used.
+
+ On System V implementations, r13 is fixed and not available for use. */
+
+#define FIXED_REGISTERS \
+ {0, 1, FIXED_R2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1 \
+ , 1, 1, 1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1 \
+ , 1, 1, 1 \
+}
+
+/* Like `CALL_USED_REGISTERS' except this macro doesn't require that
+ the entire set of `FIXED_REGISTERS' be included.
+ (`CALL_USED_REGISTERS' must be a superset of `FIXED_REGISTERS').
+ This macro is optional. If not specified, it defaults to the value
+ of `CALL_USED_REGISTERS'. */
+
+#define CALL_REALLY_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0 \
+ , 0, 0, 0 \
+}
+
+#define TOTAL_ALTIVEC_REGS (LAST_ALTIVEC_REGNO - FIRST_ALTIVEC_REGNO + 1)
+
+#define FIRST_SAVED_ALTIVEC_REGNO (FIRST_ALTIVEC_REGNO+20)
+#define FIRST_SAVED_FP_REGNO (14+32)
+#define FIRST_SAVED_GP_REGNO 13
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+
+ We allocate in the following order:
+ fp0 (not saved or used for anything)
+ fp13 - fp2 (not saved; incoming fp arg registers)
+ fp1 (not saved; return value)
+ fp31 - fp14 (saved; order given to save least number)
+ cr7, cr6 (not saved or special)
+ cr1 (not saved, but used for FP operations)
+ cr0 (not saved, but used for arithmetic operations)
+ cr4, cr3, cr2 (saved)
+ r0 (not saved; cannot be base reg)
+ r9 (not saved; best for TImode)
+ r11, r10, r8-r4 (not saved; highest used first to make less conflict)
+ r3 (not saved; return value register)
+ r31 - r13 (saved; order given to save least number)
+ r12 (not saved; if used for DImode or DFmode would use r13)
+ mq (not saved; best to use it if we can)
+ ctr (not saved; when we have the choice ctr is better)
+ lr (saved)
+ cr5, r1, r2, ap, xer (fixed)
+ v0 - v1 (not saved or used for anything)
+ v13 - v3 (not saved; incoming vector arg registers)
+ v2 (not saved; incoming vector arg reg; return value)
+ v19 - v14 (not saved or used for anything)
+ v31 - v20 (saved; order given to save least number)
+ vrsave, vscr (fixed)
+ spe_acc, spefscr (fixed)
+ sfp (fixed)
+*/
+
+#if FIXED_R2 == 1
+#define MAYBE_R2_AVAILABLE
+#define MAYBE_R2_FIXED 2,
+#else
+#define MAYBE_R2_AVAILABLE 2,
+#define MAYBE_R2_FIXED
+#endif
+
+#define REG_ALLOC_ORDER \
+ {32, \
+ 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, \
+ 33, \
+ 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, \
+ 50, 49, 48, 47, 46, \
+ 75, 74, 69, 68, 72, 71, 70, \
+ 0, MAYBE_R2_AVAILABLE \
+ 9, 11, 10, 8, 7, 6, 5, 4, \
+ 3, \
+ 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, \
+ 18, 17, 16, 15, 14, 13, 12, \
+ 64, 66, 65, \
+ 73, 1, MAYBE_R2_FIXED 67, 76, \
+ /* AltiVec registers. */ \
+ 77, 78, \
+ 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, \
+ 79, \
+ 96, 95, 94, 93, 92, 91, \
+ 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, 97, \
+ 109, 110, \
+ 111, 112, 113 \
+}
+
+/* True if register is floating-point. */
+#define FP_REGNO_P(N) ((N) >= 32 && (N) <= 63)
+
+/* True if register is a condition register. */
+#define CR_REGNO_P(N) ((N) >= CR0_REGNO && (N) <= CR7_REGNO)
+
+/* True if register is a condition register, but not cr0. */
+#define CR_REGNO_NOT_CR0_P(N) ((N) >= CR1_REGNO && (N) <= CR7_REGNO)
+
+/* True if register is an integer register. */
+#define INT_REGNO_P(N) \
+ ((N) <= 31 || (N) == ARG_POINTER_REGNUM || (N) == FRAME_POINTER_REGNUM)
+
+/* SPE SIMD registers are just the GPRs. */
+#define SPE_SIMD_REGNO_P(N) ((N) <= 31)
+
+/* PAIRED SIMD registers are just the FPRs. */
+#define PAIRED_SIMD_REGNO_P(N) ((N) >= 32 && (N) <= 63)
+
+/* True if register is the XER register. */
+#define XER_REGNO_P(N) ((N) == XER_REGNO)
+
+/* True if register is an AltiVec register. */
+#define ALTIVEC_REGNO_P(N) ((N) >= FIRST_ALTIVEC_REGNO && (N) <= LAST_ALTIVEC_REGNO)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) rs6000_hard_regno_nregs ((REGNO), (MODE))
+
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ ((TARGET_32BIT && TARGET_POWERPC64 \
+ && (GET_MODE_SIZE (MODE) > 4) \
+ && INT_REGNO_P (REGNO)) ? 1 : 0)
+
+#define ALTIVEC_VECTOR_MODE(MODE) \
+ ((MODE) == V16QImode \
+ || (MODE) == V8HImode \
+ || (MODE) == V4SFmode \
+ || (MODE) == V4SImode)
+
+#define SPE_VECTOR_MODE(MODE) \
+ ((MODE) == V4HImode \
+ || (MODE) == V2SFmode \
+ || (MODE) == V1DImode \
+ || (MODE) == V2SImode)
+
+#define PAIRED_VECTOR_MODE(MODE) \
+ ((MODE) == V2SFmode)
+
+#define UNITS_PER_SIMD_WORD(MODE) \
+ (TARGET_ALTIVEC ? UNITS_PER_ALTIVEC_WORD \
+ : (TARGET_SPE ? UNITS_PER_SPE_WORD : (TARGET_PAIRED_FLOAT ? \
+ UNITS_PER_PAIRED_WORD : UNITS_PER_WORD)))
+
+/* Value is TRUE if hard register REGNO can hold a value of
+ machine-mode MODE. */
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ rs6000_hard_regno_mode_ok_p[(int)(MODE)][REGNO]
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (SCALAR_FLOAT_MODE_P (MODE1) \
+ ? SCALAR_FLOAT_MODE_P (MODE2) \
+ : SCALAR_FLOAT_MODE_P (MODE2) \
+ ? SCALAR_FLOAT_MODE_P (MODE1) \
+ : GET_MODE_CLASS (MODE1) == MODE_CC \
+ ? GET_MODE_CLASS (MODE2) == MODE_CC \
+ : GET_MODE_CLASS (MODE2) == MODE_CC \
+ ? GET_MODE_CLASS (MODE1) == MODE_CC \
+ : SPE_VECTOR_MODE (MODE1) \
+ ? SPE_VECTOR_MODE (MODE2) \
+ : SPE_VECTOR_MODE (MODE2) \
+ ? SPE_VECTOR_MODE (MODE1) \
+ : ALTIVEC_VECTOR_MODE (MODE1) \
+ ? ALTIVEC_VECTOR_MODE (MODE2) \
+ : ALTIVEC_VECTOR_MODE (MODE2) \
+ ? ALTIVEC_VECTOR_MODE (MODE1) \
+ : 1)
+
+/* Post-reload, we can't use any new AltiVec registers, as we already
+ emitted the vrsave mask. */
+
+#define HARD_REGNO_RENAME_OK(SRC, DST) \
+ (! ALTIVEC_REGNO_P (DST) || df_regs_ever_live_p (DST))
+
+/* A C expression returning the cost of moving data from a register of class
+ CLASS1 to one of CLASS2. */
+
+#define REGISTER_MOVE_COST rs6000_register_move_cost
+
+/* A C expression returning the cost of moving data of mode MODE between
+   a register and memory. */
+
+#define MEMORY_MOVE_COST rs6000_memory_move_cost
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch.
+
+ Set this to 3 on the RS/6000 since that is roughly the average cost of an
+ unscheduled conditional branch. */
+
+#define BRANCH_COST(speed_p, predictable_p) 3
+
+/* Override BRANCH_COST heuristic which empirically produces worse
+ performance for removing short circuiting from the logical ops. */
+
+#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+
+/* A fixed register used at epilogue generation to address SPE registers
+ with negative offsets. The 64-bit load/store instructions on the SPE
+ only take positive offsets (and small ones at that), so we need to
+ reserve a register for consing up negative offsets. */
+
+#define FIXED_SCRATCH 0
+
+/* Define this macro to change register usage conditional on target
+ flags. */
+
+#define CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage ()
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* RS/6000 pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 1
+
+/* Base register for access to local variables of the function. */
+#define HARD_FRAME_POINTER_REGNUM 31
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 113
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM 67
+
+/* Place to put static chain when calling a function that requires it. */
+#define STATIC_CHAIN_REGNUM 11
+
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The RS/6000 has three types of registers, fixed-point, floating-point,
+ and condition registers, plus three special registers, MQ, CTR, and the
+ link register. AltiVec adds a vector register class.
+
+ However, r0 is special in that it cannot be used as a base register.
+ So make a class for registers valid as base registers.
+
+ Also, cr0 is the only condition code register that can be used in
+ arithmetic insns, so make a separate class for it. */
+
+enum reg_class
+{
+ NO_REGS,
+ BASE_REGS,
+ GENERAL_REGS,
+ FLOAT_REGS,
+ ALTIVEC_REGS,
+ VRSAVE_REGS,
+ VSCR_REGS,
+ SPE_ACC_REGS,
+ SPEFSCR_REGS,
+ NON_SPECIAL_REGS,
+ MQ_REGS,
+ LINK_REGS,
+ CTR_REGS,
+ LINK_OR_CTR_REGS,
+ SPECIAL_REGS,
+ SPEC_OR_GEN_REGS,
+ CR0_REGS,
+ CR_REGS,
+ NON_FLOAT_REGS,
+ XER_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "BASE_REGS", \
+ "GENERAL_REGS", \
+ "FLOAT_REGS", \
+ "ALTIVEC_REGS", \
+ "VRSAVE_REGS", \
+ "VSCR_REGS", \
+ "SPE_ACC_REGS", \
+ "SPEFSCR_REGS", \
+ "NON_SPECIAL_REGS", \
+ "MQ_REGS", \
+ "LINK_REGS", \
+ "CTR_REGS", \
+ "LINK_OR_CTR_REGS", \
+ "SPECIAL_REGS", \
+ "SPEC_OR_GEN_REGS", \
+ "CR0_REGS", \
+ "CR_REGS", \
+ "NON_FLOAT_REGS", \
+ "XER_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0xfffffffe, 0x00000000, 0x00000008, 0x00020000 }, /* BASE_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000008, 0x00020000 }, /* GENERAL_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000, 0x00000000 }, /* FLOAT_REGS */ \
+ { 0x00000000, 0x00000000, 0xffffe000, 0x00001fff }, /* ALTIVEC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00002000 }, /* VRSAVE_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00004000 }, /* VSCR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00008000 }, /* SPE_ACC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00010000 }, /* SPEFSCR_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000008, 0x00020000 }, /* NON_SPECIAL_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000001, 0x00000000 }, /* MQ_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000002, 0x00000000 }, /* LINK_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000004, 0x00000000 }, /* CTR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000006, 0x00000000 }, /* LINK_OR_CTR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000007, 0x00002000 }, /* SPECIAL_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000000f, 0x00022000 }, /* SPEC_OR_GEN_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000010, 0x00000000 }, /* CR0_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000ff0, 0x00000000 }, /* CR_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000efff, 0x00020000 }, /* NON_FLOAT_REGS */ \
+ { 0x00000000, 0x00000000, 0x00001000, 0x00000000 }, /* XER_REGS */ \
+ { 0xffffffff, 0xffffffff, 0xffffffff, 0x0003ffff } /* ALL_REGS */ \
+}
+
+/* The following macro defines cover classes for the Integrated Register
+   Allocator.  Cover classes are a set of non-intersecting register
+   classes covering all hard registers used for register allocation
+   purposes.  Any move between two registers of a cover class should be
+   cheaper than a load or store of the registers.  The macro value is an
+   array of register classes with LIM_REG_CLASSES used as the end
+   marker.  */
+
+#define IRA_COVER_CLASSES \
+{ \
+ GENERAL_REGS, SPECIAL_REGS, FLOAT_REGS, ALTIVEC_REGS, \
+ /*VRSAVE_REGS,*/ VSCR_REGS, SPE_ACC_REGS, SPEFSCR_REGS, \
+ /* MQ_REGS, LINK_REGS, CTR_REGS, */ \
+ CR_REGS, XER_REGS, LIM_REG_CLASSES \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == 0 ? GENERAL_REGS \
+ : (REGNO) < 32 ? BASE_REGS \
+ : FP_REGNO_P (REGNO) ? FLOAT_REGS \
+ : ALTIVEC_REGNO_P (REGNO) ? ALTIVEC_REGS \
+ : (REGNO) == CR0_REGNO ? CR0_REGS \
+ : CR_REGNO_P (REGNO) ? CR_REGS \
+ : (REGNO) == MQ_REGNO ? MQ_REGS \
+ : (REGNO) == LR_REGNO ? LINK_REGS \
+ : (REGNO) == CTR_REGNO ? CTR_REGS \
+ : (REGNO) == ARG_POINTER_REGNUM ? BASE_REGS \
+ : (REGNO) == XER_REGNO ? XER_REGS \
+ : (REGNO) == VRSAVE_REGNO ? VRSAVE_REGS \
+ : (REGNO) == VSCR_REGNO ? VRSAVE_REGS \
+ : (REGNO) == SPE_ACC_REGNO ? SPE_ACC_REGS \
+ : (REGNO) == SPEFSCR_REGNO ? SPEFSCR_REGS \
+ : (REGNO) == FRAME_POINTER_REGNUM ? BASE_REGS \
+ : NO_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS BASE_REGS
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the RS/6000, we have to return NO_REGS when we want to reload a
+ floating-point CONST_DOUBLE to force it to be copied to memory.
+
+ We also don't want to reload integer values into floating-point
+ registers if we can at all help it. In fact, this can
+ cause reload to die, if it tries to generate a reload of CTR
+ into a FP register and discovers it doesn't have the memory location
+ required.
+
+ ??? Would it be a good idea to have reload do the converse, that is
+ try to reload floating modes into FP registers if possible?
+ */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ ((CONSTANT_P (X) \
+ && reg_classes_intersect_p ((CLASS), FLOAT_REGS)) \
+ ? NO_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (CLASS))
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,IN) \
+ rs6000_secondary_reload_class (CLASS, MODE, IN)
+
+/* If we are copying between FP or AltiVec registers and anything
+   else, we need a memory location.  The exception is when we are
+   targeting ppc64 and the FPR-to-GPR move instructions
+   are available.  */
+
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ ((CLASS1) != (CLASS2) && (((CLASS1) == FLOAT_REGS \
+ && (!TARGET_MFPGPR || !TARGET_POWERPC64 \
+ || ((MODE != DFmode) \
+ && (MODE != DDmode) \
+ && (MODE != DImode)))) \
+ || ((CLASS2) == FLOAT_REGS \
+ && (!TARGET_MFPGPR || !TARGET_POWERPC64 \
+ || ((MODE != DFmode) \
+ && (MODE != DDmode) \
+ && (MODE != DImode)))) \
+ || (CLASS1) == ALTIVEC_REGS \
+ || (CLASS2) == ALTIVEC_REGS))
+
+/* For cpus that cannot load/store SDmode values from the 64-bit
+ FP registers without using a full 64-bit load/store, we need
+ to allocate a full 64-bit stack slot for them. */
+
+#define SECONDARY_MEMORY_NEEDED_RTX(MODE) \
+ rs6000_secondary_memory_needed_rtx (MODE)
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+
+ On RS/6000, this is the size of MODE in words,
+ except in the FP regs, where a single reg is enough for two words. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (((CLASS) == FLOAT_REGS) \
+ ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
+ : (TARGET_E500_DOUBLE && (CLASS) == GENERAL_REGS \
+ && (MODE) == DFmode) \
+ ? 1 \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
+
+#define CANNOT_CHANGE_MODE_CLASS(FROM, TO, CLASS) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO) \
+ ? ((GET_MODE_SIZE (FROM) < 8 || GET_MODE_SIZE (TO) < 8 \
+ || TARGET_IEEEQUAD) \
+ && reg_classes_intersect_p (FLOAT_REGS, CLASS)) \
+ : (((TARGET_E500_DOUBLE \
+ && ((((TO) == DFmode) + ((FROM) == DFmode)) == 1 \
+ || (((TO) == TFmode) + ((FROM) == TFmode)) == 1 \
+ || (((TO) == DDmode) + ((FROM) == DDmode)) == 1 \
+ || (((TO) == TDmode) + ((FROM) == TDmode)) == 1 \
+ || (((TO) == DImode) + ((FROM) == DImode)) == 1)) \
+ || (TARGET_SPE \
+ && (SPE_VECTOR_MODE (FROM) + SPE_VECTOR_MODE (TO)) == 1)) \
+ && reg_classes_intersect_p (GENERAL_REGS, CLASS)))
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Enumeration to give which calling sequence to use. */
+enum rs6000_abi {
+ ABI_NONE,
+ ABI_AIX, /* IBM's AIX */
+ ABI_V4, /* System V.4/eabi */
+ ABI_DARWIN /* Apple's Darwin (OS X kernel) */
+};
+
+extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Offsets recorded in opcodes are a multiple of this alignment factor. */
+#define DWARF_CIE_DATA_ALIGNMENT (-((int) (TARGET_32BIT ? 4 : 8)))
+
+/* Define this to nonzero if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame.
+
+ On the RS/6000, we grow upwards, from the area after the outgoing
+ arguments. */
+#define FRAME_GROWS_DOWNWARD (flag_stack_protect != 0)
+
+/* Size of the outgoing register save area */
+#define RS6000_REG_SAVE ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN) \
+ ? (TARGET_64BIT ? 64 : 32) \
+ : 0)
+
+/* Size of the fixed area on the stack */
+#define RS6000_SAVE_AREA \
+ (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN) ? 24 : 8) \
+ << (TARGET_64BIT ? 1 : 0))
+
+/* MEM representing address to save the TOC register */
+#define RS6000_SAVE_TOC gen_rtx_MEM (Pmode, \
+ plus_constant (stack_pointer_rtx, \
+ (TARGET_32BIT ? 20 : 40)))
+
+/* Align an address */
+#define RS6000_ALIGN(n,a) (((n) + (a) - 1) & ~((a) - 1))
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated.
+
+ On the RS/6000, the frame pointer is the same as the stack pointer,
+ except for dynamic allocations. So we start after the fixed area and
+ outgoing parameter area. */
+
+#define STARTING_FRAME_OFFSET \
+ (FRAME_GROWS_DOWNWARD \
+ ? 0 \
+ : (RS6000_ALIGN (crtl->outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + RS6000_SAVE_AREA))
+
+/* Offset from the stack pointer register to an item dynamically
+ allocated on the stack, e.g., by `alloca'.
+
+ The default value for this macro is `STACK_POINTER_OFFSET' plus the
+ length of the outgoing arguments. The default is correct for most
+ machines. See `function.c' for details. */
+#define STACK_DYNAMIC_OFFSET(FUNDECL) \
+ (RS6000_ALIGN (crtl->outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + (STACK_POINTER_OFFSET))
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On RS/6000, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Offset of first parameter from the argument pointer register value.
+ On the RS/6000, we define the argument pointer to the start of the fixed
+ area. */
+#define FIRST_PARM_OFFSET(FNDECL) RS6000_SAVE_AREA
+
+/* Offset from the argument pointer register value to the top of
+ stack. This is different from FIRST_PARM_OFFSET because of the
+ register save area. */
+#define ARG_POINTER_CFA_OFFSET(FNDECL) 0
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. The value is the number of bytes allocated to this
+ area. */
+#define REG_PARM_STACK_SPACE(FNDECL) RS6000_REG_SAVE
+
+/* Define this if the above stack space is to be considered part of the
+ space allocated by the caller. */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
+
+/* This is the difference between the logical top of stack and the actual sp.
+
+ For the RS/6000, sp points past the fixed area. */
+#define STACK_POINTER_OFFSET RS6000_SAVE_AREA
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable crtl->outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) rs6000_function_value ((VALTYPE), (FUNC))
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) rs6000_libcall_value ((MODE))
+
+/* DRAFT_V4_STRUCT_RET defaults off. */
+#define DRAFT_V4_STRUCT_RET 0
+
+/* Let TARGET_RETURN_IN_MEMORY control what happens. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Mode of stack savearea.
+ FUNCTION is VOIDmode because calling convention maintains SP.
+ BLOCK needs Pmode for SP.
+ NONLOCAL needs twice Pmode to maintain both backchain and SP. */
+#define STACK_SAVEAREA_MODE(LEVEL) \
+ (LEVEL == SAVE_FUNCTION ? VOIDmode \
+ : LEVEL == SAVE_NONLOCAL ? (TARGET_32BIT ? DImode : TImode) : Pmode)
+
+/* Minimum and maximum general purpose registers used to hold arguments. */
+#define GP_ARG_MIN_REG 3
+#define GP_ARG_MAX_REG 10
+#define GP_ARG_NUM_REG (GP_ARG_MAX_REG - GP_ARG_MIN_REG + 1)
+
+/* Minimum and maximum floating point registers used to hold arguments. */
+#define FP_ARG_MIN_REG 33
+#define FP_ARG_AIX_MAX_REG 45
+#define FP_ARG_V4_MAX_REG 40
+#define FP_ARG_MAX_REG ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN) \
+ ? FP_ARG_AIX_MAX_REG : FP_ARG_V4_MAX_REG)
+#define FP_ARG_NUM_REG (FP_ARG_MAX_REG - FP_ARG_MIN_REG + 1)
+
+/* Minimum and maximum AltiVec registers used to hold arguments. */
+#define ALTIVEC_ARG_MIN_REG (FIRST_ALTIVEC_REGNO + 2)
+#define ALTIVEC_ARG_MAX_REG (ALTIVEC_ARG_MIN_REG + 11)
+#define ALTIVEC_ARG_NUM_REG (ALTIVEC_ARG_MAX_REG - ALTIVEC_ARG_MIN_REG + 1)
+
+/* Return registers */
+#define GP_ARG_RETURN GP_ARG_MIN_REG
+#define FP_ARG_RETURN FP_ARG_MIN_REG
+#define ALTIVEC_ARG_RETURN (FIRST_ALTIVEC_REGNO + 2)
+
+/* Flags for the call/call_value rtl operations set up by function_arg */
+#define CALL_NORMAL 0x00000000 /* no special processing */
+/* Bits in 0x00000001 are unused. */
+#define CALL_V4_CLEAR_FP_ARGS 0x00000002 /* V.4, no FP args passed */
+#define CALL_V4_SET_FP_ARGS 0x00000004 /* V.4, FP args were passed */
+#define CALL_LONG 0x00000008 /* always call indirect */
+#define CALL_LIBCALL 0x00000010 /* libcall */
+
+/* We don't have prologue and epilogue functions to save/restore
+ everything for most ABIs. */
+#define WORLD_SAVE_P(INFO) 0
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller.
+
+ On RS/6000, this is r3, fp1, and v2 (for AltiVec). */
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == GP_ARG_RETURN \
+ || ((N) == FP_ARG_RETURN && TARGET_HARD_FLOAT && TARGET_FPRS) \
+ || ((N) == ALTIVEC_ARG_RETURN && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI))
+
+/* 1 if N is a possible register number for function argument passing.
+ On RS/6000, these are r3-r10 and fp1-fp13.
+ On AltiVec, v2 - v13 are used for passing vectors. */
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((unsigned) (N) - GP_ARG_MIN_REG < GP_ARG_NUM_REG \
+ || ((unsigned) (N) - ALTIVEC_ARG_MIN_REG < ALTIVEC_ARG_NUM_REG \
+ && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI) \
+ || ((unsigned) (N) - FP_ARG_MIN_REG < FP_ARG_NUM_REG \
+ && TARGET_HARD_FLOAT && TARGET_FPRS))
+
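+/* The range tests above use the unsigned-wraparound idiom:
+   (unsigned) (N) - MIN < NUM is a single comparison equivalent to
+   MIN <= N && N < MIN + NUM, because values below MIN wrap around to
+   huge unsigned values.  A minimal standalone sketch (not part of
+   this header):  */
+#if 0
+#include <assert.h>
+static int
+in_range (int n, unsigned int min, unsigned int num)
+{
+  return (unsigned int) n - min < num;  /* one compare, no && */
+}
+int
+main (void)
+{
+  assert (in_range (3, 3, 8));    /* r3 is the first GP argument reg */
+  assert (in_range (10, 3, 8));   /* r10 is the last */
+  assert (!in_range (2, 3, 8));   /* r2 (TOC) is not one */
+  assert (!in_range (11, 3, 8));  /* neither is r11 */
+  return 0;
+}
+#endif
+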
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the RS/6000, this is a structure. The first element is the number of
+   total argument words, the second stores the next available
+   floating-point register number, and the third says how many more args
+   we have prototype information for.
+
+ For ABI_V4, we treat these slightly differently -- `sysv_gregno' is
+ the next available GP register, `fregno' is the next available FP
+ register, and `words' is the number of words used on the stack.
+
+ The varargs/stdarg support requires that this structure's size
+ be a multiple of sizeof(int). */
+
+typedef struct rs6000_args
+{
+ int words; /* # words used for passing GP registers */
+ int fregno; /* next available FP register */
+ int vregno; /* next available AltiVec register */
+ int nargs_prototype; /* # args left in the current prototype */
+ int prototype; /* Whether a prototype was defined */
+ int stdarg; /* Whether function is a stdarg function. */
+ int call_cookie; /* Do special things for this call */
+ int sysv_gregno; /* next available GP register */
+ int intoffset; /* running offset in struct (darwin64) */
+ int use_stack; /* any part of struct on stack (darwin64) */
+ int named; /* false for varargs params */
+} CUMULATIVE_ARGS;
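+
+/* The multiple-of-sizeof(int) requirement holds because every field
+   above is an int; a minimal compile-time check (not part of this
+   header), using the classic negative-array-size trick:  */
+#if 0
+typedef char cumulative_args_size_check
+  [sizeof (CUMULATIVE_ARGS) % sizeof (int) == 0 ? 1 : -1];
+#endif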
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME, FALSE, FALSE, N_NAMED_ARGS)
+
+/* Similar, but when scanning the definition of a procedure. We always
+ set NARGS_PROTOTYPE large so we never return an EXPR_LIST. */
+
+#define INIT_CUMULATIVE_INCOMING_ARGS(CUM, FNTYPE, LIBNAME) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME, TRUE, FALSE, 1000)
+
+/* Like INIT_CUMULATIVE_ARGS, but only used for outgoing libcalls. */
+
+#define INIT_CUMULATIVE_LIBCALL_ARGS(CUM, MODE, LIBNAME) \
+ init_cumulative_args (&CUM, NULL_TREE, LIBNAME, FALSE, TRUE, 0)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_advance (&CUM, MODE, TYPE, NAMED, 0)
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On RS/6000 the first eight words of non-FP are normally in registers
+ and the rest are pushed. The first 13 FP args are in registers.
+
+ If this is floating-point and no prototype is specified, we use
+ both an FP and integer register (or possibly FP reg and stack). Library
+ functions (when TYPE is zero) always have the proper types for args,
+ so we can pass the FP value just in one register. emit_library_function
+ doesn't support EXPR_LIST anyway. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, NAMED)
+
+/* If defined, a C expression which determines whether, and in which
+ direction, to pad out an argument with extra space. The value
+ should be of type `enum direction': either `upward' to pad above
+ the argument, `downward' to pad below, or `none' to inhibit
+ padding. */
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) function_arg_padding (MODE, TYPE)
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ function_arg_boundary (MODE, TYPE)
+
+#define PAD_VARARGS_DOWN \
+ (FUNCTION_ARG_PADDING (TYPE_MODE (type), type) == downward)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ output_function_profiler ((FILE), (LABELNO));
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. No definition is equivalent to
+ always zero.
+
+ On the RS/6000, this is nonzero because we can restore the stack from
+ its backpointer, which we maintain. */
+#define EXIT_IGNORE_STACK 1
+
+/* Define this macro as a C expression that is nonzero for registers
+   that are used by the epilogue or the `return' pattern. The stack
+   and frame pointer registers are already assumed to be used as
+   needed. */
+
+#define EPILOGUE_USES(REGNO) \
+ ((reload_completed && (REGNO) == LR_REGNO) \
+ || (TARGET_ALTIVEC && (REGNO) == VRSAVE_REGNO) \
+ || (crtl->calls_eh_return \
+ && TARGET_AIX \
+ && (REGNO) == 2))
+
+
+/* TRAMPOLINE_TEMPLATE deleted */
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE rs6000_trampoline_size ()
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, CXT) \
+ rs6000_initialize_trampoline (ADDR, FNADDR, CXT)
+
+/* Definitions for __builtin_return_address and __builtin_frame_address.
+ __builtin_return_address (0) should give link register (65), enable
+ this. */
+/* This should be uncommented, so that the link register is used, but
+ currently this would result in unmatched insns and spilling fixed
+ registers so we'll leave it for another day. When these problems are
+ taken care of one additional fetch will be necessary in RETURN_ADDR_RTX.
+ (mrs) */
+/* #define RETURN_ADDR_IN_PREVIOUS_FRAME */
+
+/* Number of bytes into the frame at which return addresses can be
+   found.  See rs6000_stack_info in rs6000.c for more information on
+   how the different ABIs store the return address. */
+#define RETURN_ADDRESS_OFFSET \
+ ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN) ? (TARGET_32BIT ? 8 : 16) : \
+ (DEFAULT_ABI == ABI_V4) ? 4 : \
+ (internal_error ("RETURN_ADDRESS_OFFSET not supported"), 0))
+
+/* The current return address is in link register (65). The return address
+ of anything farther back is accessed normally at an offset of 8 from the
+ frame pointer. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ (rs6000_return_addr (COUNT, FRAME))
+
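+/* These macros implement __builtin_return_address and
+   __builtin_frame_address.  A minimal user-level sketch (not part of
+   this header): with a count of 0 the saved link register value is
+   returned; deeper counts walk the backchain and fetch the slot at
+   RETURN_ADDRESS_OFFSET.  */
+#if 0
+#include <stdio.h>
+void
+who_called_me (void)
+{
+  /* Address in our caller to which this function will return.  */
+  void *ra = __builtin_return_address (0);
+  printf ("returning to %p\n", ra);
+}
+#endif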
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the RS/6000. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer.
+
+   In addition, we use the elimination mechanism to see if r30 is needed.
+   Initially we assume that it isn't. If it is, we spill it. This is done
+ by making it an eliminable register. We replace it with itself so that
+ if it isn't needed, then existing uses won't be modified. */
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+#define ELIMINABLE_REGS \
+{{ HARD_FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { RS6000_PIC_OFFSET_TABLE_REGNUM, RS6000_PIC_OFFSET_TABLE_REGNUM } }
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ For the RS/6000, if frame pointer elimination is being done, we would like
+ to convert ap into fp, not sp.
+
+   We need r30 if -mminimal-toc was specified and there are constant
+   pool references. */
+
+#define CAN_ELIMINATE(FROM, TO) \
+ ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
+ ? ! frame_pointer_needed \
+ : (FROM) == RS6000_PIC_OFFSET_TABLE_REGNUM \
+ ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0 \
+ : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = rs6000_initial_elimination_offset(FROM, TO))
+
+/* Addressing modes, and classification of registers for them. */
+
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_PRE_MODIFY_DISP 1
+#define HAVE_PRE_MODIFY_REG 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? (REGNO) <= 31 || (REGNO) == 67 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
+ : (reg_renumber[REGNO] >= 0 \
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67 \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)))
+
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? ((REGNO) > 0 && (REGNO) <= 31) || (REGNO) == 67 \
+ || (REGNO) == FRAME_POINTER_REGNUM \
+ : (reg_renumber[REGNO] > 0 \
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67 \
+ || reg_renumber[REGNO] == FRAME_POINTER_REGNUM)))
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+   On the RS/6000, all integer constants are acceptable, although most
+   won't be valid for particular insns.  Only easy FP constants are
+   acceptable. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (((GET_CODE (X) != CONST_DOUBLE \
+ && GET_CODE (X) != CONST_VECTOR) \
+ || GET_MODE (X) == VOIDmode \
+ || (TARGET_POWERPC64 && GET_MODE (X) == DImode) \
+ || easy_fp_constant (X, GET_MODE (X)) \
+ || easy_vector_constant (X, GET_MODE (X))) \
+ && !rs6000_tls_referenced_p (X))
+
+#define EASY_VECTOR_15(n) ((n) >= -16 && (n) <= 15)
+#define EASY_VECTOR_15_ADD_SELF(n) (!EASY_VECTOR_15((n)) \
+ && EASY_VECTOR_15((n) >> 1) \
+ && ((n) & 1) == 0)
+
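+/* EASY_VECTOR_15 matches the -16..15 immediates that vspltis[bhw] can
+   splat directly; EASY_VECTOR_15_ADD_SELF matches the even values
+   16..30 reachable by splatting n/2 and adding the result to itself.
+   A minimal sketch (not part of this header; assumes the two macros
+   above are in scope):  */
+#if 0
+#include <assert.h>
+int
+main (void)
+{
+  assert (EASY_VECTOR_15 (15) && EASY_VECTOR_15 (-16));
+  assert (!EASY_VECTOR_15 (16));
+  assert (EASY_VECTOR_15_ADD_SELF (16));   /* splat 8, add to itself */
+  assert (EASY_VECTOR_15_ADD_SELF (30));   /* splat 15, add to itself */
+  assert (!EASY_VECTOR_15_ADD_SELF (17));  /* odd values don't qualify */
+  assert (!EASY_VECTOR_15_ADD_SELF (32));  /* 32/2 = 16 is out of range */
+  return 0;
+}
+#endif
+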
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifdef REG_OK_STRICT
+# define REG_OK_STRICT_FLAG 1
+#else
+# define REG_OK_STRICT_FLAG 0
+#endif
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg in the non-strict case. */
+#define INT_REG_OK_FOR_INDEX_P(X, STRICT) \
+ ((!(STRICT) && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
+ || REGNO_OK_FOR_INDEX_P (REGNO (X)))
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg in the non-strict case. */
+#define INT_REG_OK_FOR_BASE_P(X, STRICT) \
+ ((!(STRICT) && REGNO (X) >= FIRST_PSEUDO_REGISTER) \
+ || REGNO_OK_FOR_BASE_P (REGNO (X)))
+
+#define REG_OK_FOR_INDEX_P(X) INT_REG_OK_FOR_INDEX_P (X, REG_OK_STRICT_FLAG)
+#define REG_OK_FOR_BASE_P(X) INT_REG_OK_FOR_BASE_P (X, REG_OK_STRICT_FLAG)
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On the RS/6000, there are four valid addresses: a SYMBOL_REF that
+ refers to a constant pool entry of an address (or the sum of it
+ plus a constant), a short (16-bit signed) constant plus a register,
+ the sum of two registers, or a register indirect, possibly with an
+   auto-increment. For DFmode, DDmode and DImode with a constant plus
+   register, we must ensure that both words are addressable, or that
+   PowerPC64 is being used with a word-aligned offset.
+
+ For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
+ 32-bit DImode, TImode), indexed addressing cannot be used because
+ adjacent memory cells are accessed by adding word-sized offsets
+ during assembly output. */
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ if (rs6000_legitimate_address (MODE, X, REG_OK_STRICT_FLAG)) \
+ goto ADDR; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On RS/6000, first check for the sum of a register with a constant
+ integer that is out of range. If so, generate code to add the
+ constant with the low-order 16 bits masked to the register and force
+ this result into another register (this can be done with `cau').
+ Then generate an address of REG+(CONST&0xffff), allowing for the
+ possibility of bit 16 being a one.
+
+ Then check for the sum of a register and something not constant, try to
+ load the other things into a register and return the sum. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ rtx result = rs6000_legitimize_address (X, OLDX, MODE); \
+ if (result != NULL_RTX) \
+ { \
+ (X) = result; \
+ goto WIN; \
+ } \
+}
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ Implemented on rs6000 by rs6000_legitimize_reload_address.
+ Note that (X) is evaluated twice; this is safe in current usage. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ int win; \
+ (X) = rs6000_legitimize_reload_address ((X), (MODE), (OPNUM), \
+ (int)(TYPE), (IND_LEVELS), &win); \
+ if ( win ) \
+ goto WIN; \
+} while (0)
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+do { \
+ if (rs6000_mode_dependent_address (ADDR)) \
+ goto LABEL; \
+} while (0)
+
+#define FIND_BASE_TERM rs6000_find_base_term
+
+/* The register number of the register used to address a table of
+ static data addresses in memory. In some cases this register is
+ defined by a processor's "application binary interface" (ABI).
+ When this macro is defined, RTL is generated for this register
+ once, as with the stack pointer and frame pointer registers. If
+ this macro is not defined, it is up to the machine-dependent files
+ to allocate such a register (if necessary). */
+
+#define RS6000_PIC_OFFSET_TABLE_REGNUM 30
+#define PIC_OFFSET_TABLE_REGNUM (flag_pic ? RS6000_PIC_OFFSET_TABLE_REGNUM : INVALID_REGNUM)
+
+#define TOC_REGISTER (TARGET_MINIMAL_TOC ? RS6000_PIC_OFFSET_TABLE_REGNUM : 2)
+
+/* Define this macro if the register defined by
+ `PIC_OFFSET_TABLE_REGNUM' is clobbered by calls. Do not define
+ this macro if `PIC_OFFSET_TABLE_REGNUM' is not defined. */
+
+/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent
+ code. You can assume that X satisfies `CONSTANT_P', so you need
+ not check this. You can also assume FLAG_PIC is true, so you need
+ not check it either. You need not define this macro if all
+ constants (including `SYMBOL_REF') can be immediate operands when
+ generating position independent code. */
+
+/* #define LEGITIMATE_PIC_OPERAND_P (X) */
+
+/* Define this if some processing needs to be done immediately before
+ emitting code for an insn. */
+
+#define FINAL_PRESCAN_INSN(INSN,OPERANDS,NOPERANDS) \
+ rs6000_final_prescan_insn (INSN, OPERANDS, NOPERANDS)
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+
+/* #define FIXUNS_TRUNC_LIKE_FIX_TRUNC */
+
+/* An integer expression for the size in bits of the largest integer machine
+ mode that should actually be used. */
+
+/* Allow pairs of registers to be used, which is the intent of the default. */
+#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TARGET_POWERPC64 ? TImode : DImode)
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX (! TARGET_POWERPC64 ? 4 : 8)
+#define MAX_MOVE_MAX 8
+
+/* Nonzero if access to memory by bytes is no faster than for words.
+ Also nonzero if doing byte operations (specifically shifts) in registers
+ is undesirable. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, UNKNOWN if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* The cntlzw and cntlzd instructions return 32 and 64 for input of zero. */
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+ ((VALUE) = ((MODE) == SImode ? 32 : 64), 1)
+
+/* The CTZ patterns return -1 for input of zero. */
+#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = -1, 1)
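+
+/* That is, the hardware count instructions are fully defined at zero
+   (they return the word width), while the ctz expansion yields -1.
+   A minimal sketch (not part of this header) of the corresponding
+   builtins on this target:  */
+#if 0
+#include <assert.h>
+int
+main (void)
+{
+  assert (__builtin_clz (1) == 31);  /* 31 leading zeros in SImode */
+  assert (__builtin_ctz (8) == 3);
+  /* __builtin_clz (0) is undefined at the source level; the macros
+     above only tell the RTL optimizers what the insns produce.  */
+  return 0;
+}
+#endif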
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode (TARGET_32BIT ? SImode : DImode)
+
+/* Supply definition of STACK_SIZE_MODE for allocate_dynamic_stack_space. */
+#define STACK_SIZE_MODE (TARGET_32BIT ? SImode : DImode)
+
+/* Mode of a function address in a call instruction (for indexing purposes).
+ Doesn't matter on RS/6000. */
+#define FUNCTION_MODE SImode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits.
+
+   The sle and sre instructions, which allowed SHIFT_COUNT_TRUNCATED,
+   have been dropped from the PowerPC architecture. */
+
+#define SHIFT_COUNT_TRUNCATED (TARGET_POWER ? 1 : 0)
+
+/* Adjust the length of an INSN. LENGTH is the currently-computed length and
+ should be adjusted to reflect any required changes. This macro is used when
+ there is some systematic length adjustment required that would be difficult
+ to express in the length attribute. */
+
+/* #define ADJUST_INSN_LENGTH(X,LENGTH) */
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a
+ COMPARE, return the mode to be used for the comparison. For
+ floating-point, CCFPmode should be used. CCUNSmode should be used
+ for unsigned comparisons. CCEQmode should be used when we are
+ doing an inequality comparison on the result of a
+ comparison. CCmode should be used in all other cases. */
+
+#define SELECT_CC_MODE(OP,X,Y) \
+ (SCALAR_FLOAT_MODE_P (GET_MODE (X)) ? CCFPmode \
+ : (OP) == GTU || (OP) == LTU || (OP) == GEU || (OP) == LEU ? CCUNSmode \
+ : (((OP) == EQ || (OP) == NE) && COMPARISON_P (X) \
+ ? CCEQmode : CCmode))
+
+/* Can the condition code MODE be safely reversed? This is safe in
+ all cases on this port, because at present it doesn't use the
+ trapping FP comparisons (fcmpo). */
+#define REVERSIBLE_CC_MODE(MODE) 1
+
+/* Given a condition code and a mode, return the inverse condition. */
+#define REVERSE_CONDITION(CODE, MODE) rs6000_reverse_condition (MODE, CODE)
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. */
+
+extern GTY(()) rtx rs6000_compare_op0;
+extern GTY(()) rtx rs6000_compare_op1;
+extern int rs6000_compare_fp_p;
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+#define ASM_COMMENT_START " #"
+
+/* Flag to say the TOC is initialized */
+extern int toc_initialized;
+
+/* Macro to output a special constant pool entry. Go to WIN if we output
+ it. Otherwise, it is written the usual way.
+
+ On the RS/6000, toc entries are handled this way. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE, X, MODE, ALIGN, LABELNO, WIN) \
+{ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (X, MODE)) \
+ { \
+ output_toc (FILE, X, LABELNO, MODE); \
+ goto WIN; \
+ } \
+}
+
+#ifdef HAVE_GAS_WEAK
+#define RS6000_WEAK 1
+#else
+#define RS6000_WEAK 0
+#endif
+
+#if RS6000_WEAK
+/* Used in lieu of ASM_WEAKEN_LABEL. */
+#define ASM_WEAKEN_DECL(FILE, DECL, NAME, VAL) \
+ do \
+ { \
+ fputs ("\t.weak\t", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ if (TARGET_XCOFF) \
+ fputs ("[DS]", (FILE)); \
+ fputs ("\n\t.weak\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ } \
+ fputc ('\n', (FILE)); \
+ if (VAL) \
+ { \
+ ASM_OUTPUT_DEF ((FILE), (NAME), (VAL)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ fputs ("\t.set\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (",.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VAL)); \
+ fputc ('\n', (FILE)); \
+ } \
+ } \
+ } \
+ while (0)
+#endif
+
+#if HAVE_GAS_WEAKREF
+#define ASM_OUTPUT_WEAKREF(FILE, DECL, NAME, VALUE) \
+ do \
+ { \
+ fputs ("\t.weakref\t", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (", ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VALUE)); \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ fputs ("\n\t.weakref\t.", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fputs (", .", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (VALUE)); \
+ } \
+ fputc ('\n', (FILE)); \
+ } while (0)
+#endif
+
+/* This implements the `alias' attribute. */
+#undef ASM_OUTPUT_DEF_FROM_DECLS
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \
+ do \
+ { \
+ const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ const char *name = IDENTIFIER_POINTER (TARGET); \
+ if (TREE_CODE (DECL) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS) \
+ { \
+ if (TREE_PUBLIC (DECL)) \
+ { \
+ if (!RS6000_WEAK || !DECL_WEAK (DECL)) \
+ { \
+ fputs ("\t.globl\t.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ else if (TARGET_XCOFF) \
+ { \
+ fputs ("\t.lglobl\t.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ fputs ("\t.set\t.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, alias); \
+ fputs (",.", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, name); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_DEF (FILE, alias, name); \
+ } \
+ while (0)
+
+#define TARGET_ASM_FILE_START rs6000_file_start
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
+
+#define REGISTER_NAMES \
+{ \
+ &rs6000_reg_names[ 0][0], /* r0 */ \
+ &rs6000_reg_names[ 1][0], /* r1 */ \
+ &rs6000_reg_names[ 2][0], /* r2 */ \
+ &rs6000_reg_names[ 3][0], /* r3 */ \
+ &rs6000_reg_names[ 4][0], /* r4 */ \
+ &rs6000_reg_names[ 5][0], /* r5 */ \
+ &rs6000_reg_names[ 6][0], /* r6 */ \
+ &rs6000_reg_names[ 7][0], /* r7 */ \
+ &rs6000_reg_names[ 8][0], /* r8 */ \
+ &rs6000_reg_names[ 9][0], /* r9 */ \
+ &rs6000_reg_names[10][0], /* r10 */ \
+ &rs6000_reg_names[11][0], /* r11 */ \
+ &rs6000_reg_names[12][0], /* r12 */ \
+ &rs6000_reg_names[13][0], /* r13 */ \
+ &rs6000_reg_names[14][0], /* r14 */ \
+ &rs6000_reg_names[15][0], /* r15 */ \
+ &rs6000_reg_names[16][0], /* r16 */ \
+ &rs6000_reg_names[17][0], /* r17 */ \
+ &rs6000_reg_names[18][0], /* r18 */ \
+ &rs6000_reg_names[19][0], /* r19 */ \
+ &rs6000_reg_names[20][0], /* r20 */ \
+ &rs6000_reg_names[21][0], /* r21 */ \
+ &rs6000_reg_names[22][0], /* r22 */ \
+ &rs6000_reg_names[23][0], /* r23 */ \
+ &rs6000_reg_names[24][0], /* r24 */ \
+ &rs6000_reg_names[25][0], /* r25 */ \
+ &rs6000_reg_names[26][0], /* r26 */ \
+ &rs6000_reg_names[27][0], /* r27 */ \
+ &rs6000_reg_names[28][0], /* r28 */ \
+ &rs6000_reg_names[29][0], /* r29 */ \
+ &rs6000_reg_names[30][0], /* r30 */ \
+ &rs6000_reg_names[31][0], /* r31 */ \
+ \
+ &rs6000_reg_names[32][0], /* fr0 */ \
+ &rs6000_reg_names[33][0], /* fr1 */ \
+ &rs6000_reg_names[34][0], /* fr2 */ \
+ &rs6000_reg_names[35][0], /* fr3 */ \
+ &rs6000_reg_names[36][0], /* fr4 */ \
+ &rs6000_reg_names[37][0], /* fr5 */ \
+ &rs6000_reg_names[38][0], /* fr6 */ \
+ &rs6000_reg_names[39][0], /* fr7 */ \
+ &rs6000_reg_names[40][0], /* fr8 */ \
+ &rs6000_reg_names[41][0], /* fr9 */ \
+ &rs6000_reg_names[42][0], /* fr10 */ \
+ &rs6000_reg_names[43][0], /* fr11 */ \
+ &rs6000_reg_names[44][0], /* fr12 */ \
+ &rs6000_reg_names[45][0], /* fr13 */ \
+ &rs6000_reg_names[46][0], /* fr14 */ \
+ &rs6000_reg_names[47][0], /* fr15 */ \
+ &rs6000_reg_names[48][0], /* fr16 */ \
+ &rs6000_reg_names[49][0], /* fr17 */ \
+ &rs6000_reg_names[50][0], /* fr18 */ \
+ &rs6000_reg_names[51][0], /* fr19 */ \
+ &rs6000_reg_names[52][0], /* fr20 */ \
+ &rs6000_reg_names[53][0], /* fr21 */ \
+ &rs6000_reg_names[54][0], /* fr22 */ \
+ &rs6000_reg_names[55][0], /* fr23 */ \
+ &rs6000_reg_names[56][0], /* fr24 */ \
+ &rs6000_reg_names[57][0], /* fr25 */ \
+ &rs6000_reg_names[58][0], /* fr26 */ \
+ &rs6000_reg_names[59][0], /* fr27 */ \
+ &rs6000_reg_names[60][0], /* fr28 */ \
+ &rs6000_reg_names[61][0], /* fr29 */ \
+ &rs6000_reg_names[62][0], /* fr30 */ \
+ &rs6000_reg_names[63][0], /* fr31 */ \
+ \
+ &rs6000_reg_names[64][0], /* mq */ \
+ &rs6000_reg_names[65][0], /* lr */ \
+ &rs6000_reg_names[66][0], /* ctr */ \
+ &rs6000_reg_names[67][0], /* ap */ \
+ \
+ &rs6000_reg_names[68][0], /* cr0 */ \
+ &rs6000_reg_names[69][0], /* cr1 */ \
+ &rs6000_reg_names[70][0], /* cr2 */ \
+ &rs6000_reg_names[71][0], /* cr3 */ \
+ &rs6000_reg_names[72][0], /* cr4 */ \
+ &rs6000_reg_names[73][0], /* cr5 */ \
+ &rs6000_reg_names[74][0], /* cr6 */ \
+ &rs6000_reg_names[75][0], /* cr7 */ \
+ \
+ &rs6000_reg_names[76][0], /* xer */ \
+ \
+ &rs6000_reg_names[77][0], /* v0 */ \
+ &rs6000_reg_names[78][0], /* v1 */ \
+ &rs6000_reg_names[79][0], /* v2 */ \
+ &rs6000_reg_names[80][0], /* v3 */ \
+ &rs6000_reg_names[81][0], /* v4 */ \
+ &rs6000_reg_names[82][0], /* v5 */ \
+ &rs6000_reg_names[83][0], /* v6 */ \
+ &rs6000_reg_names[84][0], /* v7 */ \
+ &rs6000_reg_names[85][0], /* v8 */ \
+ &rs6000_reg_names[86][0], /* v9 */ \
+ &rs6000_reg_names[87][0], /* v10 */ \
+ &rs6000_reg_names[88][0], /* v11 */ \
+ &rs6000_reg_names[89][0], /* v12 */ \
+ &rs6000_reg_names[90][0], /* v13 */ \
+ &rs6000_reg_names[91][0], /* v14 */ \
+ &rs6000_reg_names[92][0], /* v15 */ \
+ &rs6000_reg_names[93][0], /* v16 */ \
+ &rs6000_reg_names[94][0], /* v17 */ \
+ &rs6000_reg_names[95][0], /* v18 */ \
+ &rs6000_reg_names[96][0], /* v19 */ \
+ &rs6000_reg_names[97][0], /* v20 */ \
+ &rs6000_reg_names[98][0], /* v21 */ \
+ &rs6000_reg_names[99][0], /* v22 */ \
+ &rs6000_reg_names[100][0], /* v23 */ \
+ &rs6000_reg_names[101][0], /* v24 */ \
+ &rs6000_reg_names[102][0], /* v25 */ \
+ &rs6000_reg_names[103][0], /* v26 */ \
+ &rs6000_reg_names[104][0], /* v27 */ \
+ &rs6000_reg_names[105][0], /* v28 */ \
+ &rs6000_reg_names[106][0], /* v29 */ \
+ &rs6000_reg_names[107][0], /* v30 */ \
+ &rs6000_reg_names[108][0], /* v31 */ \
+ &rs6000_reg_names[109][0], /* vrsave */ \
+ &rs6000_reg_names[110][0], /* vscr */ \
+ &rs6000_reg_names[111][0], /* spe_acc */ \
+ &rs6000_reg_names[112][0], /* spefscr */ \
+ &rs6000_reg_names[113][0], /* sfp */ \
+}
+
+/* Table of additional register names to use in user input. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+ {{"r0", 0}, {"r1", 1}, {"r2", 2}, {"r3", 3}, \
+ {"r4", 4}, {"r5", 5}, {"r6", 6}, {"r7", 7}, \
+ {"r8", 8}, {"r9", 9}, {"r10", 10}, {"r11", 11}, \
+ {"r12", 12}, {"r13", 13}, {"r14", 14}, {"r15", 15}, \
+ {"r16", 16}, {"r17", 17}, {"r18", 18}, {"r19", 19}, \
+ {"r20", 20}, {"r21", 21}, {"r22", 22}, {"r23", 23}, \
+ {"r24", 24}, {"r25", 25}, {"r26", 26}, {"r27", 27}, \
+ {"r28", 28}, {"r29", 29}, {"r30", 30}, {"r31", 31}, \
+ {"fr0", 32}, {"fr1", 33}, {"fr2", 34}, {"fr3", 35}, \
+ {"fr4", 36}, {"fr5", 37}, {"fr6", 38}, {"fr7", 39}, \
+ {"fr8", 40}, {"fr9", 41}, {"fr10", 42}, {"fr11", 43}, \
+ {"fr12", 44}, {"fr13", 45}, {"fr14", 46}, {"fr15", 47}, \
+ {"fr16", 48}, {"fr17", 49}, {"fr18", 50}, {"fr19", 51}, \
+ {"fr20", 52}, {"fr21", 53}, {"fr22", 54}, {"fr23", 55}, \
+ {"fr24", 56}, {"fr25", 57}, {"fr26", 58}, {"fr27", 59}, \
+ {"fr28", 60}, {"fr29", 61}, {"fr30", 62}, {"fr31", 63}, \
+ {"v0", 77}, {"v1", 78}, {"v2", 79}, {"v3", 80}, \
+ {"v4", 81}, {"v5", 82}, {"v6", 83}, {"v7", 84}, \
+ {"v8", 85}, {"v9", 86}, {"v10", 87}, {"v11", 88}, \
+ {"v12", 89}, {"v13", 90}, {"v14", 91}, {"v15", 92}, \
+ {"v16", 93}, {"v17", 94}, {"v18", 95}, {"v19", 96}, \
+ {"v20", 97}, {"v21", 98}, {"v22", 99}, {"v23", 100}, \
+ {"v24", 101},{"v25", 102},{"v26", 103},{"v27", 104}, \
+ {"v28", 105},{"v29", 106},{"v30", 107},{"v31", 108}, \
+ {"vrsave", 109}, {"vscr", 110}, \
+ {"spe_acc", 111}, {"spefscr", 112}, \
+ /* no additional names for: mq, lr, ctr, ap */ \
+ {"cr0", 68}, {"cr1", 69}, {"cr2", 70}, {"cr3", 71}, \
+ {"cr4", 72}, {"cr5", 73}, {"cr6", 74}, {"cr7", 75}, \
+ {"cc", 68}, {"sp", 1}, {"toc", 2} }
+
+/* Text to write out after a CALL that may be replaced by glue code by
+ the loader. This depends on the AIX version. */
+#define RS6000_CALL_GLUE "cror 31,31,31"
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ do { char buf[100]; \
+ fputs ("\t.long ", FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", VALUE); \
+ assemble_name (FILE, buf); \
+ putc ('-', FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", REL); \
+ assemble_name (FILE, buf); \
+ putc ('\n', FILE); \
+ } while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+/* Pick up the return address upon entry to a procedure. Used for
+   dwarf2 unwind information. This also enables the table-driven
+ mechanism. */
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNO)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNO)
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 3 : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 10)
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Define which CODE values are valid. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '.' || (CODE) == '&')
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
+
+#define OUTPUT_ADDR_CONST_EXTRA(STREAM, X, FAIL) \
+ do \
+ if (!rs6000_output_addr_const_extra (STREAM, X)) \
+ goto FAIL; \
+ while (0)
+
+/* Uncomment to disable the corresponding default options.  */
+/* #define MACHINE_no_sched_interblock */
+/* #define MACHINE_no_sched_speculative */
+/* #define MACHINE_no_sched_speculative_load */
+
+/* General flags. */
+extern int flag_pic;
+extern int optimize;
+extern int flag_expensive_optimizations;
+extern int frame_pointer_needed;
+
+enum rs6000_builtins
+{
+ /* AltiVec builtins. */
+ ALTIVEC_BUILTIN_ST_INTERNAL_4si,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4si,
+ ALTIVEC_BUILTIN_ST_INTERNAL_8hi,
+ ALTIVEC_BUILTIN_LD_INTERNAL_8hi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_16qi,
+ ALTIVEC_BUILTIN_LD_INTERNAL_16qi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4sf,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4sf,
+ ALTIVEC_BUILTIN_VADDUBM,
+ ALTIVEC_BUILTIN_VADDUHM,
+ ALTIVEC_BUILTIN_VADDUWM,
+ ALTIVEC_BUILTIN_VADDFP,
+ ALTIVEC_BUILTIN_VADDCUW,
+ ALTIVEC_BUILTIN_VADDUBS,
+ ALTIVEC_BUILTIN_VADDSBS,
+ ALTIVEC_BUILTIN_VADDUHS,
+ ALTIVEC_BUILTIN_VADDSHS,
+ ALTIVEC_BUILTIN_VADDUWS,
+ ALTIVEC_BUILTIN_VADDSWS,
+ ALTIVEC_BUILTIN_VAND,
+ ALTIVEC_BUILTIN_VANDC,
+ ALTIVEC_BUILTIN_VAVGUB,
+ ALTIVEC_BUILTIN_VAVGSB,
+ ALTIVEC_BUILTIN_VAVGUH,
+ ALTIVEC_BUILTIN_VAVGSH,
+ ALTIVEC_BUILTIN_VAVGUW,
+ ALTIVEC_BUILTIN_VAVGSW,
+ ALTIVEC_BUILTIN_VCFUX,
+ ALTIVEC_BUILTIN_VCFSX,
+ ALTIVEC_BUILTIN_VCTSXS,
+ ALTIVEC_BUILTIN_VCTUXS,
+ ALTIVEC_BUILTIN_VCMPBFP,
+ ALTIVEC_BUILTIN_VCMPEQUB,
+ ALTIVEC_BUILTIN_VCMPEQUH,
+ ALTIVEC_BUILTIN_VCMPEQUW,
+ ALTIVEC_BUILTIN_VCMPEQFP,
+ ALTIVEC_BUILTIN_VCMPGEFP,
+ ALTIVEC_BUILTIN_VCMPGTUB,
+ ALTIVEC_BUILTIN_VCMPGTSB,
+ ALTIVEC_BUILTIN_VCMPGTUH,
+ ALTIVEC_BUILTIN_VCMPGTSH,
+ ALTIVEC_BUILTIN_VCMPGTUW,
+ ALTIVEC_BUILTIN_VCMPGTSW,
+ ALTIVEC_BUILTIN_VCMPGTFP,
+ ALTIVEC_BUILTIN_VEXPTEFP,
+ ALTIVEC_BUILTIN_VLOGEFP,
+ ALTIVEC_BUILTIN_VMADDFP,
+ ALTIVEC_BUILTIN_VMAXUB,
+ ALTIVEC_BUILTIN_VMAXSB,
+ ALTIVEC_BUILTIN_VMAXUH,
+ ALTIVEC_BUILTIN_VMAXSH,
+ ALTIVEC_BUILTIN_VMAXUW,
+ ALTIVEC_BUILTIN_VMAXSW,
+ ALTIVEC_BUILTIN_VMAXFP,
+ ALTIVEC_BUILTIN_VMHADDSHS,
+ ALTIVEC_BUILTIN_VMHRADDSHS,
+ ALTIVEC_BUILTIN_VMLADDUHM,
+ ALTIVEC_BUILTIN_VMRGHB,
+ ALTIVEC_BUILTIN_VMRGHH,
+ ALTIVEC_BUILTIN_VMRGHW,
+ ALTIVEC_BUILTIN_VMRGLB,
+ ALTIVEC_BUILTIN_VMRGLH,
+ ALTIVEC_BUILTIN_VMRGLW,
+ ALTIVEC_BUILTIN_VMSUMUBM,
+ ALTIVEC_BUILTIN_VMSUMMBM,
+ ALTIVEC_BUILTIN_VMSUMUHM,
+ ALTIVEC_BUILTIN_VMSUMSHM,
+ ALTIVEC_BUILTIN_VMSUMUHS,
+ ALTIVEC_BUILTIN_VMSUMSHS,
+ ALTIVEC_BUILTIN_VMINUB,
+ ALTIVEC_BUILTIN_VMINSB,
+ ALTIVEC_BUILTIN_VMINUH,
+ ALTIVEC_BUILTIN_VMINSH,
+ ALTIVEC_BUILTIN_VMINUW,
+ ALTIVEC_BUILTIN_VMINSW,
+ ALTIVEC_BUILTIN_VMINFP,
+ ALTIVEC_BUILTIN_VMULEUB,
+ ALTIVEC_BUILTIN_VMULESB,
+ ALTIVEC_BUILTIN_VMULEUH,
+ ALTIVEC_BUILTIN_VMULESH,
+ ALTIVEC_BUILTIN_VMULOUB,
+ ALTIVEC_BUILTIN_VMULOSB,
+ ALTIVEC_BUILTIN_VMULOUH,
+ ALTIVEC_BUILTIN_VMULOSH,
+ ALTIVEC_BUILTIN_VNMSUBFP,
+ ALTIVEC_BUILTIN_VNOR,
+ ALTIVEC_BUILTIN_VOR,
+ ALTIVEC_BUILTIN_VSEL_4SI,
+ ALTIVEC_BUILTIN_VSEL_4SF,
+ ALTIVEC_BUILTIN_VSEL_8HI,
+ ALTIVEC_BUILTIN_VSEL_16QI,
+ ALTIVEC_BUILTIN_VPERM_4SI,
+ ALTIVEC_BUILTIN_VPERM_4SF,
+ ALTIVEC_BUILTIN_VPERM_8HI,
+ ALTIVEC_BUILTIN_VPERM_16QI,
+ ALTIVEC_BUILTIN_VPKUHUM,
+ ALTIVEC_BUILTIN_VPKUWUM,
+ ALTIVEC_BUILTIN_VPKPX,
+ ALTIVEC_BUILTIN_VPKUHSS,
+ ALTIVEC_BUILTIN_VPKSHSS,
+ ALTIVEC_BUILTIN_VPKUWSS,
+ ALTIVEC_BUILTIN_VPKSWSS,
+ ALTIVEC_BUILTIN_VPKUHUS,
+ ALTIVEC_BUILTIN_VPKSHUS,
+ ALTIVEC_BUILTIN_VPKUWUS,
+ ALTIVEC_BUILTIN_VPKSWUS,
+ ALTIVEC_BUILTIN_VREFP,
+ ALTIVEC_BUILTIN_VRFIM,
+ ALTIVEC_BUILTIN_VRFIN,
+ ALTIVEC_BUILTIN_VRFIP,
+ ALTIVEC_BUILTIN_VRFIZ,
+ ALTIVEC_BUILTIN_VRLB,
+ ALTIVEC_BUILTIN_VRLH,
+ ALTIVEC_BUILTIN_VRLW,
+ ALTIVEC_BUILTIN_VRSQRTEFP,
+ ALTIVEC_BUILTIN_VSLB,
+ ALTIVEC_BUILTIN_VSLH,
+ ALTIVEC_BUILTIN_VSLW,
+ ALTIVEC_BUILTIN_VSL,
+ ALTIVEC_BUILTIN_VSLO,
+ ALTIVEC_BUILTIN_VSPLTB,
+ ALTIVEC_BUILTIN_VSPLTH,
+ ALTIVEC_BUILTIN_VSPLTW,
+ ALTIVEC_BUILTIN_VSPLTISB,
+ ALTIVEC_BUILTIN_VSPLTISH,
+ ALTIVEC_BUILTIN_VSPLTISW,
+ ALTIVEC_BUILTIN_VSRB,
+ ALTIVEC_BUILTIN_VSRH,
+ ALTIVEC_BUILTIN_VSRW,
+ ALTIVEC_BUILTIN_VSRAB,
+ ALTIVEC_BUILTIN_VSRAH,
+ ALTIVEC_BUILTIN_VSRAW,
+ ALTIVEC_BUILTIN_VSR,
+ ALTIVEC_BUILTIN_VSRO,
+ ALTIVEC_BUILTIN_VSUBUBM,
+ ALTIVEC_BUILTIN_VSUBUHM,
+ ALTIVEC_BUILTIN_VSUBUWM,
+ ALTIVEC_BUILTIN_VSUBFP,
+ ALTIVEC_BUILTIN_VSUBCUW,
+ ALTIVEC_BUILTIN_VSUBUBS,
+ ALTIVEC_BUILTIN_VSUBSBS,
+ ALTIVEC_BUILTIN_VSUBUHS,
+ ALTIVEC_BUILTIN_VSUBSHS,
+ ALTIVEC_BUILTIN_VSUBUWS,
+ ALTIVEC_BUILTIN_VSUBSWS,
+ ALTIVEC_BUILTIN_VSUM4UBS,
+ ALTIVEC_BUILTIN_VSUM4SBS,
+ ALTIVEC_BUILTIN_VSUM4SHS,
+ ALTIVEC_BUILTIN_VSUM2SWS,
+ ALTIVEC_BUILTIN_VSUMSWS,
+ ALTIVEC_BUILTIN_VXOR,
+ ALTIVEC_BUILTIN_VSLDOI_16QI,
+ ALTIVEC_BUILTIN_VSLDOI_8HI,
+ ALTIVEC_BUILTIN_VSLDOI_4SI,
+ ALTIVEC_BUILTIN_VSLDOI_4SF,
+ ALTIVEC_BUILTIN_VUPKHSB,
+ ALTIVEC_BUILTIN_VUPKHPX,
+ ALTIVEC_BUILTIN_VUPKHSH,
+ ALTIVEC_BUILTIN_VUPKLSB,
+ ALTIVEC_BUILTIN_VUPKLPX,
+ ALTIVEC_BUILTIN_VUPKLSH,
+ ALTIVEC_BUILTIN_MTVSCR,
+ ALTIVEC_BUILTIN_MFVSCR,
+ ALTIVEC_BUILTIN_DSSALL,
+ ALTIVEC_BUILTIN_DSS,
+ ALTIVEC_BUILTIN_LVSL,
+ ALTIVEC_BUILTIN_LVSR,
+ ALTIVEC_BUILTIN_DSTT,
+ ALTIVEC_BUILTIN_DSTST,
+ ALTIVEC_BUILTIN_DSTSTT,
+ ALTIVEC_BUILTIN_DST,
+ ALTIVEC_BUILTIN_LVEBX,
+ ALTIVEC_BUILTIN_LVEHX,
+ ALTIVEC_BUILTIN_LVEWX,
+ ALTIVEC_BUILTIN_LVXL,
+ ALTIVEC_BUILTIN_LVX,
+ ALTIVEC_BUILTIN_STVX,
+ ALTIVEC_BUILTIN_LVLX,
+ ALTIVEC_BUILTIN_LVLXL,
+ ALTIVEC_BUILTIN_LVRX,
+ ALTIVEC_BUILTIN_LVRXL,
+ ALTIVEC_BUILTIN_STVEBX,
+ ALTIVEC_BUILTIN_STVEHX,
+ ALTIVEC_BUILTIN_STVEWX,
+ ALTIVEC_BUILTIN_STVXL,
+ ALTIVEC_BUILTIN_STVLX,
+ ALTIVEC_BUILTIN_STVLXL,
+ ALTIVEC_BUILTIN_STVRX,
+ ALTIVEC_BUILTIN_STVRXL,
+ ALTIVEC_BUILTIN_VCMPBFP_P,
+ ALTIVEC_BUILTIN_VCMPEQFP_P,
+ ALTIVEC_BUILTIN_VCMPEQUB_P,
+ ALTIVEC_BUILTIN_VCMPEQUH_P,
+ ALTIVEC_BUILTIN_VCMPEQUW_P,
+ ALTIVEC_BUILTIN_VCMPGEFP_P,
+ ALTIVEC_BUILTIN_VCMPGTFP_P,
+ ALTIVEC_BUILTIN_VCMPGTSB_P,
+ ALTIVEC_BUILTIN_VCMPGTSH_P,
+ ALTIVEC_BUILTIN_VCMPGTSW_P,
+ ALTIVEC_BUILTIN_VCMPGTUB_P,
+ ALTIVEC_BUILTIN_VCMPGTUH_P,
+ ALTIVEC_BUILTIN_VCMPGTUW_P,
+ ALTIVEC_BUILTIN_ABSS_V4SI,
+ ALTIVEC_BUILTIN_ABSS_V8HI,
+ ALTIVEC_BUILTIN_ABSS_V16QI,
+ ALTIVEC_BUILTIN_ABS_V4SI,
+ ALTIVEC_BUILTIN_ABS_V4SF,
+ ALTIVEC_BUILTIN_ABS_V8HI,
+ ALTIVEC_BUILTIN_ABS_V16QI,
+ ALTIVEC_BUILTIN_MASK_FOR_LOAD,
+ ALTIVEC_BUILTIN_MASK_FOR_STORE,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SI,
+ ALTIVEC_BUILTIN_VEC_INIT_V8HI,
+ ALTIVEC_BUILTIN_VEC_INIT_V16QI,
+ ALTIVEC_BUILTIN_VEC_INIT_V4SF,
+ ALTIVEC_BUILTIN_VEC_SET_V4SI,
+ ALTIVEC_BUILTIN_VEC_SET_V8HI,
+ ALTIVEC_BUILTIN_VEC_SET_V16QI,
+ ALTIVEC_BUILTIN_VEC_SET_V4SF,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SI,
+ ALTIVEC_BUILTIN_VEC_EXT_V8HI,
+ ALTIVEC_BUILTIN_VEC_EXT_V16QI,
+ ALTIVEC_BUILTIN_VEC_EXT_V4SF,
+
+ /* Altivec overloaded builtins. */
+ ALTIVEC_BUILTIN_VCMPEQ_P,
+ ALTIVEC_BUILTIN_OVERLOADED_FIRST = ALTIVEC_BUILTIN_VCMPEQ_P,
+ ALTIVEC_BUILTIN_VCMPGT_P,
+ ALTIVEC_BUILTIN_VCMPGE_P,
+ ALTIVEC_BUILTIN_VEC_ABS,
+ ALTIVEC_BUILTIN_VEC_ABSS,
+ ALTIVEC_BUILTIN_VEC_ADD,
+ ALTIVEC_BUILTIN_VEC_ADDC,
+ ALTIVEC_BUILTIN_VEC_ADDS,
+ ALTIVEC_BUILTIN_VEC_AND,
+ ALTIVEC_BUILTIN_VEC_ANDC,
+ ALTIVEC_BUILTIN_VEC_AVG,
+ ALTIVEC_BUILTIN_VEC_EXTRACT,
+ ALTIVEC_BUILTIN_VEC_CEIL,
+ ALTIVEC_BUILTIN_VEC_CMPB,
+ ALTIVEC_BUILTIN_VEC_CMPEQ,
+ ALTIVEC_BUILTIN_VEC_CMPEQUB,
+ ALTIVEC_BUILTIN_VEC_CMPEQUH,
+ ALTIVEC_BUILTIN_VEC_CMPEQUW,
+ ALTIVEC_BUILTIN_VEC_CMPGE,
+ ALTIVEC_BUILTIN_VEC_CMPGT,
+ ALTIVEC_BUILTIN_VEC_CMPLE,
+ ALTIVEC_BUILTIN_VEC_CMPLT,
+ ALTIVEC_BUILTIN_VEC_CTF,
+ ALTIVEC_BUILTIN_VEC_CTS,
+ ALTIVEC_BUILTIN_VEC_CTU,
+ ALTIVEC_BUILTIN_VEC_DST,
+ ALTIVEC_BUILTIN_VEC_DSTST,
+ ALTIVEC_BUILTIN_VEC_DSTSTT,
+ ALTIVEC_BUILTIN_VEC_DSTT,
+ ALTIVEC_BUILTIN_VEC_EXPTE,
+ ALTIVEC_BUILTIN_VEC_FLOOR,
+ ALTIVEC_BUILTIN_VEC_LD,
+ ALTIVEC_BUILTIN_VEC_LDE,
+ ALTIVEC_BUILTIN_VEC_LDL,
+ ALTIVEC_BUILTIN_VEC_LOGE,
+ ALTIVEC_BUILTIN_VEC_LVEBX,
+ ALTIVEC_BUILTIN_VEC_LVEHX,
+ ALTIVEC_BUILTIN_VEC_LVEWX,
+ ALTIVEC_BUILTIN_VEC_LVLX,
+ ALTIVEC_BUILTIN_VEC_LVLXL,
+ ALTIVEC_BUILTIN_VEC_LVRX,
+ ALTIVEC_BUILTIN_VEC_LVRXL,
+ ALTIVEC_BUILTIN_VEC_LVSL,
+ ALTIVEC_BUILTIN_VEC_LVSR,
+ ALTIVEC_BUILTIN_VEC_MADD,
+ ALTIVEC_BUILTIN_VEC_MADDS,
+ ALTIVEC_BUILTIN_VEC_MAX,
+ ALTIVEC_BUILTIN_VEC_MERGEH,
+ ALTIVEC_BUILTIN_VEC_MERGEL,
+ ALTIVEC_BUILTIN_VEC_MIN,
+ ALTIVEC_BUILTIN_VEC_MLADD,
+ ALTIVEC_BUILTIN_VEC_MPERM,
+ ALTIVEC_BUILTIN_VEC_MRADDS,
+ ALTIVEC_BUILTIN_VEC_MRGHB,
+ ALTIVEC_BUILTIN_VEC_MRGHH,
+ ALTIVEC_BUILTIN_VEC_MRGHW,
+ ALTIVEC_BUILTIN_VEC_MRGLB,
+ ALTIVEC_BUILTIN_VEC_MRGLH,
+ ALTIVEC_BUILTIN_VEC_MRGLW,
+ ALTIVEC_BUILTIN_VEC_MSUM,
+ ALTIVEC_BUILTIN_VEC_MSUMS,
+ ALTIVEC_BUILTIN_VEC_MTVSCR,
+ ALTIVEC_BUILTIN_VEC_MULE,
+ ALTIVEC_BUILTIN_VEC_MULO,
+ ALTIVEC_BUILTIN_VEC_NMSUB,
+ ALTIVEC_BUILTIN_VEC_NOR,
+ ALTIVEC_BUILTIN_VEC_OR,
+ ALTIVEC_BUILTIN_VEC_PACK,
+ ALTIVEC_BUILTIN_VEC_PACKPX,
+ ALTIVEC_BUILTIN_VEC_PACKS,
+ ALTIVEC_BUILTIN_VEC_PACKSU,
+ ALTIVEC_BUILTIN_VEC_PERM,
+ ALTIVEC_BUILTIN_VEC_RE,
+ ALTIVEC_BUILTIN_VEC_RL,
+ ALTIVEC_BUILTIN_VEC_ROUND,
+ ALTIVEC_BUILTIN_VEC_RSQRTE,
+ ALTIVEC_BUILTIN_VEC_SEL,
+ ALTIVEC_BUILTIN_VEC_SL,
+ ALTIVEC_BUILTIN_VEC_SLD,
+ ALTIVEC_BUILTIN_VEC_SLL,
+ ALTIVEC_BUILTIN_VEC_SLO,
+ ALTIVEC_BUILTIN_VEC_SPLAT,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S16,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S32,
+ ALTIVEC_BUILTIN_VEC_SPLAT_S8,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U16,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U32,
+ ALTIVEC_BUILTIN_VEC_SPLAT_U8,
+ ALTIVEC_BUILTIN_VEC_SPLTB,
+ ALTIVEC_BUILTIN_VEC_SPLTH,
+ ALTIVEC_BUILTIN_VEC_SPLTW,
+ ALTIVEC_BUILTIN_VEC_SR,
+ ALTIVEC_BUILTIN_VEC_SRA,
+ ALTIVEC_BUILTIN_VEC_SRL,
+ ALTIVEC_BUILTIN_VEC_SRO,
+ ALTIVEC_BUILTIN_VEC_ST,
+ ALTIVEC_BUILTIN_VEC_STE,
+ ALTIVEC_BUILTIN_VEC_STL,
+ ALTIVEC_BUILTIN_VEC_STVEBX,
+ ALTIVEC_BUILTIN_VEC_STVEHX,
+ ALTIVEC_BUILTIN_VEC_STVEWX,
+ ALTIVEC_BUILTIN_VEC_STVLX,
+ ALTIVEC_BUILTIN_VEC_STVLXL,
+ ALTIVEC_BUILTIN_VEC_STVRX,
+ ALTIVEC_BUILTIN_VEC_STVRXL,
+ ALTIVEC_BUILTIN_VEC_SUB,
+ ALTIVEC_BUILTIN_VEC_SUBC,
+ ALTIVEC_BUILTIN_VEC_SUBS,
+ ALTIVEC_BUILTIN_VEC_SUM2S,
+ ALTIVEC_BUILTIN_VEC_SUM4S,
+ ALTIVEC_BUILTIN_VEC_SUMS,
+ ALTIVEC_BUILTIN_VEC_TRUNC,
+ ALTIVEC_BUILTIN_VEC_UNPACKH,
+ ALTIVEC_BUILTIN_VEC_UNPACKL,
+ ALTIVEC_BUILTIN_VEC_VADDFP,
+ ALTIVEC_BUILTIN_VEC_VADDSBS,
+ ALTIVEC_BUILTIN_VEC_VADDSHS,
+ ALTIVEC_BUILTIN_VEC_VADDSWS,
+ ALTIVEC_BUILTIN_VEC_VADDUBM,
+ ALTIVEC_BUILTIN_VEC_VADDUBS,
+ ALTIVEC_BUILTIN_VEC_VADDUHM,
+ ALTIVEC_BUILTIN_VEC_VADDUHS,
+ ALTIVEC_BUILTIN_VEC_VADDUWM,
+ ALTIVEC_BUILTIN_VEC_VADDUWS,
+ ALTIVEC_BUILTIN_VEC_VAVGSB,
+ ALTIVEC_BUILTIN_VEC_VAVGSH,
+ ALTIVEC_BUILTIN_VEC_VAVGSW,
+ ALTIVEC_BUILTIN_VEC_VAVGUB,
+ ALTIVEC_BUILTIN_VEC_VAVGUH,
+ ALTIVEC_BUILTIN_VEC_VAVGUW,
+ ALTIVEC_BUILTIN_VEC_VCFSX,
+ ALTIVEC_BUILTIN_VEC_VCFUX,
+ ALTIVEC_BUILTIN_VEC_VCMPEQFP,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUB,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUH,
+ ALTIVEC_BUILTIN_VEC_VCMPEQUW,
+ ALTIVEC_BUILTIN_VEC_VCMPGTFP,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSB,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSH,
+ ALTIVEC_BUILTIN_VEC_VCMPGTSW,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUB,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUH,
+ ALTIVEC_BUILTIN_VEC_VCMPGTUW,
+ ALTIVEC_BUILTIN_VEC_VMAXFP,
+ ALTIVEC_BUILTIN_VEC_VMAXSB,
+ ALTIVEC_BUILTIN_VEC_VMAXSH,
+ ALTIVEC_BUILTIN_VEC_VMAXSW,
+ ALTIVEC_BUILTIN_VEC_VMAXUB,
+ ALTIVEC_BUILTIN_VEC_VMAXUH,
+ ALTIVEC_BUILTIN_VEC_VMAXUW,
+ ALTIVEC_BUILTIN_VEC_VMINFP,
+ ALTIVEC_BUILTIN_VEC_VMINSB,
+ ALTIVEC_BUILTIN_VEC_VMINSH,
+ ALTIVEC_BUILTIN_VEC_VMINSW,
+ ALTIVEC_BUILTIN_VEC_VMINUB,
+ ALTIVEC_BUILTIN_VEC_VMINUH,
+ ALTIVEC_BUILTIN_VEC_VMINUW,
+ ALTIVEC_BUILTIN_VEC_VMRGHB,
+ ALTIVEC_BUILTIN_VEC_VMRGHH,
+ ALTIVEC_BUILTIN_VEC_VMRGHW,
+ ALTIVEC_BUILTIN_VEC_VMRGLB,
+ ALTIVEC_BUILTIN_VEC_VMRGLH,
+ ALTIVEC_BUILTIN_VEC_VMRGLW,
+ ALTIVEC_BUILTIN_VEC_VMSUMMBM,
+ ALTIVEC_BUILTIN_VEC_VMSUMSHM,
+ ALTIVEC_BUILTIN_VEC_VMSUMSHS,
+ ALTIVEC_BUILTIN_VEC_VMSUMUBM,
+ ALTIVEC_BUILTIN_VEC_VMSUMUHM,
+ ALTIVEC_BUILTIN_VEC_VMSUMUHS,
+ ALTIVEC_BUILTIN_VEC_VMULESB,
+ ALTIVEC_BUILTIN_VEC_VMULESH,
+ ALTIVEC_BUILTIN_VEC_VMULEUB,
+ ALTIVEC_BUILTIN_VEC_VMULEUH,
+ ALTIVEC_BUILTIN_VEC_VMULOSB,
+ ALTIVEC_BUILTIN_VEC_VMULOSH,
+ ALTIVEC_BUILTIN_VEC_VMULOUB,
+ ALTIVEC_BUILTIN_VEC_VMULOUH,
+ ALTIVEC_BUILTIN_VEC_VPKSHSS,
+ ALTIVEC_BUILTIN_VEC_VPKSHUS,
+ ALTIVEC_BUILTIN_VEC_VPKSWSS,
+ ALTIVEC_BUILTIN_VEC_VPKSWUS,
+ ALTIVEC_BUILTIN_VEC_VPKUHUM,
+ ALTIVEC_BUILTIN_VEC_VPKUHUS,
+ ALTIVEC_BUILTIN_VEC_VPKUWUM,
+ ALTIVEC_BUILTIN_VEC_VPKUWUS,
+ ALTIVEC_BUILTIN_VEC_VRLB,
+ ALTIVEC_BUILTIN_VEC_VRLH,
+ ALTIVEC_BUILTIN_VEC_VRLW,
+ ALTIVEC_BUILTIN_VEC_VSLB,
+ ALTIVEC_BUILTIN_VEC_VSLH,
+ ALTIVEC_BUILTIN_VEC_VSLW,
+ ALTIVEC_BUILTIN_VEC_VSPLTB,
+ ALTIVEC_BUILTIN_VEC_VSPLTH,
+ ALTIVEC_BUILTIN_VEC_VSPLTW,
+ ALTIVEC_BUILTIN_VEC_VSRAB,
+ ALTIVEC_BUILTIN_VEC_VSRAH,
+ ALTIVEC_BUILTIN_VEC_VSRAW,
+ ALTIVEC_BUILTIN_VEC_VSRB,
+ ALTIVEC_BUILTIN_VEC_VSRH,
+ ALTIVEC_BUILTIN_VEC_VSRW,
+ ALTIVEC_BUILTIN_VEC_VSUBFP,
+ ALTIVEC_BUILTIN_VEC_VSUBSBS,
+ ALTIVEC_BUILTIN_VEC_VSUBSHS,
+ ALTIVEC_BUILTIN_VEC_VSUBSWS,
+ ALTIVEC_BUILTIN_VEC_VSUBUBM,
+ ALTIVEC_BUILTIN_VEC_VSUBUBS,
+ ALTIVEC_BUILTIN_VEC_VSUBUHM,
+ ALTIVEC_BUILTIN_VEC_VSUBUHS,
+ ALTIVEC_BUILTIN_VEC_VSUBUWM,
+ ALTIVEC_BUILTIN_VEC_VSUBUWS,
+ ALTIVEC_BUILTIN_VEC_VSUM4SBS,
+ ALTIVEC_BUILTIN_VEC_VSUM4SHS,
+ ALTIVEC_BUILTIN_VEC_VSUM4UBS,
+ ALTIVEC_BUILTIN_VEC_VUPKHPX,
+ ALTIVEC_BUILTIN_VEC_VUPKHSB,
+ ALTIVEC_BUILTIN_VEC_VUPKHSH,
+ ALTIVEC_BUILTIN_VEC_VUPKLPX,
+ ALTIVEC_BUILTIN_VEC_VUPKLSB,
+ ALTIVEC_BUILTIN_VEC_VUPKLSH,
+ ALTIVEC_BUILTIN_VEC_XOR,
+ ALTIVEC_BUILTIN_VEC_STEP,
+ ALTIVEC_BUILTIN_VEC_PROMOTE,
+ ALTIVEC_BUILTIN_VEC_INSERT,
+ ALTIVEC_BUILTIN_VEC_SPLATS,
+ ALTIVEC_BUILTIN_OVERLOADED_LAST = ALTIVEC_BUILTIN_VEC_SPLATS,
+
+ /* SPE builtins. */
+ SPE_BUILTIN_EVADDW,
+ SPE_BUILTIN_EVAND,
+ SPE_BUILTIN_EVANDC,
+ SPE_BUILTIN_EVDIVWS,
+ SPE_BUILTIN_EVDIVWU,
+ SPE_BUILTIN_EVEQV,
+ SPE_BUILTIN_EVFSADD,
+ SPE_BUILTIN_EVFSDIV,
+ SPE_BUILTIN_EVFSMUL,
+ SPE_BUILTIN_EVFSSUB,
+ SPE_BUILTIN_EVLDDX,
+ SPE_BUILTIN_EVLDHX,
+ SPE_BUILTIN_EVLDWX,
+ SPE_BUILTIN_EVLHHESPLATX,
+ SPE_BUILTIN_EVLHHOSSPLATX,
+ SPE_BUILTIN_EVLHHOUSPLATX,
+ SPE_BUILTIN_EVLWHEX,
+ SPE_BUILTIN_EVLWHOSX,
+ SPE_BUILTIN_EVLWHOUX,
+ SPE_BUILTIN_EVLWHSPLATX,
+ SPE_BUILTIN_EVLWWSPLATX,
+ SPE_BUILTIN_EVMERGEHI,
+ SPE_BUILTIN_EVMERGEHILO,
+ SPE_BUILTIN_EVMERGELO,
+ SPE_BUILTIN_EVMERGELOHI,
+ SPE_BUILTIN_EVMHEGSMFAA,
+ SPE_BUILTIN_EVMHEGSMFAN,
+ SPE_BUILTIN_EVMHEGSMIAA,
+ SPE_BUILTIN_EVMHEGSMIAN,
+ SPE_BUILTIN_EVMHEGUMIAA,
+ SPE_BUILTIN_EVMHEGUMIAN,
+ SPE_BUILTIN_EVMHESMF,
+ SPE_BUILTIN_EVMHESMFA,
+ SPE_BUILTIN_EVMHESMFAAW,
+ SPE_BUILTIN_EVMHESMFANW,
+ SPE_BUILTIN_EVMHESMI,
+ SPE_BUILTIN_EVMHESMIA,
+ SPE_BUILTIN_EVMHESMIAAW,
+ SPE_BUILTIN_EVMHESMIANW,
+ SPE_BUILTIN_EVMHESSF,
+ SPE_BUILTIN_EVMHESSFA,
+ SPE_BUILTIN_EVMHESSFAAW,
+ SPE_BUILTIN_EVMHESSFANW,
+ SPE_BUILTIN_EVMHESSIAAW,
+ SPE_BUILTIN_EVMHESSIANW,
+ SPE_BUILTIN_EVMHEUMI,
+ SPE_BUILTIN_EVMHEUMIA,
+ SPE_BUILTIN_EVMHEUMIAAW,
+ SPE_BUILTIN_EVMHEUMIANW,
+ SPE_BUILTIN_EVMHEUSIAAW,
+ SPE_BUILTIN_EVMHEUSIANW,
+ SPE_BUILTIN_EVMHOGSMFAA,
+ SPE_BUILTIN_EVMHOGSMFAN,
+ SPE_BUILTIN_EVMHOGSMIAA,
+ SPE_BUILTIN_EVMHOGSMIAN,
+ SPE_BUILTIN_EVMHOGUMIAA,
+ SPE_BUILTIN_EVMHOGUMIAN,
+ SPE_BUILTIN_EVMHOSMF,
+ SPE_BUILTIN_EVMHOSMFA,
+ SPE_BUILTIN_EVMHOSMFAAW,
+ SPE_BUILTIN_EVMHOSMFANW,
+ SPE_BUILTIN_EVMHOSMI,
+ SPE_BUILTIN_EVMHOSMIA,
+ SPE_BUILTIN_EVMHOSMIAAW,
+ SPE_BUILTIN_EVMHOSMIANW,
+ SPE_BUILTIN_EVMHOSSF,
+ SPE_BUILTIN_EVMHOSSFA,
+ SPE_BUILTIN_EVMHOSSFAAW,
+ SPE_BUILTIN_EVMHOSSFANW,
+ SPE_BUILTIN_EVMHOSSIAAW,
+ SPE_BUILTIN_EVMHOSSIANW,
+ SPE_BUILTIN_EVMHOUMI,
+ SPE_BUILTIN_EVMHOUMIA,
+ SPE_BUILTIN_EVMHOUMIAAW,
+ SPE_BUILTIN_EVMHOUMIANW,
+ SPE_BUILTIN_EVMHOUSIAAW,
+ SPE_BUILTIN_EVMHOUSIANW,
+ SPE_BUILTIN_EVMWHSMF,
+ SPE_BUILTIN_EVMWHSMFA,
+ SPE_BUILTIN_EVMWHSMI,
+ SPE_BUILTIN_EVMWHSMIA,
+ SPE_BUILTIN_EVMWHSSF,
+ SPE_BUILTIN_EVMWHSSFA,
+ SPE_BUILTIN_EVMWHUMI,
+ SPE_BUILTIN_EVMWHUMIA,
+ SPE_BUILTIN_EVMWLSMIAAW,
+ SPE_BUILTIN_EVMWLSMIANW,
+ SPE_BUILTIN_EVMWLSSIAAW,
+ SPE_BUILTIN_EVMWLSSIANW,
+ SPE_BUILTIN_EVMWLUMI,
+ SPE_BUILTIN_EVMWLUMIA,
+ SPE_BUILTIN_EVMWLUMIAAW,
+ SPE_BUILTIN_EVMWLUMIANW,
+ SPE_BUILTIN_EVMWLUSIAAW,
+ SPE_BUILTIN_EVMWLUSIANW,
+ SPE_BUILTIN_EVMWSMF,
+ SPE_BUILTIN_EVMWSMFA,
+ SPE_BUILTIN_EVMWSMFAA,
+ SPE_BUILTIN_EVMWSMFAN,
+ SPE_BUILTIN_EVMWSMI,
+ SPE_BUILTIN_EVMWSMIA,
+ SPE_BUILTIN_EVMWSMIAA,
+ SPE_BUILTIN_EVMWSMIAN,
+ SPE_BUILTIN_EVMWHSSFAA,
+ SPE_BUILTIN_EVMWSSF,
+ SPE_BUILTIN_EVMWSSFA,
+ SPE_BUILTIN_EVMWSSFAA,
+ SPE_BUILTIN_EVMWSSFAN,
+ SPE_BUILTIN_EVMWUMI,
+ SPE_BUILTIN_EVMWUMIA,
+ SPE_BUILTIN_EVMWUMIAA,
+ SPE_BUILTIN_EVMWUMIAN,
+ SPE_BUILTIN_EVNAND,
+ SPE_BUILTIN_EVNOR,
+ SPE_BUILTIN_EVOR,
+ SPE_BUILTIN_EVORC,
+ SPE_BUILTIN_EVRLW,
+ SPE_BUILTIN_EVSLW,
+ SPE_BUILTIN_EVSRWS,
+ SPE_BUILTIN_EVSRWU,
+ SPE_BUILTIN_EVSTDDX,
+ SPE_BUILTIN_EVSTDHX,
+ SPE_BUILTIN_EVSTDWX,
+ SPE_BUILTIN_EVSTWHEX,
+ SPE_BUILTIN_EVSTWHOX,
+ SPE_BUILTIN_EVSTWWEX,
+ SPE_BUILTIN_EVSTWWOX,
+ SPE_BUILTIN_EVSUBFW,
+ SPE_BUILTIN_EVXOR,
+ SPE_BUILTIN_EVABS,
+ SPE_BUILTIN_EVADDSMIAAW,
+ SPE_BUILTIN_EVADDSSIAAW,
+ SPE_BUILTIN_EVADDUMIAAW,
+ SPE_BUILTIN_EVADDUSIAAW,
+ SPE_BUILTIN_EVCNTLSW,
+ SPE_BUILTIN_EVCNTLZW,
+ SPE_BUILTIN_EVEXTSB,
+ SPE_BUILTIN_EVEXTSH,
+ SPE_BUILTIN_EVFSABS,
+ SPE_BUILTIN_EVFSCFSF,
+ SPE_BUILTIN_EVFSCFSI,
+ SPE_BUILTIN_EVFSCFUF,
+ SPE_BUILTIN_EVFSCFUI,
+ SPE_BUILTIN_EVFSCTSF,
+ SPE_BUILTIN_EVFSCTSI,
+ SPE_BUILTIN_EVFSCTSIZ,
+ SPE_BUILTIN_EVFSCTUF,
+ SPE_BUILTIN_EVFSCTUI,
+ SPE_BUILTIN_EVFSCTUIZ,
+ SPE_BUILTIN_EVFSNABS,
+ SPE_BUILTIN_EVFSNEG,
+ SPE_BUILTIN_EVMRA,
+ SPE_BUILTIN_EVNEG,
+ SPE_BUILTIN_EVRNDW,
+ SPE_BUILTIN_EVSUBFSMIAAW,
+ SPE_BUILTIN_EVSUBFSSIAAW,
+ SPE_BUILTIN_EVSUBFUMIAAW,
+ SPE_BUILTIN_EVSUBFUSIAAW,
+ SPE_BUILTIN_EVADDIW,
+ SPE_BUILTIN_EVLDD,
+ SPE_BUILTIN_EVLDH,
+ SPE_BUILTIN_EVLDW,
+ SPE_BUILTIN_EVLHHESPLAT,
+ SPE_BUILTIN_EVLHHOSSPLAT,
+ SPE_BUILTIN_EVLHHOUSPLAT,
+ SPE_BUILTIN_EVLWHE,
+ SPE_BUILTIN_EVLWHOS,
+ SPE_BUILTIN_EVLWHOU,
+ SPE_BUILTIN_EVLWHSPLAT,
+ SPE_BUILTIN_EVLWWSPLAT,
+ SPE_BUILTIN_EVRLWI,
+ SPE_BUILTIN_EVSLWI,
+ SPE_BUILTIN_EVSRWIS,
+ SPE_BUILTIN_EVSRWIU,
+ SPE_BUILTIN_EVSTDD,
+ SPE_BUILTIN_EVSTDH,
+ SPE_BUILTIN_EVSTDW,
+ SPE_BUILTIN_EVSTWHE,
+ SPE_BUILTIN_EVSTWHO,
+ SPE_BUILTIN_EVSTWWE,
+ SPE_BUILTIN_EVSTWWO,
+ SPE_BUILTIN_EVSUBIFW,
+
+ /* Compares. */
+ SPE_BUILTIN_EVCMPEQ,
+ SPE_BUILTIN_EVCMPGTS,
+ SPE_BUILTIN_EVCMPGTU,
+ SPE_BUILTIN_EVCMPLTS,
+ SPE_BUILTIN_EVCMPLTU,
+ SPE_BUILTIN_EVFSCMPEQ,
+ SPE_BUILTIN_EVFSCMPGT,
+ SPE_BUILTIN_EVFSCMPLT,
+ SPE_BUILTIN_EVFSTSTEQ,
+ SPE_BUILTIN_EVFSTSTGT,
+ SPE_BUILTIN_EVFSTSTLT,
+
+ /* EVSEL compares. */
+ SPE_BUILTIN_EVSEL_CMPEQ,
+ SPE_BUILTIN_EVSEL_CMPGTS,
+ SPE_BUILTIN_EVSEL_CMPGTU,
+ SPE_BUILTIN_EVSEL_CMPLTS,
+ SPE_BUILTIN_EVSEL_CMPLTU,
+ SPE_BUILTIN_EVSEL_FSCMPEQ,
+ SPE_BUILTIN_EVSEL_FSCMPGT,
+ SPE_BUILTIN_EVSEL_FSCMPLT,
+ SPE_BUILTIN_EVSEL_FSTSTEQ,
+ SPE_BUILTIN_EVSEL_FSTSTGT,
+ SPE_BUILTIN_EVSEL_FSTSTLT,
+
+ SPE_BUILTIN_EVSPLATFI,
+ SPE_BUILTIN_EVSPLATI,
+ SPE_BUILTIN_EVMWHSSMAA,
+ SPE_BUILTIN_EVMWHSMFAA,
+ SPE_BUILTIN_EVMWHSMIAA,
+ SPE_BUILTIN_EVMWHUSIAA,
+ SPE_BUILTIN_EVMWHUMIAA,
+ SPE_BUILTIN_EVMWHSSFAN,
+ SPE_BUILTIN_EVMWHSSIAN,
+ SPE_BUILTIN_EVMWHSMFAN,
+ SPE_BUILTIN_EVMWHSMIAN,
+ SPE_BUILTIN_EVMWHUSIAN,
+ SPE_BUILTIN_EVMWHUMIAN,
+ SPE_BUILTIN_EVMWHGSSFAA,
+ SPE_BUILTIN_EVMWHGSMFAA,
+ SPE_BUILTIN_EVMWHGSMIAA,
+ SPE_BUILTIN_EVMWHGUMIAA,
+ SPE_BUILTIN_EVMWHGSSFAN,
+ SPE_BUILTIN_EVMWHGSMFAN,
+ SPE_BUILTIN_EVMWHGSMIAN,
+ SPE_BUILTIN_EVMWHGUMIAN,
+ SPE_BUILTIN_MTSPEFSCR,
+ SPE_BUILTIN_MFSPEFSCR,
+ SPE_BUILTIN_BRINC,
+
+ /* PAIRED builtins. */
+ PAIRED_BUILTIN_DIVV2SF3,
+ PAIRED_BUILTIN_ABSV2SF2,
+ PAIRED_BUILTIN_NEGV2SF2,
+ PAIRED_BUILTIN_SQRTV2SF2,
+ PAIRED_BUILTIN_ADDV2SF3,
+ PAIRED_BUILTIN_SUBV2SF3,
+ PAIRED_BUILTIN_RESV2SF2,
+ PAIRED_BUILTIN_MULV2SF3,
+ PAIRED_BUILTIN_MSUB,
+ PAIRED_BUILTIN_MADD,
+ PAIRED_BUILTIN_NMSUB,
+ PAIRED_BUILTIN_NMADD,
+ PAIRED_BUILTIN_NABSV2SF2,
+ PAIRED_BUILTIN_SUM0,
+ PAIRED_BUILTIN_SUM1,
+ PAIRED_BUILTIN_MULS0,
+ PAIRED_BUILTIN_MULS1,
+ PAIRED_BUILTIN_MERGE00,
+ PAIRED_BUILTIN_MERGE01,
+ PAIRED_BUILTIN_MERGE10,
+ PAIRED_BUILTIN_MERGE11,
+ PAIRED_BUILTIN_MADDS0,
+ PAIRED_BUILTIN_MADDS1,
+ PAIRED_BUILTIN_STX,
+ PAIRED_BUILTIN_LX,
+ PAIRED_BUILTIN_SELV2SF4,
+ PAIRED_BUILTIN_CMPU0,
+ PAIRED_BUILTIN_CMPU1,
+
+ RS6000_BUILTIN_RECIP,
+ RS6000_BUILTIN_RECIPF,
+ RS6000_BUILTIN_RSQRTF,
+
+ RS6000_BUILTIN_COUNT
+};
+
+enum rs6000_builtin_type_index
+{
+ RS6000_BTI_NOT_OPAQUE,
+ RS6000_BTI_opaque_V2SI,
+ RS6000_BTI_opaque_V2SF,
+ RS6000_BTI_opaque_p_V2SI,
+ RS6000_BTI_opaque_V4SI,
+ RS6000_BTI_V16QI,
+ RS6000_BTI_V2SI,
+ RS6000_BTI_V2SF,
+ RS6000_BTI_V4HI,
+ RS6000_BTI_V4SI,
+ RS6000_BTI_V4SF,
+ RS6000_BTI_V8HI,
+ RS6000_BTI_unsigned_V16QI,
+ RS6000_BTI_unsigned_V8HI,
+ RS6000_BTI_unsigned_V4SI,
+ RS6000_BTI_bool_char, /* __bool char */
+ RS6000_BTI_bool_short, /* __bool short */
+ RS6000_BTI_bool_int, /* __bool int */
+ RS6000_BTI_pixel, /* __pixel */
+ RS6000_BTI_bool_V16QI, /* __vector __bool char */
+ RS6000_BTI_bool_V8HI, /* __vector __bool short */
+ RS6000_BTI_bool_V4SI, /* __vector __bool int */
+ RS6000_BTI_pixel_V8HI, /* __vector __pixel */
+ RS6000_BTI_long, /* long_integer_type_node */
+ RS6000_BTI_unsigned_long, /* long_unsigned_type_node */
+ RS6000_BTI_INTQI, /* intQI_type_node */
+ RS6000_BTI_UINTQI, /* unsigned_intQI_type_node */
+ RS6000_BTI_INTHI, /* intHI_type_node */
+ RS6000_BTI_UINTHI, /* unsigned_intHI_type_node */
+ RS6000_BTI_INTSI, /* intSI_type_node */
+ RS6000_BTI_UINTSI, /* unsigned_intSI_type_node */
+ RS6000_BTI_float, /* float_type_node */
+ RS6000_BTI_void, /* void_type_node */
+ RS6000_BTI_MAX
+};
+
+
+#define opaque_V2SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V2SI])
+#define opaque_V2SF_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V2SF])
+#define opaque_p_V2SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_p_V2SI])
+#define opaque_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_opaque_V4SI])
+#define V16QI_type_node (rs6000_builtin_types[RS6000_BTI_V16QI])
+#define V2SI_type_node (rs6000_builtin_types[RS6000_BTI_V2SI])
+#define V2SF_type_node (rs6000_builtin_types[RS6000_BTI_V2SF])
+#define V4HI_type_node (rs6000_builtin_types[RS6000_BTI_V4HI])
+#define V4SI_type_node (rs6000_builtin_types[RS6000_BTI_V4SI])
+#define V4SF_type_node (rs6000_builtin_types[RS6000_BTI_V4SF])
+#define V8HI_type_node (rs6000_builtin_types[RS6000_BTI_V8HI])
+#define unsigned_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V16QI])
+#define unsigned_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V8HI])
+#define unsigned_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_unsigned_V4SI])
+#define bool_char_type_node (rs6000_builtin_types[RS6000_BTI_bool_char])
+#define bool_short_type_node (rs6000_builtin_types[RS6000_BTI_bool_short])
+#define bool_int_type_node (rs6000_builtin_types[RS6000_BTI_bool_int])
+#define pixel_type_node (rs6000_builtin_types[RS6000_BTI_pixel])
+#define bool_V16QI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V16QI])
+#define bool_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V8HI])
+#define bool_V4SI_type_node (rs6000_builtin_types[RS6000_BTI_bool_V4SI])
+#define pixel_V8HI_type_node (rs6000_builtin_types[RS6000_BTI_pixel_V8HI])
+
+#define long_integer_type_internal_node (rs6000_builtin_types[RS6000_BTI_long])
+#define long_unsigned_type_internal_node (rs6000_builtin_types[RS6000_BTI_unsigned_long])
+#define intQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTQI])
+#define uintQI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTQI])
+#define intHI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTHI])
+#define uintHI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTHI])
+#define intSI_type_internal_node (rs6000_builtin_types[RS6000_BTI_INTSI])
+#define uintSI_type_internal_node (rs6000_builtin_types[RS6000_BTI_UINTSI])
+#define float_type_internal_node (rs6000_builtin_types[RS6000_BTI_float])
+#define void_type_internal_node (rs6000_builtin_types[RS6000_BTI_void])
+
+extern GTY(()) tree rs6000_builtin_types[RS6000_BTI_MAX];
+extern GTY(()) tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
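+
+/* Illustrative sketch (not definitive): the backend populates
+   rs6000_builtin_types[] once at initialization, roughly
+
+     V4SF_type_node = build_vector_type (float_type_node, 4);
+
+   which stores through the macro above into
+   rs6000_builtin_types[RS6000_BTI_V4SF]; front ends can then use the
+   *_type_node macros directly.  */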
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000.md b/gcc-4.4.3/gcc/config/rs6000/rs6000.md
new file mode 100644
index 000000000..43931b247
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000.md
@@ -0,0 +1,14897 @@
+;; Machine description for IBM RISC System 6000 (POWER) for GNU C compiler
+;; Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+;; 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
+;; Free Software Foundation, Inc.
+;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;;
+;; REGNOS
+;;
+
+(define_constants
+ [(MQ_REGNO 64)
+ (LR_REGNO 65)
+ (CTR_REGNO 66)
+ (CR0_REGNO 68)
+ (CR1_REGNO 69)
+ (CR2_REGNO 70)
+ (CR3_REGNO 71)
+ (CR4_REGNO 72)
+ (CR5_REGNO 73)
+ (CR6_REGNO 74)
+ (CR7_REGNO 75)
+ (MAX_CR_REGNO 75)
+ (XER_REGNO 76)
+ (FIRST_ALTIVEC_REGNO 77)
+ (LAST_ALTIVEC_REGNO 108)
+ (VRSAVE_REGNO 109)
+ (VSCR_REGNO 110)
+ (SPE_ACC_REGNO 111)
+ (SPEFSCR_REGNO 112)
+ (SFP_REGNO 113)
+ ])
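+
+;; (Registers 0-31 are the GPRs and 32-63 the FPRs, so the fixed special
+;; registers above start at 64; e.g. the eight condition-register fields
+;; CR0-CR7 occupy 68-75 and the 32 AltiVec vector registers 77-108.)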
+
+;;
+;; UNSPEC usage
+;;
+
+(define_constants
+ [(UNSPEC_FRSP 0) ; frsp for POWER machines
+ (UNSPEC_TIE 5) ; tie stack contents and stack pointer
+ (UNSPEC_TOCPTR 6) ; address of a word pointing to the TOC
+ (UNSPEC_TOC 7) ; address of the TOC (more-or-less)
+ (UNSPEC_MOVSI_GOT 8)
+ (UNSPEC_MV_CR_OV 9) ; move_from_CR_ov_bit
+ (UNSPEC_FCTIWZ 10)
+ (UNSPEC_FRIM 11)
+ (UNSPEC_FRIN 12)
+ (UNSPEC_FRIP 13)
+ (UNSPEC_FRIZ 14)
+ (UNSPEC_LD_MPIC 15) ; load_macho_picbase
+ (UNSPEC_MPIC_CORRECT 16) ; macho_correct_pic
+ (UNSPEC_TLSGD 17)
+ (UNSPEC_TLSLD 18)
+ (UNSPEC_MOVESI_FROM_CR 19)
+ (UNSPEC_MOVESI_TO_CR 20)
+ (UNSPEC_TLSDTPREL 21)
+ (UNSPEC_TLSDTPRELHA 22)
+ (UNSPEC_TLSDTPRELLO 23)
+ (UNSPEC_TLSGOTDTPREL 24)
+ (UNSPEC_TLSTPREL 25)
+ (UNSPEC_TLSTPRELHA 26)
+ (UNSPEC_TLSTPRELLO 27)
+ (UNSPEC_TLSGOTTPREL 28)
+ (UNSPEC_TLSTLS 29)
+ (UNSPEC_FIX_TRUNC_TF 30) ; fadd with rounding towards zero, used when truncating TFmode
+ (UNSPEC_MV_CR_GT 31) ; move_from_CR_gt_bit
+ (UNSPEC_STFIWX 32)
+ (UNSPEC_POPCNTB 33)
+ (UNSPEC_FRES 34)
+ (UNSPEC_SP_SET 35)
+ (UNSPEC_SP_TEST 36)
+ (UNSPEC_SYNC 37)
+ (UNSPEC_LWSYNC 38)
+ (UNSPEC_ISYNC 39)
+ (UNSPEC_SYNC_OP 40)
+ (UNSPEC_ATOMIC 41)
+ (UNSPEC_CMPXCHG 42)
+ (UNSPEC_XCHG 43)
+ (UNSPEC_AND 44)
+ (UNSPEC_DLMZB 45)
+ (UNSPEC_DLMZB_CR 46)
+ (UNSPEC_DLMZB_STRLEN 47)
+ (UNSPEC_RSQRT 48)
+ (UNSPEC_TOCREL 49)
+ (UNSPEC_MACHOPIC_OFFSET 50)
+ ])
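+
+;; (An unspec marks an operation that the RTL optimizers must treat as
+;; opaque; e.g. a pattern body such as (unspec:SI [(reg:SI 3)] UNSPEC_STFIWX)
+;; matches only insns naming that same constant.)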
+
+;;
+;; UNSPEC_VOLATILE usage
+;;
+
+(define_constants
+ [(UNSPECV_BLOCK 0)
+ (UNSPECV_LL 1) ; load-locked
+ (UNSPECV_SC 2) ; store-conditional
+ (UNSPECV_EH_RR 9) ; eh_reg_restore
+ ])
+
+;; Define an insn type attribute. This is used in function unit delay
+;; computations.
+(define_attr "type" "integer,two,three,load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u,store,store_ux,store_u,fpload,fpload_ux,fpload_u,fpstore,fpstore_ux,fpstore_u,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,insert_word,branch,cmp,fast_compare,compare,var_delayed_compare,delayed_compare,imul_compare,lmul_compare,fpcompare,cr_logical,delayed_cr,mfcr,mfcrf,mtcr,mfjmpr,mtjmpr,fp,fpsimple,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,brinc,vecsimple,veccomplex,vecdiv,veccmp,veccmpsimple,vecperm,vecfloat,vecfdiv,isync,sync,load_l,store_c,shift,trap,insert_dword,var_shift_rotate,cntlz,exts,mffgpr,mftgpr"
+ (const_string "integer"))
+
+;; Define floating-point instruction sub-types for use with xfpu.md.
+(define_attr "fp_type" "fp_default,fp_addsub_s,fp_addsub_d,fp_mul_s,fp_mul_d,fp_div_s,fp_div_d,fp_maddsub_s,fp_maddsub_d,fp_sqrt_s,fp_sqrt_d" (const_string "fp_default"))
+
+;; Length (in bytes).
+; '(pc)' in the following doesn't include the instruction itself; it is
+; calculated as if the instruction had zero size.
+(define_attr "length" ""
+ (if_then_else (eq_attr "type" "branch")
+ (if_then_else (and (ge (minus (match_dup 0) (pc))
+ (const_int -32768))
+ (lt (minus (match_dup 0) (pc))
+ (const_int 32764)))
+ (const_int 4)
+ (const_int 8))
+ (const_int 4)))
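+
+;; (The -32768 .. 32764 window above is the reach of a conditional
+;; branch's 14-bit BD displacement scaled by 4; a target outside it costs
+;; 8 bytes, i.e. an inverted conditional branch around an unconditional
+;; "b".)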
+
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in rs6000.h.
+
+(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc440,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450,ppc8540,ppce300c2,ppce300c3,ppce500mc,power4,power5,power6,cell"
+ (const (symbol_ref "rs6000_cpu_attr")))
+
+
+;; Whether this instruction is microcoded on the Cell processor.
+; By default, load-extended, record-form compare, and variable shift/rotate
+; instructions are always microcoded.
+(define_attr "cell_micro" "not,conditional,always"
+ (if_then_else (eq_attr "type" "compare,delayed_compare,imul_compare,lmul_compare,load_ext,load_ext_ux,var_shift_rotate,var_delayed_compare")
+ (const_string "always")
+ (const_string "not")))
+
+(automata_option "ndfa")
+
+(include "rios1.md")
+(include "rios2.md")
+(include "rs64.md")
+(include "mpc.md")
+(include "40x.md")
+(include "440.md")
+(include "603.md")
+(include "6xx.md")
+(include "7xx.md")
+(include "7450.md")
+(include "8540.md")
+(include "e300c2c3.md")
+(include "e500mc.md")
+(include "power4.md")
+(include "power5.md")
+(include "power6.md")
+(include "cell.md")
+(include "xfpu.md")
+
+(include "predicates.md")
+(include "constraints.md")
+
+(include "darwin.md")
+
+
+;; Mode iterators
+
+; This mode iterator allows :GPR to be used to indicate the allowable size
+; of whole values in GPRs.
+(define_mode_iterator GPR [SI (DI "TARGET_POWERPC64")])
+
+; Any supported integer mode.
+(define_mode_iterator INT [QI HI SI DI TI])
+
+; Any supported integer mode that fits in one register.
+(define_mode_iterator INT1 [QI HI SI (DI "TARGET_POWERPC64")])
+
+; Modes that extend to DImode.
+(define_mode_iterator QHSI [QI HI SI])
+
+; SImode or DImode, even if DImode doesn't fit in GPRs.
+(define_mode_iterator SDI [SI DI])
+
+; The size of a pointer.  Also, the size of the value that a record-form
+; comparison (an instruction with a '.') will compare.
+(define_mode_iterator P [(SI "TARGET_32BIT") (DI "TARGET_64BIT")])
+
+; Any hardware-supported floating-point mode
+(define_mode_iterator FP [
+ (SF "TARGET_HARD_FLOAT
+ && ((TARGET_FPRS && TARGET_SINGLE_FLOAT) || TARGET_E500_SINGLE)")
+ (DF "TARGET_HARD_FLOAT
+ && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)")
+ (TF "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128")
+ (DD "TARGET_DFP")
+ (TD "TARGET_DFP")])
+
+; Various instructions that come in SI and DI forms.
+; A generic w/d attribute, for things like cmpw/cmpd.
+(define_mode_attr wd [(QI "b") (HI "h") (SI "w") (DI "d")])
+
+; DImode bits
+(define_mode_attr dbits [(QI "56") (HI "48") (SI "32")])
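+
+;; (For example, instantiating a pattern below with QImode substitutes "b"
+;; for <wd> and "56" for <dbits>, so "l<wd>z" becomes "lbz" and
+;; "rldicl %0,%1,0,<dbits>" keeps only the low 8 bits of the doubleword.)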
+
+
+;; Start with fixed-point load and store insns. Here we put only the more
+;; complex forms. Basic data transfer is done later.
+
+(define_expand "zero_extend<mode>di2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn "*zero_extend<mode>di2_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_operand:QHSI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ l<wd>z%U1%X1 %0,%1
+ rldicl %0,%1,0,<dbits>"
+ [(set_attr "type" "load,*")])
+
+(define_insn "*zero_extend<mode>di2_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldicl. %2,%1,0,<dbits>
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn "*zero_extend<mode>di2_internal3"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ rldicl. %0,%1,0,<dbits>
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:QHSI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "extendqidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "extsb %0,%1"
+ [(set_attr "type" "exts")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendhidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64 && rs6000_gen_cell_microcode"
+ "@
+ lha%U1%X1 %0,%1
+ extsh %0,%1"
+ [(set_attr "type" "load_ext,exts")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64 && !rs6000_gen_cell_microcode"
+ "extsh %0,%1"
+ [(set_attr "type" "exts")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ extsh. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ extsh. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "lwa_operand" "m,r")))]
+ "TARGET_POWERPC64 && rs6000_gen_cell_microcode"
+ "@
+ lwa%U1%X1 %0,%1
+ extsw %0,%1"
+ [(set_attr "type" "load_ext,exts")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64 && !rs6000_gen_cell_microcode"
+ "extsw %0,%1"
+ [(set_attr "type" "exts")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ extsw. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_64BIT"
+ "@
+ extsw. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lbz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqisi2"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:QI 1 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWERPC)
+ emit_insn (gen_extendqisi2_ppc (operands[0], operands[1]));
+ else if (TARGET_POWER)
+ emit_insn (gen_extendqisi2_power (operands[0], operands[1]));
+ else
+ emit_insn (gen_extendqisi2_no_power (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "extendqisi2_ppc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC"
+ "extsb %0,%1"
+ [(set_attr "type" "exts")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_dup 1)))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqisi2_power"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (clobber (scratch:SI))])
+ (parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (scratch:SI))])]
+ "TARGET_POWER"
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "extendqisi2_no_power"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
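+
+;; (Without an extsb instruction, sign extension falls back to the shift
+;; pair in the two expanders above: shift left by 24, then arithmetic
+;; shift right by 24, which replicates the byte's sign bit upward.)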
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "gpc_reg_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lbz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:HI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:HI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (zero_extend:HI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:HI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqihi2"
+ [(use (match_operand:HI 0 "gpc_reg_operand" ""))
+ (use (match_operand:QI 1 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWERPC)
+ emit_insn (gen_extendqihi2_ppc (operands[0], operands[1]));
+ else if (TARGET_POWER)
+ emit_insn (gen_extendqihi2_power (operands[0], operands[1]));
+ else
+ emit_insn (gen_extendqihi2_no_power (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "extendqihi2_ppc"
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC"
+ "extsb %0,%1"
+ [(set_attr "type" "exts")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 ""))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:HI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:HI (match_dup 1)))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (sign_extend:HI (match_dup 1)))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:HI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendqihi2_power"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (clobber (scratch:SI))])
+ (parallel [(set (match_operand:HI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (scratch:SI))])]
+ "TARGET_POWER"
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "extendqihi2_no_power"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lhz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xffff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xffff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xffff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ "rs6000_gen_cell_microcode"
+ "@
+ lha%U1%X1 %0,%1
+ {exts|extsh} %0,%1"
+ [(set_attr "type" "load_ext,exts")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r")))]
+ "!rs6000_gen_cell_microcode"
+ "{exts|extsh} %0,%1"
+ [(set_attr "type" "exts")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {exts.|extsh.} %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {exts.|extsh.} %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; IBM 405, 440 and 464 half-word multiplication operations.
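+;; (Naming scheme: "mac" = multiply-accumulate, "mul" = multiply only, and
+;; an "n" prefix subtracts the product instead of adding it; "chw" crosses
+;; the low halfword of one operand with the high halfword of the other,
+;; "hhw" multiplies high by high, "lhw" low by low; a trailing "u" is the
+;; unsigned form and a trailing "." also sets CR0.)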
+
+(define_insn "*macchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "macchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "macchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (zero_extend:SI
+ (match_dup 1)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "macchwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*macchwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "macchwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "machhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "machhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "machhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*machhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "machhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "maclhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "maclhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (plus:SI (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 4 "gpc_reg_operand" "0"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (zero_extend:SI
+ (match_dup 1))
+ (zero_extend:SI
+ (match_dup 2)))
+ (match_dup 4)))]
+ "TARGET_MULHW"
+ "maclhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*maclhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (match_operand:SI 3 "gpc_reg_operand" "0")))]
+ "TARGET_MULHW"
+ "maclhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmacchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1)))))]
+ "TARGET_MULHW"
+ "nmacchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmacchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))))]
+ "TARGET_MULHW"
+ "nmacchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmachhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16)))))]
+ "TARGET_MULHW"
+ "nmachhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmachhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))))]
+ "TARGET_MULHW"
+ "nmachhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmaclhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (minus:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_dup 4)
+ (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2)))))]
+ "TARGET_MULHW"
+ "nmaclhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*nmaclhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 3 "gpc_reg_operand" "0")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))))]
+ "TARGET_MULHW"
+ "nmaclhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (sign_extend:SI
+ (match_dup 1))))]
+ "TARGET_MULHW"
+ "mulchw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mulchw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))
+ (zero_extend:SI
+ (match_dup 1))))]
+ "TARGET_MULHW"
+ "mulchwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulchwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mulchwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (ashiftrt:SI
+ (match_dup 2)
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_dup 1)
+ (const_int 16))
+ (lshiftrt:SI
+ (match_dup 2)
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mulhhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "gpc_reg_operand" "%r")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 16))))]
+ "TARGET_MULHW"
+ "mulhhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_dup 1))
+ (sign_extend:SI
+ (match_dup 2))))]
+ "TARGET_MULHW"
+ "mullhw. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhw"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mullhw %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwuc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (compare:CC (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (zero_extend:SI
+ (match_dup 1))
+ (zero_extend:SI
+ (match_dup 2))))]
+ "TARGET_MULHW"
+ "mullhwu. %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+(define_insn "*mullhwu"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:SI
+ (match_operand:HI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_MULHW"
+ "mullhwu %0, %1, %2"
+ [(set_attr "type" "imul3")])
+
+;; IBM 405, 440 and 464 string-search dlmzb instruction support.
+(define_insn "dlmzb"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")]
+ UNSPEC_DLMZB_CR))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_dup 1)
+ (match_dup 2)]
+ UNSPEC_DLMZB))]
+ "TARGET_DLMZB"
+ "dlmzb. %0, %1, %2")
+
+(define_expand "strlensi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:BLK 1 "general_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")
+ (match_operand 3 "const_int_operand" "")]
+ UNSPEC_DLMZB_STRLEN))
+ (clobber (match_scratch:CC 4 "=x"))]
+ "TARGET_DLMZB && WORDS_BIG_ENDIAN && !optimize_size"
+{
+ rtx result = operands[0];
+ rtx src = operands[1];
+ rtx search_char = operands[2];
+ rtx align = operands[3];
+ rtx addr, scratch_string, word1, word2, scratch_dlmzb;
+ rtx loop_label, end_label, mem, cr0, cond;
+ if (search_char != const0_rtx
+ || GET_CODE (align) != CONST_INT
+ || INTVAL (align) < 8)
+ FAIL;
+ word1 = gen_reg_rtx (SImode);
+ word2 = gen_reg_rtx (SImode);
+ scratch_dlmzb = gen_reg_rtx (SImode);
+ scratch_string = gen_reg_rtx (Pmode);
+ loop_label = gen_label_rtx ();
+ end_label = gen_label_rtx ();
+ addr = force_reg (Pmode, XEXP (src, 0));
+ emit_move_insn (scratch_string, addr);
+ emit_label (loop_label);
+ mem = change_address (src, SImode, scratch_string);
+ emit_move_insn (word1, mem);
+ emit_move_insn (word2, adjust_address (mem, SImode, 4));
+ cr0 = gen_rtx_REG (CCmode, CR0_REGNO);
+ emit_insn (gen_dlmzb (scratch_dlmzb, word1, word2, cr0));
+ cond = gen_rtx_NE (VOIDmode, cr0, const0_rtx);
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ cond,
+ gen_rtx_LABEL_REF
+ (VOIDmode,
+ end_label),
+ pc_rtx)));
+ emit_insn (gen_addsi3 (scratch_string, scratch_string, GEN_INT (8)));
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_LABEL_REF (VOIDmode, loop_label)));
+ emit_barrier ();
+ emit_label (end_label);
+ emit_insn (gen_addsi3 (scratch_string, scratch_string, scratch_dlmzb));
+ emit_insn (gen_subsi3 (result, scratch_string, addr));
+ emit_insn (gen_subsi3 (result, result, const1_rtx));
+ DONE;
+})
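+
+;; (dlmzb. scans the eight bytes in %1:%2, leaving in %0 the 1-based
+;; position of the first zero byte and setting CR0 when one is found; the
+;; expander above therefore walks the string eight bytes at a time and
+;; computes length = (base_of_last_block + dlmzb_result) - start - 1.)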
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Fixed-point arithmetic insns.
+
+(define_expand "add<mode>3"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (plus:SDI (match_operand:SDI 1 "gpc_reg_operand" "")
+ (match_operand:SDI 2 "reg_or_add_cint_operand" "")))]
+ ""
+{
+ if (<MODE>mode == DImode && ! TARGET_POWERPC64)
+ {
+ if (non_short_cint_operand (operands[2], DImode))
+ FAIL;
+ }
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && ! add_operand (operands[2], <MODE>mode))
+ {
+ rtx tmp = ((!can_create_pseudo_p ()
+ || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (<MODE>mode));
+
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
+
+ if (<MODE>mode == DImode && !satisfies_constraint_L (GEN_INT (rest)))
+ FAIL;
+
+ /* The ordering here is important for the prolog expander.
+ When space is allocated from the stack, adding 'low' first may
+ produce a temporary deallocation (which would be bad). */
+ emit_insn (gen_add<mode>3 (tmp, operands[1], GEN_INT (rest)));
+ emit_insn (gen_add<mode>3 (operands[0], tmp, GEN_INT (low)));
+ DONE;
+ }
+})
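+
+;; (Example of the decomposition above: adding the constant 0x12348765
+;; yields low = 0xffff8765 (sign-extended) and rest = 0x12350000, i.e.
+;; "addis %0,%1,0x1235" followed by "addi %0,%0,-0x789b"; the high part is
+;; rounded up to compensate for the negative low part.)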
+
+;; Discourage ai/addic because it sets the carry bit, but provide it in an
+;; alternative that allows register zero as a source (addi reads r0 as the
+;; constant zero; addic does not).
+(define_insn "*add<mode>3_internal1"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r,?r,r")
+ (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "%r,b,r,b")
+ (match_operand:GPR 2 "add_operand" "r,I,I,L")))]
+ "!DECIMAL_FLOAT_MODE_P (GET_MODE (operands[0])) && !DECIMAL_FLOAT_MODE_P (GET_MODE (operands[1]))"
+ "@
+ {cax|add} %0,%1,%2
+ {cal %0,%2(%1)|addi %0,%1,%2}
+ {ai|addic} %0,%1,%2
+ {cau|addis} %0,%1,%v2"
+ [(set_attr "length" "4,4,4,4")])
+
+(define_insn "addsi3_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (high:SI (match_operand 2 "" ""))))]
+ "TARGET_MACHO && !TARGET_64BIT"
+ "{cau|addis} %0,%1,ha16(%2)"
+ [(set_attr "length" "4")])
+
+(define_insn "*add<mode>3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 "=r,r,r,r"))]
+ ""
+ "@
+ {cax.|add.} %3,%1,%2
+ {ai.|addic.} %3,%1,%2
+ #
+ #"
+ [(set_attr "type" "fast_compare,compare,compare,compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:GPR 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (plus:GPR (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*add<mode>3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:P (match_dup 1)
+ (match_dup 2)))]
+ ""
+ "@
+ {cax.|add.} %0,%1,%2
+ {ai.|addic.} %0,%1,%2
+ #
+ #"
+ [(set_attr "type" "fast_compare,compare,compare,compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (plus:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (plus:P (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split an add that we can't do in one insn into two insns, each of which
+;; does one 16-bit part. This is used by combine. Note that the low-order
+;; add should be last in case the result gets used in an address.
+
+(define_split
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (plus:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "non_add_cint_operand" "")))]
+ ""
+ [(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, <MODE>mode);
+
+ operands[4] = GEN_INT (low);
+ if (<MODE>mode == SImode || satisfies_constraint_L (GEN_INT (rest)))
+ operands[3] = GEN_INT (rest);
+ else if (can_create_pseudo_p ())
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ emit_move_insn (operands[3], operands[2]);
+ emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ else
+ FAIL;
+})
+
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ ""
+ "nor %0,%1,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 "=r,r"))]
+ ""
+ "@
+ nor. %2,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (not:P (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (not:P (match_dup 1)))]
+ ""
+ "@
+ nor. %0,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (not:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (not:P (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (not:P (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_short_operand" "rI")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))]
+ "! TARGET_POWERPC"
+ "{sf%I1|subf%I1c} %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r,r")
+ (minus:GPR (match_operand:GPR 1 "reg_or_short_operand" "r,I")
+ (match_operand:GPR 2 "gpc_reg_operand" "r,r")))]
+ "TARGET_POWERPC"
+ "@
+ subf %0,%2,%1
+ subfic %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWERPC"
+ "@
+ {sf.|subfc.} %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ subf. %3,%2,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (minus:P (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC"
+ "@
+ {sf.|subfc.} %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (minus:P (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_POWERPC"
+ "@
+ subf. %0,%2,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (minus:P (match_dup 1)
+ (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (minus:P (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "sub<mode>3"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (minus:SDI (match_operand:SDI 1 "reg_or_short_operand" "")
+ (match_operand:SDI 2 "reg_or_sub_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_add<mode>3 (operands[0], operands[1],
+ negate_rtx (<MODE>mode, operands[2])));
+ DONE;
+ }
+}")
+
+;; For SMIN, SMAX, UMIN, and UMAX, we use define_expands that involve a
+;; doz[i] instruction plus some auxiliary computations.  Then we need only
+;; a single define_insn for doz[i], plus the define_splits that recreate
+;; these sequences when combine produces them.
+
+(define_expand "sminsi3"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 2) (match_dup 3)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]);
+ DONE;
+ }
+
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (smin:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0) (minus:SI (match_dup 2) (match_dup 3)))]
+ "")
+
+(define_expand "smaxsi3"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 3) (match_dup 1)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]);
+ DONE;
+ }
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (smax:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0) (plus:SI (match_dup 3) (match_dup 1)))]
+ "")
+
+(define_expand "uminsi3"
+ [(set (match_dup 3) (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 4) (xor:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 3) (if_then_else:SI (gt (match_dup 3) (match_dup 4))
+ (const_int 0)
+ (minus:SI (match_dup 4) (match_dup 3))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 2) (match_dup 3)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ rs6000_emit_minmax (operands[0], UMIN, operands[1], operands[2]);
+ DONE;
+ }
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = GEN_INT (-2147483647 - 1);
+}")
+
+(define_expand "umaxsi3"
+ [(set (match_dup 3) (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 4) (xor:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 3) (if_then_else:SI (gt (match_dup 3) (match_dup 4))
+ (const_int 0)
+ (minus:SI (match_dup 4) (match_dup 3))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 3) (match_dup 1)))]
+ "TARGET_POWER || TARGET_ISEL"
+ "
+{
+ if (TARGET_ISEL)
+ {
+ rs6000_emit_minmax (operands[0], UMAX, operands[1], operands[2]);
+ DONE;
+ }
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = GEN_INT (-2147483647 - 1);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2. %3,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ doz%I2. %0,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; We don't need abs with condition code because such comparisons should
+;; never be done.
+(define_expand "abssi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ ""
+ "
+{
+ if (TARGET_ISEL)
+ {
+ emit_insn (gen_abssi2_isel (operands[0], operands[1]));
+ DONE;
+ }
+ else if (! TARGET_POWER)
+ {
+ emit_insn (gen_abssi2_nopower (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "*abssi2_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "abs %0,%1")
+
+(define_insn_and_split "abssi2_isel"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (clobber (match_scratch:SI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=y"))]
+ "TARGET_ISEL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (neg:SI (match_dup 1)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 1)
+ (const_int 0)))
+ (set (match_dup 0)
+ (if_then_else:SI (ge (match_dup 3)
+ (const_int 0))
+ (match_dup 1)
+ (match_dup 2)))]
+ "")
+
+(define_insn_and_split "abssi2_nopower"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,0")))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "! TARGET_POWER && ! TARGET_ISEL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (set (match_dup 0) (xor:SI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:SI (match_dup 0) (match_dup 2)))]
+ "")
+
+(define_insn "*nabs_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_POWER"
+ "nabs %0,%1")
+
+(define_insn_and_split "*nabs_nopower"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,0"))))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "! TARGET_POWER"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (set (match_dup 0) (xor:SI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:SI (match_dup 2) (match_dup 0)))]
+ "")
+
+(define_expand "neg<mode>2"
+ [(set (match_operand:SDI 0 "gpc_reg_operand" "")
+ (neg:SDI (match_operand:SDI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn "*neg<mode>2_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ ""
+ "neg %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 "=r,r"))]
+ ""
+ "@
+ neg. %2,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:P 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (neg:P (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (match_dup 1)))]
+ ""
+ "@
+ neg. %0,%1
+ #"
+ [(set_attr "type" "fast_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:P (match_operand:P 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (neg:P (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (neg:P (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "clz<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (clz:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ ""
+ "{cntlz|cntlz<wd>} %0,%1"
+ [(set_attr "type" "cntlz")])
+
+(define_expand "ctz<mode>2"
+ [(set (match_dup 2)
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "")))
+ (parallel [(set (match_dup 3) (and:GPR (match_dup 1)
+ (match_dup 2)))
+ (clobber (scratch:CC))])
+ (set (match_dup 4) (clz:GPR (match_dup 3)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (minus:GPR (match_dup 5) (match_dup 4)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode) - 1);
+ })
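+;; A rough sketch of the identity used above: x & -x isolates the lowest
+;; set bit of x, so for nonzero x
+;;   ctz (x) = (bitsize - 1) - clz (x & -x)
+;; e.g. x = 0b01100 in SImode: x & -x = 0b00100, clz = 29, ctz = 31 - 29 = 2.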
+
+(define_expand "ffs<mode>2"
+ [(set (match_dup 2)
+ (neg:GPR (match_operand:GPR 1 "gpc_reg_operand" "")))
+ (parallel [(set (match_dup 3) (and:GPR (match_dup 1)
+ (match_dup 2)))
+ (clobber (scratch:CC))])
+ (set (match_dup 4) (clz:GPR (match_dup 3)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (minus:GPR (match_dup 5) (match_dup 4)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (<MODE>mode);
+ operands[3] = gen_reg_rtx (<MODE>mode);
+ operands[4] = gen_reg_rtx (<MODE>mode);
+ operands[5] = GEN_INT (GET_MODE_BITSIZE (<MODE>mode));
+ })
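+;; The same trick as ctz above, biased by one: ffs (x) = bitsize -
+;; clz (x & -x).  Since cntlz returns the full bit width for a zero
+;; input, this also yields the required ffs (0) = 0 without a branch.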
+
+(define_insn "popcntb<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (unspec:GPR [(match_operand:GPR 1 "gpc_reg_operand" "r")]
+ UNSPEC_POPCNTB))]
+ "TARGET_POPCNTB"
+ "popcntb %0,%1")
+
+(define_expand "popcount<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (popcount:GPR (match_operand:GPR 1 "gpc_reg_operand" "")))]
+ "TARGET_POPCNTB"
+ {
+ rs6000_emit_popcount (operands[0], operands[1]);
+ DONE;
+ })
+
+(define_expand "parity<mode>2"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (parity:GPR (match_operand:GPR 1 "gpc_reg_operand" "")))]
+ "TARGET_POPCNTB"
+ {
+ rs6000_emit_parity (operands[0], operands[1]);
+ DONE;
+ })
+
+(define_insn "bswapsi2"
+ [(set (match_operand:SI 0 "reg_or_mem_operand" "=r,Z,&r")
+ (bswap:SI (match_operand:SI 1 "reg_or_mem_operand" "Z,r,r")))]
+ ""
+ "@
+ {lbrx|lwbrx} %0,%y1
+ {stbrx|stwbrx} %1,%y0
+ #"
+ [(set_attr "length" "4,4,12")])
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (bswap:SI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (rotate:SI (match_dup 1) (const_int 8)))
+ (set (zero_extract:SI (match_dup 0)
+ (const_int 8)
+ (const_int 0))
+ (match_dup 1))
+ (set (zero_extract:SI (match_dup 0)
+ (const_int 8)
+ (const_int 16))
+ (rotate:SI (match_dup 1)
+ (const_int 16)))]
+ "")
+
+(define_expand "mulsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_short_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_mulsi3_mq (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_mulsi3_no_mq (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "mulsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (clobber (match_scratch:SI 3 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls|mullw} %0,%1,%2
+ {muli|mulli} %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "imul")))])
+
+(define_insn "mulsi3_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))]
+ "! TARGET_POWER"
+ "@
+ {muls|mullw} %0,%1,%2
+ {muli|mulli} %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "imul")))])
+
+(define_insn "*mulsi3_mq_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))
+ (clobber (match_scratch:SI 4 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls.|mullw.} %3,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*mulsi3_no_mq_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWER"
+ "@
+ {muls.|mullw.} %3,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*mulsi3_mq_internal2"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls.|mullw.} %0,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*mulsi3_no_mq_internal2"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER"
+ "@
+ {muls.|mullw.} %0,%1,%2
+ #"
+ [(set_attr "type" "imul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (mult:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Operand 1 is divided by operand 2; the quotient goes to operand 0
+;; and the remainder to operand 3.
+;; ??? At some point, see what, if anything, we can do about if (x % y == 0).
+
+(define_expand "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))])]
+ "TARGET_POWER || (! TARGET_POWER && ! TARGET_POWERPC)"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_divss_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, 4));
+ DONE;
+ }
+}")
+
+(define_insn "*divmodsi4_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (set (match_operand:SI 3 "register_operand" "=q")
+ (mod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "divs %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_expand "udiv<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC || (! TARGET_POWER && ! TARGET_POWERPC)"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_quous_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_udivsi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "udivsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "divwu %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_insn "*udivsi3_no_mq"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (udiv:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "div<wd>u %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 0 "" "")
+ (const_string "idiv")]
+ (const_string "ldiv")))])
+
+
+;; For powers of two we can do srai/aze for divide and then adjust for
+;; modulus.  If it isn't a power of two, FAIL on POWER so divmodsi4 will be
+;; used; for PowerPC, force the operands into registers and do a normal
+;; divide; for AIX common-mode, use a quoss call on register operands.
+(define_expand "div<mode>3"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 0
+ && exact_log2 (INTVAL (operands[2])) >= 0)
+ ;
+ else if (TARGET_POWERPC)
+ {
+ operands[2] = force_reg (<MODE>mode, operands[2]);
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_divsi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ }
+ else if (TARGET_POWER)
+ FAIL;
+ else
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_quoss_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+}")
+
+(define_insn "divsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "divw %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_insn "*div<mode>3_no_mq"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "div<wd> %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 0 "" "")
+ (const_string "idiv")]
+ (const_string "ldiv")))])
+
+(define_expand "mod<mode>3"
+ [(use (match_operand:GPR 0 "gpc_reg_operand" ""))
+ (use (match_operand:GPR 1 "gpc_reg_operand" ""))
+ (use (match_operand:GPR 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ int i;
+ rtx temp1;
+ rtx temp2;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 0
+ || (i = exact_log2 (INTVAL (operands[2]))) < 0)
+ FAIL;
+
+ temp1 = gen_reg_rtx (<MODE>mode);
+ temp2 = gen_reg_rtx (<MODE>mode);
+
+ emit_insn (gen_div<mode>3 (temp1, operands[1], operands[2]));
+ emit_insn (gen_ashl<mode>3 (temp2, temp1, GEN_INT (i)));
+ emit_insn (gen_sub<mode>3 (operands[0], operands[1], temp2));
+ DONE;
+}")
+
+(define_insn ""
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "exact_log2_cint_operand" "N")))]
+ ""
+ "{srai|sra<wd>i} %0,%1,%p2\;{aze|addze} %0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
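+;; A rough sketch of why the pair above works: sra<wd>i alone divides by
+;; 2^n rounding toward minus infinity, but it sets the carry when the
+;; operand is negative and any one bits are shifted out; addze adds that
+;; carry back, giving the round-toward-zero quotient C requires.
+;; e.g. -7 / 4: srawi yields -2 with CA = 1, and addze corrects it to -1.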
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (clobber (match_scratch:P 3 "=r,r"))]
+ ""
+ "@
+ {srai|sra<wd>i} %3,%1,%p2\;{aze.|addze.} %3,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")
+ (set_attr "cell_micro" "not")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "exact_log2_cint_operand"
+ ""))
+ (const_int 0)))
+ (clobber (match_scratch:GPR 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (div:<MODE> (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (div:P (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {srai|sra<wd>i} %0,%1,%p2\;{aze.|addze.} %0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")
+ (set_attr "cell_micro" "not")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "exact_log2_cint_operand"
+ ""))
+ (const_int 0)))
+ (set (match_operand:GPR 0 "gpc_reg_operand" "")
+ (div:GPR (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (div:<MODE> (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI
+ (plus:DI (ashift:DI
+ (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (const_int 32))
+ (zero_extend:DI (match_operand:SI 4 "register_operand" "2")))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (set (match_operand:SI 2 "register_operand" "=*q")
+ (umod:SI
+ (plus:DI (ashift:DI
+ (zero_extend:DI (match_dup 1)) (const_int 32))
+ (zero_extend:DI (match_dup 4)))
+ (match_dup 3)))]
+ "TARGET_POWER"
+ "div %0,%1,%3"
+ [(set_attr "type" "idiv")])
+
+;; To do an unsigned divide we must handle the case of the divisor looking
+;; like a negative number.  If it is a constant less than 2**31, we don't
+;; have to worry about the branches, so we make a few subroutines here.
+;;
+;; First comes the normal case.
+(define_expand "udivmodsi4_normal"
+ [(set (match_dup 4) (const_int 0))
+ (parallel [(set (match_operand:SI 0 "" "")
+ (udiv:SI (plus:DI (ashift:DI (zero_extend:DI (match_dup 4))
+ (const_int 32))
+ (zero_extend:DI (match_operand:SI 1 "" "")))
+ (match_operand:SI 2 "" "")))
+ (set (match_operand:SI 3 "" "")
+ (umod:SI (plus:DI (ashift:DI (zero_extend:DI (match_dup 4))
+ (const_int 32))
+ (zero_extend:DI (match_dup 1)))
+ (match_dup 2)))])]
+ "TARGET_POWER"
+ "
+{ operands[4] = gen_reg_rtx (SImode); }")
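+;; A rough sketch of the POWER div instruction used here: it divides the
+;; 64-bit concatenation of a GPR (high word) and MQ (low word), leaving
+;; the quotient in the target GPR and the remainder in MQ;
+;; udivmodsi4_normal zeroes the high word, reducing this to a plain
+;; 32/32-bit unsigned divide.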
+
+;; This handles the branches.
+(define_expand "udivmodsi4_tests"
+ [(set (match_operand:SI 0 "" "") (const_int 0))
+ (set (match_operand:SI 3 "" "") (match_operand:SI 1 "" ""))
+ (set (match_dup 5) (compare:CCUNS (match_dup 1) (match_operand:SI 2 "" "")))
+ (set (pc) (if_then_else (ltu (match_dup 5) (const_int 0))
+ (label_ref (match_operand:SI 4 "" "")) (pc)))
+ (set (match_dup 0) (const_int 1))
+ (set (match_dup 3) (minus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 6) (compare:CC (match_dup 2) (const_int 0)))
+ (set (pc) (if_then_else (lt (match_dup 6) (const_int 0))
+ (label_ref (match_dup 4)) (pc)))]
+ "TARGET_POWER"
+ "
+{ operands[5] = gen_reg_rtx (CCUNSmode);
+ operands[6] = gen_reg_rtx (CCmode);
+}")
+
+(define_expand "udivmodsi4"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))])]
+ ""
+ "
+{
+ rtx label = 0;
+
+ if (! TARGET_POWER)
+ {
+ if (! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_divus_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, 4));
+ DONE;
+ }
+ else
+ FAIL;
+ }
+
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) < 0)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ label = gen_label_rtx ();
+ emit (gen_udivmodsi4_tests (operands[0], operands[1], operands[2],
+ operands[3], label));
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit (gen_udivmodsi4_normal (operands[0], operands[1], operands[2],
+ operands[3]));
+ if (label)
+ emit_label (label);
+
+ DONE;
+}")
+
+;; AIX architecture-independent common-mode multiply (DImode),
+;; divide/modulus, and quotient subroutine calls.  Input operands are in R3
+;; and R4; results are in R3 and sometimes R4; the link register is always
+;; clobbered by the bla instruction; R0 is sometimes clobbered; MQ is also
+;; sometimes clobbered, but it is assumed unused when generating common-mode
+;; code, so we ignore it.
+(define_insn "mulh_call"
+ [(set (reg:SI 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (reg:SI 3))
+ (sign_extend:DI (reg:SI 4)))
+ (const_int 32))))
+ (clobber (reg:SI LR_REGNO))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __mulh"
+ [(set_attr "type" "imul")])
+
+(define_insn "mull_call"
+ [(set (reg:DI 3)
+ (mult:DI (sign_extend:DI (reg:SI 3))
+ (sign_extend:DI (reg:SI 4))))
+ (clobber (reg:SI LR_REGNO))
+ (clobber (reg:SI 0))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __mull"
+ [(set_attr "type" "imul")])
+
+(define_insn "divss_call"
+ [(set (reg:SI 3)
+ (div:SI (reg:SI 3) (reg:SI 4)))
+ (set (reg:SI 4)
+ (mod:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (reg:SI LR_REGNO))
+ (clobber (reg:SI 0))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __divss"
+ [(set_attr "type" "idiv")])
+
+(define_insn "divus_call"
+ [(set (reg:SI 3)
+ (udiv:SI (reg:SI 3) (reg:SI 4)))
+ (set (reg:SI 4)
+ (umod:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (reg:SI LR_REGNO))
+ (clobber (reg:SI 0))
+ (clobber (match_scratch:CC 0 "=x"))
+ (clobber (reg:CC CR1_REGNO))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __divus"
+ [(set_attr "type" "idiv")])
+
+(define_insn "quoss_call"
+ [(set (reg:SI 3)
+ (div:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (reg:SI LR_REGNO))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __quoss"
+ [(set_attr "type" "idiv")])
+
+(define_insn "quous_call"
+ [(set (reg:SI 3)
+ (udiv:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (reg:SI LR_REGNO))
+ (clobber (reg:SI 0))
+ (clobber (match_scratch:CC 0 "=x"))
+ (clobber (reg:CC CR1_REGNO))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __quous"
+ [(set_attr "type" "idiv")])
+
+;; Logical instructions
+;; The logical instructions are mostly handled by using match_operator,
+;; but the plain AND insns are somewhat different because there is no
+;; plain 'andi' (only 'andi.'), no plain 'andis', and there are all
+;; those rotate-and-mask operations.  Thus, the AND insns come first.
+
+(define_expand "andsi3"
+ [(parallel
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "and_operand" "")))
+ (clobber (match_scratch:CC 3 ""))])]
+ ""
+ "")
+
+(define_insn "andsi3_mc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "and_operand" "?r,T,K,L")))
+ (clobber (match_scratch:CC 3 "=X,X,x,x"))]
+ "rs6000_gen_cell_microcode"
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2"
+ [(set_attr "type" "*,*,compare,compare")])
+
+(define_insn "andsi3_nomc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "and_operand" "?r,T")))
+ (clobber (match_scratch:CC 3 "=X,X"))]
+ "!rs6000_gen_cell_microcode"
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2")
+
+(define_insn "andsi3_internal0_nomc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "and_operand" "?r,T")))]
+ "!rs6000_gen_cell_microcode"
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2")
+
+
+;; Note: to set CR fields other than cr0, we do the and-immediate and then
+;; test again -- this avoids an mfcr, which on the higher-end machines
+;; causes an execution serialization.
+
+(define_insn "*andsi3_internal2_mc"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_32BIT && rs6000_gen_cell_microcode"
+ "@
+ and. %3,%1,%2
+ {andil.|andi.} %3,%1,%b2
+ {andiu.|andis.} %3,%1,%u2
+ {rlinm.|rlwinm.} %3,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+(define_insn "*andsi3_internal3_mc"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT && rs6000_gen_cell_microcode"
+ "@
+ #
+ {andil.|andi.} %3,%1,%b2
+ {andiu.|andis.} %3,%1,%u2
+ {rlinm.|rlwinm.} %3,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "8,4,4,4,8,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:GPR (match_operand:GPR 1 "gpc_reg_operand" "")
+ (match_operand:GPR 2 "and_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:GPR 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (and:<MODE> (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; We don't have a 32 bit "and. rt,ra,rb" for ppc64.  The CR is set from the
+;; whole 64 bit reg, and we don't know what is in the high 32 bits.
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*andsi3_internal4"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_32BIT && rs6000_gen_cell_microcode"
+ "@
+ and. %0,%1,%2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2
+ {rlinm.|rlwinm.} %0,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+(define_insn "*andsi3_internal5_mc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT && rs6000_gen_cell_microcode"
+ "@
+ #
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2
+ {rlinm.|rlwinm.} %0,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "8,4,4,4,8,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "and_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Handle the PowerPC64 rlwinm corner case
+
+(define_insn_and_split "*andsi3_internal6"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "mask_operand_wrap" "i")))]
+ "TARGET_POWERPC64"
+ "#"
+ "TARGET_POWERPC64"
+ [(set (match_dup 0)
+ (and:SI (rotate:SI (match_dup 1) (match_dup 3))
+ (match_dup 4)))
+ (set (match_dup 0)
+ (rotate:SI (match_dup 0) (match_dup 5)))]
+ "
+{
+ int mb = extract_MB (operands[2]);
+ int me = extract_ME (operands[2]);
+ operands[3] = GEN_INT (me + 1);
+ operands[5] = GEN_INT (32 - (me + 1));
+ operands[4] = GEN_INT (~((HOST_WIDE_INT) -1 << (33 + me - mb)));
+}"
+ [(set_attr "length" "8")])
+
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_logical_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! logical_operand (operands[2], SImode))
+ {
+ HOST_WIDE_INT value = INTVAL (operands[2]);
+ rtx tmp = ((!can_create_pseudo_p ()
+ || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ emit_insn (gen_iorsi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_iorsi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_logical_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! logical_operand (operands[2], SImode))
+ {
+ HOST_WIDE_INT value = INTVAL (operands[2]);
+ rtx tmp = ((!can_create_pseudo_p ()
+ || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ emit_insn (gen_xorsi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_xorsi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_insn "*boolsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+ (match_operator:SI 3 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r,r")
+ (match_operand:SI 2 "logical_operand" "r,K,L")]))]
+ ""
+ "@
+ %q3 %0,%1,%2
+ {%q3il|%q3i} %0,%1,%b2
+ {%q3iu|%q3is} %0,%1,%u2")
+
+(define_insn "*boolsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_32BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_32BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split a logical operation that we can't do in one insn into two insns,
+;; each of which does one 16-bit part. This is used by combine.
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operator:SI 3 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "non_logical_cint_operand" "")]))]
+ ""
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+"
+{
+ rtx i;
+ i = GEN_INT (INTVAL (operands[2]) & (~ (HOST_WIDE_INT) 0xffff));
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[1], i);
+ i = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ operands[5] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[0], i);
+}")
+
+(define_insn "*boolcsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 3 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r")]))]
+ ""
+ "%q3 %0,%2,%1")
+
+(define_insn "*boolcsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_32BIT"
+ "@
+ %q4. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolcsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_32BIT"
+ "@
+ %q4. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 3 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))]))]
+ ""
+ "%q3 %0,%1,%2")
+
+(define_insn "*boolccsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_32BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_32BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; maskir insn.  We need four forms because the operands might appear in
+;; arbitrary orders.  Don't define forms that only set CR fields, because
+;; those would modify an input register.
+
+(define_insn "*maskir_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "r"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_dup 2))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand:SI 3 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal4"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+(define_insn "*maskir_internal5"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0"))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0"))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "r,r")
+ (match_dup 2)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_dup 2)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal7"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "r,r")
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal8"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Rotate and shift insns, in all their variants. These support shifts,
+;; field inserts and extracts, and various combinations thereof.
+(define_expand "insv"
+ [(set (zero_extract (match_operand 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand 3 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ /* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
+ the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
+ compiler if the address of the structure is taken later. Likewise, do
+ not handle invalid E500 subregs. */
+ if (GET_CODE (operands[0]) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD
+ || ((TARGET_E500_DOUBLE || TARGET_SPE)
+ && invalid_e500_subreg (operands[0], GET_MODE (operands[0])))))
+ FAIL;
+
+ if (TARGET_POWERPC64 && GET_MODE (operands[0]) == DImode)
+ emit_insn (gen_insvdi (operands[0], operands[1], operands[2], operands[3]));
+ else
+ emit_insn (gen_insvsi (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_insn "insvsi"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "gpc_reg_operand" "r"))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal1"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (rotate:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal2"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal3"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal4"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (zero_extract:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")
+ (match_operand:SI 5 "const_int_operand" "i")))]
+ "INTVAL (operands[4]) >= INTVAL (operands[1])"
+ "*
+{
+ int extract_start = INTVAL (operands[5]) & 31;
+ int extract_size = INTVAL (operands[4]) & 31;
+ int insert_start = INTVAL (operands[2]) & 31;
+ int insert_size = INTVAL (operands[1]) & 31;
+
+  /* Align the extract field with the insert field.  */
+ operands[5] = GEN_INT (extract_start + extract_size - insert_start - insert_size);
+ operands[1] = GEN_INT (insert_start + insert_size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h5,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+;; combine patterns for rlwimi
+(define_insn "*insvsi_internal5"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (match_operand:SI 1 "mask_operand" "i"))
+ (and:SI (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 5 "mask_operand" "i"))))]
+ "TARGET_POWERPC && INTVAL(operands[1]) == ~INTVAL(operands[5])"
+ "*
+{
+ int me = extract_ME(operands[5]);
+ int mb = extract_MB(operands[5]);
+ operands[4] = GEN_INT(32 - INTVAL(operands[2]));
+ operands[2] = GEN_INT(mb);
+ operands[1] = GEN_INT(me);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "*insvsi_internal6"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 5 "mask_operand" "i"))
+ (and:SI (match_operand:SI 4 "gpc_reg_operand" "0")
+ (match_operand:SI 1 "mask_operand" "i"))))]
+ "TARGET_POWERPC && INTVAL(operands[1]) == ~INTVAL(operands[5])"
+ "*
+{
+ int me = extract_ME(operands[5]);
+ int mb = extract_MB(operands[5]);
+ operands[4] = GEN_INT(32 - INTVAL(operands[2]));
+ operands[2] = GEN_INT(mb);
+ operands[1] = GEN_INT(me);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}"
+ [(set_attr "type" "insert_word")])
+
+(define_insn "insvdi"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "gpc_reg_operand" "r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[2]) & 63;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[1] = GEN_INT (64 - start - size);
+ return \"rldimi %0,%3,%H1,%H2\";
+}"
+ [(set_attr "type" "insert_dword")])
+
+(define_insn "*insvdi_internal2"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashiftrt:DI (match_operand:DI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "TARGET_POWERPC64
+ && insvdi_rshift_rlwimi_p (operands[1], operands[2], operands[4])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 63;
+ int start = (INTVAL (operands[2]) & 63) - 32;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[4] = GEN_INT (64 - shift - start - size);
+ operands[2] = GEN_INT (start);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"rlwimi %0,%3,%h4,%h2,%h1\";
+}")
+
+(define_insn "*insvdi_internal3"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (lshiftrt:DI (match_operand:DI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "TARGET_POWERPC64
+ && insvdi_rshift_rlwimi_p (operands[1], operands[2], operands[4])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 63;
+ int start = (INTVAL (operands[2]) & 63) - 32;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[4] = GEN_INT (64 - shift - start - size);
+ operands[2] = GEN_INT (start);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"rlwimi %0,%3,%h4,%h2,%h1\";
+}")
+
+(define_expand "extzv"
+ [(set (match_operand 0 "gpc_reg_operand" "")
+ (zero_extract (match_operand 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ /* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
+ the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
+ compiler if the address of the structure is taken later. */
+ if (GET_CODE (operands[0]) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD))
+ FAIL;
+
+ if (TARGET_POWERPC64 && GET_MODE (operands[1]) == DImode)
+ emit_insn (gen_extzvdi (operands[0], operands[1], operands[2], operands[3]));
+ else
+ emit_insn (gen_extzvsi (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+(define_insn "extzvsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm|rlwinm} %0,%1,%3,%s2,31\";
+}")
+
+(define_insn "*extzvsi_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ /* Force a split for the non-CR0 alternative.  */
+ if (which_alternative == 1)
+ return \"#\";
+
+ /* If the bit-field being tested fits in the upper or lower half of a
+ word, it is possible to use andiu. or andil. to test it. This is
+ useful because the condition register set-use delay is smaller for
+ andi[ul]. than for rlinm. This doesn't work when the starting bit
+ position is 0 because the LT and GT bits may be set wrong. */
+
+ if ((start > 0 && start + size <= 16) || start >= 16)
+ {
+ operands[3] = GEN_INT (((1 << (16 - (start & 15)))
+ - (1 << (16 - (start & 15) - size))));
+ if (start < 16)
+ return \"{andiu.|andis.} %4,%1,%3\";
+ else
+ return \"{andil.|andi.} %4,%1,%3\";
+ }
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm.|rlwinm.} %4,%1,%3,%s2,31\";
+}"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "reload_completed"
+ [(set (match_dup 4)
+ (zero_extract:SI (match_dup 1) (match_dup 2)
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*extzvsi_internal2"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ /* Force a split for the non-CR0 alternative.  */
+ if (which_alternative == 1)
+ return \"#\";
+
+ /* Since we are using the output value, we can't ignore any need for
+ a shift. The bit-field must end at the LSB. */
+ if (start >= 16 && start + size == 32)
+ {
+ operands[3] = GEN_INT ((1 << size) - 1);
+ return \"{andil.|andi.} %0,%1,%3\";
+ }
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm.|rlwinm.} %0,%1,%3,%s2,31\";
+}"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "extzvdi"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ "TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[3]) & 63;
+ int size = INTVAL (operands[2]) & 63;
+
+ if (start + size >= 64)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ operands[2] = GEN_INT (64 - size);
+ return \"rldicl %0,%1,%3,%2\";
+}")
+
+(define_insn "*extzvdi_internal1"
+ [(set (match_operand:CC 0 "gpc_reg_operand" "=x")
+ (compare:CC (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r"))]
+ "TARGET_64BIT && rs6000_gen_cell_microcode"
+ "*
+{
+ int start = INTVAL (operands[3]) & 63;
+ int size = INTVAL (operands[2]) & 63;
+
+ if (start + size >= 64)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ operands[2] = GEN_INT (64 - size);
+ return \"rldicl. %4,%1,%3,%2\";
+}"
+ [(set_attr "type" "compare")])
+
+(define_insn "*extzvdi_internal2"
+ [(set (match_operand:CC 4 "gpc_reg_operand" "=x")
+ (compare:CC (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extract:DI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "TARGET_64BIT && rs6000_gen_cell_microcode"
+ "*
+{
+ int start = INTVAL (operands[3]) & 63;
+ int size = INTVAL (operands[2]) & 63;
+
+ if (start + size >= 64)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ operands[2] = GEN_INT (64 - size);
+ return \"rldicl. %0,%1,%3,%2\";
+}"
+ [(set_attr "type" "compare")])
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))]
+ ""
+ "@
+ {rlnm|rlwnm} %0,%1,%2,0xffffffff
+ {rlinm|rlwinm} %0,%1,%h2,0xffffffff"
+ [(set_attr "type" "var_shift_rotate,integer")])
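+
+;; The 0xffffffff mask is just the full-word spelling of MB = 0, ME = 31,
+;; so these are plain rotates; %h truncates a constant count to its low
+;; five bits.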
+
+(define_insn "*rotlsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %3,%1,%2,0xffffffff
+ {rlinm.|rlwinm.} %3,%1,%h2,0xffffffff
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (rotate:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (rotate:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %0,%1,%2,0xffffffff
+ {rlinm.|rlwinm.} %0,%1,%h2,0xffffffff
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (rotate:SI (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (rotate:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal4"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i"))
+ (match_operand:SI 3 "mask_operand" "n,n")))]
+ ""
+ "@
+ {rlnm|rlwnm} %0,%1,%2,%m3,%M3
+ {rlinm|rlwinm} %0,%1,%h2,%m3,%M3"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotlsi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (match_operand:SI 3 "mask_operand" "n,n,n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r,r,r"))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %4,%1,%2,%m3,%M3
+ {rlinm.|rlwinm.} %4,%1,%h2,%m3,%M3
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "reload_completed"
+ [(set (match_dup 4)
+ (and:SI (rotate:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (match_operand:SI 3 "mask_operand" "n,n,n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %0,%1,%2,%m3,%M3
+ {rlinm.|rlwinm.} %0,%1,%h2,%m3,%M3
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:SI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal7"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")) 0)))]
+ ""
+ "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xff"
+ [(set (attr "cell_micro")
+ (if_then_else (match_operand:SI 2 "const_int_operand" "")
+ (const_string "not")
+ (const_string "always")))])
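+
+;; The conditional cell_micro attribute above records that on Cell the
+;; variable-count rlwnm is always microcoded while the immediate-count
+;; rlwinm is not.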
+
+(define_insn "*rotlsi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %3,%1,%2,0xff
+ {rlinm.|rlwinm.} %3,%1,%h2,0xff
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:QI
+ (rotate:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal9"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %0,%1,%2,0xff
+ {rlinm.|rlwinm.} %0,%1,%h2,0xff
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:QI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal10"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")) 0)))]
+ ""
+ "@
+ {rlnm|rlwnm} %0,%1,%2,0xffff
+ {rlinm|rlwinm} %0,%1,%h2,0xffff"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotlsi3_internal11"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %3,%1,%2,0xffff
+ {rlinm.|rlwinm.} %3,%1,%h2,0xffff
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:HI
+ (rotate:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotlsi3_internal12"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ ""
+ "@
+ {rlnm.|rlwnm.} %0,%1,%2,0xffff
+ {rlinm.|rlwinm.} %0,%1,%h2,0xffff
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:SI
+ (subreg:HI
+ (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Note that we use "sle." instead of "sl." so that we can set
+;; SHIFT_COUNT_TRUNCATED.
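+;; (SHIFT_COUNT_TRUNCATED tells the middle end that shift counts are
+;; implicitly reduced modulo the operand width, so explicit masking of a
+;; variable count with "& 31" can be dropped.)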
+
+(define_expand "ashlsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_ashlsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_ashlsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "ashlsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))
+ (clobber (match_scratch:SI 3 "=q,X"))]
+ "TARGET_POWER"
+ "@
+ sle %0,%1,%2
+ {sli|slwi} %0,%1,%h2")
+
+(define_insn "ashlsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))]
+ "! TARGET_POWER"
+ "@
+ {sl|slw} %0,%1,%2
+ {sli|slwi} %0,%1,%h2"
+ [(set_attr "type" "var_shift_rotate,shift")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ sle. %3,%1,%2
+ {sli.|slwi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ {sl.|slw.} %3,%1,%2
+ {sli.|slwi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ sle. %0,%1,%2
+ {sli.|slwi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashift:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ {sl.|slw.} %0,%1,%2
+ {sli.|slwi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashift:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (ashift:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "rlwinm"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "mask_operand" "n")))]
+ "includes_lshift_p (operands[2], operands[3])"
+ "{rlinm|rlwinm} %0,%1,%h2,%m3,%M3")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ "includes_lshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %4,%1,%h2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "includes_lshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_lshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%h2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_lshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; The AIX assembler mis-handles "sri x,x,0", so write that case as
+;; "sli x,x,0".
+(define_expand "lshrsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_lshrsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_lshrsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "lshrsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,i")))
+ (clobber (match_scratch:SI 3 "=q,X,X"))]
+ "TARGET_POWER"
+ "@
+ sre %0,%1,%2
+ mr %0,%1
+ {s%A2i|s%A2wi} %0,%1,%h2")
+
+(define_insn "lshrsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "O,r,i")))]
+ "! TARGET_POWER"
+ "@
+ mr %0,%1
+ {sr|srw} %0,%1,%2
+ {sri|srwi} %0,%1,%h2"
+ [(set_attr "type" "integer,var_shift_rotate,shift")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,i,r,O,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,X,r,r,X,r"))
+ (clobber (match_scratch:SI 4 "=q,X,X,q,X,X"))]
+ "TARGET_POWER"
+ "@
+ sre. %3,%1,%2
+ mr. %1,%1
+ {s%A2i.|s%A2wi.} %3,%1,%h2
+ #
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "O,r,i,O,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=X,r,r,X,r,r"))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ mr. %1,%1
+ {sr.|srw.} %3,%1,%2
+ {sri.|srwi.} %3,%1,%h2
+ #
+ #
+ #"
+ [(set_attr "type" "delayed_compare,var_delayed_compare,delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,i,r,O,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,X,q,X,X"))]
+ "TARGET_POWER"
+ "@
+ sre. %0,%1,%2
+ mr. %0,%1
+ {s%A2i.|s%A2wi.} %0,%1,%h2
+ #
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "O,r,i,O,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT"
+ "@
+ mr. %0,%1
+ {sr.|srw.} %0,%1,%2
+ {sri.|srwi.} %0,%1,%h2
+ #
+ #
+ #"
+ [(set_attr "type" "delayed_compare,var_delayed_compare,delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (lshiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "mask_operand" "n")))]
+ "includes_rshift_p (operands[2], operands[3])"
+ "{rlinm|rlwinm} %0,%1,%s2,%m3,%M3")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ "includes_rshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %4,%1,%s2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "includes_rshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "mask_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_rshift_p (operands[2], operands[3])"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%s2,%m3,%M3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:SI 3 "mask_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "includes_rshift_p (operands[2], operands[3]) && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (255))"
+ "{rlinm|rlwinm} %0,%1,%s2,0xff")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "includes_rshift_p (operands[2], GEN_INT (255))"
+ "@
+ {rlinm.|rlwinm.} %3,%1,%s2,0xff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "includes_rshift_p (operands[2], GEN_INT (255)) && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:QI
+ (lshiftrt:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (255))"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%s2,0xff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:QI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (255)) && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (65535))"
+ "{rlinm|rlwinm} %0,%1,%s2,0xffff")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "includes_rshift_p (operands[2], GEN_INT (65535))"
+ "@
+ {rlinm.|rlwinm.} %3,%1,%s2,0xffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "includes_rshift_p (operands[2], GEN_INT (65535)) && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:SI (subreg:HI
+ (lshiftrt:SI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (65535))"
+ "@
+ {rlinm.|rlwinm.} %0,%1,%s2,0xffff
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (zero_extend:SI
+ (subreg:HI
+ (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+ "includes_rshift_p (operands[2], GEN_INT (65535)) && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (ashiftrt:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 31)))]
+ "TARGET_POWER"
+ "rrib %0,%1,%2")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (lshiftrt:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 31)))]
+ "TARGET_POWER"
+ "rrib %0,%1,%2")
+
+(define_insn ""
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (const_int 1)
+ (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (zero_extract:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (const_int 1)
+ (const_int 0)))]
+ "TARGET_POWER"
+ "rrib %0,%1,%2")
+
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_ashrsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_ashrsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "ashrsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))
+ (clobber (match_scratch:SI 3 "=q,X"))]
+ "TARGET_POWER"
+ "@
+ srea %0,%1,%2
+ {srai|srawi} %0,%1,%h2"
+ [(set_attr "type" "shift")])
+
+(define_insn "ashrsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))]
+ "! TARGET_POWER"
+ "@
+ {sra|sraw} %0,%1,%2
+ {srai|srawi} %0,%1,%h2"
+ [(set_attr "type" "var_shift_rotate,shift")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ srea. %3,%1,%2
+ {srai.|srawi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))]
+ "! TARGET_POWER"
+ "@
+ {sra.|sraw.} %3,%1,%2
+ {srai.|srawi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ srea. %0,%1,%2
+ {srai.|srawi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER"
+ "@
+ {sra.|sraw.} %0,%1,%2
+ {srai.|srawi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Floating-point insns, excluding normal data motion.
+;;
+;; PowerPC has a full set of single-precision floating point instructions.
+;;
+;; For the POWER architecture, we pretend that we have both SFmode and
+;; DFmode insns, while, in fact, all fp insns are actually done in double.
+;; The only conversions we will do will be when storing to memory. In that
+;; case, we will use the "frsp" instruction before storing.
+;;
+;; Note that when we store into a single-precision memory location, we need to
+;; use the frsp insn first. If the register being stored isn't dead, we
+;; need a scratch register for the frsp. But this is difficult when the store
+;; is done by reload.  It is not incorrect to do the frsp on the register
+;; in this case; we just lose precision that we would otherwise have
+;; gotten but that was never guaranteed anyway.  Perhaps this should be
+;; tightened up at some point.
+
+(define_expand "extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_extend:DF (match_operand:SF 1 "reg_or_none500mem_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn_and_split "*extendsfdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f,f")
+ (float_extend:DF (match_operand:SF 1 "reg_or_mem_operand" "0,f,m")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "@
+ #
+ fmr %0,%1
+ lfs%U1%X1 %0,%1"
+ "&& reload_completed && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+{
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+}
+ [(set_attr "type" "fp,fp,fpload")])
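+
+;; Single-precision values are kept in the FPRs in double format, so
+;; extending a register to itself is a no-op: the "0" alternative above
+;; splits away to nothing after reload, leaving fmr for distinct registers
+;; and lfs, which converts on the way in, for loads.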
+
+(define_expand "truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*truncdfsf2_fpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "frsp %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "aux_truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRSP))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "frsp %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "negsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_insn "*negsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "abssf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_insn "*abssf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (abs:SF (match_operand:SF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fadds %0,%1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_addsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fa|fadd} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fsubs %0,%1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_addsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fs|fsub} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fmuls %0,%1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_mul_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "{fm|fmul} %0,%1,%2"
+ [(set_attr "type" "dmul")])
+
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && TARGET_SINGLE_FLOAT && !TARGET_SIMPLE_FPU"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && !TARGET_SIMPLE_FPU"
+ "fdivs %0,%1,%2"
+ [(set_attr "type" "sdiv")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && !TARGET_SIMPLE_FPU"
+ "{fd|fdiv} %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+
+(define_expand "recipsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")]
+ UNSPEC_FRES))]
+ "TARGET_RECIP && TARGET_HARD_FLOAT && TARGET_PPC_GFXOPT && !optimize_size
+ && flag_finite_math_only && !flag_trapping_math"
+{
+ rs6000_emit_swdivsf (operands[0], operands[1], operands[2]);
+ DONE;
+})
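+
+;; rs6000_emit_swdivsf (rs6000.c) expands this as a software divide: an
+;; fres reciprocal estimate of %2 refined by Newton-Raphson steps and a
+;; final multiply, which is only safe under the finite, non-trapping math
+;; conditions above.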
+
+(define_insn "fres"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_PPC_GFXOPT && flag_finite_math_only"
+ "fres %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && TARGET_FUSED_MADD"
+ "fmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_maddsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fma|fmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && TARGET_FUSED_MADD"
+ "fmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_maddsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fms|fmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && TARGET_SINGLE_FLOAT && HONOR_SIGNED_ZEROS (SFmode)"
+ "fnmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_maddsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_SINGLE_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && ! HONOR_SIGNED_ZEROS (SFmode)"
+ "fnmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_maddsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f"))
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && ! HONOR_SIGNED_ZEROS (SFmode)"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && TARGET_SINGLE_FLOAT && HONOR_SIGNED_ZEROS (SFmode)"
+ "fnmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_maddsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && TARGET_SINGLE_FLOAT && ! HONOR_SIGNED_ZEROS (SFmode)"
+ "fnmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_maddsub_s")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 3 "gpc_reg_operand" "f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD
+ && ! HONOR_SIGNED_ZEROS (SFmode)"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+(define_expand "sqrtsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "(TARGET_PPC_GPOPT || TARGET_POWER2 || TARGET_XILINX_FPU)
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT
+ && !TARGET_SIMPLE_FPU"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "(TARGET_PPC_GPOPT || TARGET_XILINX_FPU) && TARGET_HARD_FLOAT
+ && TARGET_FPRS && TARGET_SINGLE_FLOAT && !TARGET_SIMPLE_FPU"
+ "fsqrts %0,%1"
+ [(set_attr "type" "ssqrt")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_POWER2 && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && !TARGET_SIMPLE_FPU"
+ "fsqrt %0,%1"
+ [(set_attr "type" "dsqrt")])
+
+(define_expand "rsqrtsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")]
+ UNSPEC_RSQRT))]
+ "TARGET_RECIP && TARGET_HARD_FLOAT && TARGET_PPC_GFXOPT && !optimize_size
+ && flag_finite_math_only && !flag_trapping_math"
+{
+ rs6000_emit_swrsqrtsf (operands[0], operands[1]);
+ DONE;
+})
+
+(define_insn "*rsqrt_internal1"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")]
+ UNSPEC_RSQRT))]
+ "TARGET_HARD_FLOAT && TARGET_PPC_GFXOPT"
+ "frsqrte %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "copysignsf3"
+ [(set (match_dup 3)
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "")))
+ (set (match_dup 4)
+ (neg:SF (abs:SF (match_dup 1))))
+ (set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_dup 5))
+ (match_dup 3)
+ (match_dup 4)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT
+ && !HONOR_NANS (SFmode) && !HONOR_SIGNED_ZEROS (SFmode)"
+ {
+ operands[3] = gen_reg_rtx (SFmode);
+ operands[4] = gen_reg_rtx (SFmode);
+ operands[5] = CONST0_RTX (SFmode);
+ })
+
+(define_expand "copysigndf3"
+ [(set (match_dup 3)
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "")))
+ (set (match_dup 4)
+ (neg:DF (abs:DF (match_dup 1))))
+ (set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_dup 5))
+ (match_dup 3)
+ (match_dup 4)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && !HONOR_NANS (DFmode) && !HONOR_SIGNED_ZEROS (DFmode)"
+ {
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = CONST0_RTX (DFmode);
+ })
+
+;; For MIN, MAX, and conditional move, we use DEFINE_EXPANDs that emit a
+;; fsel instruction plus some auxiliary computations.  Then we just have a
+;; single DEFINE_INSN for fsel, and define_splits to recognize the
+;; combinations if combine creates them.
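+;;
+;; A minimal sketch of what rs6000_emit_minmax typically produces for
+;; smaxsf3 (illustrative register choices, not the exact emitted RTL):
+;;   fsubs f0,f1,f2     ; tmp = a - b
+;;   fsel  f3,f0,f1,f2  ; d = (tmp >= 0.0) ? a : b
+;; fsel picks its second operand when the first is >= 0.0, which is why
+;; these patterns require !flag_trapping_math: the subtraction may raise
+;; exceptions that a plain max/min would not.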
+(define_expand "smaxsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" ""))
+ (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
+
+(define_expand "sminsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" ""))
+ (match_dup 2)
+ (match_dup 1)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
+
+(define_split
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operator:SF 3 "min_max_operator"
+ [(match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")]))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_SINGLE_FLOAT && !flag_trapping_math"
+ [(const_int 0)]
+ "
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+ operands[1], operands[2]);
+ DONE;
+}")
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_operand:SI 3 "gpc_reg_operand" "")))]
+ "TARGET_ISEL"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; We use the BASE_REGS for the isel input operands because, if rA is
+;; 0, the value of 0 is placed in rD upon truth. Similarly for rB
+;; because we may switch the operands and rB may end up being rA.
+;;
+;; We need 2 patterns: an unsigned and a signed pattern. We could
+;; leave out the mode in operand 4 and use one pattern, but reload can
+;; change the mode underneath our feet and then gets confused trying
+;; to reload the value.
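+;;
+;; A minimal sketch of the resulting code (illustrative only): for
+;;   r = (a < b) ? x : y
+;; rs6000_emit_cmove can produce
+;;   cmpw cr7,ra,rb
+;;   isel rd,rx,ry,28   ; rd = cr7.LT ? rx : ry
+;; and an rA field of 0 makes isel read the literal value 0 instead of r0,
+;; hence the 'b' (BASE_REGS) constraints below.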
+(define_insn "isel_signed"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:CC 4 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 2 "gpc_reg_operand" "b")
+ (match_operand:SI 3 "gpc_reg_operand" "b")))]
+ "TARGET_ISEL"
+ "*
+{ return output_isel (operands); }"
+ [(set_attr "length" "4")])
+
+(define_insn "isel_unsigned"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI
+ (match_operator 1 "comparison_operator"
+ [(match_operand:CCUNS 4 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 2 "gpc_reg_operand" "b")
+ (match_operand:SI 3 "gpc_reg_operand" "b")))]
+ "TARGET_ISEL"
+ "*
+{ return output_isel (operands); }"
+ [(set_attr "length" "4")])
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_operand:SF 3 "gpc_reg_operand" "")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn "*fselsfsf4"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (match_operand:SF 2 "gpc_reg_operand" "f")
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "*fseldfsf4"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:SF (ge (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 4 "zero_fp_constant" "F"))
+ (match_operand:SF 2 "gpc_reg_operand" "f")
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_SINGLE_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_expand "negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*negdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*absdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "*nabsdf2_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*adddf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "{fa|fadd} %0,%1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_addsub_d")])
+
+(define_expand "subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*subdf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "{fs|fsub} %0,%1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_addsub_d")])
+
+(define_expand "muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "")
+
+(define_insn "*muldf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "{fm|fmul} %0,%1,%2"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_mul_d")])
+
+(define_expand "divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT
+ && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)
+ && !TARGET_SIMPLE_FPU"
+ "")
+
+(define_insn "*divdf3_fpr"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && !TARGET_SIMPLE_FPU"
+ "{fd|fdiv} %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+
+(define_expand "recipdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")]
+ UNSPEC_FRES))]
+ "TARGET_RECIP && TARGET_HARD_FLOAT && TARGET_POPCNTB && !optimize_size
+ && flag_finite_math_only && !flag_trapping_math"
+{
+ rs6000_emit_swdivdf (operands[0], operands[1], operands[2]);
+ DONE;
+})
+
+(define_insn "fred"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRES))]
+ "TARGET_POPCNTB && flag_finite_math_only"
+ "fre %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT"
+ "{fma|fmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_maddsub_d")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT"
+ "{fms|fmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_maddsub_d")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+ && HONOR_SIGNED_ZEROS (DFmode)"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_maddsub_d")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (mult:DF (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f"))
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+ && ! HONOR_SIGNED_ZEROS (DFmode)"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_maddsub_d")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+ && HONOR_SIGNED_ZEROS (DFmode)"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_maddsub_d")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (match_operand:DF 3 "gpc_reg_operand" "f")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_FUSED_MADD && TARGET_DOUBLE_FLOAT
+ && ! HONOR_SIGNED_ZEROS (DFmode)"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")
+ (set_attr "fp_type" "fp_maddsub_d")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "(TARGET_PPC_GPOPT || TARGET_POWER2) && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT"
+ "fsqrt %0,%1"
+ [(set_attr "type" "dsqrt")])
+
+;; The conditional move instructions allow us to perform max and min
+;; operations even when the ISA provides no direct min/max instructions,
+;; as long as the comparison cannot trap (hence !flag_trapping_math).
+
+(define_expand "smaxdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" ""))
+ (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
+
+(define_expand "smindf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" ""))
+ (match_dup 2)
+ (match_dup 1)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && !flag_trapping_math"
+ "{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operator:DF 3 "min_max_operator"
+ [(match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")]))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && !flag_trapping_math"
+ [(const_int 0)]
+ "
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+ operands[1], operands[2]);
+ DONE;
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_operand:DF 3 "gpc_reg_operand" "")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn "*fseldfdf4"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 4 "zero_fp_constant" "F"))
+ (match_operand:DF 2 "gpc_reg_operand" "f")
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn "*fselsfdf4"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (if_then_else:DF (ge (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (match_operand:DF 2 "gpc_reg_operand" "f")
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_SINGLE_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; Conversions to and from floating-point.
+
+(define_expand "fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unsigned_fix:SI (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_expand "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "")
+
+; For each of these conversions, there is a define_expand, a define_insn
+; with a '#' template, and a define_split (with C code). The idea is
+; to allow constant folding with the template of the define_insn,
+; then to have the insns split later (between sched1 and final).
+
+(define_expand "floatsidf2"
+ [(parallel [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))])]
+ "TARGET_HARD_FLOAT
+ && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_floatsidf2 (operands[0], operands[1]));
+ DONE;
+ }
+ if (TARGET_POWERPC64)
+ {
+ rtx x = convert_to_mode (DImode, operands[1], 0);
+ emit_insn (gen_floatdidf2 (operands[0], x));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, GEN_INT (0x43300000));
+ operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503601774854144\", DFmode));
+ operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ operands[5] = gen_reg_rtx (DFmode);
+ operands[6] = gen_reg_rtx (SImode);
+}")
+
+(define_insn_and_split "*floatsidf2_internal"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (use (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (use (match_operand:DF 3 "gpc_reg_operand" "f"))
+ (clobber (match_operand:DF 4 "offsettable_mem_operand" "=o"))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" "=&f"))
+ (clobber (match_operand:SI 6 "gpc_reg_operand" "=&r"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "#"
+ "&& (can_create_pseudo_p () || offsettable_nonstrict_memref_p (operands[4]))"
+ [(pc)]
+ "
+{
+ rtx lowword, highword;
+ gcc_assert (MEM_P (operands[4]));
+ highword = adjust_address (operands[4], SImode, 0);
+ lowword = adjust_address (operands[4], SImode, 4);
+ if (! WORDS_BIG_ENDIAN)
+ {
+ rtx tmp;
+ tmp = highword; highword = lowword; lowword = tmp;
+ }
+
+ emit_insn (gen_xorsi3 (operands[6], operands[1],
+ GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff)));
+ emit_move_insn (lowword, operands[6]);
+ emit_move_insn (highword, operands[2]);
+ emit_move_insn (operands[5], operands[4]);
+ emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
+ DONE;
+}"
+ [(set_attr "length" "24")])
+
+(define_expand "floatunssisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (unsigned_float:SF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "")
+
+(define_expand "floatunssidf2"
+ [(parallel [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))])]
+ "TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_floatunssidf2 (operands[0], operands[1]));
+ DONE;
+ }
+ if (TARGET_POWERPC64)
+ {
+ rtx x = convert_to_mode (DImode, operands[1], 1);
+ emit_insn (gen_floatdidf2 (operands[0], x));
+ DONE;
+ }
+
+ operands[2] = force_reg (SImode, GEN_INT (0x43300000));
+ operands[3] = force_reg (DFmode, CONST_DOUBLE_ATOF (\"4503599627370496\", DFmode));
+ operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ operands[5] = gen_reg_rtx (DFmode);
+}")
+
+(define_insn_and_split "*floatunssidf2_internal"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (use (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (use (match_operand:DF 3 "gpc_reg_operand" "f"))
+ (clobber (match_operand:DF 4 "offsettable_mem_operand" "=o"))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" "=&f"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "#"
+ "&& (can_create_pseudo_p () || offsettable_nonstrict_memref_p (operands[4]))"
+ [(pc)]
+ "
+{
+ rtx lowword, highword;
+ gcc_assert (MEM_P (operands[4]));
+ highword = adjust_address (operands[4], SImode, 0);
+ lowword = adjust_address (operands[4], SImode, 4);
+ if (! WORDS_BIG_ENDIAN)
+ {
+ rtx tmp;
+ tmp = highword; highword = lowword; lowword = tmp;
+ }
+
+ emit_move_insn (lowword, operands[1]);
+ emit_move_insn (highword, operands[2]);
+ emit_move_insn (operands[5], operands[4]);
+ emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
+ DONE;
+}"
+ [(set_attr "length" "20")])
+
+(define_expand "fix_truncdfsi2"
+ [(parallel [(set (match_operand:SI 0 "fix_trunc_dest_operand" "")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))])]
+ "(TARGET_POWER2 || TARGET_POWERPC)
+ && TARGET_HARD_FLOAT && ((TARGET_FPRS && TARGET_DOUBLE_FLOAT) || TARGET_E500_DOUBLE)"
+ "
+{
+ if (TARGET_E500_DOUBLE)
+ {
+ emit_insn (gen_spe_fix_truncdfsi2 (operands[0], operands[1]));
+ DONE;
+ }
+ operands[2] = gen_reg_rtx (DImode);
+ if (TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && gpc_reg_operand(operands[0], GET_MODE (operands[0])))
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ emit_insn (gen_fix_truncdfsi2_mfpgpr (operands[0], operands[1],
+ operands[2], operands[3]));
+ DONE;
+ }
+ if (TARGET_PPC_GFXOPT)
+ {
+ rtx orig_dest = operands[0];
+ if (! memory_operand (orig_dest, GET_MODE (orig_dest)))
+ operands[0] = assign_stack_temp (SImode, GET_MODE_SIZE (SImode), 0);
+ emit_insn (gen_fix_truncdfsi2_internal_gfxopt (operands[0], operands[1],
+ operands[2]));
+ if (operands[0] != orig_dest)
+ emit_move_insn (orig_dest, operands[0]);
+ DONE;
+ }
+ operands[3] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+}")
+
+(define_insn_and_split "*fix_truncdfsi2_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DI 3 "offsettable_mem_operand" "=o"))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT"
+ "#"
+ "&& (can_create_pseudo_p () || offsettable_nonstrict_memref_p (operands[3]))"
+ [(pc)]
+ "
+{
+ rtx lowword;
+ gcc_assert (MEM_P (operands[3]));
+ lowword = adjust_address (operands[3], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
+
+ emit_insn (gen_fctiwz (operands[2], operands[1]));
+ emit_move_insn (operands[3], operands[2]);
+ emit_move_insn (operands[0], lowword);
+ DONE;
+}"
+ [(set_attr "length" "16")])
+
+(define_insn_and_split "fix_truncdfsi2_internal_gfxopt"
+ [(set (match_operand:SI 0 "memory_operand" "=Z")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT
+ && TARGET_PPC_GFXOPT"
+ "#"
+ "&& 1"
+ [(pc)]
+ "
+{
+ emit_insn (gen_fctiwz (operands[2], operands[1]));
+ emit_insn (gen_stfiwx (operands[0], operands[2]));
+ DONE;
+}"
+ [(set_attr "length" "16")])
+
+(define_insn_and_split "fix_truncdfsi2_mfpgpr"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DI 3 "gpc_reg_operand" "=r"))]
+ "TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT"
+ "#"
+ "&& 1"
+ [(set (match_dup 2) (unspec:DI [(fix:SI (match_dup 1))] UNSPEC_FCTIWZ))
+ (set (match_dup 3) (match_dup 2))
+ (set (match_dup 0) (subreg:SI (match_dup 3) 4))]
+ ""
+ [(set_attr "length" "12")])
+
+; Here, we use (set (reg) (unspec:DI [(fix:SI ...)] UNSPEC_FCTIWZ))
+; rather than (set (subreg:SI (reg)) (fix:SI ...))
+; because the first makes it clear that operand 0 is not live
+; before the instruction.
+(define_insn "fctiwz"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
+ (unspec:DI [(fix:SI (match_operand:DF 1 "gpc_reg_operand" "f"))]
+ UNSPEC_FCTIWZ))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT"
+ "{fcirz|fctiwz} %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "btruncdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "friz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "btruncsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIZ))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT "
+ "friz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "ceildf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "frip %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "ceilsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIP))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT "
+ "frip %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "floordf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "frim %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "floorsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIM))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT "
+ "frim %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "rounddf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:DF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "frin %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "roundsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] UNSPEC_FRIN))]
+ "TARGET_FPRND && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT "
+ "frin %0,%1"
+ [(set_attr "type" "fp")])
+
+; An UNSPEC is used so we don't have to support SImode in FP registers.
+(define_insn "stfiwx"
+ [(set (match_operand:SI 0 "memory_operand" "=Z")
+ (unspec:SI [(match_operand:DI 1 "gpc_reg_operand" "f")]
+ UNSPEC_STFIWX))]
+ "TARGET_PPC_GFXOPT"
+ "stfiwx %1,%y0"
+ [(set_attr "type" "fpstore")])
+
+(define_expand "floatsisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float:SF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "")
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float:DF (match_operand:DI 1 "gpc_reg_operand" "!f#r")))]
+ "(TARGET_POWERPC64 || TARGET_XILINX_FPU) && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS"
+ "fcfid %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=!f#r")
+ (fix:DI (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "(TARGET_POWERPC64 || TARGET_XILINX_FPU) && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT && TARGET_FPRS"
+ "fctidz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_expand "floatdisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float:SF (match_operand:DI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT "
+ "
+{
+ rtx val = operands[1];
+ if (!flag_unsafe_math_optimizations)
+ {
+ rtx label = gen_label_rtx ();
+ val = gen_reg_rtx (DImode);
+ emit_insn (gen_floatdisf2_internal2 (val, operands[1], label));
+ emit_label (label);
+ }
+ emit_insn (gen_floatdisf2_internal1 (operands[0], val));
+ DONE;
+}")
+
+;; This is not IEEE compliant under the default "round to nearest" mode:
+;; if the DI->DF conversion is inexact, the result can suffer from
+;; double rounding.
+(define_insn_and_split "floatdisf2_internal1"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float:SF (match_operand:DI 1 "gpc_reg_operand" "!f#r")))
+ (clobber (match_scratch:DF 2 "=f"))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (float:DF (match_dup 1)))
+ (set (match_dup 0)
+ (float_truncate:SF (match_dup 2)))]
+ "")
+
+;; Twiddles bits to avoid double rounding.
+;; Bits that might be truncated when converting to DFmode are replaced
+;; by a bit that won't be lost at that stage, but is below the SFmode
+;; rounding position.
+(define_expand "floatdisf2_internal2"
+ [(set (match_dup 3) (ashiftrt:DI (match_operand:DI 1 "" "")
+ (const_int 53)))
+ (parallel [(set (match_operand:DI 0 "" "") (and:DI (match_dup 1)
+ (const_int 2047)))
+ (clobber (scratch:CC))])
+ (set (match_dup 3) (plus:DI (match_dup 3)
+ (const_int 1)))
+ (set (match_dup 0) (plus:DI (match_dup 0)
+ (const_int 2047)))
+ (set (match_dup 4) (compare:CCUNS (match_dup 3)
+ (const_int 2)))
+ (set (match_dup 0) (ior:DI (match_dup 0)
+ (match_dup 1)))
+ (parallel [(set (match_dup 0) (and:DI (match_dup 0)
+ (const_int -2048)))
+ (clobber (scratch:CC))])
+ (set (pc) (if_then_else (geu (match_dup 4) (const_int 0))
+ (label_ref (match_operand:DI 2 "" ""))
+ (pc)))
+ (set (match_dup 0) (match_dup 1))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "
+{
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (CCUNSmode);
+}")
+
+;; Define the DImode operations that can be done in a small number
+;; of instructions. The & constraints are to prevent the register
+;; allocator from allocating registers that overlap with the inputs
+;; (for example, having an input in 7,8 and an output in 6,7). We
+;; also allow for the output being the same as one of the inputs.
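+;;
+;; Illustrative hazard for *adddi3_noppc64 below: with an output in 6,7
+;; and an input in 7,8, the first instruction of the pair would overwrite
+;; r7 before the second instruction read it, hence the early-clobber '&'
+;; on the alternatives whose output is not tied to an input.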
+
+(define_insn "*adddi3_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,0,0")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (WORDS_BIG_ENDIAN)
+ return (GET_CODE (operands[2])) != CONST_INT
+ ? \"{a|addc} %L0,%L1,%L2\;{ae|adde} %0,%1,%2\"
+ : \"{ai|addic} %L0,%L1,%2\;{a%G2e|add%G2e} %0,%1\";
+ else
+ return (GET_CODE (operands[2])) != CONST_INT
+ ? \"{a|addc} %0,%1,%2\;{ae|adde} %L0,%L1,%L2\"
+ : \"{ai|addic} %0,%1,%2\;{a%G2e|add%G2e} %L0,%L1\";
+}"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*subdi3_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r,r")
+ (minus:DI (match_operand:DI 1 "reg_or_short_operand" "r,I,0,r,I")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r,r,0,0")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (WORDS_BIG_ENDIAN)
+ return (GET_CODE (operands[1]) != CONST_INT)
+ ? \"{sf|subfc} %L0,%L2,%L1\;{sfe|subfe} %0,%2,%1\"
+ : \"{sfi|subfic} %L0,%L2,%1\;{sf%G1e|subf%G1e} %0,%2\";
+ else
+ return (GET_CODE (operands[1]) != CONST_INT)
+ ? \"{sf|subfc} %0,%2,%1\;{sfe|subfe} %L0,%L2,%L1\"
+ : \"{sfi|subfic} %0,%2,%1\;{sf%G1e|subf%G1e} %L0,%L2\";
+}"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*negdi2_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"{sfi|subfic} %L0,%L1,0\;{sfze|subfze} %0,%1\"
+ : \"{sfi|subfic} %0,%1,0\;{sfze|subfze} %L0,%L1\";
+}"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_expand "mulsidi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "! TARGET_POWERPC64"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_mull_call ());
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (operands[0], gen_rtx_REG (DImode, 3));
+ else
+ {
+ emit_move_insn (operand_subword (operands[0], 0, 0, DImode),
+ gen_rtx_REG (SImode, 3));
+ emit_move_insn (operand_subword (operands[0], 1, 0, DImode),
+ gen_rtx_REG (SImode, 4));
+ }
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_mulsidi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "mulsidi3_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWER"
+ "mul %0,%1,%2\;mfmq %L0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_insn "*mulsidi3_no_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_POWERPC && ! TARGET_POWER && ! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhw %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhw %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (const_int 32))))
+ (set (match_dup 4)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ operands[3] = operand_subword (operands[0], endian, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1 - endian, 0, DImode);
+}")
+
+(define_expand "umulsidi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64"
+ "
+{
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_umulsidi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "umulsidi3_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhwu %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhwu %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_insn "*umulsidi3_no_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_POWERPC && ! TARGET_POWER && ! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhwu %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhwu %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
+ (zero_extend:DI (match_dup 2)))
+ (const_int 32))))
+ (set (match_dup 4)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ operands[3] = operand_subword (operands[0], endian, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1 - endian, 0, DImode);
+}")
+
+(define_expand "smulsi3_highpart"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (const_int 32))))]
+ ""
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_mulh_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_smulsi3_highpart_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "smulsi3_highpart_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWER"
+ "mul %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+(define_insn "*smulsi3_highpart_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "mulhw %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+(define_expand "umulsi3_highpart"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (const_int 32))))]
+ "TARGET_POWERPC"
+ "
+{
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_umulsi3_highpart_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "umulsi3_highpart_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "mulhwu %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+(define_insn "*umulsi3_highpart_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "mulhwu %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; If operands 0 and 2 are in the same register, we have a problem. But
+;; operands 0 and 1 (the usual case) can be in the same register. That's
+;; why we have the strange constraints below.
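+;;
+;; Illustrative hazard: both instructions of each expansion below read the
+;; shift count in operand 2, so if operand 0's first-written half shared
+;; operand 2's register the count would be destroyed before the second
+;; instruction used it.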
+(define_insn "ashldi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,&r")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,0,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "M,i,r,r")))
+ (clobber (match_scratch:SI 3 "=X,q,q,q"))]
+ "TARGET_POWER"
+ "@
+ {sli|slwi} %0,%L1,%h2\;{cal %L0,0(0)|li %L0,0}
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2"
+ [(set_attr "length" "8")])
+
+(define_insn "lshrdi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,&r")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,0,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "M,i,r,r")))
+ (clobber (match_scratch:SI 3 "=X,q,q,q"))]
+ "TARGET_POWER"
+ "@
+ {s%A2i|s%A2wi} %L0,%1,%h2\;{cal %0,0(0)|li %0,0}
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2"
+ [(set_attr "length" "8")])
+
+;; Shift by a variable amount is too complex to be worth open-coding. We
+;; just handle shifts by constants.
+(define_insn "ashrdi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "M,i")))
+ (clobber (match_scratch:SI 3 "=X,q"))]
+ "TARGET_POWER"
+ "@
+ {srai|srawi} %0,%1,31\;{srai|srawi} %L0,%1,%h2
+ sraiq %0,%1,%h2\;srliq %L0,%L1,%h2"
+ [(set_attr "type" "shift")
+ (set_attr "length" "8")])
+
+(define_insn "ashrdi3_no_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "M,i")))]
+ "TARGET_32BIT && !TARGET_POWERPC64 && !TARGET_POWER && WORDS_BIG_ENDIAN"
+ "@
+ {srai|srawi} %0,%1,31\;{srai|srawi} %L0,%1,%h2
+ {sri|srwi} %L0,%L1,%h2\;insrwi %L0,%1,%h2,0\;{srai|srawi} %0,%1,%h2"
+ [(set_attr "type" "two,three")
+ (set_attr "length" "8,12")])
+
+(define_insn "*ashrdisi3_noppc64"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (subreg:SI (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (const_int 32)) 4))]
+ "TARGET_32BIT && !TARGET_POWERPC64"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ return \"\";
+ else
+ return \"mr %0,%1\";
+}"
+ [(set_attr "length" "4")])
+
+
+;; PowerPC64 DImode operations.
+
+(define_insn_and_split "absdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))
+ (clobber (match_scratch:DI 2 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:DI (match_dup 1) (const_int 63)))
+ (set (match_dup 0) (xor:DI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:DI (match_dup 0) (match_dup 2)))]
+ "")
+
+(define_insn_and_split "*nabsdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0"))))
+ (clobber (match_scratch:DI 2 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2) (ashiftrt:DI (match_dup 1) (const_int 63)))
+ (set (match_dup 0) (xor:DI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:DI (match_dup 2) (match_dup 0)))]
+ "")
+
+(define_insn "muldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I")))]
+ "TARGET_POWERPC64"
+ "@
+ mulld %0,%1,%2
+ mulli %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "lmul")))])
+
+(define_insn "*muldi3_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ mulld. %3,%1,%2
+ #"
+ [(set_attr "type" "lmul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (mult:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*muldi3_internal2"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (mult:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ mulld. %0,%1,%2
+ #"
+ [(set_attr "type" "lmul_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (mult:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (mult:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "smuldi3_highpart"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (mult:TI (sign_extend:TI
+ (match_operand:DI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:TI
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (const_int 64))))]
+ "TARGET_POWERPC64"
+ "mulhd %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+(define_insn "umuldi3_highpart"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (const_int 64))))]
+ "TARGET_POWERPC64"
+ "mulhdu %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+(define_insn "rotldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i")))]
+ "TARGET_POWERPC64"
+ "@
+ rldcl %0,%1,%2,0
+ rldicl %0,%1,%H2,0"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %3,%1,%2,0
+ rldicl. %3,%1,%H2,0
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (rotate:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (rotate:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %0,%1,%2,0
+ rldicl. %0,%1,%H2,0
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (rotate:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (rotate:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal4"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n")))]
+ "TARGET_POWERPC64"
+ "@
+ rldc%B3 %0,%1,%2,%S3
+ rldic%B3 %0,%1,%H2,%S3"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotldi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n,n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldc%B3. %4,%1,%2,%S3
+ rldic%B3. %4,%1,%H2,%S3
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n,n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_64BIT"
+ "@
+ rldc%B3. %0,%1,%2,%S3
+ rldic%B3. %0,%1,%H2,%S3
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal7"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i")) 0)))]
+ "TARGET_POWERPC64"
+ "@
+ rldcl %0,%1,%2,56
+ rldicl %0,%1,%H2,56"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotldi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %3,%1,%2,56
+ rldicl. %3,%1,%H2,56
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:QI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal9"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %0,%1,%2,56
+ rldicl. %0,%1,%H2,56
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal10"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i")) 0)))]
+ "TARGET_POWERPC64"
+ "@
+ rldcl %0,%1,%2,48
+ rldicl %0,%1,%H2,48"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotldi3_internal11"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %3,%1,%2,48
+ rldicl. %3,%1,%H2,48
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:HI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal12"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %0,%1,%2,48
+ rldicl. %0,%1,%H2,48
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal13"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i")) 0)))]
+ "TARGET_POWERPC64"
+ "@
+ rldcl %0,%1,%2,32
+ rldicl %0,%1,%H2,32"
+ [(set_attr "type" "var_shift_rotate,integer")])
+
+(define_insn "*rotldi3_internal14"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %3,%1,%2,32
+ rldicl. %3,%1,%H2,32
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:SI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal15"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,i,r,i")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_64BIT"
+ "@
+ rldcl. %0,%1,%2,32
+ rldicl. %0,%1,%H2,32
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_ashldi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+(define_insn "*ashldi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))]
+ "TARGET_POWERPC64"
+ "@
+ sld %0,%1,%2
+ sldi %0,%1,%H2"
+ [(set_attr "type" "var_shift_rotate,shift")])
+
+(define_insn "*ashldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ sld. %3,%1,%2
+ sldi. %3,%1,%H2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ sld. %0,%1,%2
+ sldi. %0,%1,%H2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal4"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "const_int_operand" "n")))]
+ "TARGET_POWERPC64 && includes_rldic_lshift_p (operands[2], operands[3])"
+ "rldic %0,%1,%H2,%W3")
+
+(define_insn "ashldi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_64BIT && includes_rldic_lshift_p (operands[2], operands[3])"
+ "@
+ rldic. %4,%1,%H2,%W3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldic_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 4)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_64BIT && includes_rldic_lshift_p (operands[2], operands[3])"
+ "@
+ rldic. %0,%1,%H2,%W3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldic_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 0)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal7"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "mask64_operand" "n")))]
+ "TARGET_POWERPC64 && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "rldicr %0,%1,%H2,%S3")
+
+(define_insn "ashldi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_64BIT && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "@
+ rldicr. %4,%1,%H2,%S3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldicr_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 4)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashldi3_internal9"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "mask64_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_64BIT && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "@
+ rldicr. %0,%1,%H2,%S3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldicr_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 0)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_lshrdi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+(define_insn "*lshrdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))]
+ "TARGET_POWERPC64"
+ "@
+ srd %0,%1,%2
+ srdi %0,%1,%H2"
+ [(set_attr "type" "var_shift_rotate,shift")])
+
+(define_insn "*lshrdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+  "TARGET_64BIT"
+ "@
+ srd. %3,%1,%2
+ srdi. %3,%1,%H2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*lshrdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ srd. %0,%1,%2
+ srdi. %0,%1,%H2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "WORDS_BIG_ENDIAN"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER && GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_ashrdi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else if (TARGET_32BIT && GET_CODE (operands[2]) == CONST_INT
+ && WORDS_BIG_ENDIAN)
+ {
+ emit_insn (gen_ashrdi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+(define_insn "*ashrdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))]
+ "TARGET_POWERPC64"
+ "@
+ srad %0,%1,%2
+ sradi %0,%1,%H2"
+ [(set_attr "type" "var_shift_rotate,shift")])
+
+(define_insn "*ashrdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_64BIT"
+ "@
+ srad. %3,%1,%2
+ sradi. %3,%1,%H2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*ashrdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ srad. %0,%1,%2
+ sradi. %0,%1,%H2
+ #
+ #"
+ [(set_attr "type" "var_delayed_compare,delayed_compare,var_delayed_compare,delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "anddi3"
+ [(parallel
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "and64_2_operand" "")))
+ (clobber (match_scratch:CC 3 ""))])]
+ "TARGET_POWERPC64"
+ "")
+
+(define_insn "anddi3_mc"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "?r,S,T,K,J,t")))
+ (clobber (match_scratch:CC 3 "=X,X,X,x,x,X"))]
+ "TARGET_POWERPC64 && rs6000_gen_cell_microcode"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2
+ #"
+ [(set_attr "type" "*,*,*,compare,compare,*")
+ (set_attr "length" "4,4,4,4,4,8")])
+
+(define_insn "anddi3_nomc"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "?r,S,T,t")))
+ (clobber (match_scratch:CC 3 "=X,X,X,X"))]
+ "TARGET_POWERPC64 && !rs6000_gen_cell_microcode"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
+ #"
+ [(set_attr "length" "4,4,4,8")])
+
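+;; A mask that needs two rotate-and-mask instructions is rewritten below as
+;; two rotate+AND pairs; build_mask64_2_operands computes the two shift
+;; counts and partial masks (operands 4 through 7).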
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "mask64_2_operand" "")))
+ (clobber (match_scratch:CC 3 ""))]
+ "TARGET_POWERPC64
+ && (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
+ && !mask64_operand (operands[2], DImode)"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 4))
+ (match_dup 5)))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (match_dup 6))
+ (match_dup 7)))]
+{
+ build_mask64_2_operands (operands[2], &operands[4]);
+})
+
+(define_insn "*anddi3_internal2_mc"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,x,?y,?y,?y,??y,??y,?y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "r,S,T,K,J,t,r,S,T,K,J,t"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r,r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT && rs6000_gen_cell_microcode"
+ "@
+ and. %3,%1,%2
+ rldic%B2. %3,%1,0,%S2
+ rlwinm. %3,%1,0,%m2,%M2
+ andi. %3,%1,%b2
+ andis. %3,%1,%u2
+ #
+ #
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,delayed_compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "mask64_2_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_64BIT && reload_completed
+ && (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
+ && !mask64_operand (operands[2], DImode)"
+ [(set (match_dup 3)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 5))
+ (match_dup 6)))
+ (parallel [(set (match_dup 0)
+ (compare:CC (and:DI (rotate:DI (match_dup 3)
+ (match_dup 7))
+ (match_dup 8))
+ (const_int 0)))
+ (clobber (match_dup 3))])]
+ "
+{
+ build_mask64_2_operands (operands[2], &operands[5]);
+}")
+
+(define_insn "*anddi3_internal3_mc"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,x,x,?y,?y,?y,??y,??y,?y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_2_operand" "r,S,T,K,J,t,r,S,T,K,J,t"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r,r,r")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,X,X,X,x,x,X"))]
+ "TARGET_64BIT && rs6000_gen_cell_microcode"
+ "@
+ and. %0,%1,%2
+ rldic%B2. %0,%1,0,%S2
+ rlwinm. %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2
+ #
+ #
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,delayed_compare,compare,compare,compare,compare,compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,4,8,8,8,8,8,8,12")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "and64_2_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "mask64_2_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_64BIT && reload_completed
+ && (fixed_regs[CR0_REGNO] || !logical_operand (operands[2], DImode))
+ && !mask_operand (operands[2], DImode)
+ && !mask64_operand (operands[2], DImode)"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 5))
+ (match_dup 6)))
+ (parallel [(set (match_dup 3)
+ (compare:CC (and:DI (rotate:DI (match_dup 0)
+ (match_dup 7))
+ (match_dup 8))
+ (const_int 0)))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (match_dup 7))
+ (match_dup 8)))])]
+ "
+{
+ build_mask64_2_operands (operands[2], &operands[5]);
+}")
+
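+;; A constant that fits neither a 16-bit nor a shifted 16-bit immediate is
+;; applied in two steps: first the value with its low 16 bits cleared, then
+;; the low 16 bits themselves.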
+(define_expand "iordi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ior:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_logical_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (non_logical_cint_operand (operands[2], DImode))
+ {
+ HOST_WIDE_INT value;
+ rtx tmp = ((!can_create_pseudo_p ()
+ || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ value = INTVAL (operands[2]);
+ emit_insn (gen_iordi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ }
+ else
+ {
+ value = CONST_DOUBLE_LOW (operands[2]);
+ emit_insn (gen_iordi3 (tmp, operands[1],
+ immed_double_const (value
+ & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode)));
+ }
+
+ emit_insn (gen_iordi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_expand "xordi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (xor:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_logical_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (non_logical_cint_operand (operands[2], DImode))
+ {
+ HOST_WIDE_INT value;
+ rtx tmp = ((!can_create_pseudo_p ()
+ || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ value = INTVAL (operands[2]);
+ emit_insn (gen_xordi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ }
+ else
+ {
+ value = CONST_DOUBLE_LOW (operands[2]);
+ emit_insn (gen_xordi3 (tmp, operands[1],
+ immed_double_const (value
+ & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode)));
+ }
+
+ emit_insn (gen_xordi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+(define_insn "*booldi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r")
+ (match_operator:DI 3 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r,r")
+ (match_operand:DI 2 "logical_operand" "r,K,JF")]))]
+ "TARGET_POWERPC64"
+ "@
+ %q3 %0,%1,%2
+ %q3i %0,%1,%b2
+ %q3is %0,%1,%u2")
+
+(define_insn "*booldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*booldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_64BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split a logical operation that we can't do in one insn into two insns,
+;; each of which does one 16-bit part. This is used by combine.
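+;; For example (illustrative only, not from the original sources), an IOR
+;; such as "x | 0x12345678" becomes "oris %0,%1,0x1234" followed by
+;; "ori %0,%0,0x5678".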
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operator:DI 3 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "non_logical_cint_operand" "")]))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+"
+{
+ rtx i3,i4;
+
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ {
+ HOST_WIDE_INT value = CONST_DOUBLE_LOW (operands[2]);
+ i3 = immed_double_const (value & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode);
+ i4 = GEN_INT (value & 0xffff);
+ }
+ else
+ {
+ i3 = GEN_INT (INTVAL (operands[2])
+ & (~ (HOST_WIDE_INT) 0xffff));
+ i4 = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ }
+ operands[4] = gen_rtx_fmt_ee (GET_CODE (operands[3]), DImode,
+ operands[1], i3);
+ operands[5] = gen_rtx_fmt_ee (GET_CODE (operands[3]), DImode,
+ operands[0], i4);
+}")
+
+(define_insn "*boolcdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 3 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r")]))]
+ "TARGET_POWERPC64"
+ "%q3 %0,%2,%1")
+
+(define_insn "*boolcdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ %q4. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolcdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_64BIT"
+ "@
+ %q4. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 3 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r"))]))]
+ "TARGET_POWERPC64"
+ "%q3 %0,%1,%2")
+
+(define_insn "*boolccdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_64BIT"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*boolccdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_64BIT"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Now define ways of moving data around.
+
+;; Set up a register with a value from the GOT table
+
+(define_expand "movsi_got"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:SI 1 "got_operand" "")
+ (match_dup 2)] UNSPEC_MOVSI_GOT))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1"
+ "
+{
+ if (GET_CODE (operands[1]) == CONST)
+ {
+ rtx offset = const0_rtx;
+ HOST_WIDE_INT value;
+
+ operands[1] = eliminate_constant_term (XEXP (operands[1], 0), &offset);
+ value = INTVAL (offset);
+ if (value != 0)
+ {
+ rtx tmp = (!can_create_pseudo_p ()
+ ? operands[0]
+ : gen_reg_rtx (Pmode));
+ emit_insn (gen_movsi_got (tmp, operands[1]));
+ emit_insn (gen_addsi3 (operands[0], tmp, offset));
+ DONE;
+ }
+ }
+
+ operands[2] = rs6000_got_register (operands[1]);
+}")
+
+(define_insn "*movsi_got_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "b")]
+ UNSPEC_MOVSI_GOT))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1"
+ "{l|lwz} %0,%a1@got(%2)"
+ [(set_attr "type" "load")])
+
+;; Used by sched, shorten_branches and final when the GOT pseudo reg
+;; didn't get allocated to a hard register.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
+ (match_operand:SI 2 "memory_operand" "")]
+ UNSPEC_MOVSI_GOT))]
+ "DEFAULT_ABI == ABI_V4
+ && flag_pic == 1
+ && (reload_in_progress || reload_completed)"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (unspec:SI [(match_dup 1)(match_dup 0)]
+ UNSPEC_MOVSI_GOT))]
+ "")
+
+;; For SI, we special-case integers that can't be loaded in one insn. We
+;; do the load 16 bits at a time.  We could do this by loading from memory,
+;; which is even supposed to be faster, but it is simpler not to get
+;; integers in the TOC.
+(define_insn "movsi_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mem:SI (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+(define_insn "*movsi_internal1"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "=r,r,r,m,r,r,r,r,r,*q,*c*l,*h,*h")
+ (match_operand:SI 1 "input_operand" "r,U,m,r,I,L,n,R,*h,r,r,r,0"))]
+ "gpc_reg_operand (operands[0], SImode)
+ || gpc_reg_operand (operands[1], SImode)"
+ "@
+ mr %0,%1
+ {cal|la} %0,%a1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ #
+ {cal|la} %0,%a1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,*,load,store,*,*,*,*,mfjmpr,*,mtjmpr,*,*")
+ (set_attr "length" "4,4,4,4,4,4,8,4,4,4,4,4,4")])
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence.
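+;; For example (illustrative only): 0x12345678 is loaded as
+;; "{liu|lis} %0,0x1234" followed by "{oril|ori} %0,%0,0x5678".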
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "(unsigned HOST_WIDE_INT) (INTVAL (operands[1]) + 0x8000) >= 0x10000
+ && (INTVAL (operands[1]) & 0xffff) != 0"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (ior:SI (match_dup 0)
+ (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], SImode, operands[1], 2);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_insn "*mov<mode>_internal2"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=y,x,?y")
+ (compare:CC (match_operand:P 1 "gpc_reg_operand" "0,r,r")
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r") (match_dup 1))]
+ ""
+ "@
+ {cmpi|cmp<wd>i} %2,%0,0
+ mr. %0,%1
+ #"
+ [(set_attr "type" "cmp,compare,cmp")
+ (set_attr "length" "4,4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC (match_operand:P 1 "gpc_reg_operand" "")
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "") (match_dup 1))]
+ "reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*movhi_internal"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
+ (match_operand:HI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
+ "gpc_reg_operand (operands[0], HImode)
+ || gpc_reg_operand (operands[1], HImode)"
+ "@
+ mr %0,%1
+ lhz%U1%X1 %0,%1
+ sth%U0%X0 %1,%0
+ {lil|li} %0,%w1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,load,store,*,mfjmpr,*,mtjmpr,*")])
+
+(define_expand "mov<mode>"
+ [(set (match_operand:INT 0 "general_operand" "")
+ (match_operand:INT 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], <MODE>mode); DONE; }")
+
+(define_insn "*movqi_internal"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
+ (match_operand:QI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
+ "gpc_reg_operand (operands[0], QImode)
+ || gpc_reg_operand (operands[1], QImode)"
+ "@
+ mr %0,%1
+ lbz%U1%X1 %0,%1
+ stb%U0%X0 %1,%0
+ {lil|li} %0,%1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,load,store,*,mfjmpr,*,mtjmpr,*")])
+
+;; Here is how to move condition codes around. When we store CC data in
+;; an integer register or memory, we store just the high-order 4 bits.
+;; This lets us not shift in the most common case of CR0.
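+;; For example (a sketch): CR0 is read with just "mfcr %0", while a field
+;; such as CR3 takes "mfcr %0" followed by
+;; "{rlinm|rlwinm} %0,%0,12,0xf0000000" to rotate that field into the
+;; high-order four bits (see *movcc_internal1 below).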
+(define_expand "movcc"
+ [(set (match_operand:CC 0 "nonimmediate_operand" "")
+ (match_operand:CC 1 "nonimmediate_operand" ""))]
+ ""
+ "")
+
+(define_insn "*movcc_internal1"
+ [(set (match_operand:CC 0 "nonimmediate_operand" "=y,x,?y,y,r,r,r,r,r,q,cl,r,m")
+ (match_operand:CC 1 "general_operand" "y,r,r,O,x,y,r,I,h,r,r,m,r"))]
+ "register_operand (operands[0], CCmode)
+ || register_operand (operands[1], CCmode)"
+ "@
+ mcrf %0,%1
+ mtcrf 128,%1
+ {rlinm|rlwinm} %1,%1,%F0,0xffffffff\;mtcrf %R0,%1\;{rlinm|rlwinm} %1,%1,%f0,0xffffffff
+ crxor %0,%0,%0
+ mfcr %0%Q1
+ mfcr %0%Q1\;{rlinm|rlwinm} %0,%0,%f1,0xf0000000
+ mr %0,%1
+ {lil|li} %0,%1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+   {st%U0%X0|stw%U0%X0} %1,%0"
+ [(set (attr "type")
+ (cond [(eq_attr "alternative" "0,3")
+ (const_string "cr_logical")
+ (eq_attr "alternative" "1,2")
+ (const_string "mtcr")
+ (eq_attr "alternative" "6,7,9")
+ (const_string "integer")
+ (eq_attr "alternative" "8")
+ (const_string "mfjmpr")
+ (eq_attr "alternative" "10")
+ (const_string "mtjmpr")
+ (eq_attr "alternative" "11")
+ (const_string "load")
+ (eq_attr "alternative" "12")
+ (const_string "store")
+ (ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "4,4,12,4,4,8,4,4,4,4,4,4,4")])
+
+;; For floating-point, we normally deal with the floating-point registers
+;; unless -msoft-float is used. The sole exception is that parameter passing
+;; can produce floating-point values in fixed-point registers. Unless the
+;; value is a simple constant or already in memory, we deal with this by
+;; allocating memory and copying the value explicitly via that memory location.
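+;; For example (a sketch): on a 32-bit target a value in a GPR pair reaches
+;; an FPR through a stack slot, roughly
+;;	stw rA,disp(r1)
+;;	stw rB,disp+4(r1)
+;;	lfd fD,disp(r1)
+;; since these targets have no direct GPR<->FPR move instruction.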
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], SFmode); DONE; }")
+
+(define_split
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ if (! TARGET_POWERPC64)
+ operands[2] = operand_subword (operands[0], 0, 0, SFmode);
+ else
+ operands[2] = gen_lowpart (SImode, operands[0]);
+
+ operands[3] = gen_int_mode (l, SImode);
+}")
+
+(define_insn "*movsf_hardfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=!r,!r,m,f,f,m,*c*l,*q,!r,*h,!r,!r")
+ (match_operand:SF 1 "input_operand" "r,m,r,f,m,f,r,r,h,0,G,Fn"))]
+ "(gpc_reg_operand (operands[0], SFmode)
+ || gpc_reg_operand (operands[1], SFmode))
+ && (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT)"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ fmr %0,%1
+ lfs%U1%X1 %0,%1
+ stfs%U0%X0 %1,%0
+ mt%0 %1
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #"
+ [(set_attr "type" "*,load,store,fp,fpload,fpstore,mtjmpr,*,mfjmpr,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,4,8")])
+
+(define_insn "*movsf_softfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,cl,q,r,r,m,r,r,r,r,r,*h")
+ (match_operand:SF 1 "input_operand" "r,r,r,h,m,r,I,L,R,G,Fn,0"))]
+ "(gpc_reg_operand (operands[0], SFmode)
+ || gpc_reg_operand (operands[1], SFmode))
+ && (TARGET_SOFT_FLOAT || !TARGET_FPRS)"
+ "@
+ mr %0,%1
+ mt%0 %1
+ mt%0 %1
+ mf%1 %0
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ {cal|la} %0,%a1
+ #
+ #
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,mtjmpr,*,mfjmpr,load,store,*,*,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,4,8,4")])
+
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], DFmode); DONE; }")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DFmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DFmode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
+#endif
+}")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DFmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DFmode);
+ operands[4] = gen_int_mode (l[endian], SImode);
+ operands[5] = gen_int_mode (l[1 - endian], SImode);
+}")
+
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+#if HOST_BITS_PER_WIDE_INT >= 64
+ HOST_WIDE_INT val;
+#endif
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+
+ operands[2] = gen_lowpart (DImode, operands[0]);
+ /* HIGHPART is lower memory address when WORDS_BIG_ENDIAN. */
+#if HOST_BITS_PER_WIDE_INT >= 64
+ val = ((HOST_WIDE_INT)(unsigned long)l[endian] << 32
+ | ((HOST_WIDE_INT)(unsigned long)l[1 - endian]));
+
+ operands[3] = gen_int_mode (val, DImode);
+#else
+ operands[3] = immed_double_const (l[1 - endian], l[endian], DImode);
+#endif
+}")
+
+;; Don't have reload use general registers to load a constant. First,
+;; it might not work if the output operand is the equivalent of
+;; a non-offsettable memref, but also it is less efficient than loading
+;; the constant into an FP register, since it will probably be used there.
+;; The "??" is a kludge until we can figure out a more reasonable way
+;; of handling these non-offsettable values.
+(define_insn "*movdf_hardfloat32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=!r,??r,m,f,f,m,!r,!r,!r")
+ (match_operand:DF 1 "input_operand" "r,m,r,f,m,f,G,H,F"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ /* We normally copy the low-numbered register first. However, if
+	 the first register of operand 0 is the same as the second register
+ of operand 1, we must copy in the opposite order. */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ if (rs6000_offsettable_memref_p (operands[1])
+ || (GET_CODE (operands[1]) == MEM
+ && (GET_CODE (XEXP (operands[1], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_DEC
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)))
+ {
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1%X1|lwz%U1%X1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[1], 0));
+ if (refers_to_regno_p (REGNO (operands[0]),
+ REGNO (operands[0]) + 1,
+ operands[1], 0))
+ {
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{l%X1|lwz%X1} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"{l%X1|lwz%X1} %0,%1\";
+ }
+ else
+ {
+ output_asm_insn (\"{l%X1|lwz%X1} %0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{l%X1|lwz%X1} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ }
+ case 2:
+ if (rs6000_offsettable_memref_p (operands[0])
+ || (GET_CODE (operands[0]) == MEM
+ && (GET_CODE (XEXP (operands[0], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)))
+ return \"{st%U0%X0|stw%U0%X0} %1,%0\;{st|stw} %L1,%L0\";
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[0], 0));
+ output_asm_insn (\"{st%X0|stw%X0} %1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{st%X0|stw%X0} %L1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ case 3:
+ return \"fmr %0,%1\";
+ case 4:
+ return \"lfd%U1%X1 %0,%1\";
+ case 5:
+ return \"stfd%U0%X0 %1,%0\";
+ case 6:
+ case 7:
+ case 8:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "two,load,store,fp,fpload,fpstore,*,*,*")
+ (set_attr "length" "8,16,16,4,4,4,8,12,16")])
+
+(define_insn "*movdf_softfloat32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,r,r,r")
+ (match_operand:DF 1 "input_operand" "r,m,r,G,H,F"))]
+ "! TARGET_POWERPC64
+ && ((TARGET_FPRS && !TARGET_DOUBLE_FLOAT)
+ || TARGET_SOFT_FLOAT || TARGET_E500_SINGLE)
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ /* We normally copy the low-numbered register first. However, if
+	 the first register of operand 0 is the same as the second register of
+ operand 1, we must copy in the opposite order. */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1%X1|lwz%U1%X1} %0,%1\;{l|lwz} %L0,%L1\";
+ case 2:
+ return \"{st%U0%X0|stw%U0%X0} %1,%0\;{st|stw} %L1,%L0\";
+ case 3:
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "two,load,store,*,*,*")
+ (set_attr "length" "8,8,8,8,12,16")])
+
+; ld/std require word-aligned displacements -> 'Y' constraint.
+; List Y->r and r->Y before r->r for reload.
+(define_insn "*movdf_hardfloat64_mfpgpr"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r,r,f")
+ (match_operand:DF 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F,f,r"))]
+ "TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ std%U0%X0 %1,%0
+ ld%U1%X1 %0,%1
+ mr %0,%1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #
+ #
+ mftgpr %0,%1
+ mffgpr %0,%1"
+ [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*,mftgpr,mffgpr")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16,4,4")])
+
+; ld/std require word-aligned displacements -> 'Y' constraint.
+; List Y->r and r->Y before r->r for reload.
+(define_insn "*movdf_hardfloat64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=Y,r,!r,f,f,m,*c*l,!r,*h,!r,!r,!r")
+ (match_operand:DF 1 "input_operand" "r,Y,r,f,m,f,r,h,0,G,H,F"))]
+ "TARGET_POWERPC64 && !TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && TARGET_DOUBLE_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ std%U0%X0 %1,%0
+ ld%U1%X1 %0,%1
+ mr %0,%1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mt%0 %1
+ mf%1 %0
+ {cror 0,0,0|nop}
+ #
+ #
+ #"
+ [(set_attr "type" "store,load,*,fp,fpload,fpstore,mtjmpr,mfjmpr,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,4,4,8,12,16")])
+
+(define_insn "*movdf_softfloat64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,Y,r,cl,r,r,r,r,*h")
+ (match_operand:DF 1 "input_operand" "Y,r,r,r,h,G,H,F,0"))]
+ "TARGET_POWERPC64 && (TARGET_SOFT_FLOAT || !TARGET_FPRS)
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ mr %0,%1
+ mt%0 %1
+ mf%1 %0
+ #
+ #
+ #
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "load,store,*,mtjmpr,mfjmpr,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,8,12,16,4")])
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "general_operand" "")
+ (match_operand:TF 1 "any_operand" ""))]
+ "!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128"
+ "{ rs6000_emit_move (operands[0], operands[1], TFmode); DONE; }")
+
+; It's important to list the o->f and f->o moves before f->f because
+; otherwise reload, given m->f, will try to pick f->f and reload it,
+; which doesn't make progress. Likewise r->Y must be before r->r.
+(define_insn_and_split "*movtf_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,f,r,Y,r")
+ (match_operand:TF 1 "input_operand" "f,o,f,YGHF,r,r"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128
+ && (gpc_reg_operand (operands[0], TFmode)
+ || gpc_reg_operand (operands[1], TFmode))"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
+ [(set_attr "length" "8,8,8,20,20,16")])
+
+(define_insn_and_split "*movtf_softfloat"
+ [(set (match_operand:TF 0 "rs6000_nonimmediate_operand" "=r,Y,r")
+ (match_operand:TF 1 "input_operand" "YGHF,r,r"))]
+ "!TARGET_IEEEQUAD
+ && (TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_LONG_DOUBLE_128
+ && (gpc_reg_operand (operands[0], TFmode)
+ || gpc_reg_operand (operands[1], TFmode))"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; }
+ [(set_attr "length" "20,20,16")])
+
+(define_expand "extenddftf2"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float_extend:TF (match_operand:DF 1 "input_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+{
+ if (TARGET_E500_DOUBLE)
+ emit_insn (gen_spe_extenddftf2 (operands[0], operands[1]));
+ else
+ emit_insn (gen_extenddftf2_fprs (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "extenddftf2_fprs"
+ [(parallel [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float_extend:TF (match_operand:DF 1 "input_operand" "")))
+ (use (match_dup 2))])]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && TARGET_LONG_DOUBLE_128"
+{
+ operands[2] = CONST0_RTX (DFmode);
+ /* Generate GOT reference early for SVR4 PIC. */
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ operands[2] = validize_mem (force_const_mem (DFmode, operands[2]));
+})
+
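+;; Widening a double to IBM long double places the value in the high part
+;; of the result and zero (operand 2) in the low part.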
+(define_insn_and_split "*extenddftf2_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=o,f,&f,r")
+ (float_extend:TF (match_operand:DF 1 "input_operand" "fr,mf,mf,rmGHF")))
+ (use (match_operand:DF 2 "zero_reg_mem_operand" "rf,m,f,n"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(pc)]
+{
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+ emit_move_insn (simplify_gen_subreg (DFmode, operands[0], TFmode, hi_word),
+ operands[1]);
+ emit_move_insn (simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word),
+ operands[2]);
+ DONE;
+})
+
+(define_expand "extendsftf2"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "")
+ (float_extend:TF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+{
+ rtx tmp = gen_reg_rtx (DFmode);
+ emit_insn (gen_extendsfdf2 (tmp, operands[1]));
+ emit_insn (gen_extenddftf2 (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "trunctfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+ "")
+
+(define_insn_and_split "trunctfdf2_internal1"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,?f")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "0,f")))]
+ "!TARGET_IEEEQUAD && !TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "@
+ #
+ fmr %0,%1"
+ "&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+{
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+}
+ [(set_attr "type" "fp")])
+
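+;; In XL-compatible mode, an IBM long double rounds to double simply by
+;; adding its two halves.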
+(define_insn "trunctfdf2_internal2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD && TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && TARGET_LONG_DOUBLE_128"
+ "fadd %0,%1,%L1"
+ [(set_attr "type" "fp")
+ (set_attr "fp_type" "fp_addsub_d")])
+
+(define_expand "trunctfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (float_truncate:SF (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+{
+ if (TARGET_E500_DOUBLE)
+ emit_insn (gen_spe_trunctfsf2 (operands[0], operands[1]));
+ else
+ emit_insn (gen_trunctfsf2_fprs (operands[0], operands[1]));
+ DONE;
+})
+
+(define_insn_and_split "trunctfsf2_fprs"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float_truncate:SF (match_operand:TF 1 "gpc_reg_operand" "f")))
+ (clobber (match_scratch:DF 2 "=f"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT
+ && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (float_truncate:DF (match_dup 1)))
+ (set (match_dup 0)
+ (float_truncate:SF (match_dup 2)))]
+ "")
+
+(define_expand "floatsitf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (float:TF (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+{
+ rtx tmp = gen_reg_rtx (DFmode);
+ expand_float (tmp, operands[1], false);
+ emit_insn (gen_extenddftf2 (operands[0], tmp));
+ DONE;
+})
+
+; fadd, but rounding towards zero.
+; This is probably not the optimal code sequence.
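+; The template saves the FPSCR with mffs, forces the rounding-mode field RN
+; to 0b01 (round toward zero) with mtfsb1 31/mtfsb0 30, performs the fadd,
+; and then restores the saved rounding mode with mtfsf.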
+(define_insn "fix_trunc_helper"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (unspec:DF [(match_operand:TF 1 "gpc_reg_operand" "f")]
+ UNSPEC_FIX_TRUNC_TF))
+ (clobber (match_operand:DF 2 "gpc_reg_operand" "=&f"))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "mffs %2\n\tmtfsb1 31\n\tmtfsb0 30\n\tfadd %0,%1,%L1\n\tmtfsf 1,%2"
+ [(set_attr "type" "fp")
+ (set_attr "length" "20")])
+
+(define_expand "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && (TARGET_POWER2 || TARGET_POWERPC)
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+{
+ if (TARGET_E500_DOUBLE)
+ emit_insn (gen_spe_fix_trunctfsi2 (operands[0], operands[1]));
+ else
+ emit_insn (gen_fix_trunctfsi2_fprs (operands[0], operands[1]));
+ DONE;
+})
+
+(define_expand "fix_trunctfsi2_fprs"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "")))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))])]
+ "!TARGET_IEEEQUAD
+ && (TARGET_POWER2 || TARGET_POWERPC)
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+{
+ operands[2] = gen_reg_rtx (DFmode);
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (DImode);
+ operands[5] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+})
+
+(define_insn_and_split "*fix_trunctfsi2_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DF 2 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DF 3 "gpc_reg_operand" "=&f"))
+ (clobber (match_operand:DI 4 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DI 5 "offsettable_mem_operand" "=o"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& (can_create_pseudo_p () || offsettable_nonstrict_memref_p (operands[5]))"
+ [(pc)]
+{
+ rtx lowword;
+ emit_insn (gen_fix_trunc_helper (operands[2], operands[1], operands[3]));
+
+ gcc_assert (MEM_P (operands[5]));
+ lowword = adjust_address (operands[5], SImode, WORDS_BIG_ENDIAN ? 4 : 0);
+
+ emit_insn (gen_fctiwz (operands[4], operands[2]));
+ emit_move_insn (operands[5], operands[4]);
+ emit_move_insn (operands[0], lowword);
+ DONE;
+})
+
+(define_expand "negtf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (neg:TF (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+ "")
+
+(define_insn "negtf2_internal"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (neg:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"fneg %L0,%L1\;fneg %0,%1\";
+ else
+ return \"fneg %0,%1\;fneg %L0,%L1\";
+}"
+ [(set_attr "type" "fp")
+ (set_attr "length" "8")])
+
+(define_expand "abstf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (abs:TF (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT
+ && (TARGET_FPRS || TARGET_E500_DOUBLE)
+ && TARGET_LONG_DOUBLE_128"
+ "
+{
+ rtx label = gen_label_rtx ();
+ if (TARGET_E500_DOUBLE)
+ {
+ if (flag_finite_math_only && !flag_trapping_math)
+ emit_insn (gen_spe_abstf2_tst (operands[0], operands[1], label));
+ else
+ emit_insn (gen_spe_abstf2_cmp (operands[0], operands[1], label));
+ }
+ else
+ emit_insn (gen_abstf2_internal (operands[0], operands[1], label));
+ emit_label (label);
+ DONE;
+}")
+
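+;; With the IBM format the value is the sum of two doubles, so clearing
+;; the sign of the high double suffices only when the value was already
+;; non-negative.  abstf2_internal therefore saves the old high double,
+;; applies abs to it, and compares the two: if they are equal it
+;; branches to the label, otherwise it falls through and negates the
+;; low double as well, since |hi + lo| == |hi| + (-lo) when hi < 0.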
+(define_expand "abstf2_internal"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (match_operand:TF 1 "gpc_reg_operand" ""))
+ (set (match_dup 3) (match_dup 5))
+ (set (match_dup 5) (abs:DF (match_dup 5)))
+ (set (match_dup 4) (compare:CCFP (match_dup 3) (match_dup 5)))
+ (set (pc) (if_then_else (eq (match_dup 4) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 6) (neg:DF (match_dup 6)))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && TARGET_LONG_DOUBLE_128"
+ "
+{
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (CCFPmode);
+ operands[5] = simplify_gen_subreg (DFmode, operands[0], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word);
+}")
+
+;; Next come the multi-word integer load and store and the load and store
+;; multiple insns.
+
+; List r->r after r->"o<>", otherwise reload will try to reload a
+; non-offsettable address by using r->r which won't make progress.
+(define_insn "*movdi_internal32"
+ [(set (match_operand:DI 0 "rs6000_nonimmediate_operand" "=o<>,r,r,*f,*f,m,r")
+ (match_operand:DI 1 "input_operand" "r,r,m,f,m,f,IJKnGHF"))]
+ "! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "@
+ #
+ #
+ #
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ #"
+ [(set_attr "type" "load,*,store,fp,fpload,fpstore,*")])
+
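+;; Split a DImode constant load on a 32-bit target into two SImode
+;; sets.  On a 32-bit host the high word is simply the sign extension
+;; of the 32-bit value; on a 64-bit host the low word is recovered with
+;; the usual idiom ((value & 0xffffffff) ^ 0x80000000) - 0x80000000,
+;; which masks to 32 bits, flips the sign bit, and subtracts it back.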
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ DImode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT (((value & 0xffffffff) ^ 0x80000000) - 0x80000000);
+#endif
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "rs6000_nonimmediate_operand" "")
+ (match_operand:DI 1 "input_operand" ""))]
+ "reload_completed && !TARGET_POWERPC64
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
+(define_insn "*movdi_mfpgpr"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,r,r,r,r,*f,*f,m,r,*h,*h,r,*f")
+ (match_operand:DI 1 "input_operand" "r,m,r,I,L,nF,R,f,m,f,*h,r,0,*f,r"))]
+ "TARGET_POWERPC64 && TARGET_MFPGPR && TARGET_HARD_FLOAT && TARGET_FPRS
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "@
+ mr %0,%1
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ li %0,%1
+ lis %0,%v1
+ #
+ {cal|la} %0,%a1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mf%1 %0
+ mt%0 %1
+ {cror 0,0,0|nop}
+ mftgpr %0,%1
+ mffgpr %0,%1"
+ [(set_attr "type" "*,load,store,*,*,*,*,fp,fpload,fpstore,mfjmpr,mtjmpr,*,mftgpr,mffgpr")
+ (set_attr "length" "4,4,4,4,4,20,4,4,4,4,4,4,4,4,4")])
+
+(define_insn "*movdi_internal64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,r,r,r,r,*f,*f,m,r,*h,*h")
+ (match_operand:DI 1 "input_operand" "r,m,r,I,L,nF,R,f,m,f,*h,r,0"))]
+ "TARGET_POWERPC64 && (!TARGET_MFPGPR || !TARGET_HARD_FLOAT || !TARGET_FPRS)
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "@
+ mr %0,%1
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ li %0,%1
+ lis %0,%v1
+ #
+ {cal|la} %0,%a1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mf%1 %0
+ mt%0 %1
+ {cror 0,0,0|nop}"
+ [(set_attr "type" "*,load,store,*,*,*,*,fp,fpload,fpstore,mfjmpr,mtjmpr,*")
+ (set_attr "length" "4,4,4,4,4,20,4,4,4,4,4,4,4")])
+
+;; immediate value valid for a single instruction hiding in a const_double
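+;; The test (low + 0x8000) < 0x10000 (unsigned) succeeds exactly when
+;; the value fits in a signed 16-bit immediate, which li can load; the
+;; only other single-instruction constants are shifted 16-bit values,
+;; loaded with lis.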
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operand:DI 1 "const_double_operand" "F"))]
+ "HOST_BITS_PER_WIDE_INT == 32 && TARGET_POWERPC64
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && num_insns_constant (operands[1], DImode) == 1"
+ "*
+{
+ return ((unsigned HOST_WIDE_INT)
+ (CONST_DOUBLE_LOW (operands[1]) + 0x8000) < 0x10000)
+ ? \"li %0,%1\" : \"lis %0,%v1\";
+}")
+
+;; Generate all one-bits and clear left or right.
+;; Use (and:DI (rotate:DI ...)) to avoid anddi3 unnecessary clobber.
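+;; For example, a mask like 0xffffffff would come out as "li 0,-1"
+;; followed by a rotate-and-clear such as "rldicl 0,0,0,32"; the
+;; (and (rotate ...)) form is what that second instruction matches.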
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "mask64_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (const_int -1))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (const_int 0))
+ (match_dup 1)))]
+ "")
+
+;; Split a load of a large constant into the appropriate five-instruction
+;; sequence. Handle anything in a constant number of insns.
+;; When non-easy constants can go in the TOC, this should use
+;; easy_fp_constant predicate.
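+;; The worst case is the usual five-instruction materialization of a
+;; 64-bit constant: build the high 32 bits with lis/ori, shift them
+;; left 32, then fill in the low half with oris/ori.  The split
+;; succeeds only if rs6000_emit_set_const managed to emit the value
+;; directly into operands[0]; otherwise it FAILs.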
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], DImode, operands[1], 5);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], DImode, operands[1], 5);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+;; TImode is similar, except that we usually want to compute the address into
+;; a register and use lsi/stsi (the exception is during reload). MQ is also
+;; clobbered in stsi for POWER, so we need a SCRATCH for it.
+
+;; We say that MQ is clobbered in the last alternative because the first
+;; alternative would never get used otherwise since it would need a reload
+;; while the 2nd alternative would not. We put memory cases first so they
+;; are preferred. Otherwise, we'd try to reload the output instead of
+;; giving the SCRATCH mq.
+
+(define_insn "*movti_power"
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r,r")
+ (match_operand:TI 1 "input_operand" "r,r,r,Q,m,n"))
+ (clobber (match_scratch:SI 2 "=q,q#X,X,X,X,X"))]
+ "TARGET_POWER && ! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+
+ case 0:
+ if (TARGET_STRING)
+ return \"{stsi|stswi} %1,%P0,16\";
+ case 1:
+ case 2:
+ return \"#\";
+ case 3:
+ /* If the address is not used in the output, we can use lsi. Otherwise,
+ fall through to generating four loads. */
+ if (TARGET_STRING
+ && ! reg_overlap_mentioned_p (operands[0], operands[1]))
+ return \"{lsi|lswi} %0,%P1,16\";
+ /* ... fall through ... */
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "store,store,*,load,load,*")])
+
+(define_insn "*movti_string"
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,o<>,????r,????r,????r,r")
+ (match_operand:TI 1 "input_operand" "r,r,r,Q,m,n"))]
+ "! TARGET_POWER && ! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ if (TARGET_STRING)
+ return \"{stsi|stswi} %1,%P0,16\";
+ case 1:
+ case 2:
+ return \"#\";
+ case 3:
+ /* If the address is not used in the output, we can use lsi. Otherwise,
+ fall through to generating four loads. */
+ if (TARGET_STRING
+ && ! reg_overlap_mentioned_p (operands[0], operands[1]))
+ return \"{lsi|lswi} %0,%P1,16\";
+ /* ... fall through ... */
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "store_ux,store_ux,*,load_ux,load_ux,*")
+ (set (attr "cell_micro") (if_then_else (eq (symbol_ref "TARGET_STRING") (const_int 1))
+ (const_string "always")
+ (const_string "conditional")))])
+
+(define_insn "*movti_ppc64"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,o<>,r")
+ (match_operand:TI 1 "input_operand" "r,r,m"))]
+ "TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
+ || gpc_reg_operand (operands[1], TImode))"
+ "#"
+ [(set_attr "type" "*,load,store")])
+
+(define_split
+ [(set (match_operand:TI 0 "gpc_reg_operand" "")
+ (match_operand:TI 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ TImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ TImode);
+ if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ operands[4] = GEN_INT (- (INTVAL (operands[1]) < 0));
+ operands[5] = operands[1];
+ }
+ else
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:TI 0 "nonimmediate_operand" "")
+ (match_operand:TI 1 "input_operand" ""))]
+ "reload_completed
+ && gpr_or_gpr_p (operands[0], operands[1])"
+ [(pc)]
+{ rs6000_split_multireg_move (operands[0], operands[1]); DONE; })
+
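+;; The load_multiple expander builds a PARALLEL of word loads from
+;; consecutive offsets (0, 4, 8, ...).  load_multiple_operation
+;; recognizes that shape in the *ldmsiN patterns below, and
+;; rs6000_output_load_multiple can emit either a single {lsi|lswi} or
+;; fall back to individual word loads, hence the 32-byte worst-case
+;; length.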
+(define_expand "load_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 2 "" ""))])]
+ "TARGET_STRING && !TARGET_POWERPC64"
+ "
+{
+ int regno;
+ int count;
+ rtx op1;
+ int i;
+
+ /* Support only loading a constant number of fixed-point registers from
+ memory and only bother with this if more than two; the machine
+ doesn't support more than eight. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 2
+ || INTVAL (operands[2]) > 8
+ || GET_CODE (operands[1]) != MEM
+ || GET_CODE (operands[0]) != REG
+ || REGNO (operands[0]) >= 32)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[0]);
+
+ operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+ op1 = replace_equiv_address (operands[1],
+ force_reg (SImode, XEXP (operands[1], 0)));
+
+ for (i = 0; i < count; i++)
+ XVECEXP (operands[3], 0, i)
+ = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno + i),
+ adjust_address_nv (op1, SImode, i * 4));
+}")
+
+(define_insn "*ldmsi8"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))
+ (set (match_operand:SI 7 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 20))))
+ (set (match_operand:SI 8 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 24))))
+ (set (match_operand:SI 9 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 28))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 8"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi7"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))
+ (set (match_operand:SI 7 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 20))))
+ (set (match_operand:SI 8 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 24))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 7"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi6"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))
+ (set (match_operand:SI 7 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 20))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 6"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi5"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))
+ (set (match_operand:SI 6 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 16))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 5"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi4"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))
+ (set (match_operand:SI 5 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 12))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 4"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
+(define_insn "*ldmsi3"
+ [(match_parallel 0 "load_multiple_operation"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "")
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (mem:SI (plus:SI (match_dup 1) (const_int 8))))])]
+ "TARGET_STRING && XVECLEN (operands[0], 0) == 3"
+ "*
+{ return rs6000_output_load_multiple (operands); }"
+ [(set_attr "type" "load_ux")
+ (set_attr "length" "32")])
+
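+;; store_multiple builds the same kind of PARALLEL, but with a
+;; (clobber (scratch:SI)) as its second element: the POWER string store
+;; clobbers MQ, so the *stmsiN_power patterns below pin the scratch to
+;; it ("=q") while the plain *stmsiN patterns leave it unused ("=X").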
+(define_expand "store_multiple"
+ [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+ (match_operand:SI 1 "" ""))
+ (clobber (scratch:SI))
+ (use (match_operand:SI 2 "" ""))])]
+ "TARGET_STRING && !TARGET_POWERPC64"
+ "
+{
+ int regno;
+ int count;
+ rtx to;
+ rtx op0;
+ int i;
+
+ /* Support only storing a constant number of fixed-point registers to
+ memory and only bother with this if more than two; the machine
+ doesn't support more than eight. */
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 2
+ || INTVAL (operands[2]) > 8
+ || GET_CODE (operands[0]) != MEM
+ || GET_CODE (operands[1]) != REG
+ || REGNO (operands[1]) >= 32)
+ FAIL;
+
+ count = INTVAL (operands[2]);
+ regno = REGNO (operands[1]);
+
+ operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
+ to = force_reg (SImode, XEXP (operands[0], 0));
+ op0 = replace_equiv_address (operands[0], to);
+
+ XVECEXP (operands[3], 0, 0)
+ = gen_rtx_SET (VOIDmode, adjust_address_nv (op0, SImode, 0), operands[1]);
+ XVECEXP (operands[3], 0, 1) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_SCRATCH (SImode));
+
+ for (i = 1; i < count; i++)
+ XVECEXP (operands[3], 0, i + 1)
+ = gen_rtx_SET (VOIDmode,
+ adjust_address_nv (op0, SImode, i * 4),
+ gen_rtx_REG (SImode, regno + i));
+}")
+
+(define_insn "*stmsi8"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 28)))
+ (match_operand:SI 10 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 9"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi7"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 8"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi6"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 7"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi5"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 6"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi4"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 5"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi3"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=X"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && !TARGET_POWER && XVECLEN (operands[0], 0) == 4"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi8_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 28)))
+ (match_operand:SI 10 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 9"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi7_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 24)))
+ (match_operand:SI 9 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 8"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi6_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 20)))
+ (match_operand:SI 8 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 7"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi5_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 16)))
+ (match_operand:SI 7 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 6"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi4_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 12)))
+ (match_operand:SI 6 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 5"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*stmsi3_power"
+ [(match_parallel 0 "store_multiple_operation"
+ [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (clobber (match_scratch:SI 3 "=q"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 4)))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (set (mem:SI (plus:SI (match_dup 1) (const_int 8)))
+ (match_operand:SI 5 "gpc_reg_operand" "r"))])]
+ "TARGET_STRING && TARGET_POWER && XVECLEN (operands[0], 0) == 4"
+ "{stsi|stswi} %2,%1,%O0"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_expand "setmemsi"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:SI 1 "" ""))
+ (use (match_operand:SI 3 "" ""))])]
+ ""
+ "
+{
+ /* If value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (expand_block_clear (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; String/block move insn.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movmemsi"
+ [(parallel [(set (match_operand:BLK 0 "" "")
+ (match_operand:BLK 1 "" ""))
+ (use (match_operand:SI 2 "" ""))
+ (use (match_operand:SI 3 "" ""))])]
+ ""
+ "
+{
+ if (expand_block_move (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; Move up to 32 bytes at a time. The fixed registers are needed because the
+;; register allocator doesn't have a clue about allocating a block of 8 word registers.
+;; rD/rS = r5 is preferred, efficient form.
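+;; Lengths of 25..31 bytes need 7 or 8 consecutive registers, so the
+;; expander pins r5..r12 up front with the data register fixed at r5.
+;; A length operand of 0 is also accepted because lswi/stswi encode a
+;; count of 0 as a full 32-byte transfer.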
+(define_expand "movmemsi_8reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 5))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (reg:SI 11))
+ (clobber (reg:SI 12))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (reg:SI 11))
+ (clobber (reg:SI 12))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+ || INTVAL (operands[2]) == 0)
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (reg:SI 11))
+ (clobber (reg:SI 12))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+ || INTVAL (operands[2]) == 0)
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+;; Move up to 24 bytes at a time. The fixed registers are needed because the
+;; register allocator doesn't have a clue about allocating a block of 6 word registers.
+;; rD/rS = r5 is preferred, efficient form.
+(define_expand "movmemsi_6reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 5))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 24
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (reg:SI 9))
+ (clobber (reg:SI 10))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 32
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+;; Move up to 16 bytes at a time, using 4 fixed registers to avoid spill
+;; problems with TImode.
+;; rD/rS = r5 is preferred, efficient form.
+(define_expand "movmemsi_4reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI 5))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (match_scratch:SI 4 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))
+ (clobber (reg:SI 8))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+ && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+ && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+ && REGNO (operands[4]) == 5"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+;; Move up to 8 bytes at a time.
+(define_expand "movmemsi_2reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (match_scratch:DI 4 ""))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_STRING && ! TARGET_POWERPC64"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:DI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER && ! TARGET_POWERPC64
+ && INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:DI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER && ! TARGET_POWERPC64
+ && INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+;; Move up to 4 bytes at a time.
+(define_expand "movmemsi_1reg"
+ [(parallel [(set (match_operand 0 "" "")
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (match_scratch:SI 5 ""))])]
+ "TARGET_STRING"
+ "")
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=q"))]
+ "TARGET_STRING && TARGET_POWER
+ && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:P 0 "gpc_reg_operand" "b"))
+ (mem:BLK (match_operand:P 1 "gpc_reg_operand" "b")))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "immediate_operand" "i"))
+ (clobber (match_scratch:SI 4 "=&r"))
+ (clobber (match_scratch:SI 5 "=X"))]
+ "TARGET_STRING && ! TARGET_POWER
+ && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+ "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+ [(set_attr "type" "store_ux")
+ (set_attr "cell_micro" "always")
+ (set_attr "length" "8")])
+
+;; Define insns that do load or store with update. Some of these we can
+;; get by using pre-decrement or pre-increment, but the hardware can also
+;; do cases where the increment is not the size of the object.
+;;
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
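+;;
+;; For example, "lwzu %3,%2(%0)" loads a word from %1+%2 and leaves the
+;; incremented address in the base register; the "0" constraints tie
+;; operands 0 and 1 to the same register, so the update happens in
+;; place.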
+
+(define_insn "*movdi_update1"
+ [(set (match_operand:DI 3 "gpc_reg_operand" "=r,r")
+ (mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
+ (match_operand:DI 2 "reg_or_aligned_short_operand" "r,I"))))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (DImode)
+ || !gpc_reg_operand (operands[2], DImode))"
+ "@
+ ldux %3,%0,%2
+ ldu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "movdi_<mode>_update"
+ [(set (mem:DI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
+ (match_operand:P 2 "reg_or_aligned_short_operand" "r,I")))
+ (match_operand:DI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:P 0 "gpc_reg_operand" "=b,b")
+ (plus:P (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (Pmode)
+ || !gpc_reg_operand (operands[2], Pmode)
+ || (REG_P (operands[0])
+ && REGNO (operands[0]) == STACK_POINTER_REGNUM))"
+ "@
+ stdux %3,%0,%2
+ stdu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+;; This pattern is only conditional on TARGET_POWERPC64, as it is
+;; needed for stack allocation, even if the user passes -mno-update.
+(define_insn "movdi_<mode>_update_stack"
+ [(set (mem:DI (plus:P (match_operand:P 1 "gpc_reg_operand" "0,0")
+ (match_operand:P 2 "reg_or_aligned_short_operand" "r,I")))
+ (match_operand:DI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:P 0 "gpc_reg_operand" "=b,b")
+ (plus:P (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ stdux %3,%0,%2
+ stdu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movsi_update1"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ {lux|lwzux} %3,%0,%2
+ {lu|lwzu} %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movsi_update2"
+ [(set (match_operand:DI 3 "gpc_reg_operand" "=r")
+ (sign_extend:DI
+ (mem:SI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0")
+ (match_operand:DI 2 "gpc_reg_operand" "r")))))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=b")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && rs6000_gen_cell_microcode
+ && !avoiding_indexed_address_p (DImode)"
+ "lwaux %3,%0,%2"
+ [(set_attr "type" "load_ext_ux")])
+
+(define_insn "movsi_update"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode)
+ || (REG_P (operands[0])
+ && REGNO (operands[0]) == STACK_POINTER_REGNUM))"
+ "@
+ {stux|stwux} %3,%0,%2
+ {stu|stwu} %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+;; This is an unconditional pattern; needed for stack allocation, even
+;; if the user passes -mno-update.
+(define_insn "movsi_update_stack"
+ [(set (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {stux|stwux} %3,%0,%2
+ {stu|stwu} %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movhi_update1"
+ [(set (match_operand:HI 3 "gpc_reg_operand" "=r,r")
+ (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lhzux %3,%0,%2
+ lhzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movhi_update2"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI
+ (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lhzux %3,%0,%2
+ lhzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movhi_update3"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI
+ (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE && rs6000_gen_cell_microcode
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lhaux %3,%0,%2
+ lhau %3,%2(%0)"
+ [(set_attr "type" "load_ext_ux,load_ext_u")])
+
+(define_insn "*movhi_update4"
+ [(set (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:HI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ sthux %3,%0,%2
+ sthu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movqi_update1"
+ [(set (match_operand:QI 3 "gpc_reg_operand" "=r,r")
+ (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lbzux %3,%0,%2
+ lbzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movqi_update2"
+ [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI
+ (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lbzux %3,%0,%2
+ lbzu %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movqi_update3"
+ [(set (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:QI 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ stbux %3,%0,%2
+ stbu %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movsf_update1"
+ [(set (match_operand:SF 3 "gpc_reg_operand" "=f,f")
+ (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lfsux %3,%0,%2
+ lfsu %3,%2(%0)"
+ [(set_attr "type" "fpload_ux,fpload_u")])
+
+(define_insn "*movsf_update2"
+ [(set (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SF 3 "gpc_reg_operand" "f,f"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ stfsux %3,%0,%2
+ stfsu %3,%2(%0)"
+ [(set_attr "type" "fpstore_ux,fpstore_u")])
+
+(define_insn "*movsf_update3"
+ [(set (match_operand:SF 3 "gpc_reg_operand" "=r,r")
+ (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ {lux|lwzux} %3,%0,%2
+ {lu|lwzu} %3,%2(%0)"
+ [(set_attr "type" "load_ux,load_u")])
+
+(define_insn "*movsf_update4"
+ [(set (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:SF 3 "gpc_reg_operand" "r,r"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "(TARGET_SOFT_FLOAT || !TARGET_FPRS) && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ {stux|stwux} %3,%0,%2
+ {stu|stwu} %3,%2(%0)"
+ [(set_attr "type" "store_ux,store_u")])
+
+(define_insn "*movdf_update1"
+ [(set (match_operand:DF 3 "gpc_reg_operand" "=f,f")
+ (mem:DF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ lfdux %3,%0,%2
+ lfdu %3,%2(%0)"
+ [(set_attr "type" "fpload_ux,fpload_u")])
+
+(define_insn "*movdf_update2"
+ [(set (mem:DF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (match_operand:DF 3 "gpc_reg_operand" "f,f"))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_UPDATE
+ && (!avoiding_indexed_address_p (SImode)
+ || !gpc_reg_operand (operands[2], SImode))"
+ "@
+ stfdux %3,%0,%2
+ stfdu %3,%2(%0)"
+ [(set_attr "type" "fpstore_ux,fpstore_u")])
+
+;; Peephole to convert two consecutive FP loads or stores into lfq/stfq.
+
+(define_insn "*lfq_power2"
+ [(set (match_operand:V2DF 0 "gpc_reg_operand" "=f")
+ (match_operand:V2DF 1 "memory_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "lfq%U1%X1 %0,%1")
+
+(define_peephole2
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "memory_operand" ""))
+ (set (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_operand:DF 3 "memory_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && registers_ok_for_quad_peep (operands[0], operands[2])
+ && mems_ok_for_quad_peep (operands[1], operands[3])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[1] = widen_memory_access (operands[1], V2DFmode, 0);
+ operands[0] = gen_rtx_REG (V2DFmode, REGNO (operands[0]));")
+
+(define_insn "*stfq_power2"
+ [(set (match_operand:V2DF 0 "memory_operand" "")
+ (match_operand:V2DF 1 "gpc_reg_operand" "f"))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS"
+ "stfq%U0%X0 %1,%0")
+
+
+(define_peephole2
+ [(set (match_operand:DF 0 "memory_operand" "")
+ (match_operand:DF 1 "gpc_reg_operand" ""))
+ (set (match_operand:DF 2 "memory_operand" "")
+ (match_operand:DF 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER2
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
+ && registers_ok_for_quad_peep (operands[1], operands[3])
+ && mems_ok_for_quad_peep (operands[0], operands[2])"
+ [(set (match_dup 0)
+ (match_dup 1))]
+ "operands[0] = widen_memory_access (operands[0], V2DFmode, 0);
+ operands[1] = gen_rtx_REG (V2DFmode, REGNO (operands[1]));")
+
+;; After inserting conditional returns we can sometimes have
+;; unnecessary register moves. Unfortunately we cannot have a
+;; modeless peephole here, because some single SImode sets have early
+;; clobber outputs. Although those sets expand to multi-ppc-insn
+;; sequences, using get_attr_length here will smash the operands
+;; array. Neither is there an early_clobber_p predicate.
+;; Disallow subregs for E500 so we don't munge frob_di_df_2.
+(define_peephole2
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "any_operand" ""))
+ (set (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_dup 0))]
+ "!(TARGET_E500_DOUBLE && GET_CODE (operands[2]) == SUBREG)
+ && peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2) (match_dup 1))])
+
+(define_peephole2
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "any_operand" ""))
+ (set (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_dup 0))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(set (match_dup 2) (match_dup 1))])
+
+
+;; TLS support.
+
+;; Mode attributes for different ABIs.
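+;; TLSmode follows the pointer width, so one pattern body serves both
+;; ABIs: tls_abi_suffix and tls_sysv_suffix form the 32/64 and si/di
+;; pattern names, and tls_insn_suffix selects lwz versus ld for the
+;; GOT loads below.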
+(define_mode_iterator TLSmode [(SI "! TARGET_64BIT") (DI "TARGET_64BIT")])
+(define_mode_attr tls_abi_suffix [(SI "32") (DI "64")])
+(define_mode_attr tls_sysv_suffix [(SI "si") (DI "di")])
+(define_mode_attr tls_insn_suffix [(SI "wz") (DI "d")])
+
+(define_insn "tls_gd_aix<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=b")
+ (call (mem:TLSmode (match_operand:TLSmode 3 "symbol_ref_operand" "s"))
+ (match_operand 4 "" "g")))
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGD)
+ (clobber (reg:SI LR_REGNO))]
+ "HAVE_AS_TLS && DEFAULT_ABI == ABI_AIX"
+ "addi %0,%1,%2@got@tlsgd\;bl %z3\;%."
+ [(set_attr "type" "two")
+ (set_attr "length" "12")])
+
+(define_insn "tls_gd_sysv<TLSmode:tls_sysv_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=b")
+ (call (mem:TLSmode (match_operand:TLSmode 3 "symbol_ref_operand" "s"))
+ (match_operand 4 "" "g")))
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGD)
+ (clobber (reg:SI LR_REGNO))]
+ "HAVE_AS_TLS && DEFAULT_ABI == ABI_V4"
+{
+ if (flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return "addi %0,%1,%2@got@tlsgd\;bl %z3+32768@plt";
+ else
+ return "addi %0,%1,%2@got@tlsgd\;bl %z3@plt";
+ }
+ else
+ return "addi %0,%1,%2@got@tlsgd\;bl %z3";
+}
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "tls_ld_aix<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=b")
+ (call (mem:TLSmode (match_operand:TLSmode 2 "symbol_ref_operand" "s"))
+ (match_operand 3 "" "g")))
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")]
+ UNSPEC_TLSLD)
+ (clobber (reg:SI LR_REGNO))]
+ "HAVE_AS_TLS && DEFAULT_ABI == ABI_AIX"
+ "addi %0,%1,%&@got@tlsld\;bl %z2\;%."
+ [(set_attr "length" "12")])
+
+(define_insn "tls_ld_sysv<TLSmode:tls_sysv_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=b")
+ (call (mem:TLSmode (match_operand:TLSmode 2 "symbol_ref_operand" "s"))
+ (match_operand 3 "" "g")))
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")]
+ UNSPEC_TLSLD)
+ (clobber (reg:SI LR_REGNO))]
+ "HAVE_AS_TLS && DEFAULT_ABI == ABI_V4"
+{
+ if (flag_pic)
+ {
+ if (TARGET_SECURE_PLT && flag_pic == 2)
+ return "addi %0,%1,%&@got@tlsld\;bl %z2+32768@plt";
+ else
+ return "addi %0,%1,%&@got@tlsld\;bl %z2@plt";
+ }
+ else
+ return "addi %0,%1,%&@got@tlsld\;bl %z2";
+}
+ [(set_attr "length" "8")])
+
+(define_insn "tls_dtprel_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPREL))]
+ "HAVE_AS_TLS"
+ "addi %0,%1,%2@dtprel")
+
+(define_insn "tls_dtprel_ha_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPRELHA))]
+ "HAVE_AS_TLS"
+ "addis %0,%1,%2@dtprel@ha")
+
+(define_insn "tls_dtprel_lo_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSDTPRELLO))]
+ "HAVE_AS_TLS"
+ "addi %0,%1,%2@dtprel@l")
+
+(define_insn "tls_got_dtprel_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGOTDTPREL))]
+ "HAVE_AS_TLS"
+ "l<TLSmode:tls_insn_suffix> %0,%2@got@dtprel(%1)")
+
+(define_insn "tls_tprel_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPREL))]
+ "HAVE_AS_TLS"
+ "addi %0,%1,%2@tprel")
+
+(define_insn "tls_tprel_ha_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPRELHA))]
+ "HAVE_AS_TLS"
+ "addis %0,%1,%2@tprel@ha")
+
+(define_insn "tls_tprel_lo_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTPRELLO))]
+ "HAVE_AS_TLS"
+ "addi %0,%1,%2@tprel@l")
+
+;; The "b" output constraint here and on the tls_tls input is there to
+;; support linker TLS optimization: the linker may edit the instructions
+;; emitted by a tls_got_tprel/tls_tls pair into an addis,addi pair.
+(define_insn "tls_got_tprel_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=b")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSGOTTPREL))]
+ "HAVE_AS_TLS"
+ "l<TLSmode:tls_insn_suffix> %0,%2@got@tprel(%1)")
+
+(define_insn "tls_tls_<TLSmode:tls_abi_suffix>"
+ [(set (match_operand:TLSmode 0 "gpc_reg_operand" "=r")
+ (unspec:TLSmode [(match_operand:TLSmode 1 "gpc_reg_operand" "b")
+ (match_operand:TLSmode 2 "rs6000_tls_symbol_ref" "")]
+ UNSPEC_TLSTLS))]
+ "HAVE_AS_TLS"
+ "add %0,%1,%2@tls")
+
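+;; For illustration, a 64-bit initial-exec access built from the
+;; tls_got_tprel/tls_tls pair above looks like
+;;     ld  9,x@got@tprel(2)    ; load x's TP offset from the GOT
+;;     add 3,9,x@tls           ; add the thread pointer (r13)
+;; and a TLS-optimizing linker may rewrite the pair into the local-exec
+;; form
+;;     addis 9,13,x@tprel@ha
+;;     addi  3,9,x@tprel@l
+;; (a sketch only; register choices are up to the allocator).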
+
+;; Next come insns related to the calling sequence.
+;;
+;; First, an insn to allocate new stack space for dynamic use (e.g., alloca).
+;; We move the back-chain and decrement the stack pointer.
+
+(define_expand "allocate_stack"
+ [(set (match_operand 0 "gpc_reg_operand" "")
+ (minus (reg 1) (match_operand 1 "reg_or_short_operand" "")))
+ (set (reg 1)
+ (minus (reg 1) (match_dup 1)))]
+ ""
+ "
+{ rtx chain = gen_reg_rtx (Pmode);
+ rtx stack_bot = gen_rtx_MEM (Pmode, stack_pointer_rtx);
+ rtx neg_op0;
+ rtx insn, par, set, mem;
+
+ emit_move_insn (chain, stack_bot);
+
+ /* Check stack bounds if necessary. */
+ if (crtl->limit_stack)
+ {
+ rtx available;
+ available = expand_binop (Pmode, sub_optab,
+ stack_pointer_rtx, stack_limit_rtx,
+ NULL_RTX, 1, OPTAB_WIDEN);
+ emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
+ }
+
+ if (GET_CODE (operands[1]) != CONST_INT
+ || INTVAL (operands[1]) < -32767
+ || INTVAL (operands[1]) > 32768)
+ {
+ neg_op0 = gen_reg_rtx (Pmode);
+ if (TARGET_32BIT)
+ emit_insn (gen_negsi2 (neg_op0, operands[1]));
+ else
+ emit_insn (gen_negdi2 (neg_op0, operands[1]));
+ }
+ else
+ neg_op0 = GEN_INT (- INTVAL (operands[1]));
+
+ insn = emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update_stack
+ : gen_movdi_di_update_stack))
+ (stack_pointer_rtx, stack_pointer_rtx, neg_op0,
+ chain));
+ /* Since we didn't use gen_frame_mem to generate the MEM, grab
+ it now and set the alias set/attributes. The above gen_*_update
+ calls will generate a PARALLEL with the MEM set being the first
+ operation. */
+ par = PATTERN (insn);
+ gcc_assert (GET_CODE (par) == PARALLEL);
+ set = XVECEXP (par, 0, 0);
+ gcc_assert (GET_CODE (set) == SET);
+ mem = SET_DEST (set);
+ gcc_assert (MEM_P (mem));
+ MEM_NOTRAP_P (mem) = 1;
+ set_mem_alias_set (mem, get_frame_alias_set ());
+
+ emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+}")
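+
+;; A 32-bit sketch of what this expands to for a variable-sized alloca
+;; (register numbers illustrative):
+;;     lwz   0,0(1)       ; fetch the old back chain
+;;     neg   9,3          ; negate the requested size
+;;     stwux 0,1,9        ; store the chain at the new SP, updating r1
+;; When the size is a small constant, the neg is folded into a stwu with
+;; an immediate displacement instead.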
+
+;; These patterns say how to save and restore the stack pointer. We need not
+;; save the stack pointer at function level since we are careful to
+;; preserve the backchain. At block level, we have to restore the backchain
+;; when we restore the stack pointer.
+;;
+;; For nonlocal gotos, we must save both the stack pointer and its
+;; backchain and restore both. Note that in the nonlocal case, the
+;; save area is a memory location.
+
+(define_expand "save_stack_function"
+ [(match_operand 0 "any_operand" "")
+ (match_operand 1 "any_operand" "")]
+ ""
+ "DONE;")
+
+(define_expand "restore_stack_function"
+ [(match_operand 0 "any_operand" "")
+ (match_operand 1 "any_operand" "")]
+ ""
+ "DONE;")
+
+;; Adjust stack pointer (op0) to a new value (op1).
+;; First copy old stack backchain to new location, and ensure that the
+;; scheduler won't reorder the sp assignment before the backchain write.
+(define_expand "restore_stack_block"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 2))
+ (set (match_dup 5) (unspec:BLK [(match_dup 5)] UNSPEC_TIE))
+ (set (match_operand 0 "register_operand" "")
+ (match_operand 1 "register_operand" ""))]
+ ""
+ "
+{
+ operands[1] = force_reg (Pmode, operands[1]);
+ operands[2] = gen_reg_rtx (Pmode);
+ operands[3] = gen_frame_mem (Pmode, operands[0]);
+ operands[4] = gen_frame_mem (Pmode, operands[1]);
+ operands[5] = gen_frame_mem (BLKmode, operands[0]);
+}")
+
+(define_expand "save_stack_nonlocal"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_operand 0 "memory_operand" "") (match_dup 3))
+ (set (match_dup 2) (match_operand 1 "register_operand" ""))]
+ ""
+ "
+{
+ int units_per_word = (TARGET_32BIT) ? 4 : 8;
+
+ /* Copy the backchain to the first word, sp to the second. */
+ operands[0] = adjust_address_nv (operands[0], Pmode, 0);
+ operands[2] = adjust_address_nv (operands[0], Pmode, units_per_word);
+ operands[3] = gen_reg_rtx (Pmode);
+ operands[4] = gen_frame_mem (Pmode, operands[1]);
+}")
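+
+;; The nonlocal save area thus holds two words (32-bit shown, so
+;; units_per_word is 4):
+;;     word 0: the back chain, read through the saved stack pointer
+;;     word 1: the stack pointer itself
+;; and restore_stack_nonlocal below reads them back in the same order.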
+
+(define_expand "restore_stack_nonlocal"
+ [(set (match_dup 2) (match_operand 1 "memory_operand" ""))
+ (set (match_dup 3) (match_dup 4))
+ (set (match_dup 5) (match_dup 2))
+ (set (match_dup 6) (unspec:BLK [(match_dup 6)] UNSPEC_TIE))
+ (set (match_operand 0 "register_operand" "") (match_dup 3))]
+ ""
+ "
+{
+ int units_per_word = (TARGET_32BIT) ? 4 : 8;
+
+ /* Restore the backchain from the first word, sp from the second. */
+ operands[2] = gen_reg_rtx (Pmode);
+ operands[3] = gen_reg_rtx (Pmode);
+ operands[1] = adjust_address_nv (operands[1], Pmode, 0);
+ operands[4] = adjust_address_nv (operands[1], Pmode, units_per_word);
+ operands[5] = gen_frame_mem (Pmode, operands[3]);
+ operands[6] = gen_frame_mem (BLKmode, operands[0]);
+}")
+
+;; TOC register handling.
+
+;; Code to initialize the TOC register...
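+;;
+;; For SVR4 PIC, for example, the patterns below build a sequence of
+;; roughly this shape (a sketch; label and registers are illustrative):
+;;     bcl 20,31,1f       ; "branch always" taken only for its LR side effect
+;; 1:  mflr 30            ; pick up the address of label 1
+;;     ...                ; then add the label-to-GOT offset into r30
+;; bcl 20,31 is preferred over a plain bl because it is recognized as
+;; not being a real subroutine call and so does not unbalance the
+;; processor's link-register prediction stack.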
+
+(define_insn "load_toc_aix_si"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(const_int 0)] UNSPEC_TOC))
+ (use (reg:SI 2))])]
+ "DEFAULT_ABI == ABI_AIX && TARGET_32BIT"
+ "*
+{
+ char buf[30];
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+ operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ operands[2] = gen_rtx_REG (Pmode, 2);
+ return \"{l|lwz} %0,%1(%2)\";
+}"
+ [(set_attr "type" "load")])
+
+(define_insn "load_toc_aix_di"
+ [(parallel [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (unspec:DI [(const_int 0)] UNSPEC_TOC))
+ (use (reg:DI 2))])]
+ "DEFAULT_ABI == ABI_AIX && TARGET_64BIT"
+ "*
+{
+ char buf[30];
+#ifdef TARGET_RELOCATABLE
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\",
+ !TARGET_MINIMAL_TOC || TARGET_RELOCATABLE);
+#else
+ ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+#endif
+ if (TARGET_ELF)
+ strcat (buf, \"@toc\");
+ operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+ operands[2] = gen_rtx_REG (Pmode, 2);
+ return \"ld %0,%1(%2)\";
+}"
+ [(set_attr "type" "load")])
+
+(define_insn "load_toc_v4_pic_si"
+ [(set (reg:SI LR_REGNO)
+ (unspec:SI [(const_int 0)] UNSPEC_TOC))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1 && TARGET_32BIT"
+ "bl _GLOBAL_OFFSET_TABLE_@local-4"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_toc_v4_PIC_1"
+ [(set (reg:SI LR_REGNO)
+ (match_operand:SI 0 "immediate_operand" "s"))
+ (use (unspec [(match_dup 0)] UNSPEC_TOC))]
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX
+ && (flag_pic == 2 || (flag_pic && TARGET_SECURE_PLT))"
+ "bcl 20,31,%0\\n%0:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_toc_v4_PIC_1b"
+ [(set (reg:SI LR_REGNO)
+ (unspec:SI [(match_operand:SI 0 "immediate_operand" "s")]
+ UNSPEC_TOCPTR))]
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
+ "bcl 20,31,$+8\\n\\t.long %0-$"
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "load_toc_v4_PIC_2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (minus:SI (match_operand:SI 2 "immediate_operand" "s")
+ (match_operand:SI 3 "immediate_operand" "s")))))]
+ "TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2"
+ "{l|lwz} %0,%2-%3(%1)"
+ [(set_attr "type" "load")])
+
+(define_insn "load_toc_v4_PIC_3b"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (high:SI
+ (minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
+ (match_operand:SI 3 "symbol_ref_operand" "s")))))]
+ "TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic"
+ "{cau|addis} %0,%1,%2-%3@ha")
+
+(define_insn "load_toc_v4_PIC_3c"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (minus:SI (match_operand:SI 2 "symbol_ref_operand" "s")
+ (match_operand:SI 3 "symbol_ref_operand" "s"))))]
+ "TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic"
+ "{cal %0,%2-%3@l(%1)|addi %0,%1,%2-%3@l}")
+
+;; If the TOC is shared across a whole translation unit, as happens with
+;; all the kinds of PIC that we support, we need to restore the TOC
+;; pointer only when jumping between translation units.
+;; On Darwin, we need to reload the picbase instead.
+
+(define_expand "builtin_setjmp_receiver"
+ [(use (label_ref (match_operand 0 "" "")))]
+ "(DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)"
+ "
+{
+#if TARGET_MACHO
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ rtx picrtx = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
+ rtx picreg = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
+ rtx tmplabrtx;
+ char tmplab[20];
+
+ crtl->uses_pic_offset_table = 1;
+ ASM_GENERATE_INTERNAL_LABEL(tmplab, \"LSJR\",
+ CODE_LABEL_NUMBER (operands[0]));
+ tmplabrtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tmplab));
+
+ emit_insn (gen_load_macho_picbase (tmplabrtx));
+ emit_move_insn (picreg, gen_rtx_REG (Pmode, LR_REGNO));
+ emit_insn (gen_macho_correct_pic (picreg, picreg, picrtx, tmplabrtx));
+ }
+ else
+#endif
+ rs6000_emit_load_toc_table (FALSE);
+ DONE;
+}")
+
+;; ELF-specific ways of loading addresses for non-PIC code.
+;; The output of this could be r0, but we express a very strong
+;; preference for a base register, because the result will usually
+;; be needed as one.
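+;; For example, a 32-bit non-PIC address is materialized as
+;;     lis 9,sym@ha       ; elf_high
+;;     la  3,sym@l(9)     ; elf_low (an addi)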
+(define_insn "elf_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "{liu|lis} %0,%1@ha")
+
+(define_insn "elf_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "@
+ {cal|la} %0,%2@l(%1)
+ {ai|addic} %0,%1,%K2")
+
+;; A function pointer under AIX is a pointer to a data area whose first word
+;; contains the actual address of the function, whose second word contains a
+;; pointer to its TOC, and whose third word contains a value to place in the
+;; static chain register (r11). Note that if we load the static chain, our
+;; "trampoline" need not have any executable code.
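+;;
+;; Laid out in memory (32-bit/64-bit offsets), the descriptor is:
+;;     word 0 (0/0):    entry address of the code
+;;     word 1 (4/8):    the callee's TOC value, loaded into r2
+;;     word 2 (8/16):   the static chain value, loaded into r11
+;; so the expanders below load the entry word into a scratch register,
+;; save the caller's TOC at 20(1) (40(1) for 64-bit), and call through
+;; the scratch.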
+
+(define_expand "call_indirect_aix32"
+ [(set (match_dup 2)
+ (mem:SI (match_operand:SI 0 "gpc_reg_operand" "")))
+ (set (mem:SI (plus:SI (reg:SI 1) (const_int 20)))
+ (reg:SI 2))
+ (set (reg:SI 11)
+ (mem:SI (plus:SI (match_dup 0)
+ (const_int 8))))
+ (parallel [(call (mem:SI (match_dup 2))
+ (match_operand 1 "" ""))
+ (use (mem:SI (plus:SI (match_dup 0) (const_int 4))))
+ (use (reg:SI 11))
+ (use (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))])]
+ "TARGET_32BIT"
+ "
+{ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "call_indirect_aix64"
+ [(set (match_dup 2)
+ (mem:DI (match_operand:DI 0 "gpc_reg_operand" "")))
+ (set (mem:DI (plus:DI (reg:DI 1) (const_int 40)))
+ (reg:DI 2))
+ (set (reg:DI 11)
+ (mem:DI (plus:DI (match_dup 0)
+ (const_int 16))))
+ (parallel [(call (mem:SI (match_dup 2))
+ (match_operand 1 "" ""))
+ (use (mem:DI (plus:DI (match_dup 0) (const_int 8))))
+ (use (reg:DI 11))
+ (use (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))])]
+ "TARGET_64BIT"
+ "
+{ operands[2] = gen_reg_rtx (DImode); }")
+
+(define_expand "call_value_indirect_aix32"
+ [(set (match_dup 3)
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (set (mem:SI (plus:SI (reg:SI 1) (const_int 20)))
+ (reg:SI 2))
+ (set (reg:SI 11)
+ (mem:SI (plus:SI (match_dup 1)
+ (const_int 8))))
+ (parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_dup 3))
+ (match_operand 2 "" "")))
+ (use (mem:SI (plus:SI (match_dup 1) (const_int 4))))
+ (use (reg:SI 11))
+ (use (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))])]
+ "TARGET_32BIT"
+ "
+{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "call_value_indirect_aix64"
+ [(set (match_dup 3)
+ (mem:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (set (mem:DI (plus:DI (reg:DI 1) (const_int 40)))
+ (reg:DI 2))
+ (set (reg:DI 11)
+ (mem:DI (plus:DI (match_dup 1)
+ (const_int 16))))
+ (parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_dup 3))
+ (match_operand 2 "" "")))
+ (use (mem:DI (plus:DI (match_dup 1) (const_int 8))))
+ (use (reg:DI 11))
+ (use (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))])]
+ "TARGET_64BIT"
+ "
+{ operands[3] = gen_reg_rtx (DImode); }")
+
+;; Now the definitions for the call and call_value insns
+(define_expand "call"
+ [(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (reg:SI LR_REGNO))])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
+
+ operands[0] = XEXP (operands[0], 0);
+
+ if (GET_CODE (operands[0]) != SYMBOL_REF
+ || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (operands[0]))
+ || (DEFAULT_ABI != ABI_DARWIN && (INTVAL (operands[2]) & CALL_LONG) != 0))
+ {
+ if (INTVAL (operands[2]) & CALL_LONG)
+ operands[0] = rs6000_longcall_ref (operands[0]);
+
+ switch (DEFAULT_ABI)
+ {
+ case ABI_V4:
+ case ABI_DARWIN:
+ operands[0] = force_reg (Pmode, operands[0]);
+ break;
+
+ case ABI_AIX:
+ /* AIX function pointers are really pointers to a three word
+ area. */
+ emit_call_insn (TARGET_32BIT
+ ? gen_call_indirect_aix32 (force_reg (SImode,
+ operands[0]),
+ operands[1])
+ : gen_call_indirect_aix64 (force_reg (DImode,
+ operands[0]),
+ operands[1]));
+ DONE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+}")
+
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand 1 "address_operand" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (reg:SI LR_REGNO))])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+
+ operands[1] = XEXP (operands[1], 0);
+
+ if (GET_CODE (operands[1]) != SYMBOL_REF
+ || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (operands[1]))
+ || (DEFAULT_ABI != ABI_DARWIN && (INTVAL (operands[3]) & CALL_LONG) != 0))
+ {
+ if (INTVAL (operands[3]) & CALL_LONG)
+ operands[1] = rs6000_longcall_ref (operands[1]);
+
+ switch (DEFAULT_ABI)
+ {
+ case ABI_V4:
+ case ABI_DARWIN:
+ operands[1] = force_reg (Pmode, operands[1]);
+ break;
+
+ case ABI_AIX:
+ /* AIX function pointers are really pointers to a three word
+ area. */
+ emit_call_insn (TARGET_32BIT
+ ? gen_call_value_indirect_aix32 (operands[0],
+ force_reg (SImode,
+ operands[1]),
+ operands[2])
+ : gen_call_value_indirect_aix64 (operands[0],
+ force_reg (DImode,
+ operands[1]),
+ operands[2]));
+ DONE;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+}")
+
+;; Call to function in current module. No TOC pointer reload needed.
+;; Operand2 is nonzero if we are using the V.4 calling sequence and
+;; either the function was not prototyped, or it was prototyped as a
+;; variable argument function. It is > 0 if FP registers were passed
+;; and < 0 if they were not.
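+;;
+;; Concretely, "crxor 6,6,6" clears CR bit 6 and "creqv 6,6,6" sets it;
+;; under the V.4 ABI this bit tells a varargs callee whether any
+;; floating-point argument registers hold live arguments.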
+
+(define_insn "*call_local32"
+ [(call (mem:SI (match_operand:SI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "(INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_local64"
+ [(call (mem:SI (match_operand:DI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_local32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "(INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@local\" : \"bl %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*call_value_local64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@local\" : \"bl %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+;; Call to function which may be in another module. Restore the TOC
+;; pointer (r2) after the call unless this is System V.
+;; Operand2 is nonzero if we are using the V.4 calling sequence and
+;; either the function was not prototyped, or it was prototyped as a
+;; variable argument function. It is > 0 if FP registers were passed
+;; and < 0 if they were not.
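+;;
+;; An indirect cross-module AIX call therefore ends up as, schematically,
+;;     bctrl              ; call through CTR
+;;     lwz 2,20(1)        ; reload our TOC (ld 2,40(1) on 64-bit)
+;; where 20(1)/40(1) is the ABI-reserved TOC save slot in the caller's
+;; frame.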
+
+(define_insn_and_split "*call_indirect_nonlocal_aix32_internal"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
+ (use (mem:SI (plus:SI (match_operand:SI 2 "register_operand" "b,b") (const_int 4))))
+ (use (reg:SI 11))
+ (use (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 2)
+ (mem:SI (plus:SI (match_dup 2) (const_int 4))))
+ (parallel [(call (mem:SI (match_dup 0))
+ (match_dup 1))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))])]
+ ""
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "12")])
+
+(define_insn "*call_indirect_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX && reload_completed"
+ "b%T0l\;{l|lwz} 2,20(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "bl %z0\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*call_indirect_nonlocal_aix64_internal"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
+ (use (mem:DI (plus:DI (match_operand:DI 2 "register_operand" "b,b")
+ (const_int 8))))
+ (use (reg:DI 11))
+ (use (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
+ "#"
+ "&& reload_completed"
+ [(set (reg:DI 2)
+ (mem:DI (plus:DI (match_dup 2) (const_int 8))))
+ (parallel [(call (mem:SI (match_dup 0))
+ (match_dup 1))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))])]
+ ""
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "12")])
+
+(define_insn "*call_indirect_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "c,*l"))
+ (match_operand 1 "" "g,g"))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && reload_completed"
+ "b%T0l\;ld 2,40(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "bl %z0\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*call_value_indirect_nonlocal_aix32_internal"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
+ (use (mem:SI (plus:SI (match_operand:SI 3 "register_operand" "b,b")
+ (const_int 4))))
+ (use (reg:SI 11))
+ (use (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
+ "#"
+ "&& reload_completed"
+ [(set (reg:SI 2)
+ (mem:SI (plus:SI (match_dup 3) (const_int 4))))
+ (parallel [(set (match_dup 0) (call (mem:SI (match_dup 1))
+ (match_dup 2)))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))])]
+ ""
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "12")])
+
+(define_insn "*call_value_indirect_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX && reload_completed"
+ "b%T1l\;{l|lwz} 2,20(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "bl %z1\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*call_value_indirect_nonlocal_aix64_internal"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
+ (use (mem:DI (plus:DI (match_operand:DI 3 "register_operand" "b,b")
+ (const_int 8))))
+ (use (reg:DI 11))
+ (use (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
+ "#"
+ "&& reload_completed"
+ [(set (reg:DI 2)
+ (mem:DI (plus:DI (match_dup 3) (const_int 8))))
+ (parallel [(set (match_dup 0) (call (mem:SI (match_dup 1))
+ (match_dup 2)))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))])]
+ ""
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "12")])
+
+(define_insn "*call_value_indirect_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "c,*l"))
+ (match_operand 2 "" "g,g")))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX && reload_completed"
+ "b%T1l\;ld 2,40(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (clobber (reg:SI LR_REGNO))]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "bl %z1\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+;; A function pointer under System V is just a normal pointer:
+;; operands[0] is the function pointer;
+;; operands[1] is the stack size to clean up;
+;; operands[2] is the value FUNCTION_ARG returns for the VOID argument,
+;; which indicates how to set cr1.
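+;;
+;; Since no descriptor is involved, the indirect case below is a single
+;; register branch (bctrl or blrl, selected by %T0), with no TOC reload
+;; afterwards.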
+
+(define_insn "*call_indirect_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "register_operand" "c,*l,c,*l"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,O,n,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_DARWIN"
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ return "b%T0l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn_and_split "*call_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || (DEFAULT_ABI == ABI_V4
+ && (INTVAL (operands[2]) & CALL_LONG) == 0))"
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+#if TARGET_MACHO
+  return output_call (insn, operands, 0, 2);
+#else
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ gcc_assert (!TARGET_SECURE_PLT);
+ return "bl %z0@plt";
+ }
+ else
+ return "bl %z0";
+#endif
+}
+ "DEFAULT_ABI == ABI_V4
+ && TARGET_SECURE_PLT && flag_pic && !SYMBOL_REF_LOCAL_P (operands[0])
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ [(parallel [(call (mem:SI (match_dup 0))
+ (match_dup 1))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (reg:SI LR_REGNO))])]
+{
+ operands[3] = pic_offset_table_rtx;
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_nonlocal_sysv_secure<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (use (match_operand:SI 3 "register_operand" "r,r"))
+ (clobber (reg:SI LR_REGNO))]
+ "(DEFAULT_ABI == ABI_V4
+ && TARGET_SECURE_PLT && flag_pic && !SYMBOL_REF_LOCAL_P (operands[0])
+ && (INTVAL (operands[2]) & CALL_LONG) == 0)"
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ if (flag_pic == 2)
+ /* The magic 32768 offset here and in the other sysv call insns
+ corresponds to the offset of r30 in .got2, as given by LCTOC1.
+ See sysv4.h:toc_section. */
+ return "bl %z0+32768@plt";
+ else
+ return "bl %z0@plt";
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_indirect_nonlocal_sysv<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "register_operand" "c,*l,c,*l"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,O,n,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_DARWIN"
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ return "b%T1l";
+}
+ [(set_attr "type" "jmpreg,jmpreg,jmpreg,jmpreg")
+ (set_attr "length" "4,4,8,8")])
+
+(define_insn_and_split "*call_value_nonlocal_sysv<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (reg:SI LR_REGNO))]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || (DEFAULT_ABI == ABI_V4
+ && (INTVAL (operands[3]) & CALL_LONG) == 0))"
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+#if TARGET_MACHO
+  return output_call (insn, operands, 1, 3);
+#else
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ gcc_assert (!TARGET_SECURE_PLT);
+ return "bl %z1@plt";
+ }
+ else
+ return "bl %z1";
+#endif
+}
+ "DEFAULT_ABI == ABI_V4
+ && TARGET_SECURE_PLT && flag_pic && !SYMBOL_REF_LOCAL_P (operands[1])
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ [(parallel [(set (match_dup 0)
+ (call (mem:SI (match_dup 1))
+ (match_dup 2)))
+ (use (match_dup 3))
+ (use (match_dup 4))
+ (clobber (reg:SI LR_REGNO))])]
+{
+ operands[4] = pic_offset_table_rtx;
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*call_value_nonlocal_sysv_secure<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (match_operand:SI 4 "register_operand" "r,r"))
+ (clobber (reg:SI LR_REGNO))]
+ "(DEFAULT_ABI == ABI_V4
+ && TARGET_SECURE_PLT && flag_pic && !SYMBOL_REF_LOCAL_P (operands[1])
+ && (INTVAL (operands[3]) & CALL_LONG) == 0)"
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn ("crxor 6,6,6", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn ("creqv 6,6,6", operands);
+
+ if (flag_pic == 2)
+ return "bl %z1+32768@plt";
+ else
+ return "bl %z1@plt";
+}
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+;; Call subroutine returning any type.
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, const0_rtx, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; sibling call patterns
+(define_expand "sibcall"
+ [(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (reg:SI LR_REGNO))
+ (return)])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+
+ gcc_assert (GET_CODE (operands[0]) == MEM);
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
+
+ operands[0] = XEXP (operands[0], 0);
+}")
+
+;; This and similar patterns must be marked as using LR; otherwise
+;; dataflow will try to delete the store into it.  This is true
+;; even when the register actually jumped through is CTR and LR was
+;; only saved and restored around the PIC-setting BCL.
+(define_insn "*sibcall_local32"
+ [(call (mem:SI (match_operand:SI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "(INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z0@local\" : \"b %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_local64"
+ [(call (mem:SI (match_operand:DI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "TARGET_64BIT && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z0@local\" : \"b %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_value_local32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "(INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z1@local\" : \"b %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+
+(define_insn "*sibcall_value_local64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "TARGET_64BIT && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"b %z1@local\" : \"b %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+(define_insn "*sibcall_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "b %z0"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "symbol_ref_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "b %z0"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "b %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_value_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbol_ref_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "b %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*sibcall_nonlocal_sysv<mode>"
+ [(call (mem:SI (match_operand:P 0 "symbol_ref_operand" "s,s"))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "immediate_operand" "O,n"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || DEFAULT_ABI == ABI_V4)
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ gcc_assert (!TARGET_SECURE_PLT);
+ return \"b %z0@plt\";
+ }
+ else
+ return \"b %z0\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_expand "sibcall_value"
+ [(parallel [(set (match_operand 0 "register_operand" "")
+ (call (mem:SI (match_operand 1 "address_operand" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (use (reg:SI LR_REGNO))
+ (return)])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (MACHOPIC_INDIRECT)
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+
+ gcc_assert (GET_CODE (operands[1]) == MEM);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+
+ operands[1] = XEXP (operands[1], 0);
+}")
+
+(define_insn "*sibcall_value_nonlocal_sysv<mode>"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:P 1 "symbol_ref_operand" "s,s"))
+ (match_operand 2 "" "")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (use (reg:SI LR_REGNO))
+ (return)]
+ "(DEFAULT_ABI == ABI_DARWIN
+ || DEFAULT_ABI == ABI_V4)
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+  if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+    output_asm_insn (\"crxor 6,6,6\", operands);
+
+  else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ if (DEFAULT_ABI == ABI_V4 && flag_pic)
+ {
+ gcc_assert (!TARGET_SECURE_PLT);
+ return \"b %z1@plt\";
+ }
+ else
+ return \"b %z1\";
+}"
+ [(set_attr "type" "branch,branch")
+ (set_attr "length" "4,8")])
+
+(define_expand "sibcall_epilogue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_epilogue (TRUE);
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCK)]
+ ""
+ "")
+
+;; Compare insns are next. Note that the RS/6000 has two types of compares,
+;; signed & unsigned, and one type of branch.
+;;
+;; Start with the DEFINE_EXPANDs to generate the rtl for compares, scc
+;; insns, and branches.  We store the operands of compares until we see
+;; how the comparison is used.
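+;;
+;; For example, "if (a < b)" first expands cmpsi, which merely records a
+;; and b in rs6000_compare_op0/op1, and then blt, whose call to
+;; rs6000_emit_cbranch (LT, ...) emits the real compare/branch pair.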
+(define_expand "cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:GPR 0 "gpc_reg_operand" "")
+ (match_operand:GPR 1 "reg_or_short_operand" "")))]
+ ""
+ "
+{
+  /* Take care of the possibility that operands[1] might be negative but
+     this might be a logical (unsigned) operation.  A compare insn taking
+     a negative immediate doesn't exist in that case.  */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0)
+ operands[1] = force_reg (<MODE>mode, operands[1]);
+
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 0;
+ DONE;
+}")
+
+(define_expand "cmp<mode>"
+ [(set (cc0) (compare (match_operand:FP 0 "gpc_reg_operand" "")
+ (match_operand:FP 1 "gpc_reg_operand" "")))]
+ ""
+ "
+{
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 1;
+ DONE;
+}")
+
+(define_expand "beq"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (EQ, operands[0]); DONE; }")
+
+(define_expand "bne"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (NE, operands[0]); DONE; }")
+
+(define_expand "bge"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GE, operands[0]); DONE; }")
+
+(define_expand "bgt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GT, operands[0]); DONE; }")
+
+(define_expand "ble"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LE, operands[0]); DONE; }")
+
+(define_expand "blt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LT, operands[0]); DONE; }")
+
+(define_expand "bgeu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GEU, operands[0]); DONE; }")
+
+(define_expand "bgtu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GTU, operands[0]); DONE; }")
+
+(define_expand "bleu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LEU, operands[0]); DONE; }")
+
+(define_expand "bltu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LTU, operands[0]); DONE; }")
+
+(define_expand "bunordered"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNORDERED, operands[0]); DONE; }")
+
+(define_expand "bordered"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (ORDERED, operands[0]); DONE; }")
+
+(define_expand "buneq"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNEQ, operands[0]); DONE; }")
+
+(define_expand "bunge"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNGE, operands[0]); DONE; }")
+
+(define_expand "bungt"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNGT, operands[0]); DONE; }")
+
+(define_expand "bunle"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNLE, operands[0]); DONE; }")
+
+(define_expand "bunlt"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (UNLT, operands[0]); DONE; }")
+
+(define_expand "bltgt"
+ [(use (match_operand 0 "" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_cbranch (LTGT, operands[0]); DONE; }")
+
+;; For SNE, we would prefer that the xor/abs sequence be used for integers.
+;; For SEQ, likewise, except that comparisons with zero should be done
+;; with an scc insn.  However, due to the order in which combine sees the
+;; resulting insns, we must, in fact, allow SEQ for integers.  Fail in
+;; the cases we don't want to handle.
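+;;
+;; One such portable sequence for sne, computing r3 = (r4 != r5), is
+;;     xor   9,4,5        ; zero iff the operands are equal
+;;     addic 10,9,-1      ; sets CA iff r9 was nonzero
+;;     subfe 3,10,9       ; r3 = CA = (r4 != r5)
+;; (registers illustrative), avoiding the mfcr an scc insn would need.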
+(define_expand "seq"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (EQ, operands[0]); DONE; }")
+
+(define_expand "sne"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p)
+ FAIL;
+
+ rs6000_emit_sCOND (NE, operands[0]);
+ DONE;
+}")
+
+;; For integer A, A >= 0 is best done the portable way.
+(define_expand "sge"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (GE, operands[0]);
+ DONE;
+}")
+
+;; A > 0 is best done using the portable sequence, so fail in that case.
+(define_expand "sgt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (GT, operands[0]);
+ DONE;
+}")
+
+;; For integer A, A <= 0 is best done the portable way.
+(define_expand "sle"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (LE, operands[0]);
+ DONE;
+}")
+
+;; For integer A, A < 0 is best done the portable way.
+(define_expand "slt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p && rs6000_compare_op1 == const0_rtx)
+ FAIL;
+
+ rs6000_emit_sCOND (LT, operands[0]);
+ DONE;
+}")
+
+(define_expand "sgeu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (GEU, operands[0]); DONE; }")
+
+(define_expand "sgtu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (GTU, operands[0]); DONE; }")
+
+(define_expand "sleu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LEU, operands[0]); DONE; }")
+
+(define_expand "sltu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LTU, operands[0]); DONE; }")
+
+(define_expand "sunordered"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNORDERED, operands[0]); DONE; }")
+
+(define_expand "sordered"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (ORDERED, operands[0]); DONE; }")
+
+(define_expand "suneq"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNEQ, operands[0]); DONE; }")
+
+(define_expand "sunge"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNGE, operands[0]); DONE; }")
+
+(define_expand "sungt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNGT, operands[0]); DONE; }")
+
+(define_expand "sunle"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNLE, operands[0]); DONE; }")
+
+(define_expand "sunlt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (UNLT, operands[0]); DONE; }")
+
+(define_expand "sltgt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ "! (TARGET_HARD_FLOAT && !TARGET_FPRS)"
+ "{ rs6000_emit_sCOND (LTGT, operands[0]); DONE; }")
+
+(define_expand "stack_protect_set"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
+ rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+#endif
+ if (TARGET_64BIT)
+ emit_insn (gen_stack_protect_setdi (operands[0], operands[1]));
+ else
+ emit_insn (gen_stack_protect_setsi (operands[0], operands[1]));
+ DONE;
+})
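+
+;; When TARGET_THREAD_SSP_OFFSET is defined, the guard is read relative
+;; to the thread pointer (r2 for 32-bit, r13 for 64-bit), so the 32-bit
+;; set is, schematically (offsets illustrative),
+;;     lwz 0,OFFSET(2)    ; fetch the canary from the TLS block
+;;     stw 0,GUARD(1)     ; store it into this frame's guard slot
+;;     li  0,0            ; wipe the scratch register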
+
+(define_insn "stack_protect_setsi"
+ [(set (match_operand:SI 0 "memory_operand" "=m")
+ (unspec:SI [(match_operand:SI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:SI 2 "=&r") (const_int 0))]
+ "TARGET_32BIT"
+ "{l%U1%X1|lwz%U1%X1} %2,%1\;{st%U0%X0|stw%U0%X0} %2,%0\;{lil|li} %2,0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "stack_protect_setdi"
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (unspec:DI [(match_operand:DI 1 "memory_operand" "m")] UNSPEC_SP_SET))
+ (set (match_scratch:DI 2 "=&r") (const_int 0))]
+ "TARGET_64BIT"
+ "ld%U1%X1 %2,%1\;std%U0%X0 %2,%0\;{lil|li} %2,0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_expand "stack_protect_test"
+ [(match_operand 0 "memory_operand" "")
+ (match_operand 1 "memory_operand" "")
+ (match_operand 2 "" "")]
+ ""
+{
+#ifdef TARGET_THREAD_SSP_OFFSET
+ rtx tlsreg = gen_rtx_REG (Pmode, TARGET_64BIT ? 13 : 2);
+ rtx addr = gen_rtx_PLUS (Pmode, tlsreg, GEN_INT (TARGET_THREAD_SSP_OFFSET));
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+#endif
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, operands[1]),
+ UNSPEC_SP_TEST);
+ rs6000_compare_fp_p = 0;
+ emit_jump_insn (gen_beq (operands[2]));
+ DONE;
+})
+
+(define_insn "stack_protect_testsi"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
+ (unspec:CCEQ [(match_operand:SI 1 "memory_operand" "m,m")
+ (match_operand:SI 2 "memory_operand" "m,m")]
+ UNSPEC_SP_TEST))
+ (set (match_scratch:SI 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {l%U1%X1|lwz%U1%X1} %3,%1\;{l%U2%X2|lwz%U2%X2} %4,%2\;xor. %3,%3,%4\;{lil|li} %4,0
+ {l%U1%X1|lwz%U1%X1} %3,%1\;{l%U2%X2|lwz%U2%X2} %4,%2\;{cmpl|cmplw} %0,%3,%4\;{lil|li} %3,0\;{lil|li} %4,0"
+ [(set_attr "length" "16,20")])
+
+(define_insn "stack_protect_testdi"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=x,?y")
+ (unspec:CCEQ [(match_operand:DI 1 "memory_operand" "m,m")
+ (match_operand:DI 2 "memory_operand" "m,m")]
+ UNSPEC_SP_TEST))
+ (set (match_scratch:DI 4 "=r,r") (const_int 0))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ ld%U1%X1 %3,%1\;ld%U2%X2 %4,%2\;xor. %3,%3,%4\;{lil|li} %4,0
+ ld%U1%X1 %3,%1\;ld%U2%X2 %4,%2\;cmpld %0,%3,%4\;{lil|li} %3,0\;{lil|li} %4,0"
+ [(set_attr "length" "16,20")])
+
+
+;; Here are the actual compare insns.
+(define_insn "*cmp<mode>_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "reg_or_short_operand" "rI")))]
+ ""
+ "{cmp%I2|cmp<wd>%I2} %0,%1,%2"
+ [(set_attr "type" "cmp")])
+
+;; If we are comparing a register for equality with a large constant,
+;; we can do this with an XOR followed by a compare. But this is profitable
+;; only if the large constant is only used for the comparison (and in this
+;; case we already have a register to reuse as scratch).
+;;
+;; For 64-bit registers, we could only do so if the constant's bit 15 is clear:
+;; otherwise we'd need to XOR with FFFFFFFF????0000 which is not available.
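+;;
+;; Worked example: to test r5 == 0x12345678, the low 16 bits sign-extend
+;; to sextc = 0x5678, so xorv = 0x12345678 ^ 0x5678 = 0x12340000 and the
+;; pair becomes
+;;     xoris 0,5,0x1234   ; r0 = r5 ^ 0x12340000
+;;     cmpwi 7,0,0x5678   ; cr7: equal iff r5 was 0x12345678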
+
+(define_peephole2
+ [(set (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "logical_const_operand" ""))
+ (set (match_dup 0) (match_operator:SI 3 "boolean_or_operator"
+ [(match_dup 0)
+ (match_operand:SI 2 "logical_const_operand" "")]))
+ (set (match_operand:CC 4 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 5 "gpc_reg_operand" "")
+ (match_dup 0)))
+ (set (pc)
+ (if_then_else (match_operator 6 "equality_operator"
+ [(match_dup 4) (const_int 0)])
+ (match_operand 7 "" "")
+ (match_operand 8 "" "")))]
+ "peep2_reg_dead_p (3, operands[0])
+ && peep2_reg_dead_p (4, operands[4])"
+ [(set (match_dup 0) (xor:SI (match_dup 5) (match_dup 9)))
+ (set (match_dup 4) (compare:CC (match_dup 0) (match_dup 10)))
+ (set (pc) (if_then_else (match_dup 6) (match_dup 7) (match_dup 8)))]
+
+{
+ /* Get the constant we are comparing against, and see what it looks like
+ when sign-extended from 16 to 32 bits. Then see what constant we could
+ XOR with SEXTC to get the sign-extended value. */
+ rtx cnst = simplify_const_binary_operation (GET_CODE (operands[3]),
+ SImode,
+ operands[1], operands[2]);
+ HOST_WIDE_INT c = INTVAL (cnst);
+ HOST_WIDE_INT sextc = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT xorv = c ^ sextc;
+
+ operands[9] = GEN_INT (xorv);
+ operands[10] = GEN_INT (sextc);
+})
+
+(define_insn "*cmpsi_internal2"
+ [(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_u_short_operand" "rK")))]
+ ""
+ "{cmpl%I2|cmplw%I2} %0,%1,%b2"
+ [(set_attr "type" "cmp")])
+
+(define_insn "*cmpdi_internal2"
+ [(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_u_short_operand" "rK")))]
+ ""
+ "cmpld%I2 %0,%1,%b2"
+ [(set_attr "type" "cmp")])
+
+;; The following two insns don't exist as single insns, but if we provide
+;; them, we can swap an add and compare, which will enable us to overlap more
+;; of the required delay between a compare and branch. We generate code for
+;; them by splitting.
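+;;
+;; That is, the splitters below turn the fused patterns back into
+;;     cmpwi 0,4,K1       ; the compare issues first...
+;;     addi  3,4,K2       ; ...and the add fills the compare->branch gap
+;; (K1, K2 and the registers are illustrative).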
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "short_cint_operand" "i")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "i")))]
+ ""
+ "#"
+ [(set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CCUNS 3 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "u_short_cint_operand" "i")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "i")))]
+ ""
+ "#"
+ [(set_attr "length" "8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "")))]
+ ""
+ [(set (match_dup 3) (compare:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 4)))])
+
+(define_split
+ [(set (match_operand:CCUNS 3 "cc_reg_operand" "")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "u_short_cint_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "")))]
+ ""
+ [(set (match_dup 3) (compare:CCUNS (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 4)))])
+
+(define_insn "*cmpsf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT"
+ "fcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "*cmpdf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT"
+ "fcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+;; Only need to compare second words if first words equal
+(define_insn "*cmptf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
+ (match_operand:TF 2 "gpc_reg_operand" "f")))]
+ "!TARGET_IEEEQUAD && !TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_LONG_DOUBLE_128"
+ "fcmpu %0,%1,%2\;bne %0,$+8\;fcmpu %0,%L1,%L2"
+ [(set_attr "type" "fpcompare")
+ (set_attr "length" "12")])
+
+(define_insn_and_split "*cmptf_internal2"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
+ (match_operand:TF 2 "gpc_reg_operand" "f")))
+ (clobber (match_scratch:DF 3 "=f"))
+ (clobber (match_scratch:DF 4 "=f"))
+ (clobber (match_scratch:DF 5 "=f"))
+ (clobber (match_scratch:DF 6 "=f"))
+ (clobber (match_scratch:DF 7 "=f"))
+ (clobber (match_scratch:DF 8 "=f"))
+ (clobber (match_scratch:DF 9 "=f"))
+ (clobber (match_scratch:DF 10 "=f"))]
+ "!TARGET_IEEEQUAD && TARGET_XL_COMPAT
+ && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3) (match_dup 13))
+ (set (match_dup 4) (match_dup 14))
+ (set (match_dup 9) (abs:DF (match_dup 5)))
+ (set (match_dup 0) (compare:CCFP (match_dup 9) (match_dup 3)))
+ (set (pc) (if_then_else (ne (match_dup 0) (const_int 0))
+ (label_ref (match_dup 11))
+ (pc)))
+ (set (match_dup 0) (compare:CCFP (match_dup 5) (match_dup 7)))
+ (set (pc) (label_ref (match_dup 12)))
+ (match_dup 11)
+ (set (match_dup 10) (minus:DF (match_dup 5) (match_dup 7)))
+ (set (match_dup 9) (minus:DF (match_dup 6) (match_dup 8)))
+ (set (match_dup 9) (plus:DF (match_dup 10) (match_dup 9)))
+ (set (match_dup 0) (compare:CCFP (match_dup 7) (match_dup 4)))
+ (match_dup 12)]
+{
+ REAL_VALUE_TYPE rv;
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+
+ operands[5] = simplify_gen_subreg (DFmode, operands[1], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[1], TFmode, lo_word);
+ operands[7] = simplify_gen_subreg (DFmode, operands[2], TFmode, hi_word);
+ operands[8] = simplify_gen_subreg (DFmode, operands[2], TFmode, lo_word);
+ operands[11] = gen_label_rtx ();
+ operands[12] = gen_label_rtx ();
+ real_inf (&rv);
+ operands[13] = force_const_mem (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (rv, DFmode));
+ operands[14] = force_const_mem (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (dconst0,
+ DFmode));
+ if (TARGET_TOC)
+ {
+ operands[13] = gen_const_mem (DFmode,
+ create_TOC_reference (XEXP (operands[13], 0)));
+ operands[14] = gen_const_mem (DFmode,
+ create_TOC_reference (XEXP (operands[14], 0)));
+ set_mem_alias_set (operands[13], get_TOC_alias_set ());
+ set_mem_alias_set (operands[14], get_TOC_alias_set ());
+ }
+})
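+
+;; The split above implements roughly the following control flow
+;; (illustrative C; hi()/lo() are hypothetical accessors, and +Inf and
+;; 0.0 are the two constants loaded from memory/TOC):
+;;   if (fabs (hi (a)) == +Inf)     /* infinite: the high part decides */
+;;     cr = fcmpu (hi (a), hi (b));
+;;   else                           /* finite or NaN: fold both halves */
+;;     cr = fcmpu ((hi (a) - hi (b)) + (lo (a) - lo (b)), 0.0);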
+
+;; Now we have the scc insns. We can do some combinations because of the
+;; way the machine works.
+;;
+;; Note that this would probably be faster if we could put an insn between
+;; the mfcr and rlinm, but that is tricky.  Let's leave it for now.  In most
+;; cases the insns below which don't use an intermediate CR field will
+;; be used instead.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ ""
+ "mfcr %0%Q2\;{rlinm|rlwinm} %0,%0,%J1,1"
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "8")])
+
+;; Same as above, but get the GT bit.
+(define_insn "move_from_CR_gt_bit"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_GT))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "mfcr %0\;{rlinm|rlwinm} %0,%0,%D1,31,31"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "8")])
+
+;; Same as above, but get the OV/ORDERED bit.
+(define_insn "move_from_CR_ov_bit"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand 1 "cc_reg_operand" "y")] UNSPEC_MV_CR_OV))]
+ "TARGET_ISEL"
+ "mfcr %0\;{rlinm|rlwinm} %0,%0,%t1,1"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "TARGET_POWERPC64"
+ "mfcr %0%Q2\;{rlinm|rlwinm} %0,%0,%J1,1"
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (const_int 0)))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+ "TARGET_32BIT"
+ "@
+ mfcr %3%Q2\;{rlinm.|rlwinm.} %3,%3,%J1,1
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "8,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "")
+ (const_int 0)])
+ (const_int 0)))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ ""
+ "*
+{
+ int is_bit = ccr_bit (operands[1], 1);
+ int put_bit = 31 - (INTVAL (operands[3]) & 31);
+ int count;
+
+ if (is_bit >= put_bit)
+ count = is_bit - put_bit;
+ else
+ count = 32 - (put_bit - is_bit);
+
+ operands[4] = GEN_INT (count);
+ operands[5] = GEN_INT (put_bit);
+
+ return \"mfcr %0%Q2\;{rlinm|rlwinm} %0,%0,%4,%5,%5\";
+}"
+ [(set (attr "type")
+ (cond [(ne (symbol_ref "TARGET_MFCRF") (const_int 0))
+ (const_string "mfcrf")
+ ]
+ (const_string "mfcr")))
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 4 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))]
+ ""
+ "*
+{
+ int is_bit = ccr_bit (operands[1], 1);
+ int put_bit = 31 - (INTVAL (operands[3]) & 31);
+ int count;
+
+ /* Force a split for the non-cr0 alternative.  */
+ if (which_alternative == 1)
+ return \"#\";
+
+ if (is_bit >= put_bit)
+ count = is_bit - put_bit;
+ else
+ count = 32 - (put_bit - is_bit);
+
+ operands[5] = GEN_INT (count);
+ operands[6] = GEN_INT (put_bit);
+
+ return \"mfcr %4%Q2\;{rlinm.|rlwinm.} %4,%4,%5,%6,%6\";
+}"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "8,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))]
+ "reload_completed"
+ [(set (match_dup 4)
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; There is a 3 cycle delay between consecutive mfcr instructions
+;; so it is useful to combine 2 scc instructions to use only one mfcr.
+
+(define_peephole
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=r")
+ (match_operator:SI 4 "scc_comparison_operator"
+ [(match_operand 5 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "REGNO (operands[2]) != REGNO (operands[5])"
+ "mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "12")])
+
+(define_peephole
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))
+ (set (match_operand:DI 3 "gpc_reg_operand" "=r")
+ (match_operator:DI 4 "scc_comparison_operator"
+ [(match_operand 5 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "TARGET_POWERPC64 && REGNO (operands[2]) != REGNO (operands[5])"
+ "mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
+ [(set_attr "type" "mfcr")
+ (set_attr "length" "12")])
+
+;; There are some scc insns that can be done directly, without a compare.
+;; These are faster because they avoid communication between the FXU and
+;; branch units.  In fact, we will be replacing all of the
+;; integer scc insns here or in the portable methods in emit_store_flag.
+;;
+;; Also support (neg (scc ..)) since that construct is used to replace
+;; branches, (plus (scc ..) ..) since that construct is common and
+;; takes no more insns than scc, and (and (neg (scc ..)) ..) in the
+;; cases where it is no more expensive than (neg (scc ..)).
+
+;; Have reload force a constant into a register for the simple insns that
+;; otherwise won't accept constants. We do this because it is faster than
+;; the cmp/mfcr sequence we would otherwise generate.
+
+(define_mode_attr scc_eq_op2 [(SI "rKLI")
+ (DI "rKJI")])
+
+(define_insn_and_split "*eq<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (eq:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")
+ (match_operand:GPR 2 "scc_eq_operand" "<scc_eq_op2>")))]
+ "!TARGET_POWER"
+ "#"
+ "!TARGET_POWER"
+ [(set (match_dup 0)
+ (clz:GPR (match_dup 3)))
+ (set (match_dup 0)
+ (lshiftrt:GPR (match_dup 0) (match_dup 4)))]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[3] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[3] = operands[1];
+
+ operands[4] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
+ })
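+
+;; The split above is the count-leading-zeros trick: cntlz returns the
+;; word size only for a zero operand, so shifting right by log2 of the
+;; word size yields exactly (x == y).  A sketch in C for SImode, where
+;; clz32 is a hypothetical helper that returns 32 for 0:
+;;   t = x ^ y;            /* or x + (-y) if y is not a logical constant */
+;;   r = clz32 (t) >> 5;   /* 1 iff t == 0, since 5 == log2 (32) */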
+
+(define_insn_and_split "*eq<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=y")
+ (compare:CC
+ (eq:P (match_operand:P 1 "gpc_reg_operand" "=r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (eq:P (match_dup 1) (match_dup 2)))]
+ "!TARGET_POWER && optimize_size"
+ "#"
+ "!TARGET_POWER && optimize_size"
+ [(set (match_dup 0)
+ (clz:P (match_dup 4)))
+ (parallel [(set (match_dup 3)
+ (compare:CC (lshiftrt:P (match_dup 0) (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 0)
+ (lshiftrt:P (match_dup 0) (match_dup 5)))])]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[4] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[4],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[4] = operands[1];
+
+ operands[5] = GEN_INT (exact_log2 (GET_MODE_BITSIZE (<MODE>mode)));
+ })
+
+(define_insn "*eqsi_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I")))
+ (clobber (match_scratch:SI 3 "=r,&r,r,r,r"))]
+ "TARGET_POWER"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {sfi|subfic} %3,%1,0\;{ae|adde} %0,%3,%1
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0"
+ [(set_attr "type" "three,two,three,three,three")
+ (set_attr "length" "12,8,12,12,12")])
+
+;; We have insns of the form shown by the first define_insn below. If
+;; there is something inside the comparison operation, we must split it.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "" "")
+ (match_operand:SI 3
+ "reg_or_cint_operand" "")])
+ (match_operand:SI 4 "gpc_reg_operand" "")))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "! gpc_reg_operand (operands[2], SImode)"
+ [(set (match_dup 5) (match_dup 2))
+ (set (match_dup 0) (plus:SI (match_op_dup 1 [(match_dup 5) (match_dup 3)])
+ (match_dup 4)))])
+
+(define_insn "*plus_eqsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r")
+ (plus:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r")))]
+ "TARGET_32BIT"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {sfi|subfic} %0,%1,0\;{aze|addze} %0,%3
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3"
+ [(set_attr "type" "three,two,three,three,three")
+ (set_attr "length" "12,8,12,12,12")])
+
+(define_insn "*compare_plus_eqsi"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r"))]
+ "TARGET_32BIT && optimize_size"
+ "@
+ xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {sfi|subfic} %4,%1,0\;{aze.|addze.} %4,%3
+ {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "scc_eq_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && optimize_size && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (eq:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_eqsi_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "scc_eq_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && optimize_size"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %0,%1,0\;{aze.|addze.} %0,%3
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "scc_eq_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && optimize_size && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*neg_eq0<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (const_int 0))))]
+ ""
+ "{ai|addic} %0,%1,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn_and_split "*neg_eq<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (eq:P (match_operand:P 1 "gpc_reg_operand" "%r")
+ (match_operand:P 2 "scc_eq_operand" "<scc_eq_op2>"))))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (eq:P (match_dup 3) (const_int 0))))]
+ {
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) != 0)
+ {
+ /* Use output operand as intermediate. */
+ operands[3] = operands[0];
+
+ if (logical_operand (operands[2], <MODE>mode))
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_XOR (<MODE>mode,
+ operands[1], operands[2])));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, operands[3],
+ gen_rtx_PLUS (<MODE>mode, operands[1],
+ negate_rtx (<MODE>mode,
+ operands[2]))));
+ }
+ else
+ operands[3] = operands[1];
+ })
+
+;; Simplify (ne X (const_int 0)) on the PowerPC.  There is no need to on
+;; the Power, since its nabs/sr sequence is just as fast.
+(define_insn "*ne0si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (lshiftrt:SI (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (const_int 31)))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "! TARGET_POWER && TARGET_32BIT && !TARGET_ISEL"
+ "{ai|addic} %2,%1,-1\;{sfe|subfe} %0,%2,%1"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*ne0di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (lshiftrt:DI (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
+ (const_int 63)))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_64BIT"
+ "addic %2,%1,-1\;subfe %0,%2,%1"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+;; This is what (plus (ne X (const_int 0)) Y) looks like.
+(define_insn "*plus_ne0si"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "TARGET_32BIT"
+ "{ai|addic} %3,%1,-1\;{aze|addze} %0,%2"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*plus_ne0di"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ "TARGET_64BIT"
+ "addic %3,%1,-1\;addze %0,%2"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn "*compare_plus_ne0si"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))
+ (clobber (match_scratch:SI 4 "=X,&r"))]
+ "TARGET_32BIT"
+ "@
+ {ai|addic} %3,%1,-1\;{aze.|addze.} %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1)))
+ (const_int 31))
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*compare_plus_ne0di"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ addic %3,%1,-1\;addze. %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1)))
+ (const_int 63))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_ne0si_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {ai|addic} %3,%1,-1\;{aze.|addze.} %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_ne0di_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ addic %3,%1,-1\;addze. %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O")))
+ (clobber (match_scratch:SI 3 "=r,X"))]
+ "TARGET_POWER"
+ "@
+ doz %3,%2,%1\;{sfi|subfic} %0,%3,0\;{ae|adde} %0,%0,%3
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,X,r,X"))]
+ "TARGET_POWER"
+ "@
+ doz %3,%2,%1\;{sfi|subfic} %0,%3,0\;{ae.|adde.} %0,%0,%3
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{sri.|srwi.} %0,%0,31
+ #
+ #"
+ [(set_attr "type" "compare,delayed_compare,compare,delayed_compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3
+ {srai|srawi} %0,%1,31\;{sf|subfc} %0,%1,%0\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {srai|srawi} %4,%1,31\;{sf|subfc} %4,%1,%4\;{aze.|addze.} %4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (le:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ {srai|srawi} %0,%1,31\;{sf|subfc} %0,%1,%0\;{aze.|addze.} %0,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (neg:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O"))))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn "*leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*leu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (leu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (leu:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (leu:P (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))
+ (match_operand:P 3 "gpc_reg_operand" "r")))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{aze|addze} %0,%3"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*neg_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*and_neg_leu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (and:P (neg:P
+ (leu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))
+ (match_operand:P 3 "gpc_reg_operand" "r")))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;andc. %0,%3,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;nabs %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;nabs %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (lt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{ai|addic} %4,%4,-1\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;nabs %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn_and_split "*ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (neg:P (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*ltu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (ltu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 3)
+ (compare:CC (neg:P (match_dup 0)) (const_int 0)))
+ (set (match_dup 0) (neg:P (match_dup 0)))])]
+ "")
+
+(define_insn_and_split "*plus_ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,r")
+ (plus:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:P 3 "reg_or_short_operand" "rI,rI")))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*plus_ltu<mode>_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:P (ltu:P (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (ltu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 4)
+ (compare:CC (minus:P (match_dup 3) (match_dup 0))
+ (const_int 0)))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))])]
+ "")
+
+(define_insn "*neg_ltu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (ltu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))
+ (clobber (match_scratch:SI 3 "=r"))]
+ "TARGET_POWER"
+ "doz%I2 %3,%1,%2\;{sfi|subfic} %0,%3,0\;{ae|adde} %0,%0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %3,%1,%2\;{sfi|subfic} %0,%3,0\;{ae.|adde.} %0,%0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;{sfi|subfic} %0,%0,0\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "12")])
+
+(define_insn "*geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*geu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r,r,r")
+ (geu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "")
+ (match_operand:P 2 "reg_or_neg_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (geu:P (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (geu:P (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,&r")
+ (plus:P (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r")))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{aze|addze} %0,%3
+ {ai|addic} %0,%1,%n2\;{aze|addze} %0,%3"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %4,%2,%1\;{aze.|addze.} %4,%3
+ {ai|addic} %4,%1,%n2\;{aze.|addze.} %4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,8,12,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %0,%2,%1\;{aze.|addze.} %0,%3
+ {ai|addic} %0,%1,%n2\;{aze.|addze.} %0,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,8,12,12")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*neg_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (neg:P (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "r,I"))))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0
+ {sfi|subfic} %0,%1,-1\;{a%I2|add%I2c} %0,%0,%2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn "*and_neg_geu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r,&r")
+ (and:P (neg:P
+ (geu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_neg_short_operand" "r,P")))
+ (match_operand:P 3 "gpc_reg_operand" "r,r")))]
+ ""
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;andc %0,%3,%0"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT"
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;andc. %0,%3,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;andc. %0,%3,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r")))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;nabs %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;nabs %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (gt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*plus_gt0<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (gt:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (const_int 0))
+ (match_operand:P 2 "gpc_reg_operand" "r")))]
+ ""
+ "{a|addc} %0,%1,%1\;{sfe|subfe} %0,%1,%0\;{aze|addze} %0,%2"
+ [(set_attr "type" "three")
+ (set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_32BIT"
+ "@
+ {a|addc} %3,%1,%1\;{sfe|subfe} %3,%1,%3\;{aze.|addze.} %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 3)
+ (plus:SI (gt:SI (match_dup 1) (const_int 0))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_64BIT"
+ "@
+ addc %3,%1,%1\;subfe %3,%1,%3\;addze. %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (gt:DI (match_dup 1) (const_int 0))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_32BIT"
+ "@
+ {a|addc} %0,%1,%1\;{sfe|subfe} %0,%1,%0\;{aze.|addze.} %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_32BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_64BIT"
+ "@
+ addc %0,%1,%1\;subfe %0,%1,%0\;addze. %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_micro_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))]
+ "TARGET_64BIT && reload_completed"
+ [(set (match_dup 0)
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{ai|addic} %4,%4,-1\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=&r,&r")
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r"))))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;nabs %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+(define_insn_and_split "*gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (neg:P (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*gtu<mode>_compare"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gtu:P (match_operand:P 1 "gpc_reg_operand" "r,r")
+ (match_operand:P 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=r,r")
+ (gtu:P (match_dup 1) (match_dup 2)))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 3)
+ (compare:CC (neg:P (match_dup 0)) (const_int 0)))
+ (set (match_dup 0) (neg:P (match_dup 0)))])]
+ "")
+
+(define_insn_and_split "*plus_gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=&r")
+ (plus:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))
+ (match_operand:P 3 "reg_or_short_operand" "rI")))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))]
+ "")
+
+(define_insn_and_split "*plus_gtu<mode>_compare"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:P 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:P 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:P 0 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (plus:P (gtu:P (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ ""
+ "#"
+ "&& !reg_overlap_mentioned_p (operands[0], operands[3])"
+ [(set (match_dup 0) (neg:P (gtu:P (match_dup 1) (match_dup 2))))
+ (parallel [(set (match_dup 4)
+ (compare:CC (minus:P (match_dup 3) (match_dup 0))
+ (const_int 0)))
+ (set (match_dup 0) (minus:P (match_dup 3) (match_dup 0)))])]
+ "")
+
+(define_insn "*neg_gtu<mode>"
+ [(set (match_operand:P 0 "gpc_reg_operand" "=r")
+ (neg:P (gtu:P (match_operand:P 1 "gpc_reg_operand" "r")
+ (match_operand:P 2 "reg_or_short_operand" "rI"))))]
+ ""
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "type" "two")
+ (set_attr "length" "8")])
+
+
+;; Define both directions of branch and return. If we need a reload
+;; register, we'd rather use CR0 since it is much easier to copy a
+;; register CC value there.
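+;; output_cbranch (in rs6000.c) also handles targets beyond the reach
+;; of a conditional branch, inverting the test and branching over an
+;; unconditional "b"; its third argument says whether the sense of the
+;; comparison is already reversed, as in the (pc)/(label_ref) variants
+;; below.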
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], \"%l0\", 0, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_comparison_operator"
+ [(match_operand 1
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (return)
+ (pc)))]
+ "direct_return ()"
+ "*
+{
+ return output_cbranch (operands[0], NULL, 0, insn);
+}"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], \"%l0\", 1, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_comparison_operator"
+ [(match_operand 1
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (pc)
+ (return)))]
+ "direct_return ()"
+ "*
+{
+ return output_cbranch (operands[0], NULL, 1, insn);
+}"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "4")])
+
+;; Logic on condition register values.
+
+; This pattern matches things like
+; (set (reg:CCEQ 68) (compare:CCEQ (ior:SI (gt:SI (reg:CCFP 68) (const_int 0))
+; (eq:SI (reg:CCFP 68) (const_int 0)))
+; (const_int 1)))
+; which are generated by the branch logic.
+; Prefer destructive operations where BT = BB (for crXX BT,BA,BB)
+
+(define_insn "*cceq_ior_compare"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
+ (compare:CCEQ (match_operator:SI 1 "boolean_operator"
+ [(match_operator:SI 2
+ "branch_positive_comparison_operator"
+ [(match_operand 3
+ "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (match_operator:SI 4
+ "branch_positive_comparison_operator"
+ [(match_operand 5
+ "cc_reg_operand" "0,y")
+ (const_int 0)])])
+ (const_int 1)))]
+ ""
+ "cr%q1 %E0,%j2,%j4"
+ [(set_attr "type" "cr_logical,delayed_cr")])
+
+; Why is the constant -1 here, but 1 in the previous pattern?
+; Because ~1 has all but the low bit set.
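+; Concretely, for x and y in {0,1}: ~x is -1 or -2, so (~x) | y equals
+; -1 exactly when the boolean !x | y is true.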
+(define_insn ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
+ (compare:CCEQ (match_operator:SI 1 "boolean_or_operator"
+ [(not:SI (match_operator:SI 2
+ "branch_positive_comparison_operator"
+ [(match_operand 3
+ "cc_reg_operand" "y,y")
+ (const_int 0)]))
+ (match_operator:SI 4
+ "branch_positive_comparison_operator"
+ [(match_operand 5
+ "cc_reg_operand" "0,y")
+ (const_int 0)])])
+ (const_int -1)))]
+ ""
+ "cr%q1 %E0,%j2,%j4"
+ [(set_attr "type" "cr_logical,delayed_cr")])
+
+(define_insn "*cceq_rev_compare"
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y,?y")
+ (compare:CCEQ (match_operator:SI 1
+ "branch_positive_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "0,y")
+ (const_int 0)])
+ (const_int 0)))]
+ ""
+ "{crnor %E0,%j1,%j1|crnot %E0,%j1}"
+ [(set_attr "type" "cr_logical,delayed_cr")])
+
+;; If we are comparing the result of two comparisons, this can be done
+;; using creqv or crxor.
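+;; The split below rewrites both tests as positive SImode comparisons;
+;; if their polarities matched it NOTs one side and compares the XOR
+;; against -1 (the creqv form), otherwise against 1 (the crxor form).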
+
+(define_insn_and_split ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
+ (compare:CCEQ (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operator 3 "branch_comparison_operator"
+ [(match_operand 4 "cc_reg_operand" "y")
+ (const_int 0)])))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (compare:CCEQ (xor:SI (match_dup 1) (match_dup 3))
+ (match_dup 5)))]
+ "
+{
+ int positive_1, positive_2;
+
+ positive_1 = branch_positive_comparison_operator (operands[1],
+ GET_MODE (operands[1]));
+ positive_2 = branch_positive_comparison_operator (operands[3],
+ GET_MODE (operands[3]));
+
+ if (! positive_1)
+ operands[1] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[2]),
+ GET_CODE (operands[1])),
+ SImode,
+ operands[2], const0_rtx);
+ else if (GET_MODE (operands[1]) != SImode)
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]), SImode,
+ operands[2], const0_rtx);
+
+ if (! positive_2)
+ operands[3] = gen_rtx_fmt_ee (rs6000_reverse_condition (GET_MODE (operands[4]),
+ GET_CODE (operands[3])),
+ SImode,
+ operands[4], const0_rtx);
+ else if (GET_MODE (operands[3]) != SImode)
+ operands[3] = gen_rtx_fmt_ee (GET_CODE (operands[3]), SImode,
+ operands[4], const0_rtx);
+
+ if (positive_1 == positive_2)
+ {
+ operands[1] = gen_rtx_NOT (SImode, operands[1]);
+ operands[5] = constm1_rtx;
+ }
+ else
+ {
+ operands[5] = const1_rtx;
+ }
+}")
+
+;; Unconditional branch and return.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "b %l0"
+ [(set_attr "type" "branch")])
+
+(define_insn "return"
+ [(return)]
+ "direct_return ()"
+ "{br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "register_operand" ""))])
+
+(define_insn "*indirect_jump<mode>"
+ [(set (pc) (match_operand:P 0 "register_operand" "c,*l"))]
+ ""
+ "@
+ bctr
+ {br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+;; Table jump for switch statements:
+(define_expand "tablejump"
+ [(use (match_operand 0 "" ""))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "
+{
+ if (TARGET_32BIT)
+ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
+ else
+ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_expand "tablejumpsi"
+ [(set (match_dup 3)
+ (plus:SI (match_operand:SI 0 "" "")
+ (match_dup 2)))
+ (parallel [(set (pc) (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_32BIT"
+ "
+{ operands[0] = force_reg (SImode, operands[0]);
+ operands[2] = force_reg (SImode, gen_rtx_LABEL_REF (SImode, operands[1]));
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "tablejumpdi"
+ [(set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 0 "lwa_operand" "")))
+ (set (match_dup 3)
+ (plus:DI (match_dup 4)
+ (match_dup 2)))
+ (parallel [(set (pc) (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_64BIT"
+ "
+{ operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (DImode, operands[1]));
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+}")
+
+(define_insn "*tablejump<mode>_internal1"
+ [(set (pc)
+ (match_operand:P 0 "register_operand" "c,*l"))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "@
+ bctr
+ {br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "{cror 0,0,0|nop}")
+
+;; Define the subtract-one-and-jump insns, starting with the template
+;; so loop.c knows what to generate.
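+;; When the CTR register can be allocated, the insns below collapse to
+;; a single "{bdn|bdnz} label" (decrement CTR, branch if nonzero) at
+;; the bottom of the loop.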
+
+(define_expand "doloop_end"
+ [(use (match_operand 0 "" "")) ; loop pseudo
+ (use (match_operand 1 "" "")) ; iterations; zero if unknown
+ (use (match_operand 2 "" "")) ; max iterations
+ (use (match_operand 3 "" "")) ; loop level
+ (use (match_operand 4 "" ""))] ; label
+ ""
+ "
+{
+ /* Only use this on innermost loops. */
+ if (INTVAL (operands[3]) > 1)
+ FAIL;
+ if (TARGET_64BIT)
+ {
+ if (GET_MODE (operands[0]) != DImode)
+ FAIL;
+ emit_jump_insn (gen_ctrdi (operands[0], operands[4]));
+ }
+ else
+ {
+ if (GET_MODE (operands[0]) != SImode)
+ FAIL;
+ emit_jump_insn (gen_ctrsi (operands[0], operands[4]));
+ }
+ DONE;
+}")
+
+(define_expand "ctr<mode>"
+ [(parallel [(set (pc)
+ (if_then_else (ne (match_operand:P 0 "register_operand" "")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:P (match_dup 0)
+ (const_int -1)))
+ (clobber (match_scratch:CC 2 ""))
+ (clobber (match_scratch:P 3 ""))])]
+ ""
+ "")
+
+;; We need to be able to do this for any operand, including MEM, or we
+;; will cause reload to blow up since we don't allow output reloads on
+;; JUMP_INSNs.
+;; For the length attribute to be calculated correctly, the
+;; label MUST be operand 0.
+
+(define_insn "*ctr<mode>_internal1"
+ [(set (pc)
+ (if_then_else (ne (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+(define_insn "*ctr<mode>_internal2"
+ [(set (pc)
+ (if_then_else (ne (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+;; Similar but use EQ
+
+(define_insn "*ctr<mode>_internal5"
+ [(set (pc)
+ (if_then_else (eq (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+(define_insn "*ctr<mode>_internal6"
+ [(set (pc)
+ (if_then_else (eq (match_operand:P 1 "register_operand" "c,*r,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:P 2 "nonimmediate_operand" "=1,*r,m,*q*c*l")
+ (plus:P (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x,&x"))
+ (clobber (match_scratch:P 4 "=X,X,&r,r"))]
+ ""
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16,16")])
+
+;; Now the splitters if we could not allocate the CTR register
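+;; Here the decrement becomes a recording add of -1 (setting a CR
+;; field) so the branch can test that CR bit instead of the CTR.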
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:P 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:P 0 "gpc_reg_operand" "")
+ (plus:P (match_dup 1) (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:P 4 ""))]
+ "reload_completed"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:P (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0)
+ (plus:P (match_dup 1)
+ (const_int -1)))])
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode,
+ operands[3], const0_rtx); }")
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:P 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:P 0 "nonimmediate_operand" "")
+ (plus:P (match_dup 1) (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:P 4 ""))]
+ "reload_completed && ! gpc_reg_operand (operands[0], SImode)"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:P (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 4)
+ (plus:P (match_dup 1)
+ (const_int -1)))])
+ (set (match_dup 0)
+ (match_dup 4))
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[2]), VOIDmode,
+ operands[3], const0_rtx); }")
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+ "{t 31,0,0|trap}"
+ [(set_attr "type" "trap")])
+
+(define_expand "conditional_trap"
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_dup 2) (match_dup 3)])
+ (match_operand 1 "const_int_operand" ""))]
+ ""
+ "if (rs6000_compare_fp_p || operands[1] != const0_rtx) FAIL;
+ operands[2] = rs6000_compare_op0;
+ operands[3] = rs6000_compare_op1;")
+
+(define_insn ""
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_operand:GPR 1 "register_operand" "r")
+ (match_operand:GPR 2 "reg_or_short_operand" "rI")])
+ (const_int 0))]
+ ""
+ "{t|t<wd>}%V0%I2 %1,%2"
+ [(set_attr "type" "trap")])
+
+;; Insns related to generating the function prologue and epilogue.
+
+(define_expand "prologue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_prologue ();
+ DONE;
+}")
+
+(define_insn "*movesi_from_cr_one"
+ [(match_parallel 0 "mfcr_operation"
+ [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:CC 2 "cc_reg_operand" "y")
+ (match_operand 3 "immediate_operand" "n")]
+ UNSPEC_MOVESI_FROM_CR))])]
+ "TARGET_MFCRF"
+ "*
+{
+ int mask = 0;
+ int i;
+ for (i = 0; i < XVECLEN (operands[0], 0); i++)
+ {
+ mask = INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
+ operands[4] = GEN_INT (mask);
+ output_asm_insn (\"mfcr %1,%4\", operands);
+ }
+ return \"\";
+}"
+ [(set_attr "type" "mfcrf")])
+
+(define_insn "movesi_from_cr"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(reg:CC CR0_REGNO) (reg:CC CR1_REGNO)
+ (reg:CC CR2_REGNO) (reg:CC CR3_REGNO)
+ (reg:CC CR4_REGNO) (reg:CC CR5_REGNO)
+ (reg:CC CR6_REGNO) (reg:CC CR7_REGNO)]
+ UNSPEC_MOVESI_FROM_CR))]
+ ""
+ "mfcr %0"
+ [(set_attr "type" "mfcr")])
+
+(define_insn "*stmw"
+ [(match_parallel 0 "stmw_operation"
+ [(set (match_operand:SI 1 "memory_operand" "=m")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))])]
+ "TARGET_MULTIPLE"
+ "{stm|stmw} %2,%1"
+ [(set_attr "type" "store_ux")])
+
+(define_insn "*save_gpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+ (use (match_operand:P 2 "gpc_reg_operand" "r"))
+ (set (match_operand:P 3 "memory_operand" "=m")
+ (match_operand:P 4 "gpc_reg_operand" "r"))])]
+ ""
+ "bl %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*save_fpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+ (use (match_operand:P 2 "gpc_reg_operand" "r"))
+ (set (match_operand:DF 3 "memory_operand" "=m")
+ (match_operand:DF 4 "gpc_reg_operand" "f"))])]
+ ""
+ "bl %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+; These are to explain that changes to the stack pointer should
+; not be moved over stores to stack memory.
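+; The insn emits no code (length 0); the UNSPEC both reads and writes
+; the BLK memory, which is enough of a dependence to keep frame stores
+; from being reordered across the stack-pointer update.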
+(define_insn "stack_tie"
+ [(set (match_operand:BLK 0 "memory_operand" "+m")
+ (unspec:BLK [(match_dup 0)] UNSPEC_TIE))]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+
+(define_expand "epilogue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_epilogue (FALSE);
+ DONE;
+}")
+
+; On some processors, doing the mtcrf one CC register at a time is
+; faster (like on the 604e). On others, doing them all at once is
+; faster; for instance, on the 601 and 750.
+
+(define_expand "movsi_to_cr_one"
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 2)] UNSPEC_MOVESI_TO_CR))]
+ ""
+ "operands[2] = GEN_INT (1 << (75 - REGNO (operands[0])));")
+
+(define_insn "*movsi_to_cr"
+ [(match_parallel 0 "mtcrf_operation"
+ [(set (match_operand:CC 1 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand 3 "immediate_operand" "n")]
+ UNSPEC_MOVESI_TO_CR))])]
+ ""
+ "*
+{
+ int mask = 0;
+ int i;
+ for (i = 0; i < XVECLEN (operands[0], 0); i++)
+ mask |= INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
+ operands[4] = GEN_INT (mask);
+ return \"mtcrf %4,%2\";
+}"
+ [(set_attr "type" "mtcr")])
+
+(define_insn "*mtcrfsi"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand 2 "immediate_operand" "n")]
+ UNSPEC_MOVESI_TO_CR))]
+ "GET_CODE (operands[0]) == REG
+ && CR_REGNO_P (REGNO (operands[0]))
+ && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 1 << (75 - REGNO (operands[0]))"
+ "mtcrf %R0,%1"
+ [(set_attr "type" "mtcr")])
+
+; The load-multiple instructions have similar properties.
+; Note that "load_multiple" is a name known to the machine-independent
+; code that actually corresponds to the PowerPC load-string.
+
+(define_insn "*lmw"
+ [(match_parallel 0 "lmw_operation"
+ [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))])]
+ "TARGET_MULTIPLE"
+ "{lm|lmw} %1,%2"
+ [(set_attr "type" "load_ux")
+ (set_attr "cell_micro" "always")])
+
+(define_insn "*return_internal_<mode>"
+ [(return)
+ (use (match_operand:P 0 "register_operand" "lc"))]
+ ""
+ "b%T0"
+ [(set_attr "type" "jmpreg")])
+
+; FIXME: This would probably be somewhat simpler if the Cygnus sibcall
+; stuff was in GCC. Oh, and "any_parallel_operand" is a bit flexible...
+
+(define_insn "*restore_gpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (match_operand:P 1 "register_operand" "=l"))
+ (use (match_operand:P 2 "symbol_ref_operand" "s"))
+ (use (match_operand:P 3 "gpc_reg_operand" "r"))
+ (set (match_operand:P 4 "gpc_reg_operand" "=r")
+ (match_operand:P 5 "memory_operand" "m"))])]
+ ""
+ "bl %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*return_and_restore_gpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(return)
+ (clobber (match_operand:P 1 "register_operand" "=l"))
+ (use (match_operand:P 2 "symbol_ref_operand" "s"))
+ (use (match_operand:P 3 "gpc_reg_operand" "r"))
+ (set (match_operand:P 4 "gpc_reg_operand" "=r")
+ (match_operand:P 5 "memory_operand" "m"))])]
+ ""
+ "b %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*return_and_restore_fpregs_<mode>"
+ [(match_parallel 0 "any_parallel_operand"
+ [(return)
+ (clobber (match_operand:P 1 "register_operand" "=l"))
+ (use (match_operand:P 2 "symbol_ref_operand" "s"))
+ (use (match_operand:P 3 "gpc_reg_operand" "r"))
+ (set (match_operand:DF 4 "gpc_reg_operand" "=f")
+ (match_operand:DF 5 "memory_operand" "m"))])]
+ ""
+ "b %z2"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+; This is used in compiling the unwind routines.
+(define_expand "eh_return"
+ [(use (match_operand 0 "general_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_32BIT)
+ emit_insn (gen_eh_set_lr_si (operands[0]));
+ else
+ emit_insn (gen_eh_set_lr_di (operands[0]));
+ DONE;
+}")
+
+; We can't expand this before we know where the link register is stored.
+(define_insn "eh_set_lr_<mode>"
+ [(unspec_volatile [(match_operand:P 0 "register_operand" "r")]
+ UNSPECV_EH_RR)
+ (clobber (match_scratch:P 1 "=&b"))]
+ ""
+ "#")
+
+(define_split
+ [(unspec_volatile [(match_operand 0 "register_operand" "")] UNSPECV_EH_RR)
+ (clobber (match_scratch 1 ""))]
+ "reload_completed"
+ [(const_int 0)]
+ "
+{
+ rs6000_emit_eh_reg_restore (operands[0], operands[1]);
+ DONE;
+}")
+
+(define_insn "prefetch"
+ [(prefetch (match_operand 0 "indexed_or_indirect_address" "a")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))]
+ "TARGET_POWERPC"
+ "*
+{
+ if (GET_CODE (operands[0]) == REG)
+ return INTVAL (operands[1]) ? \"dcbtst 0,%0\" : \"dcbt 0,%0\";
+ return INTVAL (operands[1]) ? \"dcbtst %a0\" : \"dcbt %a0\";
+}"
+ [(set_attr "type" "load")])
+
+
+(include "sync.md")
+(include "altivec.md")
+(include "spe.md")
+(include "dfp.md")
+(include "paired.md")
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs6000.opt b/gcc-4.4.3/gcc/config/rs6000/rs6000.opt
new file mode 100644
index 000000000..8a6235278
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs6000.opt
@@ -0,0 +1,296 @@
+; Options for the rs6000 port of the compiler
+;
+; Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mpower
+Target Report RejectNegative Mask(POWER)
+Use POWER instruction set
+
+mno-power
+Target Report RejectNegative
+Do not use POWER instruction set
+
+mpower2
+Target Report Mask(POWER2)
+Use POWER2 instruction set
+
+mpowerpc
+Target Report RejectNegative Mask(POWERPC)
+Use PowerPC instruction set
+
+mno-powerpc
+Target Report RejectNegative
+Do not use PowerPC instruction set
+
+mpowerpc64
+Target Report Mask(POWERPC64)
+Use PowerPC-64 instruction set
+
+mpowerpc-gpopt
+Target Report Mask(PPC_GPOPT)
+Use PowerPC General Purpose group optional instructions
+
+mpowerpc-gfxopt
+Target Report Mask(PPC_GFXOPT)
+Use PowerPC Graphics group optional instructions
+
+mmfcrf
+Target Report Mask(MFCRF)
+Use PowerPC V2.01 single field mfcr instruction
+
+mpopcntb
+Target Report Mask(POPCNTB)
+Use PowerPC V2.02 popcntb instruction
+
+mfprnd
+Target Report Mask(FPRND)
+Use PowerPC V2.02 floating point rounding instructions
+
+mcmpb
+Target Report Mask(CMPB)
+Use PowerPC V2.05 compare bytes instruction
+
+mmfpgpr
+Target Report Mask(MFPGPR)
+Use extended PowerPC V2.05 move floating point to/from GPR instructions
+
+maltivec
+Target Report Mask(ALTIVEC)
+Use AltiVec instructions
+
+mhard-dfp
+Target Report Mask(DFP)
+Use decimal floating point instructions
+
+mmulhw
+Target Report Mask(MULHW)
+Use 4xx half-word multiply instructions
+
+mdlmzb
+Target Report Mask(DLMZB)
+Use 4xx string-search dlmzb instruction
+
+mmultiple
+Target Report Mask(MULTIPLE)
+Generate load/store multiple instructions
+
+mstring
+Target Report Mask(STRING)
+Generate string instructions for block moves
+
+mnew-mnemonics
+Target Report RejectNegative Mask(NEW_MNEMONICS)
+Use new mnemonics for PowerPC architecture
+
+mold-mnemonics
+Target Report RejectNegative InverseMask(NEW_MNEMONICS)
+Use old mnemonics for PowerPC architecture
+
+msoft-float
+Target Report RejectNegative Mask(SOFT_FLOAT)
+Do not use hardware floating point
+
+mhard-float
+Target Report RejectNegative InverseMask(SOFT_FLOAT, HARD_FLOAT)
+Use hardware floating point
+
+mno-update
+Target Report RejectNegative Mask(NO_UPDATE)
+Do not generate load/store with update instructions
+
+mupdate
+Target Report RejectNegative InverseMask(NO_UPDATE, UPDATE)
+Generate load/store with update instructions
+
+mavoid-indexed-addresses
+Target Report Var(TARGET_AVOID_XFORM) Init(-1)
+Avoid generation of indexed load/store instructions when possible
+
+mno-fused-madd
+Target Report RejectNegative Mask(NO_FUSED_MADD)
+Do not generate fused multiply/add instructions
+
+mfused-madd
+Target Report RejectNegative InverseMask(NO_FUSED_MADD, FUSED_MADD)
+Generate fused multiply/add instructions
+
+msched-prolog
+Target Report Var(TARGET_SCHED_PROLOG) Init(1)
+Schedule the start and end of the procedure
+
+msched-epilog
+Target Undocumented Var(TARGET_SCHED_PROLOG) VarExists
+
+maix-struct-return
+Target Report RejectNegative Var(aix_struct_return)
+Return all structures in memory (AIX default)
+
+msvr4-struct-return
+Target Report RejectNegative Var(aix_struct_return,0) VarExists
+Return small structures in registers (SVR4 default)
+
+mxl-compat
+Target Report Var(TARGET_XL_COMPAT)
+Conform more closely to IBM XLC semantics
+
+mrecip
+Target Report Var(TARGET_RECIP)
+Generate software reciprocal sqrt for better throughput
+
+mno-fp-in-toc
+Target Report RejectNegative Var(TARGET_NO_FP_IN_TOC)
+Do not place floating point constants in TOC
+
+mfp-in-toc
+Target Report RejectNegative Var(TARGET_NO_FP_IN_TOC,0)
+Place floating point constants in TOC
+
+mno-sum-in-toc
+Target RejectNegative Var(TARGET_NO_SUM_IN_TOC)
+Do not place symbol+offset constants in TOC
+
+msum-in-toc
+Target RejectNegative Var(TARGET_NO_SUM_IN_TOC,0) VarExists
+Place symbol+offset constants in TOC
+
+; Output only one TOC entry per module. Normally linking fails if
+; there are more than 16K unique variables/constants in an executable. With
+; this option, linking fails only if there are more than 16K modules, or
+; if there are more than 16K unique variables/constants in a single module.
+;
+; This is at the cost of having 2 extra loads and one extra store per
+; function, and one less allocable register.
+mminimal-toc
+Target Report Mask(MINIMAL_TOC)
+Use only one TOC entry per procedure
+
+mfull-toc
+Target Report
+Put everything in the regular TOC
+
+mvrsave
+Target Report Var(TARGET_ALTIVEC_VRSAVE)
+Generate VRSAVE instructions when generating AltiVec code
+
+mvrsave=
+Target RejectNegative Joined
+-mvrsave=yes/no Deprecated option. Use -mvrsave/-mno-vrsave instead
+
+misel
+Target
+Generate isel instructions
+
+misel=
+Target RejectNegative Joined
+-misel=yes/no Deprecated option. Use -misel/-mno-isel instead
+
+mspe
+Target
+Generate SPE SIMD instructions on E500
+
+mpaired
+Target Var(rs6000_paired_float)
+Generate PPC750CL paired-single instructions
+
+mspe=
+Target RejectNegative Joined
+-mspe=yes/no Deprecated option. Use -mspe/-mno-spe instead
+
+mdebug=
+Target RejectNegative Joined
+-mdebug= Enable debug output
+
+mabi=
+Target RejectNegative Joined
+-mabi= Specify ABI to use
+
+mcpu=
+Target RejectNegative Joined
+-mcpu= Use features of and schedule code for given CPU
+
+mtune=
+Target RejectNegative Joined
+-mtune= Schedule code for given CPU
+
+mtraceback=
+Target RejectNegative Joined
+-mtraceback= Select full, part, or no traceback table
+
+mlongcall
+Target Report Var(rs6000_default_long_calls)
+Avoid all range limits on call instructions
+
+mgen-cell-microcode
+Target Report Var(rs6000_gen_cell_microcode) Init(-1)
+Generate Cell microcode
+
+mwarn-cell-microcode
+Target Var(rs6000_warn_cell_microcode) Init(0) Warning
+Warn when a Cell microcoded instruction is emitted
+
+mwarn-altivec-long
+Target Var(rs6000_warn_altivec_long) Init(1)
+Warn about deprecated 'vector long ...' AltiVec type usage
+
+mfloat-gprs=
+Target RejectNegative Joined
+-mfloat-gprs= Select GPR floating point method
+
+mlong-double-
+Target RejectNegative Joined UInteger
+-mlong-double-<n> Specify size of long double (64 or 128 bits)
+
+msched-costly-dep=
+Target RejectNegative Joined
+Determine which dependences between insns are considered costly
+
+minsert-sched-nops=
+Target RejectNegative Joined
+Specify which post scheduling nop insertion scheme to apply
+
+malign-
+Target RejectNegative Joined
+Specify alignment of structure fields default/natural
+
+mprioritize-restricted-insns=
+Target RejectNegative Joined UInteger Var(rs6000_sched_restricted_insns_priority)
+Specify scheduling priority for dispatch slot restricted insns
+
+msingle-float
+Target RejectNegative Var(rs6000_single_float)
+Single-precision floating point unit
+
+mdouble-float
+Target RejectNegative Var(rs6000_double_float)
+Double-precision floating point unit
+
+msimple-fpu
+Target RejectNegative Var(rs6000_simple_fpu)
+Floating point unit does not support divide & sqrt
+
+mfpu=
+Target RejectNegative Joined
+-mfpu= Specify FP (sp, dp, sp-lite, dp-lite) (implies -mxilinx-fpu)
+
+mxilinx-fpu
+Target Var(rs6000_xilinx_fpu)
+Specify Xilinx FPU
+
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/rs64.md b/gcc-4.4.3/gcc/config/rs6000/rs64.md
new file mode 100644
index 000000000..f7234408a
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rs64.md
@@ -0,0 +1,154 @@
+;; Scheduling description for IBM RS64 processors.
+;; Copyright (C) 2003, 2004, 2007 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_automaton "rs64,rs64fp")
+(define_cpu_unit "iu_rs64" "rs64")
+(define_cpu_unit "mciu_rs64" "rs64")
+(define_cpu_unit "fpu_rs64" "rs64fp")
+(define_cpu_unit "lsu_rs64,bpu_rs64" "rs64")
+
+;; RS64a 64-bit IU, LSU, FPU, BPU
+
+(define_insn_reservation "rs64a-load" 2
+ (and (eq_attr "type" "load,load_ext,load_ext_u,load_ext_ux,load_ux,load_u")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-store" 2
+ (and (eq_attr "type" "store,store_ux,store_u,fpstore,fpstore_ux,fpstore_u")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-fpload" 3
+ (and (eq_attr "type" "fpload,fpload_ux,fpload_u")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-llsc" 2
+ (and (eq_attr "type" "load_l,store_c")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-integer" 1
+ (and (eq_attr "type" "integer,insert_word,insert_dword,shift,trap,\
+ var_shift_rotate,cntlz,exts")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64")
+
+(define_insn_reservation "rs64a-two" 1
+ (and (eq_attr "type" "two")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,iu_rs64")
+
+(define_insn_reservation "rs64a-three" 1
+ (and (eq_attr "type" "three")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,iu_rs64,iu_rs64")
+
+(define_insn_reservation "rs64a-imul" 20
+ (and (eq_attr "type" "imul,imul_compare")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*13")
+
+(define_insn_reservation "rs64a-imul2" 12
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*5")
+
+(define_insn_reservation "rs64a-imul3" 8
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*2")
+
+(define_insn_reservation "rs64a-lmul" 34
+ (and (eq_attr "type" "lmul,lmul_compare")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*34")
+
+(define_insn_reservation "rs64a-idiv" 66
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*66")
+
+(define_insn_reservation "rs64a-ldiv" 66
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64*66")
+
+(define_insn_reservation "rs64a-compare" 3
+ (and (eq_attr "type" "cmp,fast_compare,compare,\
+ delayed_compare,var_delayed_compare")
+ (eq_attr "cpu" "rs64a"))
+ "iu_rs64,nothing,bpu_rs64")
+
+(define_insn_reservation "rs64a-fpcompare" 5
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64,bpu_rs64")
+
+(define_insn_reservation "rs64a-fp" 4
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64")
+
+(define_insn_reservation "rs64a-sdiv" 31
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64*31")
+
+(define_insn_reservation "rs64a-sqrt" 49
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "rs64a"))
+ "mciu_rs64,fpu_rs64*49")
+
+(define_insn_reservation "rs64a-mfcr" 2
+ (and (eq_attr "type" "mfcr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-mtcr" 3
+ (and (eq_attr "type" "mtcr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-mtjmpr" 3
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-mfjmpr" 2
+ (and (eq_attr "type" "mfjmpr")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
+(define_insn_reservation "rs64a-jmpreg" 1
+ (and (eq_attr "type" "jmpreg,branch,cr_logical,delayed_cr")
+ (eq_attr "cpu" "rs64a"))
+ "bpu_rs64")
+
+(define_insn_reservation "rs64a-isync" 6
+ (and (eq_attr "type" "isync")
+ (eq_attr "cpu" "rs64a"))
+ "bpu_rs64")
+
+(define_insn_reservation "rs64a-sync" 1
+ (and (eq_attr "type" "sync")
+ (eq_attr "cpu" "rs64a"))
+ "lsu_rs64")
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/rtems.h b/gcc-4.4.3/gcc/config/rs6000/rtems.h
new file mode 100644
index 000000000..a8bd0e75f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/rtems.h
@@ -0,0 +1,56 @@
+/* Definitions for rtems targeting a PowerPC using elf.
+ Copyright (C) 1996, 1997, 2000, 2001, 2002, 2003, 2004, 2005, 2007
+ Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Specify predefined symbols in preprocessor. */
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define ("__rtems__"); \
+ builtin_define ("__USE_INIT_FINI__"); \
+ builtin_assert ("system=rtems"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
+ while (0)
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_rtems)"
+
+#define CPP_OS_RTEMS_SPEC "\
+%{!mcpu*: %{!Dppc*: %{!Dmpc*: -Dmpc750} } }\
+%{mcpu=403: %{!Dppc*: %{!Dmpc*: -Dppc403} } } \
+%{mcpu=505: %{!Dppc*: %{!Dmpc*: -Dmpc505} } } \
+%{mcpu=601: %{!Dppc*: %{!Dmpc*: -Dppc601} } } \
+%{mcpu=602: %{!Dppc*: %{!Dmpc*: -Dppc602} } } \
+%{mcpu=603: %{!Dppc*: %{!Dmpc*: -Dppc603} } } \
+%{mcpu=603e: %{!Dppc*: %{!Dmpc*: -Dppc603e} } } \
+%{mcpu=604: %{!Dppc*: %{!Dmpc*: -Dmpc604} } } \
+%{mcpu=750: %{!Dppc*: %{!Dmpc*: -Dmpc750} } } \
+%{mcpu=821: %{!Dppc*: %{!Dmpc*: -Dmpc821} } } \
+%{mcpu=860: %{!Dppc*: %{!Dmpc*: -Dmpc860} } }"
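+
+/* Each %{mcpu=...} clause above supplies the matching -Dppc or -Dmpc
+   macro only when the user has not already defined one; the first
+   line defaults to -Dmpc750 when no -mcpu option is given.  */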
+
+#undef SUBSUBTARGET_EXTRA_SPECS
+#define SUBSUBTARGET_EXTRA_SPECS \
+ { "cpp_os_rtems", CPP_OS_RTEMS_SPEC }
diff --git a/gcc-4.4.3/gcc/config/rs6000/secureplt.h b/gcc-4.4.3/gcc/config/rs6000/secureplt.h
new file mode 100644
index 000000000..f41078df3
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/secureplt.h
@@ -0,0 +1,20 @@
+/* Default to -msecure-plt.
+ Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#define CC1_SECURE_PLT_DEFAULT_SPEC "-msecure-plt"
diff --git a/gcc-4.4.3/gcc/config/rs6000/sfp-machine.h b/gcc-4.4.3/gcc/config/rs6000/sfp-machine.h
new file mode 100644
index 000000000..a0d1631bb
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sfp-machine.h
@@ -0,0 +1,68 @@
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+/* The type of the result of a floating point comparison. This must
+ match `__libgcc_cmp_return__' in GCC for the target. */
+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
+#define CMPtype __gcc_CMPtype
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+/* Someone please check this. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+#if defined __BIG_ENDIAN__ || defined _BIG_ENDIAN
+# if defined __LITTLE_ENDIAN__ || defined _LITTLE_ENDIAN
+# error "Both BIG_ENDIAN and LITTLE_ENDIAN defined!"
+# endif
+# define __BYTE_ORDER __BIG_ENDIAN
+#else
+# if defined __LITTLE_ENDIAN__ || defined _LITTLE_ENDIAN
+# define __BYTE_ORDER __LITTLE_ENDIAN
+# else
+# error "Cannot determine current byte order"
+# endif
+#endif
+
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
diff --git a/gcc-4.4.3/gcc/config/rs6000/si2vmx.h b/gcc-4.4.3/gcc/config/rs6000/si2vmx.h
new file mode 100644
index 000000000..e925a25a3
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/si2vmx.h
@@ -0,0 +1,2048 @@
+/* Cell BEA specific SPU intrinsics to PPU/VMX intrinsics
+ Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _SI2VMX_H_
+#define _SI2VMX_H_ 1
+
+#ifndef __SPU__
+
+#include <stdlib.h>
+#include <vec_types.h>
+
+
+/* Specify a default halt action for spu_hcmpeq and spu_hcmpgt intrinsics.
+ * Users can override the action by defining it prior to including this
+ * header file.
+ */
+#ifndef SPU_HALT_ACTION
+#define SPU_HALT_ACTION abort()
+#endif
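+
+/* For example, a build could choose to trap rather than abort
+   (illustrative only; any expression usable as a statement works):
+
+     #define SPU_HALT_ACTION __builtin_trap()
+     #include <si2vmx.h>
+ */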
+
+/* Specify a default stop action for the spu_stop intrinsic.
+ * Users can override the action by defining it prior to including this
+ * header file.
+ */
+#ifndef SPU_STOP_ACTION
+#define SPU_STOP_ACTION abort()
+#endif
+
+
+/* Specify a default action for an unsupported intrinsic.
+ * Users can override the action by defining it prior to including this
+ * header file.
+ */
+#ifndef SPU_UNSUPPORTED_ACTION
+#define SPU_UNSUPPORTED_ACTION abort()
+#endif
+
+
+/* Casting intrinsics - from scalar to quadword
+ */
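+
+/* Each union below stores the scalar into the SPU "preferred slot"
+   for its type -- byte 3 for chars, halfword 1 for shorts, element 0
+   for word-sized and larger values -- so the value lands where SPU
+   code expects scalars to live.  */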
+
+static __inline qword si_from_uchar(unsigned char c) {
+ union {
+ qword q;
+ unsigned char c[16];
+ } x;
+ x.c[3] = c;
+ return (x.q);
+}
+
+static __inline qword si_from_char(signed char c) {
+ union {
+ qword q;
+ signed char c[16];
+ } x;
+ x.c[3] = c;
+ return (x.q);
+}
+
+static __inline qword si_from_ushort(unsigned short s) {
+ union {
+ qword q;
+ unsigned short s[8];
+ } x;
+ x.s[1] = s;
+ return (x.q);
+}
+
+static __inline qword si_from_short(short s) {
+ union {
+ qword q;
+ short s[8];
+ } x;
+ x.s[1] = s;
+ return (x.q);
+}
+
+
+static __inline qword si_from_uint(unsigned int i) {
+ union {
+ qword q;
+ unsigned int i[4];
+ } x;
+ x.i[0] = i;
+ return (x.q);
+}
+
+static __inline qword si_from_int(int i) {
+ union {
+ qword q;
+ int i[4];
+ } x;
+ x.i[0] = i;
+ return (x.q);
+}
+
+static __inline qword si_from_ullong(unsigned long long l) {
+ union {
+ qword q;
+ unsigned long long l[2];
+ } x;
+ x.l[0] = l;
+ return (x.q);
+}
+
+static __inline qword si_from_llong(long long l) {
+ union {
+ qword q;
+ long long l[2];
+ } x;
+ x.l[0] = l;
+ return (x.q);
+}
+
+static __inline qword si_from_float(float f) {
+ union {
+ qword q;
+ float f[4];
+ } x;
+ x.f[0] = f;
+ return (x.q);
+}
+
+static __inline qword si_from_double(double d) {
+ union {
+ qword q;
+ double d[2];
+ } x;
+ x.d[0] = d;
+ return (x.q);
+}
+
+static __inline qword si_from_ptr(void *ptr) {
+ union {
+ qword q;
+ void *p;
+ } x;
+ x.p = ptr;
+ return (x.q);
+}
+
+
+/* Casting intrinsics - from quadword to scalar
+ */
+static __inline unsigned char si_to_uchar(qword q) {
+ union {
+ qword q;
+ unsigned char c[16];
+ } x;
+ x.q = q;
+ return (x.c[3]);
+}
+
+static __inline signed char si_to_char(qword q) {
+ union {
+ qword q;
+ signed char c[16];
+ } x;
+ x.q = q;
+ return (x.c[3]);
+}
+
+static __inline unsigned short si_to_ushort(qword q) {
+ union {
+ qword q;
+ unsigned short s[8];
+ } x;
+ x.q = q;
+ return (x.s[1]);
+}
+
+static __inline short si_to_short(qword q) {
+ union {
+ qword q;
+ short s[8];
+ } x;
+ x.q = q;
+ return (x.s[1]);
+}
+
+static __inline unsigned int si_to_uint(qword q) {
+ union {
+ qword q;
+ unsigned int i[4];
+ } x;
+ x.q = q;
+ return (x.i[0]);
+}
+
+static __inline int si_to_int(qword q) {
+ union {
+ qword q;
+ int i[4];
+ } x;
+ x.q = q;
+ return (x.i[0]);
+}
+
+static __inline unsigned long long si_to_ullong(qword q) {
+ union {
+ qword q;
+ unsigned long long l[2];
+ } x;
+ x.q = q;
+ return (x.l[0]);
+}
+
+static __inline long long si_to_llong(qword q) {
+ union {
+ qword q;
+ long long l[2];
+ } x;
+ x.q = q;
+ return (x.l[0]);
+}
+
+static __inline float si_to_float(qword q) {
+ union {
+ qword q;
+ float f[4];
+ } x;
+ x.q = q;
+ return (x.f[0]);
+}
+
+static __inline double si_to_double(qword q) {
+ union {
+ qword q;
+ double d[2];
+ } x;
+ x.q = q;
+ return (x.d[0]);
+}
+
+static __inline void * si_to_ptr(qword q) {
+ union {
+ qword q;
+ void *p;
+ } x;
+ x.q = q;
+ return (x.p);
+}
+
+
+/* Absolute difference
+ */
+static __inline qword si_absdb(qword a, qword b)
+{
+ vec_uchar16 ac, bc, dc;
+
+ ac = (vec_uchar16)(a);
+ bc = (vec_uchar16)(b);
+ dc = vec_sel(vec_sub(bc, ac), vec_sub(ac, bc), vec_cmpgt(ac, bc));
+
+ return ((qword)(dc));
+}
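+
+/* The vec_sel above picks a - b in lanes where a > b and b - a
+   elsewhere; both subtractions wrap modulo 256, so the selected lane
+   is always the non-negative difference.  */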
+
+/* Add intrinsics
+ */
+#define si_a(_a, _b) ((qword)(vec_add((vec_uint4)(_a), (vec_uint4)(_b))))
+
+#define si_ah(_a, _b) ((qword)(vec_add((vec_ushort8)(_a), (vec_ushort8)(_b))))
+
+static __inline qword si_ai(qword a, int b)
+{
+ return ((qword)(vec_add((vec_int4)(a),
+ vec_splat((vec_int4)(si_from_int(b)), 0))));
+}
+
+
+static __inline qword si_ahi(qword a, short b)
+{
+ return ((qword)(vec_add((vec_short8)(a),
+ vec_splat((vec_short8)(si_from_short(b)), 1))));
+}
+
+
+#define si_fa(_a, _b) ((qword)(vec_add((vec_float4)(_a), (vec_float4)(_b))))
+
+
+static __inline qword si_dfa(qword a, qword b)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } ad, bd, dd;
+
+ ad.v = (vec_double2)(a);
+ bd.v = (vec_double2)(b);
+ dd.d[0] = ad.d[0] + bd.d[0];
+ dd.d[1] = ad.d[1] + bd.d[1];
+
+ return ((qword)(dd.v));
+}
+
+/* Add word extended
+ */
+#define si_addx(_a, _b, _c) ((qword)(vec_add(vec_add((vec_uint4)(_a), (vec_uint4)(_b)), \
+ vec_and((vec_uint4)(_c), vec_splat_u32(1)))))
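+
+/* i.e. a + b + carry, where only the least significant bit of each
+   word of _c is honoured, matching the SPU addx definition.  */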
+
+
+/* Bit-wise AND
+ */
+#define si_and(_a, _b) ((qword)(vec_and((vec_uint4)(_a), (vec_uint4)(_b))))
+
+
+static __inline qword si_andbi(qword a, signed char b)
+{
+ return ((qword)(vec_and((vec_char16)(a),
+ vec_splat((vec_char16)(si_from_char(b)), 3))));
+}
+
+static __inline qword si_andhi(qword a, signed short b)
+{
+ return ((qword)(vec_and((vec_short8)(a),
+ vec_splat((vec_short8)(si_from_short(b)), 1))));
+}
+
+
+static __inline qword si_andi(qword a, signed int b)
+{
+ return ((qword)(vec_and((vec_int4)(a),
+ vec_splat((vec_int4)(si_from_int(b)), 0))));
+}
+
+
+/* Bit-wise AND with complement
+ */
+#define si_andc(_a, _b) ((qword)(vec_andc((vec_uchar16)(_a), (vec_uchar16)(_b))))
+
+
+/* Average byte vectors
+ */
+#define si_avgb(_a, _b) ((qword)(vec_avg((vec_uchar16)(_a), (vec_uchar16)(_b))))
+
+
+/* Branch indirect and set link on external data
+ */
+#define si_bisled(_func) /* not mappable */
+#define si_bisledd(_func) /* not mappable */
+#define si_bislede(_func) /* not mappable */
+
+
+/* Borrow generate
+ */
+#define si_bg(_a, _b) ((qword)(vec_subc((vec_uint4)(_b), (vec_uint4)(_a))))
+
+#define si_bgx(_a, _b, _c) ((qword)(vec_and(vec_or(vec_cmpgt((vec_uint4)(_b), (vec_uint4)(_a)), \
+ vec_and(vec_cmpeq((vec_uint4)(_b), (vec_uint4)(_a)), \
+ (vec_uint4)(_c))), vec_splat_u32(1))))
+
+/* Compare absolute equal
+ */
+static __inline qword si_fcmeq(qword a, qword b)
+{
+ vec_float4 msb = (vec_float4)((vec_uint4){0x80000000, 0x80000000, 0x80000000, 0x80000000});
+
+ return ((qword)(vec_cmpeq(vec_andc((vec_float4)(a), msb),
+ vec_andc((vec_float4)(b), msb))));
+}
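+
+/* vec_andc with the sign-bit constant clears each element's sign, so
+   this compares magnitudes: -2.0f and 2.0f test equal here.  */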
+
+static __inline qword si_dfcmeq(qword a, qword b)
+{
+ vec_uint4 sign_mask= (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
+ vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000 };
+ vec_uchar16 hihi_promote = (vec_uchar16) { 0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27};
+
+ vec_uint4 biteq;
+ vec_uint4 aabs;
+ vec_uint4 babs;
+ vec_uint4 a_gt;
+ vec_uint4 ahi_inf;
+ vec_uint4 anan;
+ vec_uint4 result;
+
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+
+ /* Shift 4 bytes */
+ x.i[3] = 4 << 3;
+
+ /* Mask out sign bits */
+ aabs = vec_and((vec_uint4)a,sign_mask);
+ babs = vec_and((vec_uint4)b,sign_mask);
+
+ /* A) Check for bit equality, store in high word */
+ biteq = (vec_uint4) vec_cmpeq((vec_uint4)aabs,(vec_uint4)babs);
+ biteq = vec_and(biteq,(vec_uint4)vec_slo((vec_uchar16)biteq,x.v));
+
+ /*
+ B) Check if a is NaN, store in high word
+
+ B1) If the high word is greater than max_exp (indicates a NaN)
+ B2) If the low word is greater than 0
+ */
+ a_gt = (vec_uint4)vec_cmpgt(aabs,nan_mask);
+
+ /* B3) Check if the high word is equal to the inf exponent */
+ ahi_inf = (vec_uint4)vec_cmpeq(aabs,nan_mask);
+
+ /* anan = B1[hi] or (B2[lo] and B3[hi]) */
+ anan = (vec_uint4)vec_or(a_gt,vec_and((vec_uint4)vec_slo((vec_uchar16)a_gt,x.v),ahi_inf));
+
+ /* result = A and not B */
+ result = vec_andc(biteq, anan);
+
+ /* Promote high words to 64 bits and return */
+ return ((qword)(vec_perm((vec_uchar16)result, (vec_uchar16)result, hihi_promote)));
+}
+
+
+/* Compare absolute greater than
+ */
+static __inline qword si_fcmgt(qword a, qword b)
+{
+ vec_float4 msb = (vec_float4)((vec_uint4){0x80000000, 0x80000000, 0x80000000, 0x80000000});
+
+ return ((qword)(vec_cmpgt(vec_andc((vec_float4)(a), msb),
+ vec_andc((vec_float4)(b), msb))));
+}
+
+static __inline qword si_dfcmgt(qword a, qword b)
+{
+ vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
+ vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x0, 0x7FF00000, 0x0 };
+ vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
+
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+
+ /* Shift 4 bytes */
+ x.i[3] = 4 << 3;
+
+ // absolute value of a,b
+ vec_uint4 aabs = vec_and((vec_uint4)a, sign_mask);
+ vec_uint4 babs = vec_and((vec_uint4)b, sign_mask);
+
+ // check if a is nan
+ vec_uint4 a_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);
+ vec_uint4 a_nan = (vec_uint4)vec_cmpgt(aabs, nan_mask);
+ a_nan = vec_or(a_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)a_nan,x.v),a_inf));
+ a_nan = (vec_uint4)vec_perm((vec_uchar16)a_nan, (vec_uchar16)a_nan, splat_hi);
+
+ // check if b is nan
+ vec_uint4 b_inf = (vec_uint4)vec_cmpeq(babs, nan_mask);
+ vec_uint4 b_nan = (vec_uint4)vec_cmpgt(babs, nan_mask);
+ b_nan = vec_or(b_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)b_nan,x.v),b_inf));
+ b_nan = (vec_uint4)vec_perm((vec_uchar16)b_nan, (vec_uchar16)b_nan, splat_hi);
+
+ // A) Check if the exponents are different
+ vec_uint4 gt_hi = (vec_uint4)vec_cmpgt(aabs,babs);
+
+ // B) Check if high word equal, and low word greater
+ vec_uint4 gt_lo = (vec_uint4)vec_cmpgt((vec_uint4)aabs, (vec_uint4)babs);
+ vec_uint4 eq = (vec_uint4)vec_cmpeq(aabs, babs);
+ vec_uint4 eqgt = vec_and(eq,vec_slo(gt_lo,x.v));
+
+ // If either A or B is true, return true (unless NaNs detected)
+ vec_uint4 r = vec_or(gt_hi, eqgt);
+
+ // splat the high words of the comparison step
+ r = (vec_uint4)vec_perm((vec_uchar16)r,(vec_uchar16)r,splat_hi);
+
+ // correct for NaNs in input
+ return ((qword)vec_andc(r,vec_or(a_nan,b_nan)));
+}
+
+
+/* Compare equal
+ */
+static __inline qword si_ceqb(qword a, qword b)
+{
+ return ((qword)(vec_cmpeq((vec_uchar16)(a), (vec_uchar16)(b))));
+}
+
+static __inline qword si_ceqh(qword a, qword b)
+{
+ return ((qword)(vec_cmpeq((vec_ushort8)(a), (vec_ushort8)(b))));
+}
+
+static __inline qword si_ceq(qword a, qword b)
+{
+ return ((qword)(vec_cmpeq((vec_uint4)(a), (vec_uint4)(b))));
+}
+
+static __inline qword si_fceq(qword a, qword b)
+{
+ return ((qword)(vec_cmpeq((vec_float4)(a), (vec_float4)(b))));
+}
+
+static __inline qword si_ceqbi(qword a, signed char b)
+{
+ return ((qword)(vec_cmpeq((vec_char16)(a),
+ vec_splat((vec_char16)(si_from_char(b)), 3))));
+}
+
+static __inline qword si_ceqhi(qword a, signed short b)
+{
+ return ((qword)(vec_cmpeq((vec_short8)(a),
+ vec_splat((vec_short8)(si_from_short(b)), 1))));
+}
+
+static __inline qword si_ceqi(qword a, signed int b)
+{
+ return ((qword)(vec_cmpeq((vec_int4)(a),
+ vec_splat((vec_int4)(si_from_int(b)), 0))));
+}
+
+static __inline qword si_dfceq(qword a, qword b)
+{
+ vec_uint4 sign_mask= (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
+ vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x00000000, 0x7FF00000, 0x00000000 };
+ vec_uchar16 hihi_promote = (vec_uchar16) { 0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27};
+
+ vec_uint4 biteq;
+ vec_uint4 aabs;
+ vec_uint4 babs;
+ vec_uint4 a_gt;
+ vec_uint4 ahi_inf;
+ vec_uint4 anan;
+ vec_uint4 iszero;
+ vec_uint4 result;
+
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+
+ /* Shift 4 bytes */
+ x.i[3] = 4 << 3;
+
+ /* A) Check for bit equality, store in high word */
+ biteq = (vec_uint4) vec_cmpeq((vec_uint4)a,(vec_uint4)b);
+ biteq = vec_and(biteq,(vec_uint4)vec_slo((vec_uchar16)biteq,x.v));
+
+ /* Mask out sign bits */
+ aabs = vec_and((vec_uint4)a,sign_mask);
+ babs = vec_and((vec_uint4)b,sign_mask);
+
+ /*
+ B) Check if a is NaN, store in high word
+
+ B1) If the high word is greater than max_exp (indicates a NaN)
+ B2) If the low word is greater than 0
+ */
+ a_gt = (vec_uint4)vec_cmpgt(aabs,nan_mask);
+
+ /* B3) Check if the high word is equal to the inf exponent */
+ ahi_inf = (vec_uint4)vec_cmpeq(aabs,nan_mask);
+
+ /* anan = B1[hi] or (B2[lo] and B3[hi]) */
+ anan = (vec_uint4)vec_or(a_gt,vec_and((vec_uint4)vec_slo((vec_uchar16)a_gt,x.v),ahi_inf));
+
+ /* C) Check for 0 = -0 special case */
+ iszero =(vec_uint4)vec_cmpeq((vec_uint4)vec_or(aabs,babs),(vec_uint4)vec_splat_u32(0));
+ iszero = vec_and(iszero,(vec_uint4)vec_slo((vec_uchar16)iszero,x.v));
+
+ /* result = (A or C) and not B */
+ result = vec_or(biteq,iszero);
+ result = vec_andc(result, anan);
+
+ /* Promote high words to 64 bits and return */
+ return ((qword)(vec_perm((vec_uchar16)result, (vec_uchar16)result, hihi_promote)));
+}
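+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic):
+   exercises the special case handled above -- +0 compares equal to -0
+   even though the bit patterns differ.  Uses the union idiom already
+   employed throughout this file: */
+static __inline int si_dfceq_example(void)
+{
+  union { vec_double2 v; double d[2]; } a, b;
+  union { vec_uint4 v; unsigned int i[4]; } r;
+
+  a.d[0] = 0.0;  b.d[0] = -0.0;   /* differing bits, equal values */
+  a.d[1] = 1.0;  b.d[1] = 1.0;
+  r.v = (vec_uint4)si_dfceq((qword)(a.v), (qword)(b.v));
+  return r.i[0] == 0xFFFFFFFF && r.i[2] == 0xFFFFFFFF;   /* both true */
+}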
+
+
+/* Compare greater than
+ */
+static __inline qword si_cgtb(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_char16)(a), (vec_char16)(b))));
+}
+
+static __inline qword si_cgth(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_short8)(a), (vec_short8)(b))));
+}
+
+static __inline qword si_cgt(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_int4)(a), (vec_int4)(b))));
+}
+
+static __inline qword si_clgtb(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_uchar16)(a), (vec_uchar16)(b))));
+}
+
+static __inline qword si_clgth(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_ushort8)(a), (vec_ushort8)(b))));
+}
+
+static __inline qword si_clgt(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_uint4)(a), (vec_uint4)(b))));
+}
+
+static __inline qword si_fcgt(qword a, qword b)
+{
+ return ((qword)(vec_cmpgt((vec_float4)(a), (vec_float4)(b))));
+}
+
+static __inline qword si_dfcgt(qword a, qword b)
+{
+ vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
+ vec_uchar16 borrow_shuffle = (vec_uchar16) { 4,5,6,7, 192,192,192,192, 12,13,14,15, 192,192,192,192 };
+ vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x0, 0x7FF00000, 0x0 };
+ vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
+
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+
+ /* Shift 4 bytes */
+ x.i[3] = 4 << 3;
+
+ // absolute value of a,b
+ vec_uint4 aabs = vec_and((vec_uint4)a, sign_mask);
+ vec_uint4 babs = vec_and((vec_uint4)b, sign_mask);
+
+ // check if a is nan
+ vec_uint4 a_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);
+ vec_uint4 a_nan = (vec_uint4)vec_cmpgt(aabs, nan_mask);
+ a_nan = vec_or(a_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)a_nan,x.v),a_inf));
+ a_nan = (vec_uint4)vec_perm((vec_uchar16)a_nan, (vec_uchar16)a_nan, splat_hi);
+
+ // check if b is nan
+ vec_uint4 b_inf = (vec_uint4)vec_cmpeq(babs, nan_mask);
+ vec_uint4 b_nan = (vec_uint4)vec_cmpgt(babs, nan_mask);
+ b_nan = vec_or(b_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)b_nan,x.v),b_inf));
+ b_nan = (vec_uint4)vec_perm((vec_uchar16)b_nan, (vec_uchar16)b_nan, splat_hi);
+
+ // sign of a
+ vec_uint4 asel = (vec_uint4)vec_sra((vec_int4)(a), (vec_uint4)vec_splat(((vec_uint4)si_from_int(31)), 0));
+ asel = (vec_uint4)vec_perm((vec_uchar16)asel,(vec_uchar16)asel,splat_hi);
+
+ // sign of b
+ vec_uint4 bsel = (vec_uint4)vec_sra((vec_int4)(b), (vec_uint4)vec_splat(((vec_uint4)si_from_int(31)), 0));
+ bsel = (vec_uint4)vec_perm((vec_uchar16)bsel,(vec_uchar16)bsel,splat_hi);
+
+  // negate a: two's complement of aabs, with the borrow carried across word boundaries
+ vec_uint4 abor = vec_subc((vec_uint4)vec_splat_u32(0), aabs);
+ vec_uchar16 pat = vec_sel(((vec_uchar16){0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15}), vec_sr(borrow_shuffle, vec_splat_u8(3)), vec_sra(borrow_shuffle, vec_splat_u8(7)));
+ abor = (vec_uint4)(vec_perm(vec_perm((vec_uchar16)abor, (vec_uchar16)abor, borrow_shuffle),((vec_uchar16){0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80}),pat));
+  vec_uint4 aneg = vec_add(vec_nor(aabs, aabs), vec_and(abor, vec_splat_u32(1)));
+
+  // select aabs or its negation according to a's sign
+ vec_int4 aval = (vec_int4)vec_sel((vec_uchar16)aabs, (vec_uchar16)aneg, (vec_uchar16)asel);
+
+  // negate b the same way
+ vec_uint4 bbor = vec_subc((vec_uint4)vec_splat_u32(0), babs);
+ bbor = (vec_uint4)(vec_perm(vec_perm((vec_uchar16)bbor, (vec_uchar16)bbor, borrow_shuffle),((vec_uchar16){0, 0, 0, 0, 0, 0, 0, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80}),pat));
+ vec_uint4 bneg = vec_add(vec_nor(babs, babs), vec_and(bbor, vec_splat_u32(1)));
+
+  // select babs or its negation according to b's sign
+  vec_int4 bval = (vec_int4)vec_sel((vec_uchar16)babs, (vec_uchar16)bneg, (vec_uchar16)bsel);
+
+  // A) Check if a's high word is greater (exponent or upper mantissa differs)
+ vec_uint4 gt_hi = (vec_uint4)vec_cmpgt(aval,bval);
+
+  // B) Check if the high words are equal and a's low word is greater
+ vec_uint4 gt_lo = (vec_uint4)vec_cmpgt((vec_uint4)aval, (vec_uint4)bval);
+ vec_uint4 eq = (vec_uint4)vec_cmpeq(aval, bval);
+ vec_uint4 eqgt = vec_and(eq,vec_slo(gt_lo,x.v));
+
+ // If either A or B is true, return true (unless NaNs detected)
+ vec_uint4 r = vec_or(gt_hi, eqgt);
+
+ // splat the high words of the comparison step
+ r = (vec_uint4)vec_perm((vec_uchar16)r,(vec_uchar16)r,splat_hi);
+
+ // correct for NaNs in input
+ return ((qword)vec_andc(r,vec_or(a_nan,b_nan)));
+}
+
+static __inline qword si_cgtbi(qword a, signed char b)
+{
+ return ((qword)(vec_cmpgt((vec_char16)(a),
+ vec_splat((vec_char16)(si_from_char(b)), 3))));
+}
+
+static __inline qword si_cgthi(qword a, signed short b)
+{
+ return ((qword)(vec_cmpgt((vec_short8)(a),
+ vec_splat((vec_short8)(si_from_short(b)), 1))));
+}
+
+static __inline qword si_cgti(qword a, signed int b)
+{
+ return ((qword)(vec_cmpgt((vec_int4)(a),
+ vec_splat((vec_int4)(si_from_int(b)), 0))));
+}
+
+static __inline qword si_clgtbi(qword a, unsigned char b)
+{
+ return ((qword)(vec_cmpgt((vec_uchar16)(a),
+ vec_splat((vec_uchar16)(si_from_uchar(b)), 3))));
+}
+
+static __inline qword si_clgthi(qword a, unsigned short b)
+{
+ return ((qword)(vec_cmpgt((vec_ushort8)(a),
+ vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
+}
+
+static __inline qword si_clgti(qword a, unsigned int b)
+{
+ return ((qword)(vec_cmpgt((vec_uint4)(a),
+ vec_splat((vec_uint4)(si_from_uint(b)), 0))));
+}
+
+static __inline qword si_dftsv(qword a, char b)
+{
+ vec_uchar16 splat_hi = (vec_uchar16) { 0,1,2,3, 0,1,2,3, 8,9,10,11, 8,9,10,11 };
+ vec_uint4 sign_mask = (vec_uint4) { 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF };
+ vec_uint4 result = (vec_uint4){0};
+ vec_uint4 sign = (vec_uint4)vec_sra((vec_int4)(a), (vec_uint4)vec_splat(((vec_uint4)si_from_int(31)), 0));
+ sign = (vec_uint4)vec_perm((vec_uchar16)sign,(vec_uchar16)sign,splat_hi);
+ vec_uint4 aabs = vec_and((vec_uint4)a,sign_mask);
+
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+
+ /* Shift 4 bytes */
+ x.i[3] = 4 << 3;
+
+ /* Nan or +inf or -inf */
+ if (b & 0x70)
+ {
+ vec_uint4 nan_mask = (vec_uint4) { 0x7FF00000, 0x0, 0x7FF00000, 0x0 };
+ vec_uint4 a_inf = (vec_uint4)vec_cmpeq(aabs, nan_mask);
+ /* NaN */
+ if (b & 0x40)
+ {
+ vec_uint4 a_nan = (vec_uint4)vec_cmpgt(aabs, nan_mask);
+ a_nan = vec_or(a_nan, vec_and((vec_uint4)vec_slo((vec_uchar16)a_nan,x.v),a_inf));
+ a_nan = (vec_uint4)vec_perm((vec_uchar16)a_nan, (vec_uchar16)a_nan, splat_hi);
+ result = vec_or(result, a_nan);
+ }
+ /* inf */
+ if (b & 0x30)
+ {
+ a_inf = vec_and((vec_uint4)vec_slo((vec_uchar16)a_inf,x.v), a_inf);
+ a_inf = (vec_uint4)vec_perm((vec_uchar16)a_inf, (vec_uchar16)a_inf, splat_hi);
+ /* +inf */
+ if (b & 0x20)
+ result = vec_or(vec_andc(a_inf, sign), result);
+ /* -inf */
+ if (b & 0x10)
+ result = vec_or(vec_and(a_inf, sign), result);
+ }
+ }
+ /* 0 or denorm */
+ if (b & 0xF)
+ {
+    vec_uint4 iszero = (vec_uint4)vec_cmpeq(aabs,(vec_uint4)vec_splat_u32(0));
+ iszero = vec_and(iszero,(vec_uint4)vec_slo((vec_uchar16)iszero,x.v));
+ /* denorm */
+ if (b & 0x3)
+ {
+ vec_uint4 denorm_mask = (vec_uint4){0xFFFFF, 0xFFFFF, 0xFFFFF, 0xFFFFF};
+ vec_uint4 isdenorm = vec_nor((vec_uint4)vec_cmpgt(aabs, denorm_mask), iszero);
+ isdenorm = (vec_uint4)vec_perm((vec_uchar16)isdenorm, (vec_uchar16)isdenorm, splat_hi);
+ /* +denorm */
+ if (b & 0x2)
+ result = vec_or(vec_andc(isdenorm, sign), result);
+ /* -denorm */
+ if (b & 0x1)
+ result = vec_or(vec_and(isdenorm, sign), result);
+ }
+ /* 0 */
+ if (b & 0xC)
+ {
+ iszero = (vec_uint4)vec_perm((vec_uchar16)iszero, (vec_uchar16)iszero, splat_hi);
+ /* +0 */
+ if (b & 0x8)
+ result = vec_or(vec_andc(iszero, sign), result);
+ /* -0 */
+ if (b & 0x4)
+ result = vec_or(vec_and(iszero, sign), result);
+ }
+ }
+ return ((qword)result);
+}
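+
+/* Illustrative note (hypothetical helper, not an SPU intrinsic): the
+   flag bits of b select which IEEE classes si_dftsv tests -- 0x40 NaN,
+   0x20 +inf, 0x10 -inf, 0x08 +0, 0x04 -0, 0x02 +denorm, 0x01 -denorm.
+   For example, a "not finite" test: */
+static __inline qword si_dftsv_example(qword a)
+{
+  return si_dftsv(a, 0x70);   /* NaN or +inf or -inf */
+}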
+
+
+/* Carry generate
+ */
+#define si_cg(_a, _b) ((qword)(vec_addc((vec_uint4)(_a), (vec_uint4)(_b))))
+
+#define si_cgx(_a, _b, _c) ((qword)(vec_or(vec_addc((vec_uint4)(_a), (vec_uint4)(_b)), \
+ vec_addc(vec_add((vec_uint4)(_a), (vec_uint4)(_b)), \
+ vec_and((vec_uint4)(_c), vec_splat_u32(1))))))
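+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): per
+   32-bit slot, si_cgx yields the carry out of a + b + (c & 1); the
+   vec_or combines the carry from a + b with the carry from adding the
+   carry-in, which can never both be 1.  A scalar equivalent, assuming
+   64-bit arithmetic is available: */
+static __inline unsigned int scalar_cgx(unsigned int a, unsigned int b,
+                                        unsigned int c)
+{
+  unsigned long long sum = (unsigned long long)a + b + (c & 1);
+  return (unsigned int)(sum >> 32);   /* 1 iff a carry was generated */
+}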
+
+
+/* Count ones for bytes
+ */
+static __inline qword si_cntb(qword a)
+{
+ vec_uchar16 nib_cnt = (vec_uchar16){0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
+ vec_uchar16 four = { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 };
+ vec_uchar16 av;
+
+ av = (vec_uchar16)(a);
+
+ return ((qword)(vec_add(vec_perm(nib_cnt, nib_cnt, av),
+ vec_perm(nib_cnt, nib_cnt, vec_sr (av, four)))));
+}
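+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic):
+   si_cntb is a table-driven population count -- vec_perm indexes the
+   16-entry bit-count table with each nibble, and the two counts are
+   summed.  A scalar equivalent for a single byte: */
+static __inline unsigned char scalar_cntb(unsigned char byte)
+{
+  static const unsigned char nib_cnt[16] =
+    {0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4};
+  return nib_cnt[byte & 0xF] + nib_cnt[byte >> 4];
+}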
+
+/* Count leading zeros
+ */
+static __inline qword si_clz(qword a)
+{
+ vec_uchar16 av;
+ vec_uchar16 cnt_hi, cnt_lo, cnt, tmp1, tmp2, tmp3;
+ vec_uchar16 four = vec_splat_u8(4);
+ vec_uchar16 nib_cnt = (vec_uchar16){4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
+ vec_uchar16 eight = vec_splat_u8(8);
+ vec_uchar16 sixteen = (vec_uchar16){16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16};
+ vec_uchar16 twentyfour = (vec_uchar16){24,24,24,24,24,24,24,24,24,24,24,24,24,24,24,24};
+
+ av = (vec_uchar16)(a);
+
+ cnt_hi = vec_perm(nib_cnt, nib_cnt, vec_sr(av, four));
+ cnt_lo = vec_perm(nib_cnt, nib_cnt, av);
+
+ cnt = vec_add(cnt_hi, vec_and(cnt_lo, vec_cmpeq(cnt_hi, four)));
+
+ tmp1 = (vec_uchar16)vec_sl((vec_uint4)(cnt), (vec_uint4)(eight));
+ tmp2 = (vec_uchar16)vec_sl((vec_uint4)(cnt), (vec_uint4)(sixteen));
+ tmp3 = (vec_uchar16)vec_sl((vec_uint4)(cnt), (vec_uint4)(twentyfour));
+
+ cnt = vec_add(cnt, vec_and(tmp1, vec_cmpeq(cnt, eight)));
+ cnt = vec_add(cnt, vec_and(tmp2, vec_cmpeq(cnt, sixteen)));
+ cnt = vec_add(cnt, vec_and(tmp3, vec_cmpeq(cnt, twentyfour)));
+
+ return (qword)((vec_sr((vec_uint4)(cnt), (vec_uint4)(twentyfour))));
+}
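+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): the
+   same nibble-table trick with leading-zero counts; a lower unit only
+   contributes while every unit above it is all zeros (its count
+   saturated at 4, 8, 16 or 24).  A scalar equivalent for one byte: */
+static __inline unsigned char scalar_clz_byte(unsigned char byte)
+{
+  static const unsigned char nib_clz[16] =
+    {4, 3, 2, 2, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0};
+  unsigned char hi = nib_clz[byte >> 4];
+  /* add the low-nibble count only if the high nibble was all zeros */
+  return hi + (hi == 4 ? nib_clz[byte & 0xF] : 0);
+}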
+
+/* Convert to float
+ */
+#define si_cuflt(_a, _b) ((qword)(vec_ctf((vec_uint4)(_a), _b)))
+#define si_csflt(_a, _b) ((qword)(vec_ctf((vec_int4)(_a), _b)))
+
+/* Convert to signed int
+ */
+#define si_cflts(_a, _b) ((qword)(vec_cts((vec_float4)(_a), _b)))
+
+/* Convert to unsigned int
+ */
+#define si_cfltu(_a, _b) ((qword)(vec_ctu((vec_float4)(_a), _b)))
+
+/* Synchronize
+ */
+#define si_dsync() /* do nothing */
+#define si_sync() /* do nothing */
+#define si_syncc() /* do nothing */
+
+
+/* Equivalence
+ */
+static __inline qword si_eqv(qword a, qword b)
+{
+ vec_uchar16 d;
+
+ d = vec_xor((vec_uchar16)(a), (vec_uchar16)(b));
+ return ((qword)(vec_nor(d, d)));
+}
+
+/* Extend
+ */
+static __inline qword si_xsbh(qword a)
+{
+ vec_char16 av;
+
+ av = (vec_char16)(a);
+ return ((qword)(vec_unpackh(vec_perm(av, av, ((vec_uchar16){1, 3, 5, 7, 9,11,13,15,
+ 0, 0, 0, 0, 0, 0, 0, 0})))));
+}
+
+static __inline qword si_xshw(qword a)
+{
+ vec_short8 av;
+
+ av = (vec_short8)(a);
+ return ((qword)(vec_unpackh(vec_perm(av, av, ((vec_uchar16){2, 3, 6, 7,
+ 10,11,14,15,
+ 0, 0, 0, 0,
+ 0, 0, 0, 0})))));
+}
+
+static __inline qword si_xswd(qword a)
+{
+ vec_int4 av;
+
+ av = (vec_int4)(a);
+ return ((qword)(vec_perm(av, vec_sra(av, ((vec_uint4){31,31,31,31})),
+ ((vec_uchar16){20, 21, 22, 23,
+ 4, 5, 6, 7,
+ 28, 29, 30, 31,
+ 12, 13, 14, 15}))));
+}
+
+static __inline qword si_fesd(qword a)
+{
+ union {
+ double d[2];
+ vec_double2 vd;
+ } out;
+ union {
+ float f[4];
+ vec_float4 vf;
+ } in;
+
+ in.vf = (vec_float4)(a);
+ out.d[0] = (double)(in.f[0]);
+ out.d[1] = (double)(in.f[2]);
+ return ((qword)(out.vd));
+}
+
+/* Gather
+ */
+static __inline qword si_gbb(qword a)
+{
+ vec_uchar16 bits;
+ vec_uint4 bytes;
+
+ bits = vec_sl(vec_and((vec_uchar16)(a), vec_splat_u8(1)), ((vec_uchar16){7, 6, 5, 4, 3, 2, 1, 0,
+ 7, 6, 5, 4, 3, 2, 1, 0}));
+ bytes = (vec_uint4)vec_sum2s((vec_int4)(vec_sum4s(bits, ((vec_uint4){0}))), ((vec_int4){0}));
+
+ return ((qword)(vec_perm(bytes, bytes, ((vec_uchar16){0, 0, 7,15, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0}))));
+}
+
+
+static __inline qword si_gbh(qword a)
+{
+ vec_ushort8 bits;
+ vec_uint4 bytes;
+
+ bits = vec_sl(vec_and((vec_ushort8)(a), vec_splat_u16(1)), ((vec_ushort8){7, 6, 5, 4, 3, 2, 1, 0}));
+
+ bytes = (vec_uint4)vec_sums((vec_int4)(vec_sum4s((vec_short8)(bits), (vec_int4){0})), (vec_int4){0});
+
+ return ((qword)(vec_sld(bytes, bytes, 12)));
+}
+
+static __inline qword si_gb(qword a)
+{
+ vec_uint4 bits;
+ vec_uint4 bytes;
+
+ bits = vec_sl(vec_and((vec_uint4)(a), vec_splat_u32(1)), ((vec_uint4){3, 2, 1, 0}));
+ bytes = (vec_uint4)vec_sums((vec_int4)(bits), ((vec_int4){0}));
+ return ((qword)(vec_sld(bytes, bytes, 12)));
+}
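+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): the
+   three gathers above pack the least-significant bit of every element
+   into a single right-aligned mask in the preferred slot.  A scalar
+   equivalent of si_gb, where word 0 supplies the mask's MSB: */
+static __inline unsigned int scalar_gb(const unsigned int w[4])
+{
+  return ((w[0] & 1) << 3) | ((w[1] & 1) << 2)
+       | ((w[2] & 1) << 1) |  (w[3] & 1);
+}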
+
+
+/* Compare and halt
+ */
+static __inline void si_heq(qword a, qword b)
+{
+ union {
+ vector unsigned int v;
+ unsigned int i[4];
+ } aa, bb;
+
+ aa.v = (vector unsigned int)(a);
+ bb.v = (vector unsigned int)(b);
+
+ if (aa.i[0] == bb.i[0]) { SPU_HALT_ACTION; };
+}
+
+static __inline void si_heqi(qword a, unsigned int b)
+{
+ union {
+ vector unsigned int v;
+ unsigned int i[4];
+ } aa;
+
+ aa.v = (vector unsigned int)(a);
+
+ if (aa.i[0] == b) { SPU_HALT_ACTION; };
+}
+
+static __inline void si_hgt(qword a, qword b)
+{
+ union {
+ vector signed int v;
+ signed int i[4];
+ } aa, bb;
+
+ aa.v = (vector signed int)(a);
+ bb.v = (vector signed int)(b);
+
+ if (aa.i[0] > bb.i[0]) { SPU_HALT_ACTION; };
+}
+
+static __inline void si_hgti(qword a, signed int b)
+{
+ union {
+ vector signed int v;
+ signed int i[4];
+ } aa;
+
+ aa.v = (vector signed int)(a);
+
+ if (aa.i[0] > b) { SPU_HALT_ACTION; };
+}
+
+static __inline void si_hlgt(qword a, qword b)
+{
+ union {
+ vector unsigned int v;
+ unsigned int i[4];
+ } aa, bb;
+
+ aa.v = (vector unsigned int)(a);
+ bb.v = (vector unsigned int)(b);
+
+ if (aa.i[0] > bb.i[0]) { SPU_HALT_ACTION; };
+}
+
+static __inline void si_hlgti(qword a, unsigned int b)
+{
+ union {
+ vector unsigned int v;
+ unsigned int i[4];
+ } aa;
+
+ aa.v = (vector unsigned int)(a);
+
+ if (aa.i[0] > b) { SPU_HALT_ACTION; };
+}
+
+
+/* Multiply and Add
+ */
+static __inline qword si_mpya(qword a, qword b, qword c)
+{
+ return ((qword)(vec_msum(vec_and((vec_short8)(a),
+ ((vec_short8){0, -1, 0, -1, 0, -1, 0, -1})),
+ (vec_short8)(b), (vec_int4)(c))));
+}
+
+static __inline qword si_fma(qword a, qword b, qword c)
+{
+ return ((qword)(vec_madd((vec_float4)(a), (vec_float4)(b), (vec_float4)(c))));
+}
+
+static __inline qword si_dfma(qword a, qword b, qword c)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } aa, bb, cc, dd;
+
+ aa.v = (vec_double2)(a);
+ bb.v = (vec_double2)(b);
+ cc.v = (vec_double2)(c);
+ dd.d[0] = aa.d[0] * bb.d[0] + cc.d[0];
+ dd.d[1] = aa.d[1] * bb.d[1] + cc.d[1];
+ return ((qword)(dd.v));
+}
+
+/* Form Mask
+ */
+#define si_fsmbi(_a) si_fsmb(si_from_int(_a))
+
+static __inline qword si_fsmb(qword a)
+{
+ vec_char16 mask;
+ vec_ushort8 in;
+
+ in = (vec_ushort8)(a);
+ mask = (vec_char16)(vec_perm(in, in, ((vec_uchar16){2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3})));
+ return ((qword)(vec_sra(vec_sl(mask, ((vec_uchar16){0, 1, 2, 3, 4, 5, 6, 7,
+ 0, 1, 2, 3, 4, 5, 6, 7})),
+ vec_splat_u8(7))));
+}
+
+
+static __inline qword si_fsmh(qword a)
+{
+ vec_uchar16 in;
+ vec_short8 mask;
+
+ in = (vec_uchar16)(a);
+ mask = (vec_short8)(vec_splat(in, 3));
+ return ((qword)(vec_sra(vec_sl(mask, ((vec_ushort8){0, 1, 2, 3, 4, 5, 6, 7})),
+ vec_splat_u16(15))));
+}
+
+static __inline qword si_fsm(qword a)
+{
+ vec_uchar16 in;
+ vec_int4 mask;
+
+ in = (vec_uchar16)(a);
+ mask = (vec_int4)(vec_splat(in, 3));
+ return ((qword)(vec_sra(vec_sl(mask, ((vec_uint4){28, 29, 30, 31})),
+ ((vec_uint4){31,31,31,31}))));
+}
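+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): the
+   form-mask family inverts the gathers above -- each low bit of the
+   preferred slot expands into a full element of ones or zeros.  A
+   scalar equivalent of si_fsm, where bit 3 selects word 0: */
+static __inline void scalar_fsm(unsigned int bits, unsigned int out[4])
+{
+  int i;
+
+  for (i = 0; i < 4; i++)
+    out[i] = (bits & (8u >> i)) ? 0xFFFFFFFFu : 0;
+}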
+
+/* Move from/to registers
+ */
+#define si_fscrrd() ((qword)((vec_uint4){0}))
+#define si_fscrwr(_a)
+
+#define si_mfspr(_reg) ((qword)((vec_uint4){0}))
+#define si_mtspr(_reg, _a)
+
+/* Multiply High High Add
+ */
+static __inline qword si_mpyhha(qword a, qword b, qword c)
+{
+ return ((qword)(vec_add(vec_mule((vec_short8)(a), (vec_short8)(b)), (vec_int4)(c))));
+}
+
+static __inline qword si_mpyhhau(qword a, qword b, qword c)
+{
+ return ((qword)(vec_add(vec_mule((vec_ushort8)(a), (vec_ushort8)(b)), (vec_uint4)(c))));
+}
+
+/* Multiply Subtract
+ */
+static __inline qword si_fms(qword a, qword b, qword c)
+{
+ return ((qword)(vec_madd((vec_float4)(a), (vec_float4)(b),
+ vec_sub(((vec_float4){0.0f}), (vec_float4)(c)))));
+}
+
+static __inline qword si_dfms(qword a, qword b, qword c)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } aa, bb, cc, dd;
+
+ aa.v = (vec_double2)(a);
+ bb.v = (vec_double2)(b);
+ cc.v = (vec_double2)(c);
+ dd.d[0] = aa.d[0] * bb.d[0] - cc.d[0];
+ dd.d[1] = aa.d[1] * bb.d[1] - cc.d[1];
+ return ((qword)(dd.v));
+}
+
+/* Multiply
+ */
+static __inline qword si_fm(qword a, qword b)
+{
+ return ((qword)(vec_madd((vec_float4)(a), (vec_float4)(b), ((vec_float4){0.0f}))));
+}
+
+static __inline qword si_dfm(qword a, qword b)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } aa, bb, dd;
+
+ aa.v = (vec_double2)(a);
+ bb.v = (vec_double2)(b);
+ dd.d[0] = aa.d[0] * bb.d[0];
+ dd.d[1] = aa.d[1] * bb.d[1];
+ return ((qword)(dd.v));
+}
+
+/* Multiply High
+ */
+static __inline qword si_mpyh(qword a, qword b)
+{
+ vec_uint4 sixteen = (vec_uint4){16, 16, 16, 16};
+
+ return ((qword)(vec_sl(vec_mule((vec_short8)(a), (vec_short8)(vec_sl((vec_uint4)(b), sixteen))), sixteen)));
+}
+
+
+/* Multiply High High
+ */
+static __inline qword si_mpyhh(qword a, qword b)
+{
+ return ((qword)(vec_mule((vec_short8)(a), (vec_short8)(b))));
+}
+
+static __inline qword si_mpyhhu(qword a, qword b)
+{
+ return ((qword)(vec_mule((vec_ushort8)(a), (vec_ushort8)(b))));
+}
+
+/* Multiply Odd
+ */
+static __inline qword si_mpy(qword a, qword b)
+{
+ return ((qword)(vec_mulo((vec_short8)(a), (vec_short8)(b))));
+}
+
+static __inline qword si_mpyu(qword a, qword b)
+{
+ return ((qword)(vec_mulo((vec_ushort8)(a), (vec_ushort8)(b))));
+}
+
+static __inline qword si_mpyi(qword a, short b)
+{
+ return ((qword)(vec_mulo((vec_short8)(a),
+ vec_splat((vec_short8)(si_from_short(b)), 1))));
+}
+
+static __inline qword si_mpyui(qword a, unsigned short b)
+{
+ return ((qword)(vec_mulo((vec_ushort8)(a),
+ vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
+}
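+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic):
+   si_mpyh supplies the (a_hi16 * b_lo16) << 16 partial product, and
+   the classic SPU idiom builds a full 32 x 32 -> low 32 bit multiply
+   from three partial products.  A sketch assuming the si_a (add word)
+   mapping defined earlier in this header: */
+static __inline qword mpy32_example(qword a, qword b)
+{
+  return si_a(si_a(si_mpyh(a, b), si_mpyh(b, a)), si_mpyu(a, b));
+}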
+
+/* Multiply and Shift Right
+ */
+static __inline qword si_mpys(qword a, qword b)
+{
+ return ((qword)(vec_sra(vec_mulo((vec_short8)(a), (vec_short8)(b)), ((vec_uint4){16,16,16,16}))));
+}
+
+/* Nand
+ */
+static __inline qword si_nand(qword a, qword b)
+{
+ vec_uchar16 d;
+
+ d = vec_and((vec_uchar16)(a), (vec_uchar16)(b));
+ return ((qword)(vec_nor(d, d)));
+}
+
+/* Negative Multiply Add
+ */
+static __inline qword si_dfnma(qword a, qword b, qword c)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } aa, bb, cc, dd;
+
+ aa.v = (vec_double2)(a);
+ bb.v = (vec_double2)(b);
+ cc.v = (vec_double2)(c);
+ dd.d[0] = -cc.d[0] - aa.d[0] * bb.d[0];
+ dd.d[1] = -cc.d[1] - aa.d[1] * bb.d[1];
+ return ((qword)(dd.v));
+}
+
+/* Negative Multiply and Subtract
+ */
+static __inline qword si_fnms(qword a, qword b, qword c)
+{
+ return ((qword)(vec_nmsub((vec_float4)(a), (vec_float4)(b), (vec_float4)(c))));
+}
+
+static __inline qword si_dfnms(qword a, qword b, qword c)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } aa, bb, cc, dd;
+
+ aa.v = (vec_double2)(a);
+ bb.v = (vec_double2)(b);
+ cc.v = (vec_double2)(c);
+ dd.d[0] = cc.d[0] - aa.d[0] * bb.d[0];
+ dd.d[1] = cc.d[1] - aa.d[1] * bb.d[1];
+ return ((qword)(dd.v));
+}
+
+/* Nor
+ */
+static __inline qword si_nor(qword a, qword b)
+{
+ return ((qword)(vec_nor((vec_uchar16)(a), (vec_uchar16)(b))));
+}
+
+/* Or
+ */
+static __inline qword si_or(qword a, qword b)
+{
+ return ((qword)(vec_or((vec_uchar16)(a), (vec_uchar16)(b))));
+}
+
+static __inline qword si_orbi(qword a, unsigned char b)
+{
+ return ((qword)(vec_or((vec_uchar16)(a),
+ vec_splat((vec_uchar16)(si_from_uchar(b)), 3))));
+}
+
+static __inline qword si_orhi(qword a, unsigned short b)
+{
+ return ((qword)(vec_or((vec_ushort8)(a),
+ vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
+}
+
+static __inline qword si_ori(qword a, unsigned int b)
+{
+ return ((qword)(vec_or((vec_uint4)(a),
+ vec_splat((vec_uint4)(si_from_uint(b)), 0))));
+}
+
+/* Or Complement
+ */
+static __inline qword si_orc(qword a, qword b)
+{
+ return ((qword)(vec_or((vec_uchar16)(a), vec_nor((vec_uchar16)(b), (vec_uchar16)(b)))));
+}
+
+
+/* Or Across
+ */
+static __inline qword si_orx(qword a)
+{
+ vec_uchar16 tmp;
+ tmp = (vec_uchar16)(a);
+ tmp = vec_or(tmp, vec_sld(tmp, tmp, 8));
+ tmp = vec_or(tmp, vec_sld(tmp, tmp, 4));
+ return ((qword)(vec_and(tmp, ((vec_uchar16){0xFF,0xFF,0xFF,0xFF, 0x00,0x00,0x00,0x00,
+ 0x00,0x00,0x00,0x00, 0x00,0x00,0x00,0x00}))));
+}
+
+
+/* Estimates
+ */
+static __inline qword si_frest(qword a)
+{
+ return ((qword)(vec_re((vec_float4)(a))));
+}
+
+static __inline qword si_frsqest(qword a)
+{
+ return ((qword)(vec_rsqrte((vec_float4)(a))));
+}
+
+#define si_fi(_a, _d) (_d)
+
+/* Channel Read and Write
+ */
+#define si_rdch(_channel) ((qword)(vec_splat_u8(0))) /* not mappable */
+#define si_rchcnt(_channel) ((qword)(vec_splat_u8(0))) /* not mappable */
+#define si_wrch(_channel, _a) /* not mappable */
+
+/* Rotate Left
+ */
+static __inline qword si_roth(qword a, qword b)
+{
+ return ((qword)(vec_rl((vec_ushort8)(a), (vec_ushort8)(b))));
+}
+
+static __inline qword si_rot(qword a, qword b)
+{
+ return ((qword)(vec_rl((vec_uint4)(a), (vec_uint4)(b))));
+}
+
+static __inline qword si_rothi(qword a, int b)
+{
+ return ((qword)(vec_rl((vec_ushort8)(a),
+ vec_splat((vec_ushort8)(si_from_int(b)), 1))));
+}
+
+static __inline qword si_roti(qword a, int b)
+{
+ return ((qword)(vec_rl((vec_uint4)(a),
+ vec_splat((vec_uint4)(si_from_int(b)), 0))));
+}
+
+/* Rotate Left with Mask
+ */
+static __inline qword si_rothm(qword a, qword b)
+{
+ vec_ushort8 neg_b;
+ vec_ushort8 mask;
+
+ neg_b = (vec_ushort8)vec_sub(vec_splat_s16(0), (vec_short8)(b));
+ mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
+ return ((qword)(vec_andc(vec_sr((vec_ushort8)(a), neg_b), mask)));
+}
+
+static __inline qword si_rotm(qword a, qword b)
+{
+ vec_uint4 neg_b;
+ vec_uint4 mask;
+
+ neg_b = (vec_uint4)vec_sub(vec_splat_s32(0), (vec_int4)(b));
+ mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
+ return ((qword)(vec_andc(vec_sr((vec_uint4)(a), neg_b), mask)));
+}
+
+static __inline qword si_rothmi(qword a, int b)
+{
+ vec_ushort8 neg_b;
+ vec_ushort8 mask;
+
+ neg_b = vec_splat((vec_ushort8)(si_from_int(-b)), 1);
+ mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
+ return ((qword)(vec_andc(vec_sr((vec_ushort8)(a), neg_b), mask)));
+}
+
+static __inline qword si_rotmi(qword a, int b)
+{
+ vec_uint4 neg_b;
+ vec_uint4 mask;
+
+ neg_b = vec_splat((vec_uint4)(si_from_int(-b)), 0);
+ mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
+ return ((qword)(vec_andc(vec_sr((vec_uint4)(a), neg_b), mask)));
+}
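+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): the
+   masks above encode the SPU "rotate and mask" rule that an effective
+   shift count of 32..63 yields zero -- shifting the count left by 26
+   moves its bit 5 into the sign position, and the arithmetic shift by
+   31 smears it into an all-ones mask for vec_andc.  A scalar
+   equivalent of si_rotmi: */
+static __inline unsigned int scalar_rotmi(unsigned int a, int b)
+{
+  unsigned int n = (0u - (unsigned int)b) & 63;   /* 6-bit count */
+
+  return n < 32 ? a >> n : 0;
+}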
+
+
+/* Rotate Left Algebraic with Mask
+ */
+static __inline qword si_rotmah(qword a, qword b)
+{
+ vec_ushort8 neg_b;
+ vec_ushort8 mask;
+
+ neg_b = (vec_ushort8)vec_sub(vec_splat_s16(0), (vec_short8)(b));
+ mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
+ return ((qword)(vec_sra((vec_short8)(a), (vec_ushort8)vec_or(neg_b, mask))));
+}
+
+static __inline qword si_rotma(qword a, qword b)
+{
+ vec_uint4 neg_b;
+ vec_uint4 mask;
+
+ neg_b = (vec_uint4)vec_sub(vec_splat_s32(0), (vec_int4)(b));
+ mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
+ return ((qword)(vec_sra((vec_int4)(a), (vec_uint4)vec_or(neg_b, mask))));
+}
+
+
+static __inline qword si_rotmahi(qword a, int b)
+{
+ vec_ushort8 neg_b;
+ vec_ushort8 mask;
+
+ neg_b = vec_splat((vec_ushort8)(si_from_int(-b)), 1);
+ mask = vec_sra(vec_sl(neg_b, vec_splat_u16(11)), vec_splat_u16(15));
+ return ((qword)(vec_sra((vec_short8)(a), (vec_ushort8)vec_or(neg_b, mask))));
+}
+
+static __inline qword si_rotmai(qword a, int b)
+{
+ vec_uint4 neg_b;
+ vec_uint4 mask;
+
+ neg_b = vec_splat((vec_uint4)(si_from_int(-b)), 0);
+ mask = vec_sra(vec_sl(neg_b, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
+ return ((qword)(vec_sra((vec_int4)(a), (vec_uint4)vec_or(neg_b, mask))));
+}
+
+
+/* Rotate Left Quadword by Bytes with Mask
+ */
+static __inline qword si_rotqmbyi(qword a, int count)
+{
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+ vec_uchar16 mask;
+
+ count = 0 - count;
+ x.i[3] = count << 3;
+ mask = (count & 0x10) ? vec_splat_u8(0) : vec_splat_u8(-1);
+
+ return ((qword)(vec_and(vec_sro((vec_uchar16)(a), x.v), mask)));
+}
+
+
+static __inline qword si_rotqmby(qword a, qword count)
+{
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+ int cnt;
+ vec_uchar16 mask;
+
+ x.v = (vec_uchar16)(count);
+ x.i[0] = cnt = (0 - x.i[0]) << 3;
+
+ x.v = vec_splat(x.v, 3);
+ mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);
+
+ return ((qword)(vec_and(vec_sro((vec_uchar16)(a), x.v), mask)));
+}
+
+
+/* Rotate Left Quadword by Bytes
+ */
+static __inline qword si_rotqbyi(qword a, int count)
+{
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } left, right;
+
+ count <<= 3;
+ left.i[3] = count;
+ right.i[3] = 0 - count;
+ return ((qword)(vec_or(vec_slo((vec_uchar16)(a), left.v), vec_sro((vec_uchar16)(a), right.v))));
+}
+
+static __inline qword si_rotqby(qword a, qword count)
+{
+ vec_uchar16 left, right;
+
+ left = vec_sl(vec_splat((vec_uchar16)(count), 3), vec_splat_u8(3));
+ right = vec_sub(vec_splat_u8(0), left);
+ return ((qword)(vec_or(vec_slo((vec_uchar16)(a), left), vec_sro((vec_uchar16)(a), right))));
+}
+
+/* Rotate Left Quadword by Bytes Bit Count
+ */
+static __inline qword si_rotqbybi(qword a, qword count)
+{
+ vec_uchar16 left, right;
+
+ left = vec_splat((vec_uchar16)(count), 3);
+ right = vec_sub(vec_splat_u8(7), left);
+ return ((qword)(vec_or(vec_slo((vec_uchar16)(a), left), vec_sro((vec_uchar16)(a), right))));
+}
+
+
+/* Rotate Left Quadword by Bits
+ */
+static __inline qword si_rotqbii(qword a, int count)
+{
+ vec_uchar16 x, y;
+ vec_uchar16 result;
+
+ x = vec_splat((vec_uchar16)(si_from_int(count & 7)), 3);
+ y = (vec_uchar16)(vec_sr((vec_uint4)vec_sro((vec_uchar16)(a), ((vec_uchar16)((vec_uint4){0,0,0,120}))),
+ (vec_uint4)vec_sub(vec_splat_u8(8), x)));
+ result = vec_or(vec_sll((qword)(a), x), y);
+ return ((qword)(result));
+}
+
+static __inline qword si_rotqbi(qword a, qword count)
+{
+ vec_uchar16 x, y;
+ vec_uchar16 result;
+
+ x = vec_and(vec_splat((vec_uchar16)(count), 3), vec_splat_u8(7));
+ y = (vec_uchar16)(vec_sr((vec_uint4)vec_sro((vec_uchar16)(a), ((vec_uchar16)((vec_uint4){0,0,0,120}))),
+ (vec_uint4)vec_sub(vec_splat_u8(8), x)));
+
+ result = vec_or(vec_sll((qword)(a), x), y);
+ return ((qword)(result));
+}
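+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): the
+   quadword bit rotates above shift the vector left by count & 7 bits
+   and OR back the bits that fall off the top; the vec_sro by 120 bits
+   (15 bytes) first brings the highest byte down to the bottom.  A
+   128-bit scalar equivalent, assuming a target where GCC provides
+   unsigned __int128: */
+static __inline unsigned __int128 rotl128(unsigned __int128 x, unsigned int n)
+{
+  n &= 127;
+  return n ? (x << n) | (x >> (128 - n)) : x;
+}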
+
+
+/* Rotate Left Quadword and Mask by Bits
+ */
+static __inline qword si_rotqmbii(qword a, int count)
+{
+ return ((qword)(vec_srl((vec_uchar16)(a), vec_splat((vec_uchar16)(si_from_int(0 - count)), 3))));
+}
+
+static __inline qword si_rotqmbi(qword a, qword count)
+{
+ return ((qword)(vec_srl((vec_uchar16)(a), vec_sub(vec_splat_u8(0), vec_splat((vec_uchar16)(count), 3)))));
+}
+
+
+/* Rotate Left Quadword and Mask by Bytes with Bit Count
+ */
+static __inline qword si_rotqmbybi(qword a, qword count)
+{
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+ int cnt;
+ vec_uchar16 mask;
+
+ x.v = (vec_uchar16)(count);
+ x.i[0] = cnt = 0 - (x.i[0] & ~7);
+ x.v = vec_splat(x.v, 3);
+ mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);
+
+ return ((qword)(vec_and(vec_sro((vec_uchar16)(a), x.v), mask)));
+}
+
+
+/* Round Double to Float
+ */
+static __inline qword si_frds(qword a)
+{
+ union {
+ vec_float4 v;
+ float f[4];
+ } d;
+ union {
+ vec_double2 v;
+ double d[2];
+ } in;
+
+ in.v = (vec_double2)(a);
+ d.v = (vec_float4){0.0f};
+ d.f[0] = (float)in.d[0];
+ d.f[2] = (float)in.d[1];
+
+ return ((qword)(d.v));
+}
+
+/* Select Bits
+ */
+static __inline qword si_selb(qword a, qword b, qword c)
+{
+ return ((qword)(vec_sel((vec_uchar16)(a), (vec_uchar16)(b), (vec_uchar16)(c))));
+}
+
+
+/* Shuffle Bytes
+ */
+static __inline qword si_shufb(qword a, qword b, qword pattern)
+{
+ vec_uchar16 pat;
+
+ pat = vec_sel(((vec_uchar16){0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15}),
+ vec_sr((vec_uchar16)(pattern), vec_splat_u8(3)),
+ vec_sra((vec_uchar16)(pattern), vec_splat_u8(7)));
+ return ((qword)(vec_perm(vec_perm(a, b, pattern),
+ ((vec_uchar16){0, 0, 0, 0, 0, 0, 0, 0,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x80, 0x80, 0x80, 0x80}),
+ pat)));
+}
+
+
+/* Shift Left
+ */
+static __inline qword si_shlh(qword a, qword b)
+{
+ vec_ushort8 mask;
+
+ mask = (vec_ushort8)vec_sra(vec_sl((vec_ushort8)(b), vec_splat_u16(11)), vec_splat_u16(15));
+ return ((qword)(vec_andc(vec_sl((vec_ushort8)(a), (vec_ushort8)(b)), mask)));
+}
+
+static __inline qword si_shl(qword a, qword b)
+{
+ vec_uint4 mask;
+
+ mask = (vec_uint4)vec_sra(vec_sl((vec_uint4)(b), ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
+ return ((qword)(vec_andc(vec_sl((vec_uint4)(a), (vec_uint4)(b)), mask)));
+}
+
+
+static __inline qword si_shlhi(qword a, unsigned int b)
+{
+ vec_ushort8 mask;
+ vec_ushort8 bv;
+
+ bv = vec_splat((vec_ushort8)(si_from_int(b)), 1);
+ mask = (vec_ushort8)vec_sra(vec_sl(bv, vec_splat_u16(11)), vec_splat_u16(15));
+ return ((qword)(vec_andc(vec_sl((vec_ushort8)(a), bv), mask)));
+}
+
+static __inline qword si_shli(qword a, unsigned int b)
+{
+ vec_uint4 bv;
+ vec_uint4 mask;
+
+ bv = vec_splat((vec_uint4)(si_from_uint(b)), 0);
+ mask = (vec_uint4)vec_sra(vec_sl(bv, ((vec_uint4){26,26,26,26})), ((vec_uint4){31,31,31,31}));
+ return ((qword)(vec_andc(vec_sl((vec_uint4)(a), bv), mask)));
+}
+
+
+/* Shift Left Quadword
+ */
+static __inline qword si_shlqbii(qword a, unsigned int count)
+{
+ vec_uchar16 x;
+
+ x = vec_splat((vec_uchar16)(si_from_uint(count)), 3);
+ return ((qword)(vec_sll((vec_uchar16)(a), x)));
+}
+
+static __inline qword si_shlqbi(qword a, qword count)
+{
+ vec_uchar16 x;
+
+ x = vec_splat((vec_uchar16)(count), 3);
+ return ((qword)(vec_sll((vec_uchar16)(a), x)));
+}
+
+
+/* Shift Left Quadword by Bytes
+ */
+static __inline qword si_shlqbyi(qword a, unsigned int count)
+{
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+ vec_uchar16 mask;
+
+ x.i[3] = count << 3;
+ mask = (count & 0x10) ? vec_splat_u8(0) : vec_splat_u8(-1);
+ return ((qword)(vec_and(vec_slo((vec_uchar16)(a), x.v), mask)));
+}
+
+static __inline qword si_shlqby(qword a, qword count)
+{
+ union {
+ vec_uchar16 v;
+ unsigned int i[4];
+ } x;
+ unsigned int cnt;
+ vec_uchar16 mask;
+
+ x.v = vec_sl(vec_splat((vec_uchar16)(count), 3), vec_splat_u8(3));
+ cnt = x.i[0];
+ mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);
+ return ((qword)(vec_and(vec_slo((vec_uchar16)(a), x.v), mask)));
+}
+
+/* Shift Left Quadword by Bytes with Bit Count
+ */
+static __inline qword si_shlqbybi(qword a, qword count)
+{
+ union {
+ vec_uchar16 v;
+ int i[4];
+ } x;
+ unsigned int cnt;
+ vec_uchar16 mask;
+
+ x.v = vec_splat((vec_uchar16)(count), 3);
+ cnt = x.i[0];
+ mask = (cnt & 0x80) ? vec_splat_u8(0) : vec_splat_u8(-1);
+ return ((qword)(vec_and(vec_slo((vec_uchar16)(a), x.v), mask)));
+}
+
+
+/* Stop and Signal
+ */
+#define si_stop(_type) SPU_STOP_ACTION
+#define si_stopd(a, b, c) SPU_STOP_ACTION
+
+
+/* Subtract
+ */
+static __inline qword si_sfh(qword a, qword b)
+{
+ return ((qword)(vec_sub((vec_ushort8)(b), (vec_ushort8)(a))));
+}
+
+static __inline qword si_sf(qword a, qword b)
+{
+ return ((qword)(vec_sub((vec_uint4)(b), (vec_uint4)(a))));
+}
+
+static __inline qword si_fs(qword a, qword b)
+{
+ return ((qword)(vec_sub((vec_float4)(a), (vec_float4)(b))));
+}
+
+static __inline qword si_dfs(qword a, qword b)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } aa, bb, dd;
+
+ aa.v = (vec_double2)(a);
+ bb.v = (vec_double2)(b);
+ dd.d[0] = aa.d[0] - bb.d[0];
+ dd.d[1] = aa.d[1] - bb.d[1];
+ return ((qword)(dd.v));
+}
+
+static __inline qword si_sfhi(qword a, short b)
+{
+ return ((qword)(vec_sub(vec_splat((vec_short8)(si_from_short(b)), 1),
+ (vec_short8)(a))));
+}
+
+static __inline qword si_sfi(qword a, int b)
+{
+ return ((qword)(vec_sub(vec_splat((vec_int4)(si_from_int(b)), 0),
+ (vec_int4)(a))));
+}
+
+/* Subtract word extended
+ */
+#define si_sfx(_a, _b, _c) ((qword)(vec_add(vec_add((vec_uint4)(_b), \
+ vec_nor((vec_uint4)(_a), (vec_uint4)(_a))), \
+ vec_and((vec_uint4)(_c), vec_splat_u32(1)))))
+
+
+/* Sum Bytes into Shorts
+ */
+static __inline qword si_sumb(qword a, qword b)
+{
+ vec_uint4 zero = (vec_uint4){0};
+ vec_ushort8 sum_a, sum_b;
+
+ sum_a = (vec_ushort8)vec_sum4s((vec_uchar16)(a), zero);
+ sum_b = (vec_ushort8)vec_sum4s((vec_uchar16)(b), zero);
+
+ return ((qword)(vec_perm(sum_a, sum_b, ((vec_uchar16){18, 19, 2, 3, 22, 23, 6, 7,
+ 26, 27, 10, 11, 30, 31, 14, 15}))));
+}
+
+/* Exclusive OR
+ */
+static __inline qword si_xor(qword a, qword b)
+{
+ return ((qword)(vec_xor((vec_uchar16)(a), (vec_uchar16)(b))));
+}
+
+static __inline qword si_xorbi(qword a, unsigned char b)
+{
+ return ((qword)(vec_xor((vec_uchar16)(a),
+ vec_splat((vec_uchar16)(si_from_uchar(b)), 3))));
+}
+
+static __inline qword si_xorhi(qword a, unsigned short b)
+{
+ return ((qword)(vec_xor((vec_ushort8)(a),
+ vec_splat((vec_ushort8)(si_from_ushort(b)), 1))));
+}
+
+static __inline qword si_xori(qword a, unsigned int b)
+{
+ return ((qword)(vec_xor((vec_uint4)(a),
+ vec_splat((vec_uint4)(si_from_uint(b)), 0))));
+}
+
+
+/* Generate Controls for Sub-Quadword Insertion
+ */
+static __inline qword si_cbd(qword a, int imm)
+{
+ union {
+ vec_uint4 v;
+ unsigned char c[16];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.c[(si_to_uint(a) + (unsigned int)(imm)) & 0xF] = 0x03;
+ return ((qword)(shmask.v));
+}
+
+static __inline qword si_cdd(qword a, int imm)
+{
+ union {
+ vec_uint4 v;
+ unsigned long long ll[2];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.ll[((si_to_uint(a) + (unsigned int)(imm)) >> 3) & 0x1] = 0x0001020304050607ULL;
+ return ((qword)(shmask.v));
+}
+
+static __inline qword si_chd(qword a, int imm)
+{
+ union {
+ vec_uint4 v;
+ unsigned short s[8];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.s[((si_to_uint(a) + (unsigned int)(imm)) >> 1) & 0x7] = 0x0203;
+ return ((qword)(shmask.v));
+}
+
+static __inline qword si_cwd(qword a, int imm)
+{
+ union {
+ vec_uint4 v;
+ unsigned int i[4];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.i[((si_to_uint(a) + (unsigned int)(imm)) >> 2) & 0x3] = 0x00010203;
+ return ((qword)(shmask.v));
+}
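+
+/* Illustrative sketch (hypothetical helper, not an SPU intrinsic): the
+   control words built above are meant to be fed to si_shufb.  For
+   example, replacing the word selected by byte address addr inside old
+   with the preferred-slot word of neww: */
+static __inline qword insert_word_example(qword old, qword neww,
+                                          unsigned int addr)
+{
+  qword pat = si_cwd(si_from_uint(addr), 0);
+
+  return si_shufb(neww, old, pat);
+}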
+
+static __inline qword si_cbx(qword a, qword b)
+{
+ union {
+ vec_uint4 v;
+ unsigned char c[16];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.c[si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) & 0xF] = 0x03;
+ return ((qword)(shmask.v));
+}
+
+
+static __inline qword si_cdx(qword a, qword b)
+{
+ union {
+ vec_uint4 v;
+ unsigned long long ll[2];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.ll[(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) >> 3) & 0x1] = 0x0001020304050607ULL;
+ return ((qword)(shmask.v));
+}
+
+static __inline qword si_chx(qword a, qword b)
+{
+ union {
+ vec_uint4 v;
+ unsigned short s[8];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.s[(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) >> 1) & 0x7] = 0x0203;
+ return ((qword)(shmask.v));
+}
+
+static __inline qword si_cwx(qword a, qword b)
+{
+ union {
+ vec_uint4 v;
+ unsigned int i[4];
+ } shmask;
+
+ shmask.v = ((vec_uint4){0x10111213, 0x14151617, 0x18191A1B, 0x1C1D1E1F});
+ shmask.i[(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))) >> 2) & 0x3] = 0x00010203;
+ return ((qword)(shmask.v));
+}
+
+
+/* Constant Formation
+ */
+static __inline qword si_il(signed short imm)
+{
+ return ((qword)(vec_splat((vec_int4)(si_from_int((signed int)(imm))), 0)));
+}
+
+
+static __inline qword si_ila(unsigned int imm)
+{
+ return ((qword)(vec_splat((vec_uint4)(si_from_uint(imm)), 0)));
+}
+
+static __inline qword si_ilh(signed short imm)
+{
+ return ((qword)(vec_splat((vec_short8)(si_from_short(imm)), 1)));
+}
+
+static __inline qword si_ilhu(signed short imm)
+{
+ return ((qword)(vec_splat((vec_uint4)(si_from_uint((unsigned int)(imm) << 16)), 0)));
+}
+
+static __inline qword si_iohl(qword a, unsigned short imm)
+{
+ return ((qword)(vec_or((vec_uint4)(a), vec_splat((vec_uint4)(si_from_uint((unsigned int)(imm))), 0))));
+}
+
+/* No Operation
+ */
+#define si_lnop() /* do nothing */
+#define si_nop() /* do nothing */
+
+
+/* Memory Load and Store
+ */
+static __inline qword si_lqa(unsigned int imm)
+{
+ return ((qword)(vec_ld(0, (vector unsigned char *)(imm))));
+}
+
+static __inline qword si_lqd(qword a, unsigned int imm)
+{
+ return ((qword)(vec_ld(si_to_uint(a) & ~0xF, (vector unsigned char *)(imm))));
+}
+
+static __inline qword si_lqr(unsigned int imm)
+{
+ return ((qword)(vec_ld(0, (vector unsigned char *)(imm))));
+}
+
+static __inline qword si_lqx(qword a, qword b)
+{
+ return ((qword)(vec_ld(si_to_uint((qword)(vec_add((vec_uint4)(a), (vec_uint4)(b)))), (vector unsigned char *)(0))));
+}
+
+static __inline void si_stqa(qword a, unsigned int imm)
+{
+ vec_st((vec_uchar16)(a), 0, (vector unsigned char *)(imm));
+}
+
+static __inline void si_stqd(qword a, qword b, unsigned int imm)
+{
+ vec_st((vec_uchar16)(a), si_to_uint(b) & ~0xF, (vector unsigned char *)(imm));
+}
+
+static __inline void si_stqr(qword a, unsigned int imm)
+{
+ vec_st((vec_uchar16)(a), 0, (vector unsigned char *)(imm));
+}
+
+static __inline void si_stqx(qword a, qword b, qword c)
+{
+ vec_st((vec_uchar16)(a),
+ si_to_uint((qword)(vec_add((vec_uint4)(b), (vec_uint4)(c)))),
+ (vector unsigned char *)(0));
+}
+
+#endif /* !__SPU__ */
+#endif /* !_SI2VMX_H_ */
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/singlefp.h b/gcc-4.4.3/gcc/config/rs6000/singlefp.h
new file mode 100644
index 000000000..36e093c1a
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/singlefp.h
@@ -0,0 +1,40 @@
+/* Definitions for PowerPC single-precision floating point unit
+ such as Xilinx PowerPC 405/440 APU.
+
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Contributed by Michael Eager (eager@eagercon.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+/* Undefine definitions from rs6000.h. */
+#undef TARGET_SINGLE_FLOAT
+#undef TARGET_DOUBLE_FLOAT
+#undef TARGET_SINGLE_FPU
+#undef TARGET_SIMPLE_FPU
+#undef UNITS_PER_FP_WORD
+
+/* FPU operations supported.
+   If TARGET_SINGLE_FPU is set, the processor supports single-precision
+   fp operations.  */
+#define TARGET_SINGLE_FLOAT (rs6000_single_float)
+#define TARGET_DOUBLE_FLOAT (rs6000_double_float)
+#define TARGET_SINGLE_FPU 1
+#define TARGET_SIMPLE_FPU (rs6000_simple_fpu)
+
+/* FP word width depends on single/double fp support. */
+#define UNITS_PER_FP_WORD ((TARGET_SOFT_FLOAT || TARGET_DOUBLE_FLOAT) ? 8 : 4)
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/sol-ci.asm b/gcc-4.4.3/gcc/config/rs6000/sol-ci.asm
new file mode 100644
index 000000000..7c2fbae97
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sol-ci.asm
@@ -0,0 +1,94 @@
+# crti.s for sysv4
+
+# Copyright (C) 1996, 2008, 2009 Free Software Foundation, Inc.
+# Written By Michael Meissner
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just supplies labeled starting points for the .got* and other
+# special sections. It is linked in first before other modules.
+
+ .ident "GNU C scrti.s"
+
+#ifndef __powerpc64__
+# Start of .text
+ .section ".text"
+ .globl _ex_text0
+_ex_text0:
+
+# Exception range
+ .section ".exception_ranges","aw"
+ .globl _ex_range0
+_ex_range0:
+
+# List of C++ constructors
+ .section ".ctors","aw"
+ .globl __CTOR_LIST__
+ .type __CTOR_LIST__,@object
+__CTOR_LIST__:
+
+# List of C++ destructors
+ .section ".dtors","aw"
+ .globl __DTOR_LIST__
+ .type __DTOR_LIST__,@object
+__DTOR_LIST__:
+
+# Head of _init function used for static constructors
+ .section ".init","ax"
+ .align 2
+ .globl _init
+ .type _init,@function
+_init: stwu %r1,-16(%r1)
+ mflr %r0
+ stw %r31,12(%r1)
+ stw %r0,16(%r1)
+
+ bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
+ mflr %r31
+
+# lwz %r3,_ex_shared0@got(%r31)
+# lwz %r4,-8(%r3) # _ex_register or 0
+# cmpi %cr0,%r4,0
+# beq .Lno_reg
+# mtlr %r4
+# blrl
+#.Lno_reg:
+
+# Head of _fini function used for static destructors
+ .section ".fini","ax"
+ .align 2
+ .globl _fini
+ .type _fini,@function
+_fini: stwu %r1,-16(%r1)
+ mflr %r0
+ stw %r31,12(%r1)
+ stw %r0,16(%r1)
+
+ bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
+ mflr %r31
+
+# _environ and its evil twin environ, pointing to the environment
+ .section ".sdata","aw"
+ .align 2
+ .globl _environ
+ .space 4
+ .weak environ
+ .set environ,_environ
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/sol-cn.asm b/gcc-4.4.3/gcc/config/rs6000/sol-cn.asm
new file mode 100644
index 000000000..4aeacaf2c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sol-cn.asm
@@ -0,0 +1,72 @@
+# crtn.s for sysv4
+
+# Copyright (C) 1996, 2007, 2008, 2009 Free Software Foundation, Inc.
+# Written By Michael Meissner
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just supplies labeled ending points for the .got* and other
+# special sections. It is linked in last after other modules.
+
+ .ident "GNU C scrtn.s"
+
+#ifndef __powerpc64__
+# Default versions of exception handling register/deregister
+ .weak _ex_register
+ .weak _ex_deregister
+ .set _ex_register,0
+ .set _ex_deregister,0
+
+# End list of C++ constructors
+ .section ".ctors","aw"
+ .globl __CTOR_END__
+ .type __CTOR_END__,@object
+__CTOR_END__:
+
+# End list of C++ destructors
+ .section ".dtors","aw"
+ .weak __DTOR_END__
+ .type __DTOR_END__,@object
+__DTOR_END__:
+
+ .section ".text"
+ .globl _ex_text1
+_ex_text1:
+
+ .section ".exception_ranges","aw"
+ .globl _ex_range1
+_ex_range1:
+
+# Tail of _init used for static constructors
+ .section ".init","ax"
+ lwz %r0,16(%r1)
+ lwz %r31,12(%r1)
+ mtlr %r0
+ addi %r1,%r1,16
+ blr
+
+# Tail of _fini used for static destructors
+ .section ".fini","ax"
+ lwz %r0,16(%r1)
+ lwz %r31,12(%r1)
+ mtlr %r0
+ addi %r1,%r1,16
+ blr
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/spe.h b/gcc-4.4.3/gcc/config/rs6000/spe.h
new file mode 100644
index 000000000..a79318099
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/spe.h
@@ -0,0 +1,1107 @@
+/* PowerPC E500 user include file.
+ Copyright (C) 2002, 2003, 2004, 2009 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _SPE_H
+#define _SPE_H
+
+#define __vector __attribute__((vector_size(8)))
+
+typedef int int32_t;
+typedef unsigned uint32_t;
+typedef short int16_t;
+typedef unsigned short uint16_t;
+typedef long long int64_t;
+typedef unsigned long long uint64_t;
+
+typedef short __vector __ev64_s16__;
+typedef unsigned short __vector __ev64_u16__;
+typedef int __vector __ev64_s32__;
+typedef unsigned __vector __ev64_u32__;
+typedef long long __vector __ev64_s64__;
+typedef unsigned long long __vector __ev64_u64__;
+typedef float __vector __ev64_fs__;
+
+#define __v2si __ev64_opaque__
+#define __v2sf __ev64_fs__
+
+#define __ev_addw __builtin_spe_evaddw
+#define __ev_addiw __builtin_spe_evaddiw
+#define __ev_subfw(a,b) __builtin_spe_evsubfw ((b), (a))
+#define __ev_subw __builtin_spe_evsubfw
+#define __ev_subifw(a,b) __builtin_spe_evsubifw ((b), (a))
+#define __ev_subiw __builtin_spe_evsubifw
+#define __ev_abs __builtin_spe_evabs
+#define __ev_neg __builtin_spe_evneg
+#define __ev_extsb __builtin_spe_evextsb
+#define __ev_extsh __builtin_spe_evextsh
+#define __ev_and __builtin_spe_evand
+#define __ev_or __builtin_spe_evor
+#define __ev_xor __builtin_spe_evxor
+#define __ev_nand __builtin_spe_evnand
+#define __ev_nor __builtin_spe_evnor
+#define __ev_eqv __builtin_spe_eveqv
+#define __ev_andc __builtin_spe_evandc
+#define __ev_orc __builtin_spe_evorc
+#define __ev_rlw __builtin_spe_evrlw
+#define __ev_rlwi __builtin_spe_evrlwi
+#define __ev_slw __builtin_spe_evslw
+#define __ev_slwi __builtin_spe_evslwi
+#define __ev_srws __builtin_spe_evsrws
+#define __ev_srwu __builtin_spe_evsrwu
+#define __ev_srwis __builtin_spe_evsrwis
+#define __ev_srwiu __builtin_spe_evsrwiu
+#define __ev_cntlzw __builtin_spe_evcntlzw
+#define __ev_cntlsw __builtin_spe_evcntlsw
+#define __ev_rndw __builtin_spe_evrndw
+#define __ev_mergehi __builtin_spe_evmergehi
+#define __ev_mergelo __builtin_spe_evmergelo
+#define __ev_mergelohi __builtin_spe_evmergelohi
+#define __ev_mergehilo __builtin_spe_evmergehilo
+#define __ev_splati __builtin_spe_evsplati
+#define __ev_splatfi __builtin_spe_evsplatfi
+#define __ev_divws __builtin_spe_evdivws
+#define __ev_divwu __builtin_spe_evdivwu
+#define __ev_mra __builtin_spe_evmra
+
+#define __brinc __builtin_spe_brinc
+
+/* Loads. */
+
+#define __ev_lddx __builtin_spe_evlddx
+#define __ev_ldwx __builtin_spe_evldwx
+#define __ev_ldhx __builtin_spe_evldhx
+#define __ev_lwhex __builtin_spe_evlwhex
+#define __ev_lwhoux __builtin_spe_evlwhoux
+#define __ev_lwhosx __builtin_spe_evlwhosx
+#define __ev_lwwsplatx __builtin_spe_evlwwsplatx
+#define __ev_lwhsplatx __builtin_spe_evlwhsplatx
+#define __ev_lhhesplatx __builtin_spe_evlhhesplatx
+#define __ev_lhhousplatx __builtin_spe_evlhhousplatx
+#define __ev_lhhossplatx __builtin_spe_evlhhossplatx
+#define __ev_ldd __builtin_spe_evldd
+#define __ev_ldw __builtin_spe_evldw
+#define __ev_ldh __builtin_spe_evldh
+#define __ev_lwhe __builtin_spe_evlwhe
+#define __ev_lwhou __builtin_spe_evlwhou
+#define __ev_lwhos __builtin_spe_evlwhos
+#define __ev_lwwsplat __builtin_spe_evlwwsplat
+#define __ev_lwhsplat __builtin_spe_evlwhsplat
+#define __ev_lhhesplat __builtin_spe_evlhhesplat
+#define __ev_lhhousplat __builtin_spe_evlhhousplat
+#define __ev_lhhossplat __builtin_spe_evlhhossplat
+
+/* Stores. */
+
+#define __ev_stddx __builtin_spe_evstddx
+#define __ev_stdwx __builtin_spe_evstdwx
+#define __ev_stdhx __builtin_spe_evstdhx
+#define __ev_stwwex __builtin_spe_evstwwex
+#define __ev_stwwox __builtin_spe_evstwwox
+#define __ev_stwhex __builtin_spe_evstwhex
+#define __ev_stwhox __builtin_spe_evstwhox
+#define __ev_stdd __builtin_spe_evstdd
+#define __ev_stdw __builtin_spe_evstdw
+#define __ev_stdh __builtin_spe_evstdh
+#define __ev_stwwe __builtin_spe_evstwwe
+#define __ev_stwwo __builtin_spe_evstwwo
+#define __ev_stwhe __builtin_spe_evstwhe
+#define __ev_stwho __builtin_spe_evstwho
+
+/* Fixed point complex. */
+
+#define __ev_mhossf __builtin_spe_evmhossf
+#define __ev_mhosmf __builtin_spe_evmhosmf
+#define __ev_mhosmi __builtin_spe_evmhosmi
+#define __ev_mhoumi __builtin_spe_evmhoumi
+#define __ev_mhessf __builtin_spe_evmhessf
+#define __ev_mhesmf __builtin_spe_evmhesmf
+#define __ev_mhesmi __builtin_spe_evmhesmi
+#define __ev_mheumi __builtin_spe_evmheumi
+#define __ev_mhossfa __builtin_spe_evmhossfa
+#define __ev_mhosmfa __builtin_spe_evmhosmfa
+#define __ev_mhosmia __builtin_spe_evmhosmia
+#define __ev_mhoumia __builtin_spe_evmhoumia
+#define __ev_mhessfa __builtin_spe_evmhessfa
+#define __ev_mhesmfa __builtin_spe_evmhesmfa
+#define __ev_mhesmia __builtin_spe_evmhesmia
+#define __ev_mheumia __builtin_spe_evmheumia
+
+#define __ev_mhoumf __ev_mhoumi
+#define __ev_mheumf __ev_mheumi
+#define __ev_mhoumfa __ev_mhoumia
+#define __ev_mheumfa __ev_mheumia
+
+#define __ev_mhossfaaw __builtin_spe_evmhossfaaw
+#define __ev_mhossiaaw __builtin_spe_evmhossiaaw
+#define __ev_mhosmfaaw __builtin_spe_evmhosmfaaw
+#define __ev_mhosmiaaw __builtin_spe_evmhosmiaaw
+#define __ev_mhousiaaw __builtin_spe_evmhousiaaw
+#define __ev_mhoumiaaw __builtin_spe_evmhoumiaaw
+#define __ev_mhessfaaw __builtin_spe_evmhessfaaw
+#define __ev_mhessiaaw __builtin_spe_evmhessiaaw
+#define __ev_mhesmfaaw __builtin_spe_evmhesmfaaw
+#define __ev_mhesmiaaw __builtin_spe_evmhesmiaaw
+#define __ev_mheusiaaw __builtin_spe_evmheusiaaw
+#define __ev_mheumiaaw __builtin_spe_evmheumiaaw
+
+#define __ev_mhousfaaw __ev_mhousiaaw
+#define __ev_mhoumfaaw __ev_mhoumiaaw
+#define __ev_mheusfaaw __ev_mheusiaaw
+#define __ev_mheumfaaw __ev_mheumiaaw
+
+#define __ev_mhossfanw __builtin_spe_evmhossfanw
+#define __ev_mhossianw __builtin_spe_evmhossianw
+#define __ev_mhosmfanw __builtin_spe_evmhosmfanw
+#define __ev_mhosmianw __builtin_spe_evmhosmianw
+#define __ev_mhousianw __builtin_spe_evmhousianw
+#define __ev_mhoumianw __builtin_spe_evmhoumianw
+#define __ev_mhessfanw __builtin_spe_evmhessfanw
+#define __ev_mhessianw __builtin_spe_evmhessianw
+#define __ev_mhesmfanw __builtin_spe_evmhesmfanw
+#define __ev_mhesmianw __builtin_spe_evmhesmianw
+#define __ev_mheusianw __builtin_spe_evmheusianw
+#define __ev_mheumianw __builtin_spe_evmheumianw
+
+#define __ev_mhousfanw __ev_mhousianw
+#define __ev_mhoumfanw __ev_mhoumianw
+#define __ev_mheusfanw __ev_mheusianw
+#define __ev_mheumfanw __ev_mheumianw
+
+#define __ev_mhogsmfaa __builtin_spe_evmhogsmfaa
+#define __ev_mhogsmiaa __builtin_spe_evmhogsmiaa
+#define __ev_mhogumiaa __builtin_spe_evmhogumiaa
+#define __ev_mhegsmfaa __builtin_spe_evmhegsmfaa
+#define __ev_mhegsmiaa __builtin_spe_evmhegsmiaa
+#define __ev_mhegumiaa __builtin_spe_evmhegumiaa
+
+#define __ev_mhogumfaa __ev_mhogumiaa
+#define __ev_mhegumfaa __ev_mhegumiaa
+
+#define __ev_mhogsmfan __builtin_spe_evmhogsmfan
+#define __ev_mhogsmian __builtin_spe_evmhogsmian
+#define __ev_mhogumian __builtin_spe_evmhogumian
+#define __ev_mhegsmfan __builtin_spe_evmhegsmfan
+#define __ev_mhegsmian __builtin_spe_evmhegsmian
+#define __ev_mhegumian __builtin_spe_evmhegumian
+
+#define __ev_mhogumfan __ev_mhogumian
+#define __ev_mhegumfan __ev_mhegumian
+
+#define __ev_mwhssf __builtin_spe_evmwhssf
+#define __ev_mwhsmf __builtin_spe_evmwhsmf
+#define __ev_mwhsmi __builtin_spe_evmwhsmi
+#define __ev_mwhumi __builtin_spe_evmwhumi
+#define __ev_mwhssfa __builtin_spe_evmwhssfa
+#define __ev_mwhsmfa __builtin_spe_evmwhsmfa
+#define __ev_mwhsmia __builtin_spe_evmwhsmia
+#define __ev_mwhumia __builtin_spe_evmwhumia
+
+#define __ev_mwhumf __ev_mwhumi
+#define __ev_mwhumfa __ev_mwhumia
+
+#define __ev_mwlumi __builtin_spe_evmwlumi
+#define __ev_mwlumia __builtin_spe_evmwlumia
+#define __ev_mwlumiaaw __builtin_spe_evmwlumiaaw
+
+#define __ev_mwlssiaaw __builtin_spe_evmwlssiaaw
+#define __ev_mwlsmiaaw __builtin_spe_evmwlsmiaaw
+#define __ev_mwlusiaaw __builtin_spe_evmwlusiaaw
+
+#define __ev_mwlssianw __builtin_spe_evmwlssianw
+#define __ev_mwlsmianw __builtin_spe_evmwlsmianw
+#define __ev_mwlusianw __builtin_spe_evmwlusianw
+#define __ev_mwlumianw __builtin_spe_evmwlumianw
+
+#define __ev_mwssf __builtin_spe_evmwssf
+#define __ev_mwsmf __builtin_spe_evmwsmf
+#define __ev_mwsmi __builtin_spe_evmwsmi
+#define __ev_mwumi __builtin_spe_evmwumi
+#define __ev_mwssfa __builtin_spe_evmwssfa
+#define __ev_mwsmfa __builtin_spe_evmwsmfa
+#define __ev_mwsmia __builtin_spe_evmwsmia
+#define __ev_mwumia __builtin_spe_evmwumia
+
+#define __ev_mwumf __ev_mwumi
+#define __ev_mwumfa __ev_mwumia
+
+#define __ev_mwssfaa __builtin_spe_evmwssfaa
+#define __ev_mwsmfaa __builtin_spe_evmwsmfaa
+#define __ev_mwsmiaa __builtin_spe_evmwsmiaa
+#define __ev_mwumiaa __builtin_spe_evmwumiaa
+
+#define __ev_mwumfaa __ev_mwumiaa
+
+#define __ev_mwssfan __builtin_spe_evmwssfan
+#define __ev_mwsmfan __builtin_spe_evmwsmfan
+#define __ev_mwsmian __builtin_spe_evmwsmian
+#define __ev_mwumian __builtin_spe_evmwumian
+
+#define __ev_mwumfan __ev_mwumian
+
+#define __ev_addssiaaw __builtin_spe_evaddssiaaw
+#define __ev_addsmiaaw __builtin_spe_evaddsmiaaw
+#define __ev_addusiaaw __builtin_spe_evaddusiaaw
+#define __ev_addumiaaw __builtin_spe_evaddumiaaw
+
+#define __ev_addusfaaw __ev_addusiaaw
+#define __ev_addumfaaw __ev_addumiaaw
+#define __ev_addsmfaaw __ev_addsmiaaw
+#define __ev_addssfaaw __ev_addssiaaw
+
+#define __ev_subfssiaaw __builtin_spe_evsubfssiaaw
+#define __ev_subfsmiaaw __builtin_spe_evsubfsmiaaw
+#define __ev_subfusiaaw __builtin_spe_evsubfusiaaw
+#define __ev_subfumiaaw __builtin_spe_evsubfumiaaw
+
+#define __ev_subfusfaaw __ev_subfusiaaw
+#define __ev_subfumfaaw __ev_subfumiaaw
+#define __ev_subfsmfaaw __ev_subfsmiaaw
+#define __ev_subfssfaaw __ev_subfssiaaw
+
+/* Floating Point SIMD Instructions */
+
+#define __ev_fsabs __builtin_spe_evfsabs
+#define __ev_fsnabs __builtin_spe_evfsnabs
+#define __ev_fsneg __builtin_spe_evfsneg
+#define __ev_fsadd __builtin_spe_evfsadd
+#define __ev_fssub __builtin_spe_evfssub
+#define __ev_fsmul __builtin_spe_evfsmul
+#define __ev_fsdiv __builtin_spe_evfsdiv
+#define __ev_fscfui __builtin_spe_evfscfui
+#define __ev_fscfsi __builtin_spe_evfscfsi
+#define __ev_fscfuf __builtin_spe_evfscfuf
+#define __ev_fscfsf __builtin_spe_evfscfsf
+#define __ev_fsctui __builtin_spe_evfsctui
+#define __ev_fsctsi __builtin_spe_evfsctsi
+#define __ev_fsctuf __builtin_spe_evfsctuf
+#define __ev_fsctsf __builtin_spe_evfsctsf
+#define __ev_fsctuiz __builtin_spe_evfsctuiz
+#define __ev_fsctsiz __builtin_spe_evfsctsiz
+
+/* Not supported on the first-generation e500; implemented below as
+   two-instruction sequences: */
+
+#define __ev_mwhusfaaw __ev_mwhusiaaw
+#define __ev_mwhumfaaw __ev_mwhumiaaw
+#define __ev_mwhusfanw __ev_mwhusianw
+#define __ev_mwhumfanw __ev_mwhumianw
+#define __ev_mwhgumfaa __ev_mwhgumiaa
+#define __ev_mwhgumfan __ev_mwhgumian
+
+#define __ev_mwhgssfaa __internal_ev_mwhgssfaa
+#define __ev_mwhgsmfaa __internal_ev_mwhgsmfaa
+#define __ev_mwhgsmiaa __internal_ev_mwhgsmiaa
+#define __ev_mwhgumiaa __internal_ev_mwhgumiaa
+#define __ev_mwhgssfan __internal_ev_mwhgssfan
+#define __ev_mwhgsmfan __internal_ev_mwhgsmfan
+#define __ev_mwhgsmian __internal_ev_mwhgsmian
+#define __ev_mwhgumian __internal_ev_mwhgumian
+#define __ev_mwhssiaaw __internal_ev_mwhssiaaw
+#define __ev_mwhssfaaw __internal_ev_mwhssfaaw
+#define __ev_mwhsmfaaw __internal_ev_mwhsmfaaw
+#define __ev_mwhsmiaaw __internal_ev_mwhsmiaaw
+#define __ev_mwhusiaaw __internal_ev_mwhusiaaw
+#define __ev_mwhumiaaw __internal_ev_mwhumiaaw
+#define __ev_mwhssfanw __internal_ev_mwhssfanw
+#define __ev_mwhssianw __internal_ev_mwhssianw
+#define __ev_mwhsmfanw __internal_ev_mwhsmfanw
+#define __ev_mwhsmianw __internal_ev_mwhsmianw
+#define __ev_mwhusianw __internal_ev_mwhusianw
+#define __ev_mwhumianw __internal_ev_mwhumianw
+
+static inline __ev64_opaque__
+__internal_ev_mwhssfaaw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhssf (a, b);
+ return __ev_addssiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhssiaaw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmi (a, b);
+ return __ev_addssiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhsmfaaw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmf (a, b);
+ return __ev_addsmiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhsmiaaw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmi (a, b);
+ return __ev_addsmiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhusiaaw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhumi (a, b);
+ return __ev_addusiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhumiaaw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhumi (a, b);
+ return __ev_addumiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhssfanw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhssf (a, b);
+ return __ev_subfssiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhssianw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmi (a, b);
+ return __ev_subfssiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhsmfanw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmf (a, b);
+ return __ev_subfsmiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhsmianw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmi (a, b);
+ return __ev_subfsmiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhusianw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhumi (a, b);
+ return __ev_subfusiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhumianw (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhumi (a, b);
+ return __ev_subfumiaaw (t);
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgssfaa (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhssf (a, b);
+ return __ev_mwsmiaa (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgsmfaa (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmf (a, b);
+ return __ev_mwsmiaa (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgsmiaa (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmi (a, b);
+ return __ev_mwsmiaa (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgumiaa (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhumi (a, b);
+ return __ev_mwumiaa (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgssfan (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhssf (a, b);
+ return __ev_mwsmian (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgsmfan (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmf (a, b);
+ return __ev_mwsmian (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgsmian (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhsmi (a, b);
+ return __ev_mwsmian (t, ((__ev64_s32__){1, 1}));
+}
+
+static inline __ev64_opaque__
+__internal_ev_mwhgumian (__ev64_opaque__ a, __ev64_opaque__ b)
+{
+ __ev64_opaque__ t;
+
+ t = __ev_mwhumi (a, b);
+ return __ev_mwumian (t, ((__ev64_s32__){1, 1}));
+}
+
+/* END OF NOT SUPPORTED */
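+
+/* Usage sketch (editorial addition, not in the original header): on a
+   first-generation e500 the emulated forms above are called like any
+   other intrinsic; for example
+
+     __ev64_opaque__ acc = __ev_mwhssfaaw (x, y);
+
+   compiles to the evmwhssf multiply followed by the evaddssiaaw
+   accumulate, per __internal_ev_mwhssfaaw above.  */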
+
+/* __ev_create* functions. */
+
+#define __ev_create_ufix32_u32 __ev_create_u32
+#define __ev_create_sfix32_s32 __ev_create_s32
+
+static inline __ev64_opaque__
+__ev_create_s16 (int16_t a, int16_t b, int16_t c, int16_t d)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int16_t i[4];
+ } u;
+
+ u.i[0] = a;
+ u.i[1] = b;
+ u.i[2] = c;
+ u.i[3] = d;
+
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_create_u16 (uint16_t a, uint16_t b, uint16_t c, uint16_t d)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint16_t i[4];
+ } u;
+
+ u.i[0] = a;
+ u.i[1] = b;
+ u.i[2] = c;
+ u.i[3] = d;
+
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_create_s32 (int32_t a, int32_t b)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int32_t i[2];
+ } u;
+
+ u.i[0] = a;
+ u.i[1] = b;
+
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_create_u32 (uint32_t a, uint32_t b)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint32_t i[2];
+ } u;
+
+ u.i[0] = a;
+ u.i[1] = b;
+
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_create_fs (float a, float b)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ float f[2];
+ } u;
+
+ u.f[0] = a;
+ u.f[1] = b;
+
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_create_sfix32_fs (float a, float b)
+{
+ __ev64_opaque__ ev;
+
+ ev = (__ev64_opaque__) __ev_create_fs (a, b);
+ return (__ev64_opaque__) __builtin_spe_evfsctsf ((__v2sf) ev);
+}
+
+static inline __ev64_opaque__
+__ev_create_ufix32_fs (float a, float b)
+{
+ __ev64_opaque__ ev;
+
+ ev = (__ev64_opaque__) __ev_create_fs (a, b);
+ return (__ev64_opaque__) __builtin_spe_evfsctuf ((__v2sf) ev);
+}
+
+static inline __ev64_opaque__
+__ev_create_s64 (int64_t a)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int64_t i;
+ } u;
+
+ u.i = a;
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_create_u64 (uint64_t a)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint64_t i;
+ } u;
+
+ u.i = a;
+ return u.v;
+}
+
+static inline uint64_t
+__ev_convert_u64 (__ev64_opaque__ a)
+{
+ return (uint64_t) a;
+}
+
+static inline int64_t
+__ev_convert_s64 (__ev64_opaque__ a)
+{
+ return (int64_t) a;
+}
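+
+/* Usage sketch (editorial addition): pack two 32-bit words into a
+   vector and read the raw 64-bit image back; on the big-endian e500,
+   element 0 is the most-significant word, so
+
+     __ev64_opaque__ v = __ev_create_u32 (0x11111111, 0x22222222);
+     uint64_t bits = __ev_convert_u64 (v);
+
+   leaves bits equal to 0x1111111122222222.  */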
+
+/* __ev_get_* functions. */
+
+#define __ev_get_upper_u32(a) __ev_get_u32_internal ((a), 0)
+#define __ev_get_lower_u32(a) __ev_get_u32_internal ((a), 1)
+#define __ev_get_upper_s32(a) __ev_get_s32_internal ((a), 0)
+#define __ev_get_lower_s32(a) __ev_get_s32_internal ((a), 1)
+#define __ev_get_upper_fs(a) __ev_get_fs_internal ((a), 0)
+#define __ev_get_lower_fs(a) __ev_get_fs_internal ((a), 1)
+#define __ev_get_upper_ufix32_u32 __ev_get_upper_u32
+#define __ev_get_lower_ufix32_u32 __ev_get_lower_u32
+#define __ev_get_upper_sfix32_s32 __ev_get_upper_s32
+#define __ev_get_lower_sfix32_s32 __ev_get_lower_s32
+#define __ev_get_upper_sfix32_fs(a) __ev_get_sfix32_fs ((a), 0)
+#define __ev_get_lower_sfix32_fs(a) __ev_get_sfix32_fs ((a), 1)
+#define __ev_get_upper_ufix32_fs(a) __ev_get_ufix32_fs ((a), 0)
+#define __ev_get_lower_ufix32_fs(a) __ev_get_ufix32_fs ((a), 1)
+
+#define __ev_get_u32 __ev_get_u32_internal
+#define __ev_get_s32 __ev_get_s32_internal
+#define __ev_get_fs __ev_get_fs_internal
+#define __ev_get_u16 __ev_get_u16_internal
+#define __ev_get_s16 __ev_get_s16_internal
+
+#define __ev_get_ufix32_u32 __ev_get_u32
+#define __ev_get_sfix32_s32 __ev_get_s32
+#define __ev_get_ufix32_fs __ev_get_ufix32_fs_internal
+#define __ev_get_sfix32_fs __ev_get_sfix32_fs_internal
+
+static inline uint32_t
+__ev_get_u32_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint32_t i[2];
+ } u;
+
+ u.v = a;
+ return u.i[pos];
+}
+
+static inline int32_t
+__ev_get_s32_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int32_t i[2];
+ } u;
+
+ u.v = a;
+ return u.i[pos];
+}
+
+static inline float
+__ev_get_fs_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ float f[2];
+ } u;
+
+ u.v = a;
+ return u.f[pos];
+}
+
+static inline float
+__ev_get_sfix32_fs_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ __ev64_fs__ v;
+
+ v = __builtin_spe_evfscfsf ((__v2sf) a);
+ return __ev_get_fs_internal ((__ev64_opaque__) v, pos);
+}
+
+static inline float
+__ev_get_ufix32_fs_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ __ev64_fs__ v;
+
+ v = __builtin_spe_evfscfuf ((__v2sf) a);
+ return __ev_get_fs_internal ((__ev64_opaque__) v, pos);
+}
+
+static inline uint16_t
+__ev_get_u16_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint16_t i[4];
+ } u;
+
+ u.v = a;
+ return u.i[pos];
+}
+
+static inline int16_t
+__ev_get_s16_internal (__ev64_opaque__ a, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int16_t i[4];
+ } u;
+
+ u.v = a;
+ return u.i[pos];
+}
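+
+/* Usage sketch (editorial addition): the upper/lower accessors are the
+   position-0/position-1 cases of the internal functions above, e.g.
+
+     uint32_t hi = __ev_get_upper_u32 (v);
+     uint32_t lo = __ev_get_lower_u32 (v);
+
+   which read elements 0 and 1 respectively.  */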
+
+/* __ev_set_* functions. */
+
+#define __ev_set_u32 __ev_set_u32_internal
+#define __ev_set_s32 __ev_set_s32_internal
+#define __ev_set_fs __ev_set_fs_internal
+#define __ev_set_u16 __ev_set_u16_internal
+#define __ev_set_s16 __ev_set_s16_internal
+
+#define __ev_set_ufix32_u32 __ev_set_u32
+#define __ev_set_sfix32_s32 __ev_set_s32
+
+#define __ev_set_sfix32_fs __ev_set_sfix32_fs_internal
+#define __ev_set_ufix32_fs __ev_set_ufix32_fs_internal
+
+#define __ev_set_upper_u32(a, b) __ev_set_u32 (a, b, 0)
+#define __ev_set_lower_u32(a, b) __ev_set_u32 (a, b, 1)
+#define __ev_set_upper_s32(a, b) __ev_set_s32 (a, b, 0)
+#define __ev_set_lower_s32(a, b) __ev_set_s32 (a, b, 1)
+#define __ev_set_upper_fs(a, b) __ev_set_fs (a, b, 0)
+#define __ev_set_lower_fs(a, b) __ev_set_fs (a, b, 1)
+#define __ev_set_upper_ufix32_u32 __ev_set_upper_u32
+#define __ev_set_lower_ufix32_u32 __ev_set_lower_u32
+#define __ev_set_upper_sfix32_s32 __ev_set_upper_s32
+#define __ev_set_lower_sfix32_s32 __ev_set_lower_s32
+#define __ev_set_upper_sfix32_fs(a, b) __ev_set_sfix32_fs (a, b, 0)
+#define __ev_set_lower_sfix32_fs(a, b) __ev_set_sfix32_fs (a, b, 1)
+#define __ev_set_upper_ufix32_fs(a, b) __ev_set_ufix32_fs (a, b, 0)
+#define __ev_set_lower_ufix32_fs(a, b) __ev_set_ufix32_fs (a, b, 1)
+
+#define __ev_set_acc_vec64 __builtin_spe_evmra
+
+static inline __ev64_opaque__
+__ev_set_acc_u64 (uint64_t a)
+{
+ __ev64_opaque__ ev32;
+ ev32 = __ev_create_u64 (a);
+ __ev_mra (ev32);
+ return ev32;
+}
+
+static inline __ev64_opaque__
+__ev_set_acc_s64 (int64_t a)
+{
+ __ev64_opaque__ ev32;
+ ev32 = __ev_create_s64 (a);
+ __ev_mra (ev32);
+ return ev32;
+}
+
+static inline __ev64_opaque__
+__ev_set_u32_internal (__ev64_opaque__ a, uint32_t b, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint32_t i[2];
+ } u;
+
+ u.v = a;
+ u.i[pos] = b;
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_set_s32_internal (__ev64_opaque__ a, int32_t b, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int32_t i[2];
+ } u;
+
+ u.v = a;
+ u.i[pos] = b;
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_set_fs_internal (__ev64_opaque__ a, float b, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ float f[2];
+ } u;
+
+ u.v = a;
+ u.f[pos] = b;
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_set_sfix32_fs_internal (__ev64_opaque__ a, float b, uint32_t pos)
+{
+ __ev64_opaque__ v;
+ float other;
+
+ /* Get other half. */
+ other = __ev_get_fs_internal (a, pos ^ 1);
+
+ /* Make an sfix32 with 'b'. */
+ v = __ev_create_sfix32_fs (b, b);
+
+ /* Set other half to what it used to be. */
+ return __ev_set_fs_internal (v, other, pos ^ 1);
+}
+
+static inline __ev64_opaque__
+__ev_set_ufix32_fs_internal (__ev64_opaque__ a, float b, uint32_t pos)
+{
+ __ev64_opaque__ v;
+ float other;
+
+ /* Get other half. */
+ other = __ev_get_fs_internal (a, pos ^ 1);
+
+ /* Make an ufix32 with 'b'. */
+ v = __ev_create_ufix32_fs (b, b);
+
+ /* Set other half to what it used to be. */
+ return __ev_set_fs_internal (v, other, pos ^ 1);
+}
+
+static inline __ev64_opaque__
+__ev_set_u16_internal (__ev64_opaque__ a, uint16_t b, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ uint16_t i[4];
+ } u;
+
+ u.v = a;
+ u.i[pos] = b;
+ return u.v;
+}
+
+static inline __ev64_opaque__
+__ev_set_s16_internal (__ev64_opaque__ a, int16_t b, uint32_t pos)
+{
+ union
+ {
+ __ev64_opaque__ v;
+ int16_t i[4];
+ } u;
+
+ u.v = a;
+ u.i[pos] = b;
+ return u.v;
+}
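+
+/* Usage sketch (editorial addition): the setters return an updated
+   copy rather than modifying their argument in place, so the result
+   must be reassigned:
+
+     v = __ev_set_lower_fs (v, 1.0f);
+*/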
+
+/* Predicates. */
+
+#define __pred_all 0
+#define __pred_any 1
+#define __pred_upper 2
+#define __pred_lower 3
+
+#define __ev_any_gts(a, b) __builtin_spe_evcmpgts (__pred_any, (a), (b))
+#define __ev_all_gts(a, b) __builtin_spe_evcmpgts (__pred_all, (a), (b))
+#define __ev_upper_gts(a, b) __builtin_spe_evcmpgts (__pred_upper, (a), (b))
+#define __ev_lower_gts(a, b) __builtin_spe_evcmpgts (__pred_lower, (a), (b))
+#define __ev_select_gts __builtin_spe_evsel_gts
+
+#define __ev_any_gtu(a, b) __builtin_spe_evcmpgtu (__pred_any, (a), (b))
+#define __ev_all_gtu(a, b) __builtin_spe_evcmpgtu (__pred_all, (a), (b))
+#define __ev_upper_gtu(a, b) __builtin_spe_evcmpgtu (__pred_upper, (a), (b))
+#define __ev_lower_gtu(a, b) __builtin_spe_evcmpgtu (__pred_lower, (a), (b))
+#define __ev_select_gtu __builtin_spe_evsel_gtu
+
+#define __ev_any_lts(a, b) __builtin_spe_evcmplts (__pred_any, (a), (b))
+#define __ev_all_lts(a, b) __builtin_spe_evcmplts (__pred_all, (a), (b))
+#define __ev_upper_lts(a, b) __builtin_spe_evcmplts (__pred_upper, (a), (b))
+#define __ev_lower_lts(a, b) __builtin_spe_evcmplts (__pred_lower, (a), (b))
+#define __ev_select_lts(a, b, c, d) ((__v2si) __builtin_spe_evsel_lts ((a), (b), (c), (d)))
+
+#define __ev_any_ltu(a, b) __builtin_spe_evcmpltu (__pred_any, (a), (b))
+#define __ev_all_ltu(a, b) __builtin_spe_evcmpltu (__pred_all, (a), (b))
+#define __ev_upper_ltu(a, b) __builtin_spe_evcmpltu (__pred_upper, (a), (b))
+#define __ev_lower_ltu(a, b) __builtin_spe_evcmpltu (__pred_lower, (a), (b))
+#define __ev_select_ltu __builtin_spe_evsel_ltu
+#define __ev_any_eq(a, b) __builtin_spe_evcmpeq (__pred_any, (a), (b))
+#define __ev_all_eq(a, b) __builtin_spe_evcmpeq (__pred_all, (a), (b))
+#define __ev_upper_eq(a, b) __builtin_spe_evcmpeq (__pred_upper, (a), (b))
+#define __ev_lower_eq(a, b) __builtin_spe_evcmpeq (__pred_lower, (a), (b))
+#define __ev_select_eq __builtin_spe_evsel_eq
+
+#define __ev_any_fs_gt(a, b) __builtin_spe_evfscmpgt (__pred_any, (a), (b))
+#define __ev_all_fs_gt(a, b) __builtin_spe_evfscmpgt (__pred_all, (a), (b))
+#define __ev_upper_fs_gt(a, b) __builtin_spe_evfscmpgt (__pred_upper, (a), (b))
+#define __ev_lower_fs_gt(a, b) __builtin_spe_evfscmpgt (__pred_lower, (a), (b))
+#define __ev_select_fs_gt __builtin_spe_evsel_fsgt
+
+#define __ev_any_fs_lt(a, b) __builtin_spe_evfscmplt (__pred_any, (a), (b))
+#define __ev_all_fs_lt(a, b) __builtin_spe_evfscmplt (__pred_all, (a), (b))
+#define __ev_upper_fs_lt(a, b) __builtin_spe_evfscmplt (__pred_upper, (a), (b))
+#define __ev_lower_fs_lt(a, b) __builtin_spe_evfscmplt (__pred_lower, (a), (b))
+#define __ev_select_fs_lt __builtin_spe_evsel_fslt
+
+#define __ev_any_fs_eq(a, b) __builtin_spe_evfscmpeq (__pred_any, (a), (b))
+#define __ev_all_fs_eq(a, b) __builtin_spe_evfscmpeq (__pred_all, (a), (b))
+#define __ev_upper_fs_eq(a, b) __builtin_spe_evfscmpeq (__pred_upper, (a), (b))
+#define __ev_lower_fs_eq(a, b) __builtin_spe_evfscmpeq (__pred_lower, (a), (b))
+#define __ev_select_fs_eq __builtin_spe_evsel_fseq
+
+#define __ev_any_fs_tst_gt(a, b) __builtin_spe_evfststgt (__pred_any, (a), (b))
+#define __ev_all_fs_tst_gt(a, b) __builtin_spe_evfststgt (__pred_all, (a), (b))
+#define __ev_upper_fs_tst_gt(a, b) __builtin_spe_evfststgt (__pred_upper, (a), (b))
+#define __ev_lower_fs_tst_gt(a, b) __builtin_spe_evfststgt (__pred_lower, (a), (b))
+#define __ev_select_fs_tst_gt __builtin_spe_evsel_fststgt
+
+#define __ev_any_fs_tst_lt(a, b) __builtin_spe_evfststlt (__pred_any, (a), (b))
+#define __ev_all_fs_tst_lt(a, b) __builtin_spe_evfststlt (__pred_all, (a), (b))
+#define __ev_upper_fs_tst_lt(a, b) __builtin_spe_evfststlt (__pred_upper, (a), (b))
+#define __ev_lower_fs_tst_lt(a, b) __builtin_spe_evfststlt (__pred_lower, (a), (b))
+#define __ev_select_fs_tst_lt __builtin_spe_evsel_fststlt
+
+#define __ev_any_fs_tst_eq(a, b) __builtin_spe_evfststeq (__pred_any, (a), (b))
+#define __ev_all_fs_tst_eq(a, b) __builtin_spe_evfststeq (__pred_all, (a), (b))
+#define __ev_upper_fs_tst_eq(a, b) __builtin_spe_evfststeq (__pred_upper, (a), (b))
+#define __ev_lower_fs_tst_eq(a, b) __builtin_spe_evfststeq (__pred_lower, (a), (b))
+#define __ev_select_fs_tst_eq __builtin_spe_evsel_fststeq
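+
+/* Usage sketch (editorial addition): the any/all/upper/lower forms
+   evaluate to a truth value usable directly in control flow, e.g.
+
+     if (__ev_any_gts (a, b))
+       handle_overflow ();
+
+   where handle_overflow is a placeholder for user code.  */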
+
+/* SPEFSCR accessor functions. */
+
+#define __SPEFSCR_SOVH 0x80000000
+#define __SPEFSCR_OVH 0x40000000
+#define __SPEFSCR_FGH 0x20000000
+#define __SPEFSCR_FXH 0x10000000
+#define __SPEFSCR_FINVH 0x08000000
+#define __SPEFSCR_FDBZH 0x04000000
+#define __SPEFSCR_FUNFH 0x02000000
+#define __SPEFSCR_FOVFH 0x01000000
+/* 2 unused bits. */
+#define __SPEFSCR_FINXS 0x00200000
+#define __SPEFSCR_FINVS 0x00100000
+#define __SPEFSCR_FDBZS 0x00080000
+#define __SPEFSCR_FUNFS 0x00040000
+#define __SPEFSCR_FOVFS 0x00020000
+#define __SPEFSCR_MODE 0x00010000
+#define __SPEFSCR_SOV 0x00008000
+#define __SPEFSCR_OV 0x00004000
+#define __SPEFSCR_FG 0x00002000
+#define __SPEFSCR_FX 0x00001000
+#define __SPEFSCR_FINV 0x00000800
+#define __SPEFSCR_FDBZ 0x00000400
+#define __SPEFSCR_FUNF 0x00000200
+#define __SPEFSCR_FOVF 0x00000100
+/* 1 unused bit. */
+#define __SPEFSCR_FINXE 0x00000040
+#define __SPEFSCR_FINVE 0x00000020
+#define __SPEFSCR_FDBZE 0x00000010
+#define __SPEFSCR_FUNFE 0x00000008
+#define __SPEFSCR_FOVFE 0x00000004
+#define __SPEFSCR_FRMC 0x00000003
+
+#define __ev_get_spefscr_sovh() (__builtin_spe_mfspefscr () & __SPEFSCR_SOVH)
+#define __ev_get_spefscr_ovh() (__builtin_spe_mfspefscr () & __SPEFSCR_OVH)
+#define __ev_get_spefscr_fgh() (__builtin_spe_mfspefscr () & __SPEFSCR_FGH)
+#define __ev_get_spefscr_fxh() (__builtin_spe_mfspefscr () & __SPEFSCR_FXH)
+#define __ev_get_spefscr_finvh() (__builtin_spe_mfspefscr () & __SPEFSCR_FINVH)
+#define __ev_get_spefscr_fdbzh() (__builtin_spe_mfspefscr () & __SPEFSCR_FDBZH)
+#define __ev_get_spefscr_funfh() (__builtin_spe_mfspefscr () & __SPEFSCR_FUNFH)
+#define __ev_get_spefscr_fovfh() (__builtin_spe_mfspefscr () & __SPEFSCR_FOVFH)
+#define __ev_get_spefscr_finxs() (__builtin_spe_mfspefscr () & __SPEFSCR_FINXS)
+#define __ev_get_spefscr_finvs() (__builtin_spe_mfspefscr () & __SPEFSCR_FINVS)
+#define __ev_get_spefscr_fdbzs() (__builtin_spe_mfspefscr () & __SPEFSCR_FDBZS)
+#define __ev_get_spefscr_funfs() (__builtin_spe_mfspefscr () & __SPEFSCR_FUNFS)
+#define __ev_get_spefscr_fovfs() (__builtin_spe_mfspefscr () & __SPEFSCR_FOVFS)
+#define __ev_get_spefscr_mode() (__builtin_spe_mfspefscr () & __SPEFSCR_MODE)
+#define __ev_get_spefscr_sov() (__builtin_spe_mfspefscr () & __SPEFSCR_SOV)
+#define __ev_get_spefscr_ov() (__builtin_spe_mfspefscr () & __SPEFSCR_OV)
+#define __ev_get_spefscr_fg() (__builtin_spe_mfspefscr () & __SPEFSCR_FG)
+#define __ev_get_spefscr_fx() (__builtin_spe_mfspefscr () & __SPEFSCR_FX)
+#define __ev_get_spefscr_finv() (__builtin_spe_mfspefscr () & __SPEFSCR_FINV)
+#define __ev_get_spefscr_fdbz() (__builtin_spe_mfspefscr () & __SPEFSCR_FDBZ)
+#define __ev_get_spefscr_funf() (__builtin_spe_mfspefscr () & __SPEFSCR_FUNF)
+#define __ev_get_spefscr_fovf() (__builtin_spe_mfspefscr () & __SPEFSCR_FOVF)
+#define __ev_get_spefscr_finxe() (__builtin_spe_mfspefscr () & __SPEFSCR_FINXE)
+#define __ev_get_spefscr_finve() (__builtin_spe_mfspefscr () & __SPEFSCR_FINVE)
+#define __ev_get_spefscr_fdbze() (__builtin_spe_mfspefscr () & __SPEFSCR_FDBZE)
+#define __ev_get_spefscr_funfe() (__builtin_spe_mfspefscr () & __SPEFSCR_FUNFE)
+#define __ev_get_spefscr_fovfe() (__builtin_spe_mfspefscr () & __SPEFSCR_FOVFE)
+#define __ev_get_spefscr_frmc() (__builtin_spe_mfspefscr () & __SPEFSCR_FRMC)
+
+static inline void
+__ev_clr_spefscr_field (int mask)
+{
+ int i;
+
+ i = __builtin_spe_mfspefscr ();
+ i &= ~mask;
+ __builtin_spe_mtspefscr (i);
+}
+
+#define __ev_clr_spefscr_sovh() __ev_clr_spefscr_field (__SPEFSCR_SOVH)
+#define __ev_clr_spefscr_sov() __ev_clr_spefscr_field (__SPEFSCR_SOV)
+#define __ev_clr_spefscr_finxs() __ev_clr_spefscr_field (__SPEFSCR_FINXS)
+#define __ev_clr_spefscr_finvs() __ev_clr_spefscr_field (__SPEFSCR_FINVS)
+#define __ev_clr_spefscr_fdbzs() __ev_clr_spefscr_field (__SPEFSCR_FDBZS)
+#define __ev_clr_spefscr_funfs() __ev_clr_spefscr_field (__SPEFSCR_FUNFS)
+#define __ev_clr_spefscr_fovfs() __ev_clr_spefscr_field (__SPEFSCR_FOVFS)
+
+/* Set rounding mode:
+ rnd = 0 (nearest)
+ rnd = 1 (zero)
+ rnd = 2 (+inf)
+ rnd = 3 (-inf). */
+
+static inline void
+__ev_set_spefscr_frmc (int rnd)
+{
+ int i;
+
+ i = __builtin_spe_mfspefscr ();
+ i &= ~__SPEFSCR_FRMC;
+ i |= rnd;
+ __builtin_spe_mtspefscr (i);
+}
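+
+/* Usage sketch (editorial addition): select round-toward-zero, then
+   test and clear the sticky overflow flag after some arithmetic:
+
+     __ev_set_spefscr_frmc (1);
+     ...
+     if (__ev_get_spefscr_fovfs ())
+       __ev_clr_spefscr_fovfs ();
+*/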
+
+/* The SPE PIM says these are declared in <spe.h>, although they are
+ not provided by GCC: they must be taken from a separate
+ library. */
+extern short int atosfix16 (const char *);
+extern int atosfix32 (const char *);
+extern long long atosfix64 (const char *);
+
+extern unsigned short atoufix16 (const char *);
+extern unsigned int atoufix32 (const char *);
+extern unsigned long long atoufix64 (const char *);
+
+extern short int strtosfix16 (const char *, char **);
+extern int strtosfix32 (const char *, char **);
+extern long long strtosfix64 (const char *, char **);
+
+extern unsigned short int strtoufix16 (const char *, char **);
+extern unsigned int strtoufix32 (const char *, char **);
+extern unsigned long long strtoufix64 (const char *, char **);
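+
+/* Usage sketch (editorial addition): assuming such a library is
+   linked, these parse decimal strings into fixed-point values, e.g.
+
+     int half = atosfix32 ("0.5");
+
+   analogously to atoi and strtol for integers.  */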
+
+#endif /* _SPE_H */
diff --git a/gcc-4.4.3/gcc/config/rs6000/spe.md b/gcc-4.4.3/gcc/config/rs6000/spe.md
new file mode 100644
index 000000000..5368bace3
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/spe.md
@@ -0,0 +1,3190 @@
+;; e500 SPE description
+;; Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008
+;; Free Software Foundation, Inc.
+;; Contributed by Aldy Hernandez (aldy@quesejoda.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_constants
+ [(CMPDFEQ_GPR 1006)
+ (TSTDFEQ_GPR 1007)
+ (CMPDFGT_GPR 1008)
+ (TSTDFGT_GPR 1009)
+ (CMPDFLT_GPR 1010)
+ (TSTDFLT_GPR 1011)
+ (CMPTFEQ_GPR 1012)
+ (TSTTFEQ_GPR 1013)
+ (CMPTFGT_GPR 1014)
+ (TSTTFGT_GPR 1015)
+ (CMPTFLT_GPR 1016)
+ (TSTTFLT_GPR 1017)
+ (E500_CR_IOR_COMPARE 1018)
+ ])
+
+;; Modes using a 64-bit register.
+(define_mode_iterator SPE64 [DF V4HI V2SF V1DI V2SI])
+
+;; Likewise, but allow TFmode (two registers) as well.
+(define_mode_iterator SPE64TF [DF V4HI V2SF V1DI V2SI TF])
+
+;; DImode and TImode.
+(define_mode_iterator DITI [DI TI])
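+
+;; Editorial sketch (not part of the original port): a mode iterator
+;; expands one template into a define_insn per listed mode; e.g. a
+;; hypothetical move pattern written with SPE64,
+;;
+;;   (define_insn "*spe_example_mov<mode>"
+;;     [(set (match_operand:SPE64 0 "gpc_reg_operand" "=r")
+;;           (match_operand:SPE64 1 "gpc_reg_operand" "r"))]
+;;     "TARGET_SPE"
+;;     "evor %0,%1,%1")
+;;
+;; would yield DF, V4HI, V2SF, V1DI and V2SI variants, with <mode>
+;; replaced by each mode's name.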
+
+(define_insn "*negsf2_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsneg %0,%1"
+ [(set_attr "type" "fpsimple")])
+
+(define_insn "*abssf2_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsabs %0,%1"
+ [(set_attr "type" "fpsimple")])
+
+(define_insn "*nabssf2_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (neg:SF (abs:SF (match_operand:SF 1 "gpc_reg_operand" "r"))))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsnabs %0,%1"
+ [(set_attr "type" "fpsimple")])
+
+(define_insn "*addsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsadd %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*subsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efssub %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*mulsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsmul %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "*divsf3_gpr"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsdiv %0,%1,%2"
+ [(set_attr "type" "vecfdiv")])
+
+;; Floating point conversion instructions.
+
+(define_insn "fixuns_truncdfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdctuiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (float_extend:DF (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_fixuns_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unsigned_fix:SI (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsctuiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_fix_truncsfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efsctsiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_fix_truncdfsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdctsiz %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatunssisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (unsigned_float:SF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efscfui %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatunssidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfui %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatsisf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (float:SF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "efscfsi %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "spe_floatsidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdcfsi %0,%1"
+ [(set_attr "type" "fp")])
+
+;; SPE SIMD instructions
+
+(define_insn "spe_evabs"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (abs:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evabs %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evandc"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (not:V2SI (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evand"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evand %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+;; Vector compare instructions
+
+(define_insn "spe_evcmpeq"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 500))]
+ "TARGET_SPE"
+ "evcmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmpgts"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 501))]
+ "TARGET_SPE"
+ "evcmpgts %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmpgtu"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 502))]
+ "TARGET_SPE"
+ "evcmpgtu %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmplts"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 503))]
+ "TARGET_SPE"
+ "evcmplts %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcmpltu"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 504))]
+ "TARGET_SPE"
+ "evcmpltu %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+;; Floating point vector compare instructions
+
+(define_insn "spe_evfscmpeq"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 538))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfscmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscmpgt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 539))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfscmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscmplt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 540))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfscmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfststeq"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 541))]
+ "TARGET_SPE"
+ "evfststeq %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfststgt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 542))]
+ "TARGET_SPE"
+ "evfststgt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfststlt"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")] 543))]
+ "TARGET_SPE"
+ "evfststlt %0,%1,%2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+;; End of vector compare instructions
+
+(define_insn "spe_evcntlsw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 505))]
+ "TARGET_SPE"
+ "evcntlsw %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evcntlzw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 506))]
+ "TARGET_SPE"
+ "evcntlzw %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_eveqv"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (not:V2SI (xor:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "eveqv %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evextsb"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 507))]
+ "TARGET_SPE"
+ "evextsb %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evextsh"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 508))]
+ "TARGET_SPE"
+ "evextsh %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhesplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 509)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlhhesplat %0,%2*2(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhesplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 510)]
+ "TARGET_SPE"
+ "evlhhesplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhossplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 511)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlhhossplat %0,%2*2(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhossplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 512)]
+ "TARGET_SPE"
+ "evlhhossplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhousplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 513)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlhhousplat %0,%2*2(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlhhousplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 514)]
+ "TARGET_SPE"
+ "evlhhousplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhsplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 515)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhsplat %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhsplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 516)]
+ "TARGET_SPE"
+ "evlwhsplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwwsplat"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 517)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwwsplat %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwwsplatx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 518)]
+ "TARGET_SPE"
+ "evlwwsplatx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergehi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (vec_select:V2SI
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergehi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergehilo"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergehilo %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergelo"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (vec_select:V2SI
+ (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergelo %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmergelohi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (vec_merge:V2SI (vec_select:V2SI
+ (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (vec_select:V2SI
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (parallel [(const_int 1)
+ (const_int 0)]))
+ (const_int 2)))]
+ "TARGET_SPE"
+ "evmergelohi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evnand"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (not:V2SI (and:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evnand %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "negv2si2"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (neg:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evneg %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evnor"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (not:V2SI (ior:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evnor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evorc"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (ior:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (not:V2SI (match_operand:V2SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_SPE"
+ "evorc %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evor"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (ior:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evrlwi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 519))]
+ "TARGET_SPE"
+ "evrlwi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evrlw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 520))]
+ "TARGET_SPE"
+ "evrlw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evrndw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 521))]
+ "TARGET_SPE"
+ "evrndw %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsel"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (match_operand:CC 3 "cc_reg_operand" "y")] 522))]
+ "TARGET_SPE"
+ "evsel %0,%1,%2,%3"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsel_fs"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")
+ (match_operand:CC 3 "cc_reg_operand" "y")] 725))]
+ "TARGET_SPE"
+ "evsel %0,%1,%2,%3"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evslwi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ 523))]
+ "TARGET_SPE"
+ "evslwi %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evslw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 524))]
+ "TARGET_SPE"
+ "evslw %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrwis"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ 525))]
+ "TARGET_SPE"
+ "evsrwis %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrwiu"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")]
+ 526))]
+ "TARGET_SPE"
+ "evsrwiu %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrws"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 527))]
+ "TARGET_SPE"
+ "evsrws %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsrwu"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 528))]
+ "TARGET_SPE"
+ "evsrwu %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+;; vector xors
+
+(define_insn "xorv2si3"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (xor:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "xorv4hi3"
+ [(set (match_operand:V4HI 0 "gpc_reg_operand" "=r")
+ (xor:V4HI (match_operand:V4HI 1 "gpc_reg_operand" "r")
+ (match_operand:V4HI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "xorv1di3"
+ [(set (match_operand:V1DI 0 "gpc_reg_operand" "=r")
+ (xor:V1DI (match_operand:V1DI 1 "gpc_reg_operand" "r")
+ (match_operand:V1DI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+;; end of vector xors
+
+(define_insn "spe_evfsabs"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (abs:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evfsabs %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsadd"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (plus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfsadd %0,%1,%2"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfsf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 529))]
+ "TARGET_SPE"
+ "evfscfsf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfsi"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (float:V2SF (match_operand:V2SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evfscfsi %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfuf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 530))]
+ "TARGET_SPE"
+ "evfscfuf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfscfui"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SI 1 "gpc_reg_operand" "r")] 701))]
+ "TARGET_SPE"
+ "evfscfui %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctsf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 531))]
+ "TARGET_SPE"
+ "evfsctsf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctsi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 532))]
+ "TARGET_SPE"
+ "evfsctsi %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctsiz"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 533))]
+ "TARGET_SPE"
+ "evfsctsiz %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctuf"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 534))]
+ "TARGET_SPE"
+ "evfsctuf %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctui"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 535))]
+ "TARGET_SPE"
+ "evfsctui %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsctuiz"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 536))]
+ "TARGET_SPE"
+ "evfsctuiz %0,%1"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsdiv"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (div:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfsdiv %0,%1,%2"
+ [(set_attr "type" "vecfdiv")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsmul"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (mult:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfsmul %0,%1,%2"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsnabs"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (unspec:V2SF [(match_operand:V2SF 1 "gpc_reg_operand" "r")] 537))]
+ "TARGET_SPE"
+ "evfsnabs %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfsneg"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (neg:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evfsneg %0,%1"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evfssub"
+ [(set (match_operand:V2SF 0 "gpc_reg_operand" "=r")
+ (minus:V2SF (match_operand:V2SF 1 "gpc_reg_operand" "r")
+ (match_operand:V2SF 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evfssub %0,%1,%2"
+ [(set_attr "type" "vecfloat")
+ (set_attr "length" "4")])
+
+;; SPE SIMD load instructions.
+
+;; Only the hardware engineer who designed the SPE understands the
+;; plethora of load and store instructions ;-).  We have no way of
+;; differentiating between them in RTL, so each pattern carries an
+;; (unspec [(const_int 0)] N) with a distinct number N to keep
+;; otherwise-identical patterns apart.
+
+(define_insn "spe_evldd"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 544)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evldd %0,%2*8(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlddx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 545)]
+ "TARGET_SPE"
+ "evlddx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldh"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 546)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evldh %0,%2*8(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldhx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 547)]
+ "TARGET_SPE"
+ "evldhx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 548)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evldw %0,%2*8(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evldwx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 549)]
+ "TARGET_SPE"
+ "evldwx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhe"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 550)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhe %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhex"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 551)]
+ "TARGET_SPE"
+ "evlwhex %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhos"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 552)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhos %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhosx"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 553)]
+ "TARGET_SPE"
+ "evlwhosx %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhou"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:QI 2 "immediate_operand" "i"))))
+ (unspec [(const_int 0)] 554)]
+ "TARGET_SPE && INTVAL (operands[2]) >= 0 && INTVAL (operands[2]) <= 31"
+ "evlwhou %0,%2*4(%1)"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evlwhoux"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (mem:V2SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (unspec [(const_int 0)] 555)]
+ "TARGET_SPE"
+ "evlwhoux %0,%1,%2"
+ [(set_attr "type" "vecload")
+ (set_attr "length" "4")])
+
+(define_insn "spe_brinc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")] 556))]
+ "TARGET_SPE"
+ "brinc %0,%1,%2"
+ [(set_attr "type" "brinc")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 557))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 558))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 559))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 560))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 561))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhegumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 562))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhegumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 563))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 564))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 565))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 566))]
+ "TARGET_SPE"
+ "evmhesmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 567))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 568))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 569))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhesmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhesmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 570))]
+ "TARGET_SPE"
+ "evmhesmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 571))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 572))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 573))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 574))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmhessf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 575))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhessianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 576))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhessianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 577))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheumiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 578))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheumianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 579))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 580))]
+ "TARGET_SPE"
+ "evmheumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 581))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheusiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmheusianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 582))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmheusianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 583))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 584))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 585))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 586))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 587))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhogumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 588))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhogumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 589))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 590))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 591))]
+ "TARGET_SPE"
+ "evmhosmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 592))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 593))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 594))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 595))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhosmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhosmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 596))]
+ "TARGET_SPE"
+ "evmhosmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossfaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 597))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossfaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossfanw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 598))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossfanw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 599))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 600))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmhossf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 601))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhossianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 602))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhossianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 603))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhoumiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 604))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhoumianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 605))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhoumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhoumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 606))]
+ "TARGET_SPE"
+ "evmhoumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhousiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 607))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhousiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmhousianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 608))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmhousianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmmlssfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 609))]
+ "TARGET_SPE"
+ "evmmlssfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmmlssf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 610))]
+ "TARGET_SPE"
+ "evmmlssf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 611))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 612))]
+ "TARGET_SPE"
+ "evmwhsmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 613))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 614))]
+ "TARGET_SPE"
+ "evmwhsmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 615))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhusian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 626))]
+ "TARGET_SPE"
+ "evmwhusian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 628))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmwhssf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 629))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 630))]
+ "TARGET_SPE"
+ "evmwhumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlsmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 635))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlsmiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlsmianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 636))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlsmianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlssiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 641))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlssiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlssianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 642))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlssianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 643))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlumiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 644))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlumianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 645))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 646))]
+ "TARGET_SPE"
+ "evmwlumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 647))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlusiaaw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwlusianw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 648))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwlusianw %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 649))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 650))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 651))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 652))]
+ "TARGET_SPE"
+ "evmwsmf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 653))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 654))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 655))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwsmia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwsmi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 656))]
+ "TARGET_SPE"
+ "evmwsmi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 657))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwssfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 658))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwssfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssfa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 659))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwssfa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwssf"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 660))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evmwssf %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 661))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 662))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumia"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 663))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwumia %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwumi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 664))]
+ "TARGET_SPE"
+ "evmwumi %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (plus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evaddw %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 673))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddusiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 674))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddumiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddssiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 675))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddssiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddsmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 676))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evaddsmiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evaddiw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 677))]
+ "TARGET_SPE"
+ "evaddiw %0,%1,%2"
+ [(set_attr "type" "vecsimple")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubifw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 678))]
+ "TARGET_SPE"
+ "evsubifw %0,%2,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (minus:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_SPE"
+ "evsubfw %0,%2,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfusiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 679))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfusiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfumiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 680))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfumiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfssiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 681))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfssiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsubfsmiaaw"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (reg:V2SI SPE_ACC_REGNO)] 682))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evsubfsmiaaw %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmra"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (match_operand:V2SI 1 "gpc_reg_operand" "r"))
+ (set (reg:V2SI SPE_ACC_REGNO)
+ (unspec:V2SI [(match_dup 1)] 726))]
+ "TARGET_SPE"
+ "evmra %0,%1"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evdivws"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (div:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evdivws %0,%1,%2"
+ [(set_attr "type" "vecdiv")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evdivwu"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (udiv:V2SI (match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")))
+ (clobber (reg:SI SPEFSCR_REGNO))]
+ "TARGET_SPE"
+ "evdivwu %0,%1,%2"
+ [(set_attr "type" "vecdiv")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsplatfi"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:QI 1 "immediate_operand" "i")] 684))]
+ "TARGET_SPE"
+ "evsplatfi %0,%1"
+ [(set_attr "type" "vecperm")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evsplati"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:QI 1 "immediate_operand" "i")] 685))]
+ "TARGET_SPE"
+ "evsplati %0,%1"
+ [(set_attr "type" "vecperm")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdd"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 686)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstdd %2,%1*8(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstddx"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 687)]
+ "TARGET_SPE"
+ "evstddx %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdh"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 688)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstdh %2,%1*8(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdhx"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 689)]
+ "TARGET_SPE"
+ "evstdhx %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdw"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 690)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstdw %2,%1*8(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstdwx"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 691)]
+ "TARGET_SPE"
+ "evstdwx %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwhe"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 692)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwhe %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwhex"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 693)]
+ "TARGET_SPE"
+ "evstwhex %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwho"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 694)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwho %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwhox"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 695)]
+ "TARGET_SPE"
+ "evstwhox %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwe"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 696)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwwe %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwex"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 697)]
+ "TARGET_SPE"
+ "evstwwex %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwo"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:QI 1 "immediate_operand" "i")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 698)]
+ "TARGET_SPE && INTVAL (operands[1]) >= 0 && INTVAL (operands[1]) <= 31"
+ "evstwwo %2,%1*4(%0)"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evstwwox"
+ [(set (mem:V2SI (plus:SI (match_operand:SI 0 "gpc_reg_operand" "b")
+ (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (match_operand:V2SI 2 "gpc_reg_operand" "r"))
+ (unspec [(const_int 0)] 699)]
+ "TARGET_SPE"
+ "evstwwox %2,%0,%1"
+ [(set_attr "type" "vecstore")
+ (set_attr "length" "4")])
+
+;; Double-precision floating point instructions.
+
+;; FIXME: Add o=r option.
+(define_insn "*frob_<SPE64:mode>_<DITI:mode>"
+ [(set (match_operand:SPE64 0 "nonimmediate_operand" "=r,r")
+ (subreg:SPE64 (match_operand:DITI 1 "input_operand" "r,m") 0))]
+ "(TARGET_E500_DOUBLE && <SPE64:MODE>mode == DFmode)
+ || (TARGET_SPE && <SPE64:MODE>mode != DFmode)"
+ "@
+ evmergelo %0,%1,%L1
+ evldd%X1 %0,%y1")
+
+(define_insn "*frob_tf_ti"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=r")
+ (subreg:TF (match_operand:TI 1 "gpc_reg_operand" "r") 0))]
+ "TARGET_E500_DOUBLE"
+ "evmergelo %0,%1,%L1\;evmergelo %L0,%Y1,%Z1"
+ [(set_attr "length" "8")])
+
+(define_insn "*frob_<mode>_di_2"
+ [(set (subreg:DI (match_operand:SPE64TF 0 "nonimmediate_operand" "+&r,r") 0)
+ (match_operand:DI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && (<MODE>mode == DFmode || <MODE>mode == TFmode))
+ || (TARGET_SPE && <MODE>mode != DFmode && <MODE>mode != TFmode)"
+ "@
+ evmergelo %0,%1,%L1
+ evldd%X1 %0,%y1")
+
+(define_insn "*frob_tf_di_8_2"
+ [(set (subreg:DI (match_operand:TF 0 "nonimmediate_operand" "+&r,r") 8)
+ (match_operand:DI 1 "input_operand" "r,m"))]
+ "TARGET_E500_DOUBLE"
+ "@
+ evmergelo %L0,%1,%L1
+ evldd%X1 %L0,%y1")
+
+(define_insn "*frob_di_<mode>"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=&r")
+ (subreg:DI (match_operand:SPE64TF 1 "input_operand" "r") 0))]
+ "(TARGET_E500_DOUBLE && (<MODE>mode == DFmode || <MODE>mode == TFmode))
+ || (TARGET_SPE && <MODE>mode != DFmode && <MODE>mode != TFmode)"
+ "evmergehi %0,%1,%1\;mr %L0,%1"
+ [(set_attr "length" "8")])
+
+(define_insn "*frob_ti_tf"
+ [(set (match_operand:TI 0 "nonimmediate_operand" "=&r")
+ (subreg:TI (match_operand:TF 1 "input_operand" "r") 0))]
+ "TARGET_E500_DOUBLE"
+ "evmergehi %0,%1,%1\;mr %L0,%1\;evmergehi %Y0,%L1,%L1\;mr %Z0,%L1"
+ [(set_attr "length" "16")])
+
+(define_insn "*frob_<DITI:mode>_<SPE64:mode>_2"
+ [(set (subreg:SPE64 (match_operand:DITI 0 "register_operand" "+&r,r") 0)
+ (match_operand:SPE64 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && <SPE64:MODE>mode == DFmode)
+ || (TARGET_SPE && <SPE64:MODE>mode != DFmode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ return \"evmergehi %0,%1,%1\;mr %L0,%1\";
+ case 1:
+ /* If the address is not offsettable we need to load the whole
+ doubleword into a 64-bit register and then copy the high word
+ to form the correct output layout. */
+ if (!offsettable_nonstrict_memref_p (operands[1]))
+ return \"evldd%X1 %L0,%y1\;evmergehi %0,%L0,%L0\";
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1%X1|lwz%U1%X1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+}"
+ [(set_attr "length" "8,8")])
+
+; As above, but for TImode at offset 8.
+(define_insn "*frob_ti_<mode>_8_2"
+ [(set (subreg:SPE64 (match_operand:TI 0 "register_operand" "+&r,r") 8)
+ (match_operand:SPE64 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && <MODE>mode == DFmode)
+ || (TARGET_SPE && <MODE>mode != DFmode)"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ gcc_unreachable ();
+ case 0:
+ return \"evmergehi %Y0,%1,%1\;mr %Z0,%1\";
+ case 1:
+ if (!offsettable_nonstrict_memref_p (operands[1]))
+ return \"evldd%X1 %Z0,%y1\;evmergehi %Y0,%Z0,%Z0\";
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %Z0,%L1\;{l|lwz} %Y0,%1\";
+ else
+ return \"{l%U1%X1|lwz%U1%X1} %Y0,%1\;{l|lwz} %Z0,%L1\";
+ }
+}"
+ [(set_attr "length" "8,8")])
+
+(define_insn "*frob_ti_tf_2"
+ [(set (subreg:TF (match_operand:TI 0 "gpc_reg_operand" "=&r") 0)
+ (match_operand:TF 1 "input_operand" "r"))]
+ "TARGET_E500_DOUBLE"
+ "evmergehi %0,%1,%1\;mr %L0,%1\;evmergehi %Y0,%L1,%L1\;mr %Z0,%L1"
+ [(set_attr "length" "16")])
+
+(define_insn "*mov_si<mode>_e500_subreg0"
+ [(set (subreg:SI (match_operand:SPE64TF 0 "register_operand" "+r,&r") 0)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && (<MODE>mode == DFmode || <MODE>mode == TFmode))
+ || (TARGET_SPE && <MODE>mode != DFmode && <MODE>mode != TFmode)"
+ "@
+ evmergelo %0,%1,%0
+ evmergelohi %0,%0,%0\;{l%U1%X1|lwz%U1%X1} %0,%1\;evmergelohi %0,%0,%0"
+ [(set_attr "length" "4,12")])
+
+;; ??? Could use evstwwe for memory stores in some cases, depending on
+;; the offset.
+(define_insn "*mov_si<mode>_e500_subreg0_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:SPE64TF 1 "register_operand" "+r,&r") 0))]
+ "(TARGET_E500_DOUBLE && (<MODE>mode == DFmode || <MODE>mode == TFmode))
+ || (TARGET_SPE && <MODE>mode != DFmode && <MODE>mode != TFmode)"
+ "@
+ evmergehi %0,%0,%1
+ evmergelohi %1,%1,%1\;{st%U0%X0|stw%U0%X0} %1,%0"
+ [(set_attr "length" "4,8")])
+
+(define_insn "*mov_si<mode>_e500_subreg4"
+ [(set (subreg:SI (match_operand:SPE64TF 0 "register_operand" "+r,r") 4)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "(TARGET_E500_DOUBLE && (<MODE>mode == DFmode || <MODE>mode == TFmode))
+ || (TARGET_SPE && <MODE>mode != DFmode && <MODE>mode != TFmode)"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1")
+
+(define_insn "*mov_si<mode>_e500_subreg4_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:SPE64TF 1 "register_operand" "r,r") 4))]
+ "(TARGET_E500_DOUBLE && (<MODE>mode == DFmode || <MODE>mode == TFmode))
+ || (TARGET_SPE && <MODE>mode != DFmode && <MODE>mode != TFmode)"
+ "@
+ mr %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0")
+
+(define_insn "*mov_sitf_e500_subreg8"
+ [(set (subreg:SI (match_operand:TF 0 "register_operand" "+r,&r") 8)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "TARGET_E500_DOUBLE"
+ "@
+ evmergelo %L0,%1,%L0
+ evmergelohi %L0,%L0,%L0\;{l%U1%X1|lwz%U1%X1} %L0,%1\;evmergelohi %L0,%L0,%L0"
+ [(set_attr "length" "4,12")])
+
+(define_insn "*mov_sitf_e500_subreg8_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:TF 1 "register_operand" "+r,&r") 8))]
+ "TARGET_E500_DOUBLE"
+ "@
+ evmergehi %0,%0,%L1
+ evmergelohi %L1,%L1,%L1\;{st%U0%X0|stw%U0%X0} %L1,%0"
+ [(set_attr "length" "4,8")])
+
+(define_insn "*mov_sitf_e500_subreg12"
+ [(set (subreg:SI (match_operand:TF 0 "register_operand" "+r,r") 12)
+ (match_operand:SI 1 "input_operand" "r,m"))]
+ "TARGET_E500_DOUBLE"
+ "@
+ mr %L0,%1
+ {l%U1%X1|lwz%U1%X1} %L0,%1")
+
+(define_insn "*mov_sitf_e500_subreg12_2"
+ [(set (match_operand:SI 0 "rs6000_nonimmediate_operand" "+r,m")
+ (subreg:SI (match_operand:TF 1 "register_operand" "r,r") 12))]
+ "TARGET_E500_DOUBLE"
+ "@
+ mr %0,%L1
+ {st%U0%X0|stw%U0%X0} %L1,%0")
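+
+;; In the e500 double scheme a DFmode (or each half of a TFmode) value
+;; occupies one 64-bit GPR whose upper 32 bits are reachable only via
+;; SPE merge instructions; hence the subreg-at-0 (high word) patterns
+;; above need evmergelo/evmergehi sequences while the subreg-at-4 (low
+;; word) patterns are plain mr/lwz/stw.  A rough picture (a C sketch,
+;; not GCC source):
+;;
+;;   /* Only .lo is visible to ordinary 32-bit instructions; .hi
+;;      must be moved through evmerge*.  */
+;;   struct gpr64 { unsigned int hi, lo; };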
+
+;; FIXME: Allow r=CONST0.
+(define_insn "*movdf_e500_double"
+ [(set (match_operand:DF 0 "rs6000_nonimmediate_operand" "=r,r,m")
+ (match_operand:DF 1 "input_operand" "r,m,r"))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return \"evor %0,%1,%1\";
+ case 1:
+ return \"evldd%X1 %0,%y1\";
+ case 2:
+ return \"evstdd%X0 %1,%y0\";
+ default:
+ gcc_unreachable ();
+ }
+ }"
+ [(set_attr "type" "*,vecload,vecstore")
+ (set_attr "length" "*,*,*")])
+
+(define_insn "spe_truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efscfd %0,%1")
+
+(define_insn "spe_absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdabs %0,%1")
+
+(define_insn "spe_nabsdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "r"))))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdnabs %0,%1")
+
+(define_insn "spe_negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdneg %0,%1")
+
+(define_insn "spe_adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdadd %0,%1,%2")
+
+(define_insn "spe_subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdsub %0,%1,%2")
+
+(define_insn "spe_muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efdmul %0,%1,%2")
+
+(define_insn "spe_divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r")))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE"
+ "efddiv %0,%1,%2")
+
+;; Double-precision floating point instructions for IBM long double.
+
+(define_insn_and_split "spe_trunctfdf2_internal1"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=r,?r")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "0,r")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "@
+ #
+ evor %0,%1,%1"
+ "&& reload_completed && REGNO (operands[0]) == REGNO (operands[1])"
+ [(const_int 0)]
+{
+ emit_note (NOTE_INSN_DELETED);
+ DONE;
+})
+
+(define_insn_and_split "spe_trunctfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=r")
+ (float_truncate:SF (match_operand:TF 1 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:DF 2 "=r"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (float_truncate:DF (match_dup 1)))
+ (set (match_dup 0)
+ (float_truncate:SF (match_dup 2)))]
+ "")
+
+(define_insn "spe_extenddftf2"
+ [(set (match_operand:TF 0 "rs6000_nonimmediate_operand" "=r,?r,r,o")
+ (float_extend:TF (match_operand:DF 1 "input_operand" "0,r,m,r")))
+ (clobber (match_scratch:DF 2 "=X,X,X,&r"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "@
+ evxor %L0,%L0,%L0
+ evor %0,%1,%1\;evxor %L0,%L0,%L0
+ evldd%X1 %0,%y1\;evxor %L0,%L0,%L0
+ evstdd%X0 %1,%y0\;evxor %2,%2,%2\;evstdd %2,%Y0"
+ [(set_attr "length" "4,8,8,12")])
+
+(define_expand "spe_fix_trunctfsi2"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "")))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))
+ (clobber (match_dup 4))])]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+{
+ operands[2] = gen_reg_rtx (DFmode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+})
+
+; Like fix_trunc_helper, but performs the add with rounding towards 0.
+(define_insn "spe_fix_trunctfsi2_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:TF 1 "gpc_reg_operand" "r")))
+ (clobber (match_operand:DF 2 "gpc_reg_operand" "=r"))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" "=&r"))
+ (clobber (match_operand:SI 4 "gpc_reg_operand" "=&r"))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "mfspefscr %3\;rlwinm %4,%3,0,0,29\;ori %4,%4,1\;efdadd %2,%1,%L1\;mtspefscr %3\;efdctsiz %0, %2"
+ [(set_attr "length" "24")])
+
+(define_insn "spe_negtf2_internal"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=r")
+ (neg:TF (match_operand:TF 1 "gpc_reg_operand" "r")))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"efdneg %L0,%L1\;efdneg %0,%1\";
+ else
+ return \"efdneg %0,%1\;efdneg %L0,%L1\";
+}"
+ [(set_attr "length" "8")])
+
+(define_expand "spe_abstf2_cmp"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (match_operand:TF 1 "gpc_reg_operand" "f"))
+ (set (match_dup 3) (match_dup 5))
+ (set (match_dup 5) (abs:DF (match_dup 5)))
+ (set (match_dup 4) (unspec:CCFP [(compare:CCFP (match_dup 3)
+ (match_dup 5))] CMPDFEQ_GPR))
+ (set (pc) (if_then_else (eq (match_dup 4) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 6) (neg:DF (match_dup 6)))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "
+{
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (CCFPmode);
+ operands[5] = simplify_gen_subreg (DFmode, operands[0], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word);
+}")
+
+(define_expand "spe_abstf2_tst"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (match_operand:TF 1 "gpc_reg_operand" "f"))
+ (set (match_dup 3) (match_dup 5))
+ (set (match_dup 5) (abs:DF (match_dup 5)))
+ (set (match_dup 4) (unspec:CCFP [(compare:CCFP (match_dup 3)
+ (match_dup 5))] TSTDFEQ_GPR))
+ (set (pc) (if_then_else (eq (match_dup 4) (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 6) (neg:DF (match_dup 6)))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128"
+ "
+{
+ const int hi_word = FLOAT_WORDS_BIG_ENDIAN ? 0 : GET_MODE_SIZE (DFmode);
+ const int lo_word = FLOAT_WORDS_BIG_ENDIAN ? GET_MODE_SIZE (DFmode) : 0;
+ operands[3] = gen_reg_rtx (DFmode);
+ operands[4] = gen_reg_rtx (CCFPmode);
+ operands[5] = simplify_gen_subreg (DFmode, operands[0], TFmode, hi_word);
+ operands[6] = simplify_gen_subreg (DFmode, operands[0], TFmode, lo_word);
+}")
+
+;; Vector move instructions.
+
+(define_expand "movv2si"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "")
+ (match_operand:V2SI 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V2SImode); DONE; }")
+
+(define_insn "*movv2si_internal"
+ [(set (match_operand:V2SI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V2SI 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V2SImode)
+ || gpc_reg_operand (operands[1], V2SImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0: return \"evstdd%X0 %1,%y0\";
+ case 1: return \"evldd%X1 %0,%y1\";
+ case 2: return \"evor %0,%1,%1\";
+ case 3: return output_vec_const_move (operands);
+ default: gcc_unreachable ();
+ }
+}"
+ [(set_attr "type" "vecload,vecstore,*,*")
+ (set_attr "length" "*,*,*,12")])
+
+(define_split
+ [(set (match_operand:V2SI 0 "register_operand" "")
+ (match_operand:V2SI 1 "zero_constant" ""))]
+ "TARGET_SPE && reload_completed"
+ [(set (match_dup 0)
+ (xor:V2SI (match_dup 0) (match_dup 0)))]
+ "")
+
+(define_expand "movv1di"
+ [(set (match_operand:V1DI 0 "nonimmediate_operand" "")
+ (match_operand:V1DI 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V1DImode); DONE; }")
+
+(define_insn "*movv1di_internal"
+ [(set (match_operand:V1DI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V1DI 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V1DImode)
+ || gpc_reg_operand (operands[1], V1DImode))"
+ "@
+ evstdd%X0 %1,%y0
+ evldd%X1 %0,%y1
+ evor %0,%1,%1
+ evxor %0,%0,%0"
+ [(set_attr "type" "vecload,vecstore,*,*")
+ (set_attr "length" "*,*,*,*")])
+
+(define_expand "movv4hi"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "")
+ (match_operand:V4HI 1 "any_operand" ""))]
+ "TARGET_SPE"
+ "{ rs6000_emit_move (operands[0], operands[1], V4HImode); DONE; }")
+
+(define_insn "*movv4hi_internal"
+ [(set (match_operand:V4HI 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V4HI 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V4HImode)
+ || gpc_reg_operand (operands[1], V4HImode))"
+ "@
+ evstdd%X0 %1,%y0
+ evldd%X1 %0,%y1
+ evor %0,%1,%1
+ evxor %0,%0,%0"
+ [(set_attr "type" "vecload")])
+
+(define_expand "movv2sf"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "")
+ (match_operand:V2SF 1 "any_operand" ""))]
+ "TARGET_SPE || TARGET_PAIRED_FLOAT"
+ "{ rs6000_emit_move (operands[0], operands[1], V2SFmode); DONE; }")
+
+(define_insn "*movv2sf_internal"
+ [(set (match_operand:V2SF 0 "nonimmediate_operand" "=m,r,r,r")
+ (match_operand:V2SF 1 "input_operand" "r,m,r,W"))]
+ "TARGET_SPE
+ && (gpc_reg_operand (operands[0], V2SFmode)
+ || gpc_reg_operand (operands[1], V2SFmode))"
+ "@
+ evstdd%X0 %1,%y0
+ evldd%X1 %0,%y1
+ evor %0,%1,%1
+ evxor %0,%0,%0"
+ [(set_attr "type" "vecload,vecstore,*,*")
+ (set_attr "length" "*,*,*,*")])
+
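+;; Illustrative use (an editorial sketch of expected output, not part of
+;; the machine description): with the generic vector extension
+;;
+;;   typedef int v2si __attribute__ ((vector_size (8)));
+;;   void copy (v2si *dst, const v2si *src) { *dst = *src; }
+;;
+;; a V2SImode copy compiled for an SPE target (e.g. -mcpu=8540) should
+;; use the evldd/evstdd alternatives above, a register copy evor, and a
+;; zero vector constant the evxor alternative (or the post-reload xor
+;; split).
+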
+;; End of vector move instructions.
+
+(define_insn "spe_evmwhssfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 702))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssmaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 703))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssmaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 704))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 705))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhusiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 706))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhusiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 707))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 708))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhssian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 709))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhssian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 710))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 711))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 713))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgssfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 714))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgssfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmfaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 715))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmfaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 716))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgumiaa"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 717))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgumiaa %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgssfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 718))
+ (clobber (reg:SI SPEFSCR_REGNO))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgssfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmfan"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 719))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmfan %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgsmian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 720))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgsmian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_evmwhgumian"
+ [(set (match_operand:V2SI 0 "gpc_reg_operand" "=r")
+ (unspec:V2SI [(match_operand:V2SI 1 "gpc_reg_operand" "r")
+ (match_operand:V2SI 2 "gpc_reg_operand" "r")] 721))
+ (set (reg:V2SI SPE_ACC_REGNO) (unspec:V2SI [(const_int 0)] 0))]
+ "TARGET_SPE"
+ "evmwhgumian %0,%1,%2"
+ [(set_attr "type" "veccomplex")
+ (set_attr "length" "4")])
+
+(define_insn "spe_mtspefscr"
+ [(set (reg:SI SPEFSCR_REGNO)
+ (unspec_volatile:SI [(match_operand:SI 0 "register_operand" "r")]
+ 722))]
+ "TARGET_SPE"
+ "mtspefscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "spe_mfspefscr"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec_volatile:SI [(reg:SI SPEFSCR_REGNO)] 723))]
+ "TARGET_SPE"
+ "mfspefscr %0"
+ [(set_attr "type" "vecsimple")])
+
+;; Floating-point comparison instructions.
+
+;; Flip the GT bit.
+(define_insn "e500_flip_gt_bit"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(match_operand:CCFP 1 "cc_reg_operand" "y")] 999))]
+ "!TARGET_FPRS && TARGET_HARD_FLOAT"
+ "*
+{
+ return output_e500_flip_gt_bit (operands[0], operands[1]);
+}"
+ [(set_attr "type" "cr_logical")])
+
+;; MPC8540 single-precision FP instructions on GPRs.
+;; There are two variants of each: one for IEEE-compliant math and one
+;; for non-IEEE-compliant math.
+
+(define_insn "cmpsfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1000))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efscmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstsfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1001))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS
+ && flag_finite_math_only && !flag_trapping_math"
+ "efststeq %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpsfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1002))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efscmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstsfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1003))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS
+ && flag_finite_math_only && !flag_trapping_math"
+ "efststgt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpsflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1004))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efscmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstsflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "r")
+ (match_operand:SF 2 "gpc_reg_operand" "r"))]
+ 1005))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS
+ && flag_finite_math_only && !flag_trapping_math"
+ "efststlt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+;; Same thing, but for double-precision.
+
+(define_insn "cmpdfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFEQ_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efdcmpeq %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFEQ_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && flag_finite_math_only && !flag_trapping_math"
+ "efdtsteq %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpdfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFGT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efdcmpgt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFGT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && flag_finite_math_only && !flag_trapping_math"
+ "efdtstgt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+(define_insn "cmpdflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ CMPDFLT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efdcmplt %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "tstdflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "r")
+ (match_operand:DF 2 "gpc_reg_operand" "r"))]
+ TSTDFLT_GPR))]
+ "TARGET_HARD_FLOAT && TARGET_E500_DOUBLE
+ && flag_finite_math_only && !flag_trapping_math"
+ "efdtstlt %0,%1,%2"
+ [(set_attr "type" "veccmpsimple")])
+
+;; Same thing, but for IBM long double.
+
+(define_insn "cmptfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "r")
+ (match_operand:TF 2 "gpc_reg_operand" "r"))]
+ CMPTFEQ_GPR))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efdcmpeq %0,%1,%2\;bng %0,$+8\;efdcmpeq %0,%L1,%L2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "12")])
+
+(define_insn "tsttfeq_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "r")
+ (match_operand:TF 2 "gpc_reg_operand" "r"))]
+ TSTTFEQ_GPR))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128
+ && flag_finite_math_only && !flag_trapping_math"
+ "efdtsteq %0,%1,%2\;bng %0,$+8\;efdtsteq %0,%L1,%L2"
+ [(set_attr "type" "veccmpsimple")
+ (set_attr "length" "12")])
+
+(define_insn "cmptfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "r")
+ (match_operand:TF 2 "gpc_reg_operand" "r"))]
+ CMPTFGT_GPR))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efdcmpgt %0,%1,%2\;bgt %0,$+16\;efdcmpeq %0,%1,%2\;bng %0,$+8\;efdcmpgt %0,%L1,%L2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "20")])
+
+(define_insn "tsttfgt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "r")
+ (match_operand:TF 2 "gpc_reg_operand" "r"))]
+ TSTTFGT_GPR))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128
+ && flag_finite_math_only && !flag_trapping_math"
+ "efdtstgt %0,%1,%2\;bgt %0,$+16\;efdtsteq %0,%1,%2\;bng %0,$+8\;efdtstgt %0,%L1,%L2"
+ [(set_attr "type" "veccmpsimple")
+ (set_attr "length" "20")])
+
+(define_insn "cmptflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "r")
+ (match_operand:TF 2 "gpc_reg_operand" "r"))]
+ CMPTFLT_GPR))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128
+ && !(flag_finite_math_only && !flag_trapping_math)"
+ "efdcmplt %0,%1,%2\;bgt %0,$+16\;efdcmpeq %0,%1,%2\;bng %0,$+8\;efdcmplt %0,%L1,%L2"
+ [(set_attr "type" "veccmp")
+ (set_attr "length" "20")])
+
+(define_insn "tsttflt_gpr"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP
+ [(compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "r")
+ (match_operand:TF 2 "gpc_reg_operand" "r"))]
+ TSTTFLT_GPR))]
+ "!TARGET_IEEEQUAD
+ && TARGET_HARD_FLOAT && TARGET_E500_DOUBLE && TARGET_LONG_DOUBLE_128
+ && flag_finite_math_only && !flag_trapping_math"
+ "efdtstlt %0,%1,%2\;bgt %0,$+16\;efdtsteq %0,%1,%2\;bng %0,$+8\;efdtstlt %0,%L1,%L2"
+ [(set_attr "type" "veccmpsimple")
+ (set_attr "length" "20")])
+
+;; Like cceq_ior_compare, but compare the GT bits.
+(define_insn "e500_cr_ior_compare"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (unspec:CCFP [(match_operand 1 "cc_reg_operand" "y")
+ (match_operand 2 "cc_reg_operand" "y")]
+ E500_CR_IOR_COMPARE))]
+ "TARGET_HARD_FLOAT && !TARGET_FPRS"
+ "cror 4*%0+gt,4*%1+gt,4*%2+gt"
+ [(set_attr "type" "cr_logical")])
+
+;; Out-of-line prologues and epilogues.
+(define_insn "*save_gpregs_spe"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+ (use (match_operand:P 2 "gpc_reg_operand" "r"))
+ (set (match_operand:V2SI 3 "memory_operand" "=m")
+ (match_operand:V2SI 4 "gpc_reg_operand" "r"))])]
+ "TARGET_SPE_ABI"
+ "bl %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*restore_gpregs_spe"
+ [(match_parallel 0 "any_parallel_operand"
+ [(clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+ (use (match_operand:P 2 "gpc_reg_operand" "r"))
+ (set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
+ (match_operand:V2SI 4 "memory_operand" "m"))])]
+ "TARGET_SPE_ABI"
+ "bl %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "*return_and_restore_gpregs_spe"
+ [(match_parallel 0 "any_parallel_operand"
+ [(return)
+ (clobber (reg:P 65))
+ (use (match_operand:P 1 "symbol_ref_operand" "s"))
+ (use (match_operand:P 2 "gpc_reg_operand" "r"))
+ (set (match_operand:V2SI 3 "gpc_reg_operand" "=r")
+ (match_operand:V2SI 4 "memory_operand" "m"))])]
+ "TARGET_SPE_ABI"
+ "b %z1"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
diff --git a/gcc-4.4.3/gcc/config/rs6000/spu2vmx.h b/gcc-4.4.3/gcc/config/rs6000/spu2vmx.h
new file mode 100644
index 000000000..1ccb71c70
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/spu2vmx.h
@@ -0,0 +1,2415 @@
+/* Cell SPU-to-VMX intrinsics header
+ Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _SPU2VMX_H_
+#define _SPU2VMX_H_ 1
+
+#ifdef __cplusplus
+
+#ifndef __SPU__
+
+#include <si2vmx.h>
+
+/* spu_absd (absolute difference)
+ * ========
+ */
+static __inline vec_uchar16 spu_absd(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_absdb((qword)(a), (qword)(b))));
+}
+
+
+/* spu_add
+ * =======
+ */
+static __inline vec_uint4 spu_add(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_a((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_add(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_a((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_add(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_ah((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_add(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_ah((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_add(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_ai((qword)(a), (int)(b))));
+}
+
+static __inline vec_int4 spu_add(vec_int4 a, int b)
+{
+ return ((vec_int4)(si_ai((qword)(a), b)));
+}
+
+static __inline vec_ushort8 spu_add(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_ushort8)(si_ahi((qword)(a), (short)(b))));
+}
+
+static __inline vec_short8 spu_add(vec_short8 a, short b)
+{
+ return ((vec_short8)(si_ahi((qword)(a), b)));
+}
+
+static __inline vec_float4 spu_add(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_fa((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_add(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_dfa((qword)(a), (qword)(b))));
+}
+
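+/* Usage sketch (editorial, not part of the original header): C++ overload
+ * resolution picks the SI intrinsic that matches the element type, e.g.
+ *
+ *   vec_float4 f = spu_add (fa, fb);   // -> si_fa (float add)
+ *   vec_short8 s = spu_add (sa, sb);   // -> si_ah (halfword add)
+ *   vec_int4   i = spu_add (ia, 3);    // -> si_ai (word add immediate)
+ */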
+
+/* spu_addx
+ * ========
+ */
+static __inline vec_uint4 spu_addx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
+{
+ return ((vec_uint4)(si_addx((qword)(a), (qword)(b), (qword)(c))));
+}
+
+static __inline vec_int4 spu_addx(vec_int4 a, vec_int4 b, vec_int4 c)
+{
+ return ((vec_int4)(si_addx((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_and
+ * =======
+ */
+static __inline vec_uchar16 spu_and(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_char16 spu_and(vec_char16 a, vec_char16 b)
+{
+ return ((vec_char16)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_and(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_and(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_and(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_and(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_float4 spu_and(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_and(vec_ullong2 a, vec_ullong2 b)
+{
+ return ((vec_ullong2)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_llong2 spu_and(vec_llong2 a, vec_llong2 b)
+{
+ return ((vec_llong2)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_and(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_and((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uchar16 spu_and(vec_uchar16 a, unsigned char b)
+{
+ return ((vec_uchar16)(si_andbi((qword)(a), (signed char)(b))));
+}
+
+
+static __inline vec_char16 spu_and(vec_char16 a, signed char b)
+{
+ return ((vec_char16)(si_andbi((qword)(a), b)));
+}
+
+static __inline vec_ushort8 spu_and(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_ushort8)(si_andhi((qword)(a), (signed short)(b))));
+}
+
+static __inline vec_short8 spu_and(vec_short8 a, signed short b)
+{
+ return ((vec_short8)(si_andhi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_and(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_andi((qword)(a), (signed int)(b))));
+}
+
+static __inline vec_int4 spu_and(vec_int4 a, signed int b)
+{
+ return ((vec_int4)(si_andi((qword)(a), b)));
+}
+
+
+/* spu_andc
+ * ========
+ */
+#define spu_andc(_a, _b) vec_andc(_a, _b)
+
+
+/* spu_avg
+ * =======
+ */
+#define spu_avg(_a, _b) vec_avg(_a, _b)
+
+
+/* spu_bisled
+ * spu_bisled_d
+ * spu_bisled_e
+ * ============
+ */
+#define spu_bisled(_func) /* not mappable */
+#define spu_bisled_d(_func) /* not mappable */
+#define spu_bisled_e(_func) /* not mappable */
+
+/* spu_cmpabseq
+ * ============
+ */
+static __inline vec_uint4 spu_cmpabseq(vec_float4 a, vec_float4 b)
+{
+ return ((vec_uint4)(si_fcmeq((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_cmpabseq(vec_double2 a, vec_double2 b)
+{
+ return ((vec_ullong2)(si_dfcmeq((qword)(a), (qword)(b))));
+}
+
+
+/* spu_cmpabsgt
+ * ============
+ */
+static __inline vec_uint4 spu_cmpabsgt(vec_float4 a, vec_float4 b)
+{
+ return ((vec_uint4)(si_fcmgt((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_cmpabsgt(vec_double2 a, vec_double2 b)
+{
+ return ((vec_ullong2)(si_dfcmgt((qword)(a), (qword)(b))));
+}
+
+
+/* spu_cmpeq
+ * ========
+ */
+static __inline vec_uchar16 spu_cmpeq(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_ceqb((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uchar16 spu_cmpeq(vec_char16 a, vec_char16 b)
+{
+ return ((vec_uchar16)(si_ceqb((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_cmpeq(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_ceqh((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_cmpeq(vec_short8 a, vec_short8 b)
+{
+ return ((vec_ushort8)(si_ceqh((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_cmpeq(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_ceq((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_cmpeq(vec_int4 a, vec_int4 b)
+{
+ return ((vec_uint4)(si_ceq((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_cmpeq(vec_float4 a, vec_float4 b)
+{
+ return ((vec_uint4)(si_fceq((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uchar16 spu_cmpeq(vec_uchar16 a, unsigned char b)
+{
+ return ((vec_uchar16)(si_ceqbi((qword)(a), (signed char)(b))));
+}
+
+static __inline vec_uchar16 spu_cmpeq(vec_char16 a, signed char b)
+{
+ return ((vec_uchar16)(si_ceqbi((qword)(a), b)));
+}
+
+static __inline vec_ushort8 spu_cmpeq(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_ushort8)(si_ceqhi((qword)(a), (signed short)(b))));
+}
+
+static __inline vec_ushort8 spu_cmpeq(vec_short8 a, signed short b)
+{
+ return ((vec_ushort8)(si_ceqhi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_cmpeq(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_ceqi((qword)(a), (signed int)(b))));
+}
+
+static __inline vec_uint4 spu_cmpeq(vec_int4 a, signed int b)
+{
+ return ((vec_uint4)(si_ceqi((qword)(a), b)));
+}
+
+static __inline vec_ullong2 spu_cmpeq(vec_double2 a, vec_double2 b)
+{
+ return ((vec_ullong2)(si_dfceq((qword)(a), (qword)(b))));
+}
+
+
+/* spu_cmpgt
+ * ========
+ */
+static __inline vec_uchar16 spu_cmpgt(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_clgtb((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uchar16 spu_cmpgt(vec_char16 a, vec_char16 b)
+{
+ return ((vec_uchar16)(si_cgtb((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_cmpgt(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_clgth((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_cmpgt(vec_short8 a, vec_short8 b)
+{
+ return ((vec_ushort8)(si_cgth((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_cmpgt(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_clgt((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_cmpgt(vec_int4 a, vec_int4 b)
+{
+ return ((vec_uint4)(si_cgt((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_cmpgt(vec_float4 a, vec_float4 b)
+{
+ return ((vec_uint4)(si_fcgt((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uchar16 spu_cmpgt(vec_uchar16 a, unsigned char b)
+{
+ return ((vec_uchar16)(si_clgtbi((qword)(a), b)));
+}
+
+static __inline vec_uchar16 spu_cmpgt(vec_char16 a, signed char b)
+{
+ return ((vec_uchar16)(si_cgtbi((qword)(a), b)));
+}
+
+static __inline vec_ushort8 spu_cmpgt(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_ushort8)(si_clgthi((qword)(a), b)));
+}
+
+static __inline vec_ushort8 spu_cmpgt(vec_short8 a, signed short b)
+{
+ return ((vec_ushort8)(si_cgthi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_cmpgt(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_clgti((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_cmpgt(vec_int4 a, signed int b)
+{
+ return ((vec_uint4)(si_cgti((qword)(a), b)));
+}
+
+static __inline vec_ullong2 spu_cmpgt(vec_double2 a, vec_double2 b)
+{
+ return ((vec_ullong2)(si_dfcgt((qword)(a), (qword)(b))));
+}
+
+
+/* spu_cntb
+ * ========
+ */
+static __inline vec_uchar16 spu_cntb(vec_uchar16 a)
+{
+ return ((vec_uchar16)(si_cntb((qword)(a))));
+}
+
+
+static __inline vec_uchar16 spu_cntb(vec_char16 a)
+{
+ return ((vec_uchar16)(si_cntb((qword)(a))));
+}
+
+/* spu_cntlz
+ * =========
+ */
+static __inline vec_uint4 spu_cntlz(vec_uint4 a)
+{
+ return ((vec_uint4)(si_clz((qword)(a))));
+}
+
+static __inline vec_uint4 spu_cntlz(vec_int4 a)
+{
+ return ((vec_uint4)(si_clz((qword)(a))));
+}
+
+static __inline vec_uint4 spu_cntlz(vec_float4 a)
+{
+ return ((vec_uint4)(si_clz((qword)(a))));
+}
+
+/* spu_testsv
+ * ==========
+ */
+static __inline vec_ullong2 spu_testsv(vec_double2 a, char b)
+{
+ return ((vec_ullong2)(si_dftsv((qword)(a), b)));
+}
+
+/* spu_convtf
+ * ==========
+ */
+#define spu_convtf(_a, _b) (vec_ctf(_a, _b))
+
+/* spu_convts
+ * ==========
+ */
+#define spu_convts(_a, _b) (vec_cts(_a, _b))
+
+/* spu_convtu
+ * ==========
+ */
+#define spu_convtu(_a, _b) (vec_ctu(_a, _b))
+
+
+/* spu_dsync
+ * ========
+ */
+#define spu_dsync()
+
+/* spu_eqv
+ * =======
+ */
+static __inline vec_uchar16 spu_eqv(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_char16 spu_eqv(vec_char16 a, vec_char16 b)
+{
+ return ((vec_char16)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_eqv(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_eqv(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_eqv(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_eqv(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_float4 spu_eqv(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_eqv(vec_ullong2 a, vec_ullong2 b)
+{
+ return ((vec_ullong2)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_llong2 spu_eqv(vec_llong2 a, vec_llong2 b)
+{
+ return ((vec_llong2)(si_eqv((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_eqv(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_eqv((qword)(a), (qword)(b))));
+}
+
+/* spu_extend
+ * ========
+ */
+static __inline vec_short8 spu_extend(vec_char16 a)
+{
+ return ((vec_short8)(si_xsbh((qword)(a))));
+}
+
+
+static __inline vec_int4 spu_extend(vec_short8 a)
+{
+ return ((vec_int4)(si_xshw((qword)(a))));
+}
+
+static __inline vec_llong2 spu_extend(vec_int4 a)
+{
+ return ((vec_llong2)(si_xswd((qword)(a))));
+}
+
+
+static __inline vec_double2 spu_extend(vec_float4 a)
+{
+ return ((vec_double2)(si_fesd((qword)(a))));
+}
+
+
+/* spu_extract
+ * ========
+ */
+static __inline unsigned char spu_extract(vec_uchar16 a, int element)
+{
+ union {
+ vec_uchar16 v;
+ unsigned char c[16];
+ } in;
+
+ in.v = a;
+ return (in.c[element & 15]);
+}
+
+static __inline signed char spu_extract(vec_char16 a, int element)
+{
+ union {
+ vec_char16 v;
+ signed char c[16];
+ } in;
+
+ in.v = a;
+ return (in.c[element & 15]);
+}
+
+static __inline unsigned short spu_extract(vec_ushort8 a, int element)
+{
+ union {
+ vec_ushort8 v;
+ unsigned short s[8];
+ } in;
+
+ in.v = a;
+ return (in.s[element & 7]);
+}
+
+static __inline signed short spu_extract(vec_short8 a, int element)
+{
+ union {
+ vec_short8 v;
+ signed short s[8];
+ } in;
+
+ in.v = a;
+ return (in.s[element & 7]);
+}
+
+static __inline unsigned int spu_extract(vec_uint4 a, int element)
+{
+ union {
+ vec_uint4 v;
+ unsigned int i[4];
+ } in;
+
+ in.v = a;
+ return (in.i[element & 3]);
+}
+
+static __inline signed int spu_extract(vec_int4 a, int element)
+{
+ union {
+ vec_int4 v;
+ signed int i[4];
+ } in;
+
+ in.v = a;
+ return (in.i[element & 3]);
+}
+
+static __inline float spu_extract(vec_float4 a, int element)
+{
+ union {
+ vec_float4 v;
+ float f[4];
+ } in;
+
+ in.v = a;
+ return (in.f[element & 3]);
+}
+
+static __inline unsigned long long spu_extract(vec_ullong2 a, int element)
+{
+ union {
+ vec_ullong2 v;
+ unsigned long long l[2];
+ } in;
+
+ in.v = a;
+ return (in.l[element & 1]);
+}
+
+static __inline signed long long spu_extract(vec_llong2 a, int element)
+{
+ union {
+ vec_llong2 v;
+ signed long long l[2];
+ } in;
+
+ in.v = a;
+ return (in.l[element & 1]);
+}
+
+static __inline double spu_extract(vec_double2 a, int element)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } in;
+
+ in.v = a;
+ return (in.d[element & 1]);
+}
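+
+/* Editorial note: spu_extract has no single-instruction mapping, so it
+ * round-trips through a union; the element index is masked (e.g.
+ * "element & 3" for four-element vectors) so out-of-range indices wrap
+ * instead of reading past the union.  Usage sketch:
+ *
+ *   vec_float4 v = ...;
+ *   float x = spu_extract (v, 2);   // third element
+ */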
+
+/* spu_gather
+ * ========
+ */
+static __inline vec_uint4 spu_gather(vec_uchar16 a)
+{
+ return ((vec_uint4)(si_gbb((qword)(a))));
+}
+
+
+static __inline vec_uint4 spu_gather(vec_char16 a)
+{
+ return ((vec_uint4)(si_gbb((qword)(a))));
+}
+
+static __inline vec_uint4 spu_gather(vec_ushort8 a)
+{
+ return ((vec_uint4)(si_gbh((qword)(a))));
+}
+
+static __inline vec_uint4 spu_gather(vec_short8 a)
+{
+ return ((vec_uint4)(si_gbh((qword)(a))));
+}
+
+
+static __inline vec_uint4 spu_gather(vec_uint4 a)
+{
+ return ((vec_uint4)(si_gb((qword)(a))));
+}
+
+static __inline vec_uint4 spu_gather(vec_int4 a)
+{
+ return ((vec_uint4)(si_gb((qword)(a))));
+}
+
+static __inline vec_uint4 spu_gather(vec_float4 a)
+{
+ return ((vec_uint4)(si_gb((qword)(a))));
+}
+
+/* spu_genb
+ * ========
+ */
+static __inline vec_uint4 spu_genb(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_bg((qword)(b), (qword)(a))));
+}
+
+static __inline vec_int4 spu_genb(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_bg((qword)(b), (qword)(a))));
+}
+
+/* spu_genbx
+ * =========
+ */
+static __inline vec_uint4 spu_genbx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
+{
+ return ((vec_uint4)(si_bgx((qword)(b), (qword)(a), (qword)(c))));
+}
+
+static __inline vec_int4 spu_genbx(vec_int4 a, vec_int4 b, vec_int4 c)
+{
+ return ((vec_int4)(si_bgx((qword)(b), (qword)(a), (qword)(c))));
+}
+
+
+/* spu_genc
+ * ========
+ */
+static __inline vec_uint4 spu_genc(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_cg((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_genc(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_cg((qword)(a), (qword)(b))));
+}
+
+/* spu_gencx
+ * =========
+ */
+static __inline vec_uint4 spu_gencx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
+{
+ return ((vec_uint4)(si_cgx((qword)(a), (qword)(b), (qword)(c))));
+}
+
+static __inline vec_int4 spu_gencx(vec_int4 a, vec_int4 b, vec_int4 c)
+{
+ return ((vec_int4)(si_cgx((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_hcmpeq
+ * ========
+ */
+#define spu_hcmpeq(_a, _b) if (_a == _b) { SPU_HALT_ACTION; };
+
+
+/* spu_hcmpgt
+ * ========
+ */
+#define spu_hcmpgt(_a, _b) if (_a > _b) { SPU_HALT_ACTION; };
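+
+/* Editorial caution: the two macros above expand to a bare if-statement
+ * followed by a stray semicolon, so they do not compose safely with an
+ * enclosing if/else.  A more robust (hypothetical) formulation would be:
+ *
+ *   #define spu_hcmpeq(_a, _b) do { if ((_a) == (_b)) { SPU_HALT_ACTION; } } while (0)
+ */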
+
+
+/* spu_idisable
+ * ============
+ */
+#define spu_idisable() SPU_UNSUPPORTED_ACTION
+
+
+/* spu_ienable
+ * ===========
+ */
+#define spu_ienable() SPU_UNSUPPORTED_ACTION
+
+
+/* spu_insert
+ * ========
+ */
+static __inline vec_uchar16 spu_insert(unsigned char a, vec_uchar16 b, int element)
+{
+ union {
+ vec_uchar16 v;
+ unsigned char c[16];
+ } in;
+
+ in.v = b;
+ in.c[element & 15] = a;
+ return (in.v);
+}
+
+static __inline vec_char16 spu_insert(signed char a, vec_char16 b, int element)
+{
+ return ((vec_char16)spu_insert((unsigned char)(a), (vec_uchar16)(b), element));
+}
+
+static __inline vec_ushort8 spu_insert(unsigned short a, vec_ushort8 b, int element)
+{
+ union {
+ vec_ushort8 v;
+ unsigned short s[8];
+ } in;
+
+ in.v = b;
+ in.s[element & 7] = a;
+ return (in.v);
+}
+
+static __inline vec_short8 spu_insert(signed short a, vec_short8 b, int element)
+{
+ return ((vec_short8)spu_insert((unsigned short)(a), (vec_ushort8)(b), element));
+}
+
+static __inline vec_uint4 spu_insert(unsigned int a, vec_uint4 b, int element)
+{
+ union {
+ vec_uint4 v;
+ unsigned int i[4];
+ } in;
+
+ in.v = b;
+ in.i[element & 3] = a;
+ return (in.v);
+}
+
+static __inline vec_int4 spu_insert(signed int a, vec_int4 b, int element)
+{
+ return ((vec_int4)spu_insert((unsigned int)(a), (vec_uint4)(b), element));
+}
+
+static __inline vec_float4 spu_insert(float a, vec_float4 b, int element)
+{
+ union {
+ vec_float4 v;
+ float f[4];
+ } in;
+
+ in.v = b;
+ in.f[element & 3] = a;
+ return (in.v);
+}
+
+static __inline vec_ullong2 spu_insert(unsigned long long a, vec_ullong2 b, int element)
+{
+ union {
+ vec_ullong2 v;
+ unsigned long long l[2];
+ } in;
+
+ in.v = b;
+ in.l[element & 1] = a;
+ return (in.v);
+}
+
+static __inline vec_llong2 spu_insert(signed long long a, vec_llong2 b, int element)
+{
+ return ((vec_llong2)spu_insert((unsigned long long)(a), (vec_ullong2)(b), element));
+}
+
+static __inline vec_double2 spu_insert(double a, vec_double2 b, int element)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } in;
+
+ in.v = b;
+ in.d[element & 1] = a;
+ return (in.v);
+}
+
+
+/* spu_madd
+ * ========
+ */
+static __inline vec_int4 spu_madd(vec_short8 a, vec_short8 b, vec_int4 c)
+{
+ return ((vec_int4)(si_mpya((qword)(a), (qword)(b), (qword)(c))));
+}
+
+static __inline vec_float4 spu_madd(vec_float4 a, vec_float4 b, vec_float4 c)
+{
+ return ((vec_float4)(si_fma((qword)(a), (qword)(b), (qword)(c))));
+}
+
+static __inline vec_double2 spu_madd(vec_double2 a, vec_double2 b, vec_double2 c)
+{
+ return ((vec_double2)(si_dfma((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_maskb
+ * ========
+ */
+#define spu_maskb(_a) (vec_uchar16)(si_fsmb(si_from_int((int)(_a))))
+
+/* spu_maskh
+ * ========
+ */
+#define spu_maskh(_a) (vec_ushort8)(si_fsmh(si_from_int((int)(_a))))
+
+
+/* spu_maskw
+ * ========
+ */
+#define spu_maskw(_a) (vec_uint4)(si_fsm(si_from_int((int)(_a))))
+
+
+/* spu_mfcdma32
+ * ========
+ */
+#define spu_mfcdma32(_ls, _ea, _size, _tagid, _cmd)
+
+
+/* spu_mfcdma64
+ * ========
+ */
+#define spu_mfcdma64(_ls, _eahi, _ealow, _size, _tagid, _cmd)
+
+/* spu_mfcstat
+ * ========
+ */
+#define spu_mfcstat(_type) 0xFFFFFFFF
+
+
+
+/* spu_mffpscr
+ * ===========
+ */
+#define spu_mffpscr() (vec_uint4)(si_fscrrd())
+
+
+/* spu_mfspr
+ * ========
+ */
+
+#define spu_mfspr(_reg) si_to_uint(si_mfspr(_reg))
+
+
+
+/* spu_mhhadd
+ * ==========
+ */
+static __inline vec_int4 spu_mhhadd(vec_short8 a, vec_short8 b, vec_int4 c)
+{
+ return ((vec_int4)(si_mpyhha((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+static __inline vec_uint4 spu_mhhadd(vec_ushort8 a, vec_ushort8 b, vec_uint4 c)
+{
+ return ((vec_uint4)(si_mpyhhau((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_msub
+ * ========
+ */
+static __inline vec_float4 spu_msub(vec_float4 a, vec_float4 b, vec_float4 c)
+{
+ return ((vec_float4)(si_fms((qword)(a), (qword)(b), (qword)(c))));
+}
+
+static __inline vec_double2 spu_msub(vec_double2 a, vec_double2 b, vec_double2 c)
+{
+ return ((vec_double2)(si_dfms((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_mtfpscr
+ * ===========
+ */
+#define spu_mtfpscr(_a)
+
+
+/* spu_mtspr
+ * ========
+ */
+#define spu_mtspr(_reg, _a)
+
+
+/* spu_mul
+ * ========
+ */
+static __inline vec_float4 spu_mul(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_fm((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_mul(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_dfm((qword)(a), (qword)(b))));
+}
+
+
+/* spu_mulh
+ * ========
+ */
+static __inline vec_int4 spu_mulh(vec_short8 a, vec_short8 b)
+{
+ return ((vec_int4)(si_mpyh((qword)(a), (qword)(b))));
+}
+
+/* spu_mule
+ * =========
+ */
+#define spu_mule(_a, _b) vec_mule(_a, _b)
+
+
+
+/* spu_mulo
+ * ========
+ */
+static __inline vec_int4 spu_mulo(vec_short8 a, vec_short8 b)
+{
+ return ((vec_int4)(si_mpy((qword)(a), (qword)(b))));
+}
+
+
+static __inline vec_uint4 spu_mulo(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_uint4)(si_mpyu((qword)(a), (qword)(b))));
+}
+
+
+static __inline vec_int4 spu_mulo(vec_short8 a, short b)
+{
+ return ((vec_int4)(si_mpyi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_mulo(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_uint4)(si_mpyui((qword)(a), b)));
+}
+
+
+/* spu_mulsr
+ * =========
+ */
+static __inline vec_int4 spu_mulsr(vec_short8 a, vec_short8 b)
+{
+ return ((vec_int4)(si_mpys((qword)(a), (qword)(b))));
+}
+
+
+/* spu_nand
+ * ========
+ */
+static __inline vec_uchar16 spu_nand(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_char16 spu_nand(vec_char16 a, vec_char16 b)
+{
+ return ((vec_char16)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_nand(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_nand(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_nand(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_nand(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_float4 spu_nand(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_nand(vec_ullong2 a, vec_ullong2 b)
+{
+ return ((vec_ullong2)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_llong2 spu_nand(vec_llong2 a, vec_llong2 b)
+{
+ return ((vec_llong2)(si_nand((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_nand(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_nand((qword)(a), (qword)(b))));
+}
+
+
+/* spu_nmadd
+ * =========
+ */
+static __inline vec_double2 spu_nmadd(vec_double2 a, vec_double2 b, vec_double2 c)
+{
+ return ((vec_double2)(si_dfnma((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_nmsub
+ * =========
+ */
+static __inline vec_float4 spu_nmsub(vec_float4 a, vec_float4 b, vec_float4 c)
+{
+ return ((vec_float4)(si_fnms((qword)(a), (qword)(b), (qword)(c))));
+}
+
+static __inline vec_double2 spu_nmsub(vec_double2 a, vec_double2 b, vec_double2 c)
+{
+ return ((vec_double2)(si_dfnms((qword)(a), (qword)(b), (qword)(c))));
+}
+
+
+/* spu_nor
+ * =======
+ */
+#define spu_nor(_a, _b) vec_nor(_a, _b)
+
+
+/* spu_or
+ * ======
+ */
+static __inline vec_uchar16 spu_or(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_char16 spu_or(vec_char16 a, vec_char16 b)
+{
+ return ((vec_char16)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_or(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_or(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_or(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_or(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_float4 spu_or(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_or(vec_ullong2 a, vec_ullong2 b)
+{
+ return ((vec_ullong2)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_llong2 spu_or(vec_llong2 a, vec_llong2 b)
+{
+ return ((vec_llong2)(si_or((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_or(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_or((qword)(a), (qword)(b))));
+}
+
+
+static __inline vec_uchar16 spu_or(vec_uchar16 a, unsigned char b)
+{
+ return ((vec_uchar16)(si_orbi((qword)(a), b)));
+}
+
+static __inline vec_char16 spu_or(vec_char16 a, signed char b)
+{
+ return ((vec_char16)(si_orbi((qword)(a), (unsigned char)(b))));
+}
+
+static __inline vec_ushort8 spu_or(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_ushort8)(si_orhi((qword)(a), b)));
+}
+
+static __inline vec_short8 spu_or(vec_short8 a, signed short b)
+{
+ return ((vec_short8)(si_orhi((qword)(a), (unsigned short)(b))));
+}
+
+static __inline vec_uint4 spu_or(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_ori((qword)(a), b)));
+}
+
+static __inline vec_int4 spu_or(vec_int4 a, signed int b)
+{
+ return ((vec_int4)(si_ori((qword)(a), (unsigned int)(b))));
+}
+
+
+/* spu_orc
+ * =======
+ */
+#define spu_orc(_a, _b) vec_or(_a, vec_nor(_b, _b))
+
+
+/* spu_orx
+ * =======
+ */
+static __inline vec_uint4 spu_orx(vec_uint4 a)
+{
+ return ((vec_uint4)(si_orx((qword)(a))));
+}
+
+static __inline vec_int4 spu_orx(vec_int4 a)
+{
+ return ((vec_int4)(si_orx((qword)(a))));
+}
+
+
+/* spu_promote
+ * ===========
+ */
+static __inline vec_uchar16 spu_promote(unsigned char a, int element)
+{
+ union {
+ vec_uchar16 v;
+ unsigned char c[16];
+ } in;
+
+ in.c[element & 15] = a;
+ return (in.v);
+}
+
+static __inline vec_char16 spu_promote(signed char a, int element)
+{
+ union {
+ vec_char16 v;
+ signed char c[16];
+ } in;
+
+ in.c[element & 15] = a;
+ return (in.v);
+}
+
+static __inline vec_ushort8 spu_promote(unsigned short a, int element)
+{
+ union {
+ vec_ushort8 v;
+ unsigned short s[8];
+ } in;
+
+ in.s[element & 7] = a;
+ return (in.v);
+}
+
+static __inline vec_short8 spu_promote(signed short a, int element)
+{
+ union {
+ vec_short8 v;
+ signed short s[8];
+ } in;
+
+ in.s[element & 7] = a;
+ return (in.v);
+}
+
+static __inline vec_uint4 spu_promote(unsigned int a, int element)
+{
+ union {
+ vec_uint4 v;
+ unsigned int i[4];
+ } in;
+
+ in.i[element & 3] = a;
+ return (in.v);
+}
+
+static __inline vec_int4 spu_promote(signed int a, int element)
+{
+ union {
+ vec_int4 v;
+ signed int i[4];
+ } in;
+
+ in.i[element & 3] = a;
+ return (in.v);
+}
+
+static __inline vec_float4 spu_promote(float a, int element)
+{
+ union {
+ vec_float4 v;
+ float f[4];
+ } in;
+
+ in.f[element & 3] = a;
+ return (in.v);
+}
+
+static __inline vec_ullong2 spu_promote(unsigned long long a, int element)
+{
+ union {
+ vec_ullong2 v;
+ unsigned long long l[2];
+ } in;
+
+ in.l[element & 1] = a;
+ return (in.v);
+}
+
+static __inline vec_llong2 spu_promote(signed long long a, int element)
+{
+ union {
+ vec_llong2 v;
+ signed long long l[2];
+ } in;
+
+ in.l[element & 1] = a;
+ return (in.v);
+}
+
+static __inline vec_double2 spu_promote(double a, int element)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } in;
+
+ in.d[element & 1] = a;
+ return (in.v);
+}
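+
+/* Editorial note: spu_promote only assigns the selected element; the other
+ * union members are left uninitialized, matching the SPU semantics where
+ * the remaining elements of the result are undefined.  Sketch:
+ *
+ *   vec_int4 v = spu_promote (42, 0);   // element 0 is 42, rest undefined
+ */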
+
+/* spu_re
+ * ======
+ */
+#define spu_re(_a) vec_re(_a)
+
+
+/* spu_readch
+ * ==========
+ */
+#define spu_readch(_channel) 0 /* not mappable */
+
+
+/* spu_readchcnt
+ * =============
+ */
+#define spu_readchcnt(_channel) 0 /* not mappable */
+
+
+/* spu_readchqw
+ * ============
+ */
+#define spu_readchqw(_channel) __extension__ ({ vec_uint4 result = { 0, 0, 0, 0 }; result; })
+
+/* spu_rl
+ * ======
+ */
+static __inline vec_ushort8 spu_rl(vec_ushort8 a, vec_short8 b)
+{
+ return ((vec_ushort8)(si_roth((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_rl(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_roth((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_rl(vec_uint4 a, vec_int4 b)
+{
+ return ((vec_uint4)(si_rot((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_rl(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_rot((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_rl(vec_ushort8 a, int b)
+{
+ return ((vec_ushort8)(si_rothi((qword)(a), b)));
+}
+
+static __inline vec_short8 spu_rl(vec_short8 a, int b)
+{
+ return ((vec_short8)(si_rothi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_rl(vec_uint4 a, int b)
+{
+ return ((vec_uint4)(si_roti((qword)(a), b)));
+}
+
+static __inline vec_int4 spu_rl(vec_int4 a, int b)
+{
+ return ((vec_int4)(si_roti((qword)(a), b)));
+}
+
+
+/* spu_rlmask
+ * ==========
+ */
+static __inline vec_ushort8 spu_rlmask(vec_ushort8 a, vec_short8 b)
+{
+ return ((vec_ushort8)(si_rothm((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_rlmask(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_rothm((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_rlmask(vec_uint4 a, vec_int4 b)
+{
+ return ((vec_uint4)(si_rotm((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_rlmask(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_rotm((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_rlmask(vec_ushort8 a, int b)
+{
+ return ((vec_ushort8)(si_rothmi((qword)(a), b)));
+}
+
+static __inline vec_short8 spu_rlmask(vec_short8 a, int b)
+{
+ return ((vec_short8)(si_rothmi((qword)(a), b)));
+}
+
+
+static __inline vec_uint4 spu_rlmask(vec_uint4 a, int b)
+{
+ return ((vec_uint4)(si_rotmi((qword)(a), b)));
+}
+
+static __inline vec_int4 spu_rlmask(vec_int4 a, int b)
+{
+ return ((vec_int4)(si_rotmi((qword)(a), b)));
+}
+
+/* spu_rlmaska
+ * ===========
+ */
+static __inline vec_short8 spu_rlmaska(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_rotmah((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_rlmaska(vec_ushort8 a, vec_short8 b)
+{
+ return ((vec_ushort8)(si_rotmah((qword)(a), (qword)(b))));
+}
+
+
+static __inline vec_int4 spu_rlmaska(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_rotma((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_rlmaska(vec_uint4 a, vec_int4 b)
+{
+ return ((vec_uint4)(si_rotma((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_rlmaska(vec_ushort8 a, int b)
+{
+ return ((vec_ushort8)(si_rotmahi((qword)(a), b)));
+}
+
+static __inline vec_short8 spu_rlmaska(vec_short8 a, int b)
+{
+ return ((vec_short8)(si_rotmahi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_rlmaska(vec_uint4 a, int b)
+{
+ return ((vec_uint4)(si_rotmai((qword)(a), b)));
+}
+
+static __inline vec_int4 spu_rlmaska(vec_int4 a, int b)
+{
+ return ((vec_int4)(si_rotmai((qword)(a), b)));
+}
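+
+/* Editorial note: in this family "rl" is a true element rotate, while
+ * "rlmask"/"rlmaska" rotate and then mask: bits that would wrap around
+ * are cleared (rlmask) or sign-filled (rlmaska), so with negative counts
+ * they act as logical/arithmetic right shifts.  This follows the rotm
+ * and rotma instructions they map to; the "qw" variants below apply the
+ * same idea to the whole quadword.
+ */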
+
+
+/* spu_rlmaskqw
+ * ============
+ */
+static __inline vec_uchar16 spu_rlmaskqw(vec_uchar16 a, int count)
+{
+ return ((vec_uchar16)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_char16 spu_rlmaskqw(vec_char16 a, int count)
+{
+ return ((vec_char16)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ushort8 spu_rlmaskqw(vec_ushort8 a, int count)
+{
+ return ((vec_ushort8)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_short8 spu_rlmaskqw(vec_short8 a, int count)
+{
+ return ((vec_short8)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_uint4 spu_rlmaskqw(vec_uint4 a, int count)
+{
+ return ((vec_uint4)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_int4 spu_rlmaskqw(vec_int4 a, int count)
+{
+ return ((vec_int4)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_float4 spu_rlmaskqw(vec_float4 a, int count)
+{
+ return ((vec_float4)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ullong2 spu_rlmaskqw(vec_ullong2 a, int count)
+{
+ return ((vec_ullong2)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_llong2 spu_rlmaskqw(vec_llong2 a, int count)
+{
+ return ((vec_llong2)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_double2 spu_rlmaskqw(vec_double2 a, int count)
+{
+ return ((vec_double2)(si_rotqmbi((qword)(a), si_from_int(count))));
+}
+
+/* spu_rlmaskqwbyte
+ * ================
+ */
+static __inline vec_uchar16 spu_rlmaskqwbyte(vec_uchar16 a, int count)
+{
+ return ((vec_uchar16)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_char16 spu_rlmaskqwbyte(vec_char16 a, int count)
+{
+ return ((vec_char16)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ushort8 spu_rlmaskqwbyte(vec_ushort8 a, int count)
+{
+ return ((vec_ushort8)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_short8 spu_rlmaskqwbyte(vec_short8 a, int count)
+{
+ return ((vec_short8)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_uint4 spu_rlmaskqwbyte(vec_uint4 a, int count)
+{
+ return ((vec_uint4)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_int4 spu_rlmaskqwbyte(vec_int4 a, int count)
+{
+ return ((vec_int4)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_float4 spu_rlmaskqwbyte(vec_float4 a, int count)
+{
+ return ((vec_float4)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ullong2 spu_rlmaskqwbyte(vec_ullong2 a, int count)
+{
+ return ((vec_ullong2)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_llong2 spu_rlmaskqwbyte(vec_llong2 a, int count)
+{
+ return ((vec_llong2)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_double2 spu_rlmaskqwbyte(vec_double2 a, int count)
+{
+ return ((vec_double2)(si_rotqmby((qword)(a), si_from_int(count))));
+}
+
+/* spu_rlmaskqwbytebc
+ * ==================
+ */
+static __inline vec_uchar16 spu_rlmaskqwbytebc(vec_uchar16 a, int count)
+{
+ return ((vec_uchar16)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_char16 spu_rlmaskqwbytebc(vec_char16 a, int count)
+{
+ return ((vec_char16)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ushort8 spu_rlmaskqwbytebc(vec_ushort8 a, int count)
+{
+ return ((vec_ushort8)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_short8 spu_rlmaskqwbytebc(vec_short8 a, int count)
+{
+ return ((vec_short8)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_uint4 spu_rlmaskqwbytebc(vec_uint4 a, int count)
+{
+ return ((vec_uint4)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_int4 spu_rlmaskqwbytebc(vec_int4 a, int count)
+{
+ return ((vec_int4)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_float4 spu_rlmaskqwbytebc(vec_float4 a, int count)
+{
+ return ((vec_float4)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ullong2 spu_rlmaskqwbytebc(vec_ullong2 a, int count)
+{
+ return ((vec_ullong2)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_llong2 spu_rlmaskqwbytebc(vec_llong2 a, int count)
+{
+ return ((vec_llong2)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_double2 spu_rlmaskqwbytebc(vec_double2 a, int count)
+{
+ return ((vec_double2)(si_rotqmbybi((qword)(a), si_from_int(count))));
+}
+
+
+/* spu_rlqwbyte
+ * ============
+ */
+static __inline vec_uchar16 spu_rlqwbyte(vec_uchar16 a, int count)
+{
+ return ((vec_uchar16)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_char16 spu_rlqwbyte(vec_char16 a, int count)
+{
+ return ((vec_char16)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ushort8 spu_rlqwbyte(vec_ushort8 a, int count)
+{
+ return ((vec_ushort8)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_short8 spu_rlqwbyte(vec_short8 a, int count)
+{
+ return ((vec_short8)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_uint4 spu_rlqwbyte(vec_uint4 a, int count)
+{
+ return ((vec_uint4)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_int4 spu_rlqwbyte(vec_int4 a, int count)
+{
+ return ((vec_int4)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_float4 spu_rlqwbyte(vec_float4 a, int count)
+{
+ return ((vec_float4)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ullong2 spu_rlqwbyte(vec_ullong2 a, int count)
+{
+ return ((vec_ullong2)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_llong2 spu_rlqwbyte(vec_llong2 a, int count)
+{
+ return ((vec_llong2)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_double2 spu_rlqwbyte(vec_double2 a, int count)
+{
+ return ((vec_double2)(si_rotqby((qword)(a), si_from_int(count))));
+}
+
+
+/* spu_rlqwbytebc
+ * ==============
+ */
+static __inline vec_uchar16 spu_rlqwbytebc(vec_uchar16 a, int count)
+{
+ return ((vec_uchar16)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_char16 spu_rlqwbytebc(vec_char16 a, int count)
+{
+ return ((vec_char16)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ushort8 spu_rlqwbytebc(vec_ushort8 a, int count)
+{
+ return ((vec_ushort8)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_short8 spu_rlqwbytebc(vec_short8 a, int count)
+{
+ return ((vec_short8)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_uint4 spu_rlqwbytebc(vec_uint4 a, int count)
+{
+ return ((vec_uint4)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_int4 spu_rlqwbytebc(vec_int4 a, int count)
+{
+ return ((vec_int4)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_float4 spu_rlqwbytebc(vec_float4 a, int count)
+{
+ return ((vec_float4)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ullong2 spu_rlqwbytebc(vec_ullong2 a, int count)
+{
+ return ((vec_ullong2)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_llong2 spu_rlqwbytebc(vec_llong2 a, int count)
+{
+ return ((vec_llong2)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_double2 spu_rlqwbytebc(vec_double2 a, int count)
+{
+ return ((vec_double2)(si_rotqbybi((qword)(a), si_from_int(count))));
+}
+
+/* spu_rlqw
+ * ========
+ */
+static __inline vec_uchar16 spu_rlqw(vec_uchar16 a, int count)
+{
+ return ((vec_uchar16)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_char16 spu_rlqw(vec_char16 a, int count)
+{
+ return ((vec_char16)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ushort8 spu_rlqw(vec_ushort8 a, int count)
+{
+ return ((vec_ushort8)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_short8 spu_rlqw(vec_short8 a, int count)
+{
+ return ((vec_short8)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_uint4 spu_rlqw(vec_uint4 a, int count)
+{
+ return ((vec_uint4)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_int4 spu_rlqw(vec_int4 a, int count)
+{
+ return ((vec_int4)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_float4 spu_rlqw(vec_float4 a, int count)
+{
+ return ((vec_float4)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_ullong2 spu_rlqw(vec_ullong2 a, int count)
+{
+ return ((vec_ullong2)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_llong2 spu_rlqw(vec_llong2 a, int count)
+{
+ return ((vec_llong2)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
+static __inline vec_double2 spu_rlqw(vec_double2 a, int count)
+{
+ return ((vec_double2)(si_rotqbi((qword)(a), si_from_int(count))));
+}
+
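+/* Illustrative usage sketch (not part of the original header): a full
+   0-127 bit left rotate combines the byte rotate and bit rotate defined
+   above, since spu_rlqw itself honors only the low three bits of its
+   count.  */
+static __inline vec_uchar16 rotate_qw_left_bits(vec_uchar16 a, int bits)
+{
+ return (spu_rlqw(spu_rlqwbyte(a, bits >> 3), bits & 7));
+}
+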
+/* spu_roundtf
+ * ===========
+ */
+static __inline vec_float4 spu_roundtf(vec_double2 a)
+{
+ return ((vec_float4)(si_frds((qword)(a))));
+}
+
+
+/* spu_rsqrte
+ * ==========
+ */
+#define spu_rsqrte(_a) vec_rsqrte(_a)
+
+
+/* spu_sel
+ * =======
+ */
+static __inline vec_uchar16 spu_sel(vec_uchar16 a, vec_uchar16 b, vec_uchar16 pattern)
+{
+ return ((vec_uchar16)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_char16 spu_sel(vec_char16 a, vec_char16 b, vec_uchar16 pattern)
+{
+ return ((vec_char16)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_ushort8 spu_sel(vec_ushort8 a, vec_ushort8 b, vec_ushort8 pattern)
+{
+ return ((vec_ushort8)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_short8 spu_sel(vec_short8 a, vec_short8 b, vec_ushort8 pattern)
+{
+ return ((vec_short8)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_uint4 spu_sel(vec_uint4 a, vec_uint4 b, vec_uint4 pattern)
+{
+ return ((vec_uint4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_int4 spu_sel(vec_int4 a, vec_int4 b, vec_uint4 pattern)
+{
+ return ((vec_int4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_float4 spu_sel(vec_float4 a, vec_float4 b, vec_uint4 pattern)
+{
+ return ((vec_float4)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_ullong2 spu_sel(vec_ullong2 a, vec_ullong2 b, vec_ullong2 pattern)
+{
+ return ((vec_ullong2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_llong2 spu_sel(vec_llong2 a, vec_llong2 b, vec_ullong2 pattern)
+{
+ return ((vec_llong2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_double2 spu_sel(vec_double2 a, vec_double2 b, vec_ullong2 pattern)
+{
+ return ((vec_double2)(si_selb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
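+/* Usage sketch (illustrative, assuming the spu_cmpgt overloads defined
+   earlier in this header): spu_sel takes bit i from b where pattern
+   bit i is 1 and from a where it is 0, so a compare mask gives a
+   branch-free maximum.  */
+static __inline vec_int4 vec_max_sketch(vec_int4 a, vec_int4 b)
+{
+ vec_uint4 a_gt_b = spu_cmpgt(a, b); /* all-ones lanes where a > b */
+ return (spu_sel(b, a, a_gt_b));     /* pick a where a > b, else b */
+}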
+
+
+/* spu_shuffle
+ * ===========
+ */
+static __inline vec_uchar16 spu_shuffle(vec_uchar16 a, vec_uchar16 b, vec_uchar16 pattern)
+{
+ return ((vec_uchar16)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_char16 spu_shuffle(vec_char16 a, vec_char16 b, vec_uchar16 pattern)
+{
+ return ((vec_char16)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_ushort8 spu_shuffle(vec_ushort8 a, vec_ushort8 b, vec_uchar16 pattern)
+{
+ return ((vec_ushort8)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_short8 spu_shuffle(vec_short8 a, vec_short8 b, vec_uchar16 pattern)
+{
+ return ((vec_short8)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_uint4 spu_shuffle(vec_uint4 a, vec_uint4 b, vec_uchar16 pattern)
+{
+ return ((vec_uint4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_int4 spu_shuffle(vec_int4 a, vec_int4 b, vec_uchar16 pattern)
+{
+ return ((vec_int4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_float4 spu_shuffle(vec_float4 a, vec_float4 b, vec_uchar16 pattern)
+{
+ return ((vec_float4)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_ullong2 spu_shuffle(vec_ullong2 a, vec_ullong2 b, vec_uchar16 pattern)
+{
+ return ((vec_ullong2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_llong2 spu_shuffle(vec_llong2 a, vec_llong2 b, vec_uchar16 pattern)
+{
+ return ((vec_llong2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
+static __inline vec_double2 spu_shuffle(vec_double2 a, vec_double2 b, vec_uchar16 pattern)
+{
+ return ((vec_double2)(si_shufb((qword)(a), (qword)(b), (qword)(pattern))));
+}
+
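+/* Illustrative sketch (not part of the original header): the pattern
+   bytes index the 32-byte concatenation of a and b (0x00-0x0f select
+   from a, 0x10-0x1f from b), as with the underlying VMX vperm.  */
+static __inline vec_uchar16 byte_reverse_sketch(vec_uchar16 a)
+{
+ vec_uchar16 rev = { 15, 14, 13, 12, 11, 10, 9, 8,
+                     7, 6, 5, 4, 3, 2, 1, 0 };
+ return (spu_shuffle(a, a, rev));
+}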
+
+/* spu_sl
+ * ======
+ */
+static __inline vec_ushort8 spu_sl(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_shlh((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_sl(vec_short8 a, vec_ushort8 b)
+{
+ return ((vec_short8)(si_shlh((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_sl(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_shl((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_sl(vec_int4 a, vec_uint4 b)
+{
+ return ((vec_int4)(si_shl((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_sl(vec_ushort8 a, unsigned int b)
+{
+ return ((vec_ushort8)(si_shlhi((qword)(a), b)));
+}
+
+static __inline vec_short8 spu_sl(vec_short8 a, unsigned int b)
+{
+ return ((vec_short8)(si_shlhi((qword)(a), b)));
+}
+
+static __inline vec_uint4 spu_sl(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_shli((qword)(a), b)));
+}
+
+static __inline vec_int4 spu_sl(vec_int4 a, unsigned int b)
+{
+ return ((vec_int4)(si_shli((qword)(a), b)));
+}
+
+
+/* spu_slqw
+ * ========
+ */
+static __inline vec_uchar16 spu_slqw(vec_uchar16 a, unsigned int count)
+{
+ return ((vec_uchar16)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_char16 spu_slqw(vec_char16 a, unsigned int count)
+{
+ return ((vec_char16)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_ushort8 spu_slqw(vec_ushort8 a, unsigned int count)
+{
+ return ((vec_ushort8)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_short8 spu_slqw(vec_short8 a, unsigned int count)
+{
+ return ((vec_short8)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_uint4 spu_slqw(vec_uint4 a, unsigned int count)
+{
+ return ((vec_uint4)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_int4 spu_slqw(vec_int4 a, unsigned int count)
+{
+ return ((vec_int4)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_float4 spu_slqw(vec_float4 a, unsigned int count)
+{
+ return ((vec_float4)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_ullong2 spu_slqw(vec_ullong2 a, unsigned int count)
+{
+ return ((vec_ullong2)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_llong2 spu_slqw(vec_llong2 a, unsigned int count)
+{
+ return ((vec_llong2)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_double2 spu_slqw(vec_double2 a, unsigned int count)
+{
+ return ((vec_double2)(si_shlqbi((qword)(a), si_from_uint(count))));
+}
+
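+/* Contrast sketch (illustrative, not part of the original header):
+   spu_sl shifts each element independently, while spu_slqw shifts the
+   entire 128-bit quadword left by the low three bits of the count.  */
+static __inline vec_uint4 shift_contrast_sketch(vec_uint4 v)
+{
+ return (spu_slqw(spu_sl(v, 3), 3)); /* per-element, then quadword */
+}
+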
+/* spu_slqwbyte
+ * ============
+ */
+static __inline vec_uchar16 spu_slqwbyte(vec_uchar16 a, unsigned int count)
+{
+ return ((vec_uchar16)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_char16 spu_slqwbyte(vec_char16 a, unsigned int count)
+{
+ return ((vec_char16)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_ushort8 spu_slqwbyte(vec_ushort8 a, unsigned int count)
+{
+ return ((vec_ushort8)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_short8 spu_slqwbyte(vec_short8 a, unsigned int count)
+{
+ return ((vec_short8)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_uint4 spu_slqwbyte(vec_uint4 a, unsigned int count)
+{
+ return ((vec_uint4)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_int4 spu_slqwbyte(vec_int4 a, unsigned int count)
+{
+ return ((vec_int4)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_float4 spu_slqwbyte(vec_float4 a, unsigned int count)
+{
+ return ((vec_float4)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_ullong2 spu_slqwbyte(vec_ullong2 a, unsigned int count)
+{
+ return ((vec_ullong2)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_llong2 spu_slqwbyte(vec_llong2 a, unsigned int count)
+{
+ return ((vec_llong2)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_double2 spu_slqwbyte(vec_double2 a, unsigned int count)
+{
+ return ((vec_double2)(si_shlqby((qword)(a), si_from_uint(count))));
+}
+
+/* spu_slqwbytebc
+ * ==============
+ */
+static __inline vec_uchar16 spu_slqwbytebc(vec_uchar16 a, unsigned int count)
+{
+ return ((vec_uchar16)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_char16 spu_slqwbytebc(vec_char16 a, unsigned int count)
+{
+ return ((vec_char16)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_ushort8 spu_slqwbytebc(vec_ushort8 a, unsigned int count)
+{
+ return ((vec_ushort8)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_short8 spu_slqwbytebc(vec_short8 a, unsigned int count)
+{
+ return ((vec_short8)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_uint4 spu_slqwbytebc(vec_uint4 a, unsigned int count)
+{
+ return ((vec_uint4)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_int4 spu_slqwbytebc(vec_int4 a, unsigned int count)
+{
+ return ((vec_int4)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_float4 spu_slqwbytebc(vec_float4 a, unsigned int count)
+{
+ return ((vec_float4)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_ullong2 spu_slqwbytebc(vec_ullong2 a, unsigned int count)
+{
+ return ((vec_ullong2)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_llong2 spu_slqwbytebc(vec_llong2 a, unsigned int count)
+{
+ return ((vec_llong2)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+static __inline vec_double2 spu_slqwbytebc(vec_double2 a, unsigned int count)
+{
+ return ((vec_double2)(si_shlqbybi((qword)(a), si_from_uint(count))));
+}
+
+/* spu_splats
+ * ==========
+ */
+static __inline vec_uchar16 spu_splats(unsigned char a)
+{
+ union {
+ vec_uchar16 v;
+ unsigned char c[16];
+ } in;
+
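+ /* Only element 0 need be written; vec_splat replicates lane 0.  */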
+ in.c[0] = a;
+ return (vec_splat(in.v, 0));
+}
+
+static __inline vec_char16 spu_splats(signed char a)
+{
+ return ((vec_char16)spu_splats((unsigned char)(a)));
+}
+
+static __inline vec_ushort8 spu_splats(unsigned short a)
+{
+ union {
+ vec_ushort8 v;
+ unsigned short s[8];
+ } in;
+
+ in.s[0] = a;
+ return (vec_splat(in.v, 0));
+}
+
+static __inline vec_short8 spu_splats(signed short a)
+{
+ return ((vec_short8)spu_splats((unsigned short)(a)));
+}
+
+static __inline vec_uint4 spu_splats(unsigned int a)
+{
+ union {
+ vec_uint4 v;
+ unsigned int i[4];
+ } in;
+
+ in.i[0] = a;
+ return (vec_splat(in.v, 0));
+}
+
+static __inline vec_int4 spu_splats(signed int a)
+{
+ return ((vec_int4)spu_splats((unsigned int)(a)));
+}
+
+static __inline vec_float4 spu_splats(float a)
+{
+ union {
+ vec_float4 v;
+ float f[4];
+ } in;
+
+ in.f[0] = a;
+ return (vec_splat(in.v, 0));
+}
+
+static __inline vec_ullong2 spu_splats(unsigned long long a)
+{
+ union {
+ vec_ullong2 v;
+ unsigned long long l[2];
+ } in;
+
+ in.l[0] = a;
+ in.l[1] = a;
+ return (in.v);
+}
+
+static __inline vec_llong2 spu_splats(signed long long a)
+{
+ return ((vec_llong2)spu_splats((unsigned long long)(a)));
+}
+
+static __inline vec_double2 spu_splats(double a)
+{
+ union {
+ vec_double2 v;
+ double d[2];
+ } in;
+
+ in.d[0] = a;
+ in.d[1] = a;
+ return (in.v);
+}
+
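+/* Usage sketch (illustrative): spu_splats replicates a single scalar
+   across every lane of the result.  */
+static __inline vec_float4 splat_pi_sketch(void)
+{
+ return (spu_splats(3.14159265f)); /* { pi, pi, pi, pi } */
+}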
+
+/* spu_stop
+ * ========
+ */
+#define spu_stop(_type) si_stop(_type)
+
+
+/* spu_sub
+ * =======
+ */
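+/* Note that si_sf/si_sfh are "subtract from" operations (rt = rb - ra),
+   so the operands below are passed in reverse order: spu_sub(a, b)
+   computes a - b via si_sf((qword)(b), (qword)(a)).  */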
+static __inline vec_ushort8 spu_sub(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_sfh((qword)(b), (qword)(a))));
+}
+
+static __inline vec_short8 spu_sub(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_sfh((qword)(b), (qword)(a))));
+}
+
+static __inline vec_uint4 spu_sub(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_sf((qword)(b), (qword)(a))));
+}
+
+static __inline vec_int4 spu_sub(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_sf((qword)(b), (qword)(a))));
+}
+
+static __inline vec_float4 spu_sub(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_fs((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_sub(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_dfs((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_sub(unsigned int a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_sfi((qword)b, (int)a)));
+}
+
+static __inline vec_int4 spu_sub(signed int a, vec_int4 b)
+{
+ return ((vec_int4)(si_sfi((qword)b, (int)a)));
+}
+
+static __inline vec_ushort8 spu_sub(unsigned short a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_sfhi((qword)b, (short)a)));
+}
+
+static __inline vec_short8 spu_sub(signed short a, vec_short8 b)
+{
+ return ((vec_short8)(si_sfhi((qword)b, (short)a)));
+}
+
+/* spu_subx
+ * ========
+ */
+static __inline vec_uint4 spu_subx(vec_uint4 a, vec_uint4 b, vec_uint4 c)
+{
+ return ((vec_uint4)(si_sfx((qword)(b), (qword)(a), (qword)(c))));
+}
+
+static __inline vec_int4 spu_subx(vec_int4 a, vec_int4 b, vec_int4 c)
+{
+ return ((vec_int4)(si_sfx((qword)(b), (qword)(a), (qword)(c))));
+}
+
+/* spu_sumb
+ * ========
+ */
+static __inline vec_ushort8 spu_sumb(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_ushort8)(si_sumb((qword)(a), (qword)(b))));
+}
+
+
+/* spu_sync
+ * spu_sync_c
+ * ========
+ */
+#define spu_sync() /* do nothing */
+
+#define spu_sync_c() /* do nothing */
+
+
+/* spu_writech
+ * ===========
+ */
+#define spu_writech(_channel, _a) /* not mappable */
+
+/* spu_writechqw
+ * =============
+ */
+#define spu_writechqw(_channel, _a) /* not mappable */
+
+
+/* spu_xor
+ * =======
+ */
+static __inline vec_uchar16 spu_xor(vec_uchar16 a, vec_uchar16 b)
+{
+ return ((vec_uchar16)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_char16 spu_xor(vec_char16 a, vec_char16 b)
+{
+ return ((vec_char16)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ushort8 spu_xor(vec_ushort8 a, vec_ushort8 b)
+{
+ return ((vec_ushort8)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_short8 spu_xor(vec_short8 a, vec_short8 b)
+{
+ return ((vec_short8)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uint4 spu_xor(vec_uint4 a, vec_uint4 b)
+{
+ return ((vec_uint4)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_int4 spu_xor(vec_int4 a, vec_int4 b)
+{
+ return ((vec_int4)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_float4 spu_xor(vec_float4 a, vec_float4 b)
+{
+ return ((vec_float4)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_ullong2 spu_xor(vec_ullong2 a, vec_ullong2 b)
+{
+ return ((vec_ullong2)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_llong2 spu_xor(vec_llong2 a, vec_llong2 b)
+{
+ return ((vec_llong2)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_double2 spu_xor(vec_double2 a, vec_double2 b)
+{
+ return ((vec_double2)(si_xor((qword)(a), (qword)(b))));
+}
+
+static __inline vec_uchar16 spu_xor(vec_uchar16 a, unsigned char b)
+{
+ return ((vec_uchar16)(si_xorbi((qword)(a), b)));
+}
+
+static __inline vec_char16 spu_xor(vec_char16 a, signed char b)
+{
+ return ((vec_char16)(si_xorbi((qword)(a), (unsigned char)(b))));
+}
+
+static __inline vec_ushort8 spu_xor(vec_ushort8 a, unsigned short b)
+{
+ return ((vec_ushort8)(si_xorhi((qword)(a), b)));
+}
+
+static __inline vec_short8 spu_xor(vec_short8 a, signed short b)
+{
+ return ((vec_short8)(si_xorhi((qword)(a), (unsigned short)(b))));
+}
+
+static __inline vec_uint4 spu_xor(vec_uint4 a, unsigned int b)
+{
+ return ((vec_uint4)(si_xori((qword)(a), b)));
+}
+
+static __inline vec_int4 spu_xor(vec_int4 a, signed int b)
+{
+ return ((vec_int4)(si_xori((qword)(a), (unsigned int)(b))));
+}
+
+#endif /* !__SPU__ */
+#endif /* __cplusplus */
+#endif /* !_SPU2VMX_H_ */
diff --git a/gcc-4.4.3/gcc/config/rs6000/sync.md b/gcc-4.4.3/gcc/config/rs6000/sync.md
new file mode 100644
index 000000000..9d1ef9b58
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sync.md
@@ -0,0 +1,622 @@
+;; Machine description for PowerPC synchronization instructions.
+;; Copyright (C) 2005, 2007, 2008, 2009 Free Software Foundation, Inc.
+;; Contributed by Geoffrey Keating.
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_mode_attr larx [(SI "lwarx") (DI "ldarx")])
+(define_mode_attr stcx [(SI "stwcx.") (DI "stdcx.")])
+
+(define_code_iterator FETCHOP [plus minus ior xor and])
+(define_code_attr fetchop_name
+ [(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
+(define_code_attr fetchop_pred
+ [(plus "add_operand") (minus "gpc_reg_operand")
+ (ior "logical_operand") (xor "logical_operand") (and "and_operand")])
+(define_code_attr fetchopsi_constr
+ [(plus "rIL") (minus "r") (ior "rKL") (xor "rKL") (and "rTKL")])
+(define_code_attr fetchopdi_constr
+ [(plus "rIL") (minus "r") (ior "rKJF") (xor "rKJF") (and "rSTKJ")])
+
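+;; The FETCHOP iterator stamps out one sync_<op><mode> pattern per
+;; operation; the fetchop*_constr attributes choose the immediate
+;; constraints appropriate to each operation and mode.
+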
+(define_expand "memory_barrier"
+ [(set (match_dup 0)
+ (unspec:BLK [(match_dup 0)] UNSPEC_SYNC))]
+ ""
+{
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+})
+
+(define_insn "*sync_internal"
+ [(set (match_operand:BLK 0 "" "")
+ (unspec:BLK [(match_dup 0)] UNSPEC_SYNC))]
+ ""
+ "{dcs|sync}"
+ [(set_attr "type" "sync")])
+
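+;; Illustrative mapping (a sketch of the usual builtin lowering, not
+;; something defined in this file): __sync_synchronize () expands via
+;; "memory_barrier" above and emits a full "sync":
+;;
+;;   void publish (int *p, int v)
+;;   {
+;;     *p = v;
+;;     __sync_synchronize ();   /* -> sync */
+;;   }
+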
+(define_insn "load_locked_<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=r")
+ (unspec_volatile:GPR
+ [(match_operand:GPR 1 "memory_operand" "Z")] UNSPECV_LL))]
+ "TARGET_POWERPC"
+ "<larx> %0,%y1"
+ [(set_attr "type" "load_l")])
+
+(define_insn "store_conditional_<mode>"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x")
+ (unspec_volatile:CC [(const_int 0)] UNSPECV_SC))
+ (set (match_operand:GPR 1 "memory_operand" "=Z")
+ (match_operand:GPR 2 "gpc_reg_operand" "r"))]
+ "TARGET_POWERPC"
+ "<stcx> %2,%y1"
+ [(set_attr "type" "store_c")])
+
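+;; Together these implement the load-reserved/store-conditional
+;; protocol: <larx> sets a reservation, <stcx> stores only if the
+;; reservation still holds and records the outcome in cr0.  The
+;; splitters below wrap them in retry loops of the shape (sketch):
+;;
+;;   1: lwarx  r9,0,r3
+;;      <op>   r9,r9,r4
+;;      stwcx. r9,0,r3
+;;      bne-   1b
+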
+(define_insn_and_split "sync_compare_and_swap<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(match_operand:GPR 2 "reg_or_short_operand" "rI")
+ (match_operand:GPR 3 "gpc_reg_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:GPR 4 "=&r"))
+ (clobber (match_scratch:CC 5 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_compare_and_swap (operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+})
+
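+;; Usage sketch (assuming the usual builtin lowering):
+;;
+;;   int cas (int *p, int oldv, int newv)
+;;   {
+;;     return __sync_val_compare_and_swap (p, oldv, newv);
+;;   }
+;;
+;; matches this pattern and is split after reload by
+;; rs6000_split_compare_and_swap into a lwarx/compare/stwcx. retry loop.
+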
+(define_expand "sync_compare_and_swaphi"
+ [(match_operand:HI 0 "gpc_reg_operand" "")
+ (match_operand:HI 1 "memory_operand" "")
+ (match_operand:HI 2 "gpc_reg_operand" "")
+ (match_operand:HI 3 "gpc_reg_operand" "")]
+ "TARGET_POWERPC"
+{
+ rs6000_expand_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_expand "sync_compare_and_swapqi"
+ [(match_operand:QI 0 "gpc_reg_operand" "")
+ (match_operand:QI 1 "memory_operand" "")
+ (match_operand:QI 2 "gpc_reg_operand" "")
+ (match_operand:QI 3 "gpc_reg_operand" "")]
+ "TARGET_POWERPC"
+{
+ rs6000_expand_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "sync_compare_and_swapqhi_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (match_operand:SI 4 "memory_operand" "+Z"))
+ (set (match_dup 4)
+ (unspec:SI
+ [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand:SI 3 "gpc_reg_operand" "r")]
+ UNSPEC_CMPXCHG))
+ (clobber (match_scratch:SI 5 "=&r"))
+ (clobber (match_scratch:CC 6 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_compare_and_swapqhi (operands[0], operands[1],
+ operands[2], operands[3], operands[4],
+ operands[5]);
+ DONE;
+})
+
+(define_insn_and_split "sync_lock_test_and_set<mode>"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(match_operand:GPR 2 "reg_or_short_operand" "rL")]
+ UNSPEC_XCHG))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_lock_test_and_set (operands[0], operands[1], operands[2],
+ operands[3]);
+ DONE;
+})
+
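+;; Usage sketch (assuming the usual builtin lowering): the classic
+;; spinlock acquire, with acquire-only barrier semantics.
+;;
+;;   while (__sync_lock_test_and_set (&lock, 1))
+;;     ;   /* spin until the previous value was 0 */
+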
+(define_expand "sync_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "memory_operand" "")
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 0)
+ (match_operand:INT1 1 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "memory_operand" "+Z")
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 0)
+ (match_operand:SI 1 "<fetchop_pred>" "<fetchopsi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "memory_operand" "+Z")
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 0)
+ (match_operand:DI 1 "<fetchop_pred>" "<fetchopdi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 2 "=&b"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_expand "sync_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "memory_operand" "")
+ (unspec:INT1
+ [(ior:INT1 (not:INT1 (match_dup 0))
+ (not:INT1 (match_operand:INT1 1 "gpc_reg_operand" "")))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
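+      /* Subword nand expansion is deliberately disabled: the
+         unconditional FAIL makes the code below unreachable.  */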
+ FAIL;
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (NOT, <MODE>mode, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "memory_operand" "+Z")
+ (unspec:GPR
+ [(ior:GPR (not:GPR (match_dup 0))
+ (not:GPR (match_operand:GPR 1 "gpc_reg_operand" "r")))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 2 "=&r"))
+ (clobber (match_scratch:CC 3 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[0], operands[1],
+ NULL_RTX, NULL_RTX, operands[2]);
+ DONE;
+})
+
+(define_expand "sync_old_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (match_operand:INT1 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 1)
+ (match_operand:INT1 2 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[1], operands[2],
+ operands[0], NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_old_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (match_operand:SI 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchopsi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_old_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (match_operand:DI 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 1)
+ (match_operand:DI 2 "<fetchop_pred>" "<fetchopdi_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_expand "sync_old_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (match_operand:INT1 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(ior:INT1 (not:INT1 (match_dup 1))
+ (not:INT1 (match_operand:INT1 2 "gpc_reg_operand" "")))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
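+      /* Subword nand expansion is deliberately disabled: the
+         unconditional FAIL makes the code below unreachable.  */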
+ FAIL;
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (NOT, <MODE>mode, operands[1], operands[2],
+ operands[0], NULL_RTX, true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_old_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (match_operand:GPR 1 "memory_operand" "+Z"))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(ior:GPR (not:GPR (match_dup 1))
+ (not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r")))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[1], operands[2],
+ operands[0], NULL_RTX, operands[3]);
+ DONE;
+})
+
+(define_expand "sync_new_<fetchop_name><mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (FETCHOP:INT1
+ (match_operand:INT1 1 "memory_operand" "")
+ (match_operand:INT1 2 "<fetchop_pred>" "")))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(FETCHOP:INT1 (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (<CODE>, <MODE>mode, operands[1], operands[2],
+ NULL_RTX, operands[0], true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_new_<fetchop_name>si_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (FETCHOP:SI
+ (match_operand:SI 1 "memory_operand" "+Z")
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchopsi_constr>")))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+(define_insn_and_split "*sync_new_<fetchop_name>di_internal"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (FETCHOP:DI
+ (match_operand:DI 1 "memory_operand" "+Z")
+ (match_operand:DI 2 "<fetchop_pred>" "<fetchopdi_constr>")))
+ (set (match_dup 1)
+ (unspec:DI
+ [(FETCHOP:DI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:DI 3 "=&b"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (<CODE>, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+(define_expand "sync_new_nand<mode>"
+ [(parallel [(set (match_operand:INT1 0 "gpc_reg_operand" "")
+ (ior:INT1
+ (not:INT1 (match_operand:INT1 1 "memory_operand" ""))
+ (not:INT1 (match_operand:INT1 2 "gpc_reg_operand" ""))))
+ (set (match_dup 1)
+ (unspec:INT1
+ [(ior:INT1 (not:INT1 (match_dup 1))
+ (not:INT1 (match_dup 2)))]
+ UNSPEC_ATOMIC))
+ (clobber (scratch:INT1))
+ (clobber (scratch:CC))])]
+ "TARGET_POWERPC"
+ "
+{
+ if (<MODE>mode != SImode && <MODE>mode != DImode)
+ {
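+      /* Subword nand expansion is deliberately disabled: the
+         unconditional FAIL makes the code below unreachable.  */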
+ FAIL;
+ if (PPC405_ERRATUM77)
+ FAIL;
+ rs6000_emit_sync (NOT, <MODE>mode, operands[1], operands[2],
+ NULL_RTX, operands[0], true);
+ DONE;
+ }
+}")
+
+(define_insn_and_split "*sync_new_nand<mode>_internal"
+ [(set (match_operand:GPR 0 "gpc_reg_operand" "=&r")
+ (ior:GPR
+ (not:GPR (match_operand:GPR 1 "memory_operand" "+Z"))
+ (not:GPR (match_operand:GPR 2 "gpc_reg_operand" "r"))))
+ (set (match_dup 1)
+ (unspec:GPR
+ [(ior:GPR (not:GPR (match_dup 1)) (not:GPR (match_dup 2)))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:GPR 3 "=&r"))
+ (clobber (match_scratch:CC 4 "=&x"))]
+ "TARGET_POWERPC"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_atomic_op (NOT, operands[1], operands[2],
+ NULL_RTX, operands[0], operands[3]);
+ DONE;
+})
+
+; and<mode> without a cr0 clobber, to avoid generating an additional
+; clobber in the atomic splitters and thereby causing an internal
+; consistency failure; cr0 is already clobbered by larx/stcx.
+(define_insn "*atomic_andsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (unspec:SI [(match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "and_operand" "?r,T,K,L")]
+ UNSPEC_AND))]
+ ""
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2"
+ [(set_attr "type" "*,*,compare,compare")])
+
+(define_insn "*atomic_anddi"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (unspec:DI [(match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:DI 2 "and_operand" "?r,S,T,K,J")]
+ UNSPEC_AND))]
+ "TARGET_POWERPC64"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ rlwinm %0,%1,0,%m2,%M2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2"
+ [(set_attr "type" "*,*,*,compare,compare")
+ (set_attr "length" "4,4,4,4,4")])
+
+; the sync_*_internal patterns all have these operands:
+; 0 - memory location
+; 1 - operand
+; 2 - value in memory after operation
+; 3 - value in memory immediately before operation
+
+(define_insn "*sync_addshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (ior:SI (and:SI (plus:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 1 "add_operand" "rI"))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0))))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(ior:SI (and:SI (plus:SI (match_dup 0) (match_dup 1))
+ (match_dup 4))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0)))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x"))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\tadd%I1 %2,%3,%1\n\tandc %6,%3,%4\n\tand %2,%2,%4\n\tor %2,%2,%6\n\tstwcx. %2,%y0\n\tbne- $-24"
+ [(set_attr "length" "28")])
+
+(define_insn "*sync_subshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (ior:SI (and:SI (minus:SI (match_operand:SI 0 "memory_operand" "+Z")
+ (match_operand:SI 1 "add_operand" "rI"))
+ (match_operand:SI 4 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0))))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(ior:SI (and:SI (minus:SI (match_dup 0) (match_dup 1))
+ (match_dup 4))
+ (and:SI (not:SI (match_dup 4)) (match_dup 0)))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x"))
+ (clobber (match_scratch:SI 6 "=&r"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\tsubf %2,%1,%3\n\tandc %6,%3,%4\n\tand %2,%2,%4\n\tor %2,%2,%6\n\tstwcx. %2,%y0\n\tbne- $-24"
+ [(set_attr "length" "28")])
+
+(define_insn "*sync_andsi_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r,&r,&r,&r")
+ (and:SI (match_operand:SI 0 "memory_operand" "+Z,Z,Z,Z")
+ (match_operand:SI 1 "and_operand" "r,T,K,L")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b,&b,&b,&b") (match_dup 0))
+ (set (match_dup 0)
+ (unspec:SI [(and:SI (match_dup 0) (match_dup 1))]
+ UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 4 "=&x,&x,&x,&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "@
+ lwarx %3,%y0\n\tand %2,%3,%1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\trlwinm %2,%3,0,%m1,%M1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\tandi. %2,%3,%b1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\tandis. %2,%3,%u1\n\tstwcx. %2,%y0\n\tbne- $-12"
+ [(set_attr "length" "16,16,16,16")])
+
+(define_insn "*sync_boolsi_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r,&r,&r")
+ (match_operator:SI 4 "boolean_or_operator"
+ [(match_operand:SI 0 "memory_operand" "+Z,Z,Z")
+ (match_operand:SI 1 "logical_operand" "r,K,L")]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b,&b,&b") (match_dup 0))
+ (set (match_dup 0) (unspec:SI [(match_dup 4)] UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 5 "=&x,&x,&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "@
+ lwarx %3,%y0\n\t%q4 %2,%3,%1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\t%q4i %2,%3,%b1\n\tstwcx. %2,%y0\n\tbne- $-12
+ lwarx %3,%y0\n\t%q4is %2,%3,%u1\n\tstwcx. %2,%y0\n\tbne- $-12"
+ [(set_attr "length" "16,16,16")])
+
+; This pattern could also take immediate values of operand 1,
+; since the non-NOT version of the operator is used; but this is not
+; very useful, since in practice operand 1 is a full 32-bit value.
+; Likewise, operand 5 is in practice either <= 2^16 or it is a register.
+(define_insn "*sync_boolcshort_internal"
+ [(set (match_operand:SI 2 "gpc_reg_operand" "=&r")
+ (match_operator:SI 4 "boolean_or_operator"
+ [(xor:SI (not:SI (match_operand:SI 0 "memory_operand" "+Z"))
+ (not:SI (match_operand:SI 5 "logical_operand" "rK")))
+ (match_operand:SI 1 "gpc_reg_operand" "r")]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=&b") (match_dup 0))
+ (set (match_dup 0) (unspec:SI [(match_dup 4)] UNSPEC_SYNC_OP))
+ (clobber (match_scratch:CC 6 "=&x"))]
+ "TARGET_POWERPC && !PPC405_ERRATUM77"
+ "lwarx %3,%y0\n\txor%I2 %2,%3,%5\n\t%q4 %2,%2,%1\n\tstwcx. %2,%y0\n\tbne- $-16"
+ [(set_attr "length" "20")])
+
+(define_insn "isync"
+ [(set (mem:BLK (match_scratch 0 "X"))
+ (unspec_volatile:BLK [(mem:BLK (match_scratch 1 "X"))] UNSPEC_ISYNC))]
+ ""
+ "{ics|isync}"
+ [(set_attr "type" "isync")])
+
+(define_expand "sync_lock_release<mode>"
+ [(set (match_operand:INT 0 "memory_operand")
+ (match_operand:INT 1 "any_operand"))]
+ ""
+ "
+{
+ emit_insn (gen_lwsync ());
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+}")
+
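+;; A plain store preceded by lwsync is the release half of the
+;; acquire/release pairing used by the __sync primitives.
+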
+; Some AIX assemblers don't accept lwsync, so we emit its encoding,
+; .long 0x7c2004ac, directly.
+(define_insn "lwsync"
+ [(set (mem:BLK (match_scratch 0 "X"))
+ (unspec_volatile:BLK [(mem:BLK (match_scratch 1 "X"))] UNSPEC_LWSYNC))]
+ ""
+{
+ if (TARGET_NO_LWSYNC)
+ return "sync";
+ else
+ return ".long 0x7c2004ac";
+}
+ [(set_attr "type" "sync")])
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/sysv4.h b/gcc-4.4.3/gcc/config/rs6000/sysv4.h
new file mode 100644
index 000000000..a878a53ff
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sysv4.h
@@ -0,0 +1,1129 @@
+/* Target definitions for GNU compiler for PowerPC running System V.4
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003,
+ 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Header files should be C++ aware in general. */
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C
+
+/* Yes! We are ELF. */
+#define TARGET_OBJECT_FORMAT OBJECT_ELF
+
+/* Default ABI to compile code for. */
+#define DEFAULT_ABI rs6000_current_abi
+
+/* Default ABI to use. */
+#define RS6000_ABI_NAME "sysv"
+
+/* Override rs6000.h definition. */
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc"
+
+/* Small data support types. */
+enum rs6000_sdata_type {
+ SDATA_NONE, /* No small data support. */
+ SDATA_DATA, /* Just put data in .sbss/.sdata, don't use relocs. */
+ SDATA_SYSV, /* Use r13 to point to .sdata/.sbss. */
+ SDATA_EABI /* Use r13 like above, r2 points to .sdata2/.sbss2. */
+};
+
+extern enum rs6000_sdata_type rs6000_sdata;
+
+#define TARGET_TOC ((target_flags & MASK_64BIT) \
+ || ((target_flags & (MASK_RELOCATABLE \
+ | MASK_MINIMAL_TOC)) \
+ && flag_pic > 1) \
+ || DEFAULT_ABI == ABI_AIX)
+
+#define TARGET_BITFIELD_TYPE (! TARGET_NO_BITFIELD_TYPE)
+#define TARGET_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+#define TARGET_NO_PROTOTYPE (! TARGET_PROTOTYPE)
+#define TARGET_NO_TOC (! TARGET_TOC)
+#define TARGET_NO_EABI (! TARGET_EABI)
+
+#ifdef HAVE_AS_REL16
+#undef TARGET_SECURE_PLT
+#define TARGET_SECURE_PLT secure_plt
+#endif
+
+extern const char *rs6000_abi_name;
+extern const char *rs6000_sdata_name;
+extern const char *rs6000_tls_size_string; /* For -mtls-size= */
+
+#define SDATA_DEFAULT_SIZE 8
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (!g_switch_set) \
+ g_switch_value = SDATA_DEFAULT_SIZE; \
+ \
+ if (rs6000_abi_name == NULL) \
+ rs6000_abi_name = RS6000_ABI_NAME; \
+ \
+ if (!strcmp (rs6000_abi_name, "sysv")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "sysv-noeabi")) \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ target_flags &= ~ MASK_EABI; \
+ } \
+ else if (!strcmp (rs6000_abi_name, "sysv-eabi") \
+ || !strcmp (rs6000_abi_name, "eabi")) \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ target_flags |= MASK_EABI; \
+ } \
+ else if (!strcmp (rs6000_abi_name, "aixdesc")) \
+ rs6000_current_abi = ABI_AIX; \
+ else if (!strcmp (rs6000_abi_name, "freebsd")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "linux")) \
+ { \
+ if (TARGET_64BIT) \
+ rs6000_current_abi = ABI_AIX; \
+ else \
+ rs6000_current_abi = ABI_V4; \
+ } \
+ else if (!strcmp (rs6000_abi_name, "gnu")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "netbsd")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "openbsd")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "i960-old")) \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ target_flags |= (MASK_LITTLE_ENDIAN | MASK_EABI \
+ | MASK_NO_BITFIELD_WORD); \
+ target_flags &= ~MASK_STRICT_ALIGN; \
+ } \
+ else \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ error ("bad value for -mcall-%s", rs6000_abi_name); \
+ } \
+ \
+ if (rs6000_sdata_name) \
+ { \
+ if (!strcmp (rs6000_sdata_name, "none")) \
+ rs6000_sdata = SDATA_NONE; \
+ else if (!strcmp (rs6000_sdata_name, "data")) \
+ rs6000_sdata = SDATA_DATA; \
+ else if (!strcmp (rs6000_sdata_name, "default")) \
+ rs6000_sdata = (TARGET_EABI) ? SDATA_EABI : SDATA_SYSV; \
+ else if (!strcmp (rs6000_sdata_name, "sysv")) \
+ rs6000_sdata = SDATA_SYSV; \
+ else if (!strcmp (rs6000_sdata_name, "eabi")) \
+ rs6000_sdata = SDATA_EABI; \
+ else \
+ error ("bad value for -msdata=%s", rs6000_sdata_name); \
+ } \
+ else if (DEFAULT_ABI == ABI_V4) \
+ { \
+ rs6000_sdata = SDATA_DATA; \
+ rs6000_sdata_name = "data"; \
+ } \
+ else \
+ { \
+ rs6000_sdata = SDATA_NONE; \
+ rs6000_sdata_name = "none"; \
+ } \
+ \
+ if (TARGET_RELOCATABLE && \
+ (rs6000_sdata == SDATA_EABI || rs6000_sdata == SDATA_SYSV)) \
+ { \
+ rs6000_sdata = SDATA_DATA; \
+ error ("-mrelocatable and -msdata=%s are incompatible", \
+ rs6000_sdata_name); \
+ } \
+ \
+ else if (flag_pic && DEFAULT_ABI != ABI_AIX \
+ && (rs6000_sdata == SDATA_EABI \
+ || rs6000_sdata == SDATA_SYSV)) \
+ { \
+ rs6000_sdata = SDATA_DATA; \
+ error ("-f%s and -msdata=%s are incompatible", \
+ (flag_pic > 1) ? "PIC" : "pic", \
+ rs6000_sdata_name); \
+ } \
+ \
+ if ((rs6000_sdata != SDATA_NONE && DEFAULT_ABI != ABI_V4) \
+ || (rs6000_sdata == SDATA_EABI && !TARGET_EABI)) \
+ { \
+ rs6000_sdata = SDATA_NONE; \
+ error ("-msdata=%s and -mcall-%s are incompatible", \
+ rs6000_sdata_name, rs6000_abi_name); \
+ } \
+ \
+ targetm.have_srodata_section = rs6000_sdata == SDATA_EABI; \
+ \
+ if (TARGET_RELOCATABLE && !TARGET_MINIMAL_TOC) \
+ { \
+ target_flags |= MASK_MINIMAL_TOC; \
+ error ("-mrelocatable and -mno-minimal-toc are incompatible"); \
+ } \
+ \
+ if (TARGET_RELOCATABLE && rs6000_current_abi == ABI_AIX) \
+ { \
+ target_flags &= ~MASK_RELOCATABLE; \
+ error ("-mrelocatable and -mcall-%s are incompatible", \
+ rs6000_abi_name); \
+ } \
+ \
+ if (!TARGET_64BIT && flag_pic > 1 && rs6000_current_abi == ABI_AIX) \
+ { \
+ flag_pic = 0; \
+ error ("-fPIC and -mcall-%s are incompatible", \
+ rs6000_abi_name); \
+ } \
+ \
+ if (rs6000_current_abi == ABI_AIX && TARGET_LITTLE_ENDIAN) \
+ { \
+ target_flags &= ~MASK_LITTLE_ENDIAN; \
+ error ("-mcall-aixdesc must be big endian"); \
+ } \
+ \
+ if (TARGET_SECURE_PLT != secure_plt) \
+ { \
+ error ("-msecure-plt not supported by your assembler"); \
+ } \
+ \
+ /* Treat -fPIC the same as -mrelocatable. */ \
+ if (flag_pic > 1 && DEFAULT_ABI != ABI_AIX) \
+ { \
+ target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC; \
+ TARGET_NO_FP_IN_TOC = 1; \
+ } \
+ \
+ else if (TARGET_RELOCATABLE) \
+ flag_pic = 2; \
+} while (0)
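+
+/* For example (illustrative), "-mrelocatable -msdata=eabi" is rejected
+   above with "-mrelocatable and -msdata=eabi are incompatible".  */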
+
+#ifndef RS6000_BI_ARCH
+# define SUBSUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT) \
+ error ("-m%s not supported in this configuration", \
+ (target_flags & MASK_64BIT) ? "64" : "32"); \
+} while (0)
+#endif
+
+/* Override rs6000.h definition. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+
+/* Override rs6000.h definition. */
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC750
+
+/* SVR4 is only defined for PowerPC, so short-circuit the POWER patterns. */
+#undef TARGET_POWER
+#define TARGET_POWER 0
+
+#define FIXED_R2 1
+/* System V.4 uses register 13 as a pointer to the small data area,
+ so it is not available to the normal user. */
+#define FIXED_R13 1
+
+/* Override default big endianism definitions in rs6000.h. */
+#undef BYTES_BIG_ENDIAN
+#undef WORDS_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN)
+#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN)
+
+/* Define this to set the endianness to use in libgcc2.c, which cannot
+   depend on target_flags.  */
+#if !defined(__LITTLE_ENDIAN__) && !defined(__sun__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define cutoff for using external functions to save floating point.
+ Currently on 64-bit V.4, always use inline stores. When optimizing
+ for size on 32-bit targets, use external functions when
+ profitable. */
+#define FP_SAVE_INLINE(FIRST_REG) (optimize_size && !TARGET_64BIT \
+ ? ((FIRST_REG) == 62 \
+ || (FIRST_REG) == 63) \
+ : (FIRST_REG) < 64)
+/* And similarly for general purpose registers. */
+#define GP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 32 \
+ && (TARGET_64BIT || !optimize_size))
+
+/* Put jump tables in read-only memory, rather than in .text. */
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+/* Prefix and suffix to use when saving floating point registers.  */
+#define SAVE_FP_PREFIX "_savefpr_"
+#define SAVE_FP_SUFFIX (TARGET_64BIT ? "_l" : "")
+
+/* Prefix and suffix to use when restoring floating point registers.  */
+#define RESTORE_FP_PREFIX "_restfpr_"
+#define RESTORE_FP_SUFFIX (TARGET_64BIT ? "_l" : "")
+
+/* Type used for ptrdiff_t, as a string used in a declaration. */
+#define PTRDIFF_TYPE "int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+/* Override svr4.h definition. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+/* Width of wchar_t in bits. */
+/* Override svr4.h definition. */
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Make int foo : 8 not cause structures to be aligned to an int boundary. */
+/* Override elfos.h definition. */
+#undef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS (TARGET_BITFIELD_TYPE)
+
+#undef BITFIELD_NBYTES_LIMITED
+#define BITFIELD_NBYTES_LIMITED (TARGET_NO_BITFIELD_WORD)
+
+/* Define this macro to be the value 1 if instructions will fail to
+ work if given data not on the nominal alignment. If instructions
+ will merely go slower in that case, define this macro as 0. */
+#undef STRICT_ALIGNMENT
+#define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN)
+
+/* Define this macro if you wish to preserve a certain alignment for
+ the stack pointer, greater than what the hardware enforces. The
+ definition is a C expression for the desired alignment (measured
+ in bits). This macro must evaluate to a value equal to or larger
+ than STACK_BOUNDARY.
+ For the SYSV ABI and variants the alignment of the stack pointer
+ is usually controlled manually in rs6000.c. However, to maintain
+ alignment across alloca () in all circumstances,
+ PREFERRED_STACK_BOUNDARY needs to be set as well.
+ This has the additional advantage of allowing a bigger maximum
+ alignment of user objects on the stack. */
+
+#undef PREFERRED_STACK_BOUNDARY
+#define PREFERRED_STACK_BOUNDARY 128
+
+/* Real stack boundary as mandated by the appropriate ABI. */
+#define ABI_STACK_BOUNDARY \
+ ((TARGET_EABI && !TARGET_ALTIVEC && !TARGET_ALTIVEC_ABI) ? 64 : 128)
+
+/* An expression for the alignment of a structure field FIELD if the
+ alignment computed in the usual way is COMPUTED. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ ((TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
+ ? 128 : COMPUTED)
+
+#undef BIGGEST_FIELD_ALIGNMENT
+
+/* Use ELF style section commands. */
+
+#define TEXT_SECTION_ASM_OP "\t.section\t\".text\""
+
+#define DATA_SECTION_ASM_OP "\t.section\t\".data\""
+
+#define BSS_SECTION_ASM_OP "\t.section\t\".bss\""
+
+/* Override elfos.h definition. */
+#undef INIT_SECTION_ASM_OP
+#define INIT_SECTION_ASM_OP "\t.section\t\".init\",\"ax\""
+
+/* Override elfos.h definition. */
+#undef FINI_SECTION_ASM_OP
+#define FINI_SECTION_ASM_OP "\t.section\t\".fini\",\"ax\""
+
+#define TOC_SECTION_ASM_OP "\t.section\t\".got\",\"aw\""
+
+/* Put PC relative got entries in .got2. */
+#define MINIMAL_TOC_SECTION_ASM_OP \
+ (TARGET_RELOCATABLE || (flag_pic && DEFAULT_ABI != ABI_AIX) \
+ ? "\t.section\t\".got2\",\"aw\"" : "\t.section\t\".got1\",\"aw\"")
+
+#define SDATA_SECTION_ASM_OP "\t.section\t\".sdata\",\"aw\""
+#define SDATA2_SECTION_ASM_OP "\t.section\t\".sdata2\",\"a\""
+#define SBSS_SECTION_ASM_OP "\t.section\t\".sbss\",\"aw\",@nobits"
+
+/* Override default elf definitions. */
+#define TARGET_ASM_INIT_SECTIONS rs6000_elf_asm_init_sections
+#undef TARGET_ASM_RELOC_RW_MASK
+#define TARGET_ASM_RELOC_RW_MASK rs6000_elf_reloc_rw_mask
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION rs6000_elf_select_rtx_section
+
+/* Return nonzero if this entry is to be written into the constant pool
+ in a special way. We do so if this is a SYMBOL_REF, LABEL_REF or a CONST
+ containing one of them. If -mfp-in-toc (the default), we also do
+ this for floating-point constants. We actually can only do this
+ if the FP formats of the target and host machines are the same, but
+ we can't check that since not every file that uses
+ GO_IF_LEGITIMATE_ADDRESS_P includes real.h.
+
+ Unlike AIX, we don't key off of -mminimal-toc, but instead do not
+ allow floating point constants in the TOC if -mrelocatable. */
+
+#undef ASM_OUTPUT_SPECIAL_POOL_ENTRY_P
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \
+ (TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF \
+ || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST_INT \
+ && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
+ || (!TARGET_NO_FP_IN_TOC \
+ && !TARGET_RELOCATABLE \
+ && GET_CODE (X) == CONST_DOUBLE \
+ && SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
+ && BITS_PER_WORD == HOST_BITS_PER_INT)))
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+
+extern int rs6000_pic_labelno;
+
+/* Override elfos.h definition. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ rs6000_elf_declare_function_name ((FILE), (NAME), (DECL))
+
+/* The USER_LABEL_PREFIX stuff is affected by the -fleading-underscore
+ flag. The LOCAL_LABEL_PREFIX variable is used by dbxelf.h. */
+
+#define LOCAL_LABEL_PREFIX "."
+#define USER_LABEL_PREFIX ""
+
+/* svr4.h overrides (*targetm.asm_out.internal_label). */
+
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ asm_fprintf (FILE, "%L%s", PREFIX)
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.globl "
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define LOCAL_ASM_OP "\t.local\t"
+
+#define LCOMM_ASM_OP "\t.lcomm\t"
+
+/* Describe how to emit uninitialized local items. */
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+do { \
+ if ((DECL) && rs6000_elf_in_small_data_p (DECL)) \
+ { \
+ switch_to_section (sbss_section); \
+ ASM_OUTPUT_ALIGN (FILE, exact_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ ASM_OUTPUT_SKIP (FILE, SIZE); \
+ if (!flag_inhibit_size_directive && (SIZE) > 0) \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "%s", LCOMM_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", \
+ (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+} while (0)
+
+/* Describe how to emit uninitialized external linkage items. */
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+do { \
+ ASM_OUTPUT_ALIGNED_DECL_LOCAL (FILE, DECL, NAME, SIZE, ALIGN); \
+} while (0)
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* To support -falign-* switches we need to use .p2align so
+ that alignment directives in code sections will be padded
+ with no-op instructions, rather than zeroes. */
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG) != 0) \
+ { \
+ if ((MAX_SKIP) == 0) \
+ fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else \
+ fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ }
+#endif
+
+/* This is how to output code to push a register on the stack.
+ It need not be very fast code.
+
+ On the rs6000, we must keep the backchain up to date. In order
+ to simplify things, always allocate 16 bytes for a push (System V
+ wants to keep stack aligned to a 16 byte boundary). */
+
+#define ASM_OUTPUT_REG_PUSH(FILE, REGNO) \
+do { \
+ if (DEFAULT_ABI == ABI_V4) \
+ asm_fprintf (FILE, \
+ "\t{stu|stwu} %s,-16(%s)\n\t{st|stw} %s,12(%s)\n", \
+ reg_names[1], reg_names[1], reg_names[REGNO], \
+ reg_names[1]); \
+} while (0)
+
+/* This is how to output an insn to pop a register from the stack.
+ It need not be very fast code. */
+
+#define ASM_OUTPUT_REG_POP(FILE, REGNO) \
+do { \
+ if (DEFAULT_ABI == ABI_V4) \
+ asm_fprintf (FILE, \
+ "\t{l|lwz} %s,12(%s)\n\t{ai|addic} %s,%s,16\n", \
+ reg_names[REGNO], reg_names[1], reg_names[1], \
+ reg_names[1]); \
+} while (0)
+
+/* Switch Recognition by gcc.c. Add -G xx support. */
+
+/* Override svr4.h definition. */
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ ((CHAR) == 'D' || (CHAR) == 'U' || (CHAR) == 'o' \
+ || (CHAR) == 'e' || (CHAR) == 'T' || (CHAR) == 'u' \
+ || (CHAR) == 'I' || (CHAR) == 'm' || (CHAR) == 'x' \
+ || (CHAR) == 'L' || (CHAR) == 'A' || (CHAR) == 'V' \
+ || (CHAR) == 'B' || (CHAR) == 'b' || (CHAR) == 'G')
+
+extern int fixuplabelno;
+
+/* Handle constructors specially for -mrelocatable. */
+#define TARGET_ASM_CONSTRUCTOR rs6000_elf_asm_out_constructor
+#define TARGET_ASM_DESTRUCTOR rs6000_elf_asm_out_destructor
+
+/* This is the end of what might become sysv4.h. */
+
+/* Use DWARF 2 debugging information by default. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Historically we have also supported stabs debugging. */
+#define DBX_DEBUGGING_INFO 1
+
+#define TARGET_ENCODE_SECTION_INFO rs6000_elf_encode_section_info
+#define TARGET_IN_SMALL_DATA_P rs6000_elf_in_small_data_p
+
+/* The ELF version doesn't encode [DS] or whatever at the end of symbols. */
+
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ assemble_name (FILE, NAME)
+
+/* We have to output the stabs for the function name *first*, before
+ outputting its label. */
+
+#define DBX_FUNCTION_FIRST
+
+/* This is the end of what might become sysv4dbx.h. */
+
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC System V.4)");
+#endif
+
+#define TARGET_OS_SYSV_CPP_BUILTINS() \
+ do \
+ { \
+ if (target_flags_explicit \
+ & MASK_RELOCATABLE) \
+ builtin_define ("_RELOCATABLE"); \
+ } \
+ while (0)
+
+#ifndef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define_std ("PPC"); \
+ builtin_define_std ("unix"); \
+ builtin_define ("__svr4__"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=svr4"); \
+ builtin_assert ("cpu=powerpc"); \
+ builtin_assert ("machine=powerpc"); \
+ TARGET_OS_SYSV_CPP_BUILTINS (); \
+ } \
+ while (0)
+#endif
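+
+/* Illustrative use of the predefines above (not part of the original
+   header): user code can test, e.g.,
+   "#if defined (PPC) && defined (__svr4__)".  */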
+
+/* Pass various options to the assembler. */
+/* Override svr4.h definition. */
+#undef ASM_SPEC
+#define ASM_SPEC "%(asm_cpu) \
+%{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}}" \
+SVR4_ASM_SPEC \
+"%{mrelocatable} %{mrelocatable-lib} %{fpic|fpie|fPIC|fPIE:-K PIC} \
+%{memb|msdata|msdata=eabi: -memb} \
+%{mlittle|mlittle-endian:-mlittle; \
+ mbig|mbig-endian :-mbig; \
+ mcall-aixdesc | \
+ mcall-freebsd | \
+ mcall-netbsd | \
+ mcall-openbsd | \
+ mcall-linux | \
+ mcall-gnu :-mbig; \
+ mcall-i960-old :-mlittle}"
+
+#define CC1_ENDIAN_BIG_SPEC ""
+
+#define CC1_ENDIAN_LITTLE_SPEC "\
+%{!mstrict-align: %{!mno-strict-align: \
+ %{!mcall-i960-old: \
+ -mstrict-align \
+ } \
+}}"
+
+#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_big)"
+
+#ifndef CC1_SECURE_PLT_DEFAULT_SPEC
+#define CC1_SECURE_PLT_DEFAULT_SPEC ""
+#endif
+
+/* Pass -G xxx to the compiler and set correct endian mode. */
+#define CC1_SPEC "%{G*} %(cc1_cpu) \
+%{mlittle|mlittle-endian: %(cc1_endian_little); \
+ mbig |mbig-endian : %(cc1_endian_big); \
+ mcall-aixdesc | \
+ mcall-freebsd | \
+ mcall-netbsd | \
+ mcall-openbsd | \
+ mcall-linux | \
+ mcall-gnu : -mbig %(cc1_endian_big); \
+ mcall-i960-old : -mlittle %(cc1_endian_little); \
+ : %(cc1_endian_default)} \
+%{meabi: %{!mcall-*: -mcall-sysv }} \
+%{!meabi: %{!mno-eabi: \
+ %{mrelocatable: -meabi } \
+ %{mcall-freebsd: -mno-eabi } \
+ %{mcall-i960-old: -meabi } \
+ %{mcall-linux: -mno-eabi } \
+ %{mcall-gnu: -mno-eabi } \
+ %{mcall-netbsd: -mno-eabi } \
+ %{mcall-openbsd: -mno-eabi }}} \
+%{msdata: -msdata=default} \
+%{mno-sdata: -msdata=none} \
+%{!mbss-plt: %{!msecure-plt: %(cc1_secure_plt_default)}} \
+%{profile: -p}"
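+
+/* Two worked examples, derived from the spec above only:
+ "gcc -mrelocatable" passes -meabi on to cc1 (via the !meabi/!mno-eabi
+ block), and "gcc -msdata" is rewritten to -msdata=default. */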
+
+/* Don't pass -Y P,<path> when building a cross compiler. */
+#ifndef CROSS_DIRECTORY_STRUCTURE
+#define LINK_PATH_SPEC "\
+%{!R*:%{L*:-R %*}} \
+%{!nostdlib: %{!YP,*: \
+ %{compat-bsd: \
+ %{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}} \
+ %{!R*: %{!L*: -R /usr/ucblib}} \
+ %{!compat-bsd: \
+ %{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ccs/lib:/usr/lib}}}}"
+
+#else
+#define LINK_PATH_SPEC ""
+#endif
+
+/* Default starting address if specified. */
+#define LINK_START_SPEC "\
+%{mads : %(link_start_ads) ; \
+ myellowknife : %(link_start_yellowknife) ; \
+ mmvme : %(link_start_mvme) ; \
+ msim : %(link_start_sim) ; \
+ mcall-freebsd: %(link_start_freebsd) ; \
+ mcall-linux : %(link_start_linux) ; \
+ mcall-gnu : %(link_start_gnu) ; \
+ mcall-netbsd : %(link_start_netbsd) ; \
+ mcall-openbsd: %(link_start_openbsd) ; \
+ : %(link_start_default) }"
+
+#define LINK_START_DEFAULT_SPEC ""
+
+/* Override svr4.h definition. */
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{h*} %{v:-V} %{!msdata=none:%{G*}} %{msdata=none:-G0} \
+%{YP,*} %{R*} \
+%{Qy:} %{!Qn:-Qy} \
+%(link_shlib) \
+%{!Wl,-T*: %{!T*: %(link_start) }} \
+%(link_target) \
+%(link_os)"
+
+/* For now, turn off shared libraries by default. */
+#ifndef SHARED_LIB_SUPPORT
+#define NO_SHARED_LIB_SUPPORT
+#endif
+
+#ifndef NO_SHARED_LIB_SUPPORT
+/* Shared libraries are default. */
+#define LINK_SHLIB_SPEC "\
+%{!static: %(link_path) %{!R*:%{L*:-R %*}}} \
+%{mshlib: } \
+%{static:-dn -Bstatic} \
+%{shared:-G -dy -z text} \
+%{symbolic:-Bsymbolic -G -dy -z text}"
+
+#else
+/* Shared libraries are not default. */
+#define LINK_SHLIB_SPEC "\
+%{mshlib: %(link_path) } \
+%{!mshlib: %{!shared: %{!symbolic: -dn -Bstatic}}} \
+%{static: } \
+%{shared:-G -dy -z text %(link_path) } \
+%{symbolic:-Bsymbolic -G -dy -z text %(link_path) }"
+#endif
+
+/* Override the default target of the linker. */
+#define LINK_TARGET_SPEC "\
+%{mlittle: --oformat elf32-powerpcle } %{mlittle-endian: --oformat elf32-powerpcle } \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-i960-old: --oformat elf32-powerpcle} \
+ }}}}"
+
+/* Any specific OS flags. */
+#define LINK_OS_SPEC "\
+%{mads : %(link_os_ads) ; \
+ myellowknife : %(link_os_yellowknife) ; \
+ mmvme : %(link_os_mvme) ; \
+ msim : %(link_os_sim) ; \
+ mcall-freebsd: %(link_os_freebsd) ; \
+ mcall-linux : %(link_os_linux) ; \
+ mcall-gnu : %(link_os_gnu) ; \
+ mcall-netbsd : %(link_os_netbsd) ; \
+ mcall-openbsd: %(link_os_openbsd) ; \
+ : %(link_os_default) }"
+
+#define LINK_OS_DEFAULT_SPEC ""
+
+#define DRIVER_SELF_SPECS "%{mfpu=none: %<mfpu=* \
+ %<msingle-float %<mdouble-float}"
+
+/* Override rs6000.h definition. */
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} \
+%{mads : %(cpp_os_ads) ; \
+ myellowknife : %(cpp_os_yellowknife) ; \
+ mmvme : %(cpp_os_mvme) ; \
+ msim : %(cpp_os_sim) ; \
+ mcall-freebsd: %(cpp_os_freebsd) ; \
+ mcall-linux : %(cpp_os_linux) ; \
+ mcall-gnu : %(cpp_os_gnu) ; \
+ mcall-netbsd : %(cpp_os_netbsd) ; \
+ mcall-openbsd: %(cpp_os_openbsd) ; \
+ : %(cpp_os_default) }"
+
+#define CPP_OS_DEFAULT_SPEC ""
+
+/* Override svr4.h definition. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "\
+%{mads : %(startfile_ads) ; \
+ myellowknife : %(startfile_yellowknife) ; \
+ mmvme : %(startfile_mvme) ; \
+ msim : %(startfile_sim) ; \
+ mcall-freebsd: %(startfile_freebsd) ; \
+ mcall-linux : %(startfile_linux) ; \
+ mcall-gnu : %(startfile_gnu) ; \
+ mcall-netbsd : %(startfile_netbsd) ; \
+ mcall-openbsd: %(startfile_openbsd) ; \
+ : %(startfile_default) }"
+
+#define STARTFILE_DEFAULT_SPEC "ecrti.o%s crtbegin.o%s"
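+
+/* Reader's note: the %s suffix in specs such as "crtbegin.o%s" tells
+ the driver to search its startfile directories for the named file and
+ substitute the full pathname found. */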
+
+/* Override svr4.h definition. */
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{mads : %(lib_ads) ; \
+ myellowknife : %(lib_yellowknife) ; \
+ mmvme : %(lib_mvme) ; \
+ msim : %(lib_sim) ; \
+ mcall-freebsd: %(lib_freebsd) ; \
+ mcall-linux : %(lib_linux) ; \
+ mcall-gnu : %(lib_gnu) ; \
+ mcall-netbsd : %(lib_netbsd) ; \
+ mcall-openbsd: %(lib_openbsd) ; \
+ : %(lib_default) }"
+
+#define LIB_DEFAULT_SPEC "-lc"
+
+/* Override svr4.h definition. */
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "\
+%{mads : %(endfile_ads) ; \
+ myellowknife : %(endfile_yellowknife) ; \
+ mmvme : %(endfile_mvme) ; \
+ msim : %(endfile_sim) ; \
+ mcall-freebsd: %(endfile_freebsd) ; \
+ mcall-linux : %(endfile_linux) ; \
+ mcall-gnu : %(endfile_gnu) ; \
+ mcall-netbsd : %(endfile_netbsd) ; \
+ mcall-openbsd: %(endfile_openbsd) ; \
+ : %(crtsavres_default) %(endfile_default) }"
+
+#define CRTSAVRES_DEFAULT_SPEC ""
+
+#define ENDFILE_DEFAULT_SPEC "crtend.o%s ecrtn.o%s"
+
+/* Motorola ADS support. */
+#define LIB_ADS_SPEC "--start-group -lads -lc --end-group"
+
+#define STARTFILE_ADS_SPEC "ecrti.o%s crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_ADS_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_ADS_SPEC "-T ads.ld%s"
+
+#define LINK_OS_ADS_SPEC ""
+
+#define CPP_OS_ADS_SPEC ""
+
+/* Motorola Yellowknife support. */
+#define LIB_YELLOWKNIFE_SPEC "--start-group -lyk -lc --end-group"
+
+#define STARTFILE_YELLOWKNIFE_SPEC "ecrti.o%s crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_YELLOWKNIFE_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_YELLOWKNIFE_SPEC "-T yellowknife.ld%s"
+
+#define LINK_OS_YELLOWKNIFE_SPEC ""
+
+#define CPP_OS_YELLOWKNIFE_SPEC ""
+
+/* Motorola MVME support. */
+#define LIB_MVME_SPEC "--start-group -lmvme -lc --end-group"
+
+#define STARTFILE_MVME_SPEC "ecrti.o%s crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_MVME_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_MVME_SPEC "-Ttext 0x40000"
+
+#define LINK_OS_MVME_SPEC ""
+
+#define CPP_OS_MVME_SPEC ""
+
+/* Support for a PowerPC simulator based on NetBSD system calls. */
+#define LIB_SIM_SPEC "--start-group -lsim -lc --end-group"
+
+#define STARTFILE_SIM_SPEC "ecrti.o%s sim-crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_SIM_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_SIM_SPEC ""
+
+#define LINK_OS_SIM_SPEC "-m elf32ppcsim"
+
+#define CPP_OS_SIM_SPEC ""
+
+/* FreeBSD support. */
+
+#define CPP_OS_FREEBSD_SPEC "\
+ -D__PPC__ -D__ppc__ -D__PowerPC__ -D__powerpc__ \
+ -Acpu=powerpc -Amachine=powerpc"
+
+#define STARTFILE_FREEBSD_SPEC FBSD_STARTFILE_SPEC
+#define ENDFILE_FREEBSD_SPEC FBSD_ENDFILE_SPEC
+#define LIB_FREEBSD_SPEC FBSD_LIB_SPEC
+#define LINK_START_FREEBSD_SPEC ""
+
+#define LINK_OS_FREEBSD_SPEC "\
+ %{p:%nconsider using `-pg' instead of `-p' with gprof(1)} \
+ %{v:-V} \
+ %{assert*} %{R*} %{rpath*} %{defsym*} \
+ %{shared:-Bshareable %{h*} %{soname*}} \
+ %{!shared: \
+ %{!static: \
+ %{rdynamic: -export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker %(fbsd_dynamic_linker) }} \
+ %{static:-Bstatic}} \
+ %{symbolic:-Bsymbolic}"
+
+/* GNU/Linux support. */
+#define LIB_LINUX_SPEC "%{mnewlib: --start-group -llinux -lc --end-group } \
+%{!mnewlib: %{pthread:-lpthread} %{shared:-lc} \
+%{!shared: %{profile:-lc_p} %{!profile:-lc}}}"
+
+#ifdef HAVE_LD_PIE
+#define STARTFILE_LINUX_SPEC "\
+%{!shared: %{pg|p|profile:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}} \
+%{mnewlib:ecrti.o%s;:crti.o%s} \
+%{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#else
+#define STARTFILE_LINUX_SPEC "\
+%{!shared: %{pg|p|profile:gcrt1.o%s;:crt1.o%s}} \
+%{mnewlib:ecrti.o%s;:crti.o%s} \
+%{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+#endif
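+
+/* Example expansion (assuming a linker with PIE support, i.e. the first
+ definition above): "gcc -pie" selects Scrt1.o, crti.o and crtbeginS.o,
+ while "gcc -static" selects crt1.o, crti.o and crtbeginT.o. */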
+
+#define ENDFILE_LINUX_SPEC "\
+%{shared|pie:crtendS.o%s;:crtend.o%s} \
+%{mnewlib:ecrtn.o%s;:crtn.o%s}"
+
+#define LINK_START_LINUX_SPEC ""
+
+#define GLIBC_DYNAMIC_LINKER "/lib/ld.so.1"
+#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0"
+#if UCLIBC_DEFAULT
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:%{muclibc:%e-mglibc and -muclibc used together}" G ";:" U "}"
+#else
+#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:%{mglibc:%e-mglibc and -muclibc used together}" U ";:" G "}"
+#endif
+#define LINUX_DYNAMIC_LINKER \
+ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER)
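+
+/* With glibc as the default C library this expands to
+ %{muclibc:%{mglibc:%e-mglibc and -muclibc used together}/lib/ld-uClibc.so.0;:/lib/ld.so.1}
+ i.e. /lib/ld.so.1 unless -muclibc is given, and an error if both
+ -mglibc and -muclibc appear. */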
+
+#define LINK_OS_LINUX_SPEC "-m elf32ppclinux %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker " LINUX_DYNAMIC_LINKER "}}}"
+
+#if defined(HAVE_LD_EH_FRAME_HDR)
+# define LINK_EH_SPEC "%{!static:--eh-frame-hdr} "
+#endif
+
+#define CPP_OS_LINUX_SPEC "-D__unix__ -D__gnu_linux__ -D__linux__ \
+%{!undef: \
+ %{!ansi: \
+ %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
+ %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
+-Asystem=linux -Asystem=unix -Asystem=posix %{pthread:-D_REENTRANT}"
+
+/* GNU/Hurd support. */
+#define LIB_GNU_SPEC "%{mnewlib: --start-group -lgnu -lc --end-group } \
+%{!mnewlib: %{shared:-lc} %{!shared: %{pthread:-lpthread } \
+%{profile:-lc_p} %{!profile:-lc}}}"
+
+#define STARTFILE_GNU_SPEC "\
+%{!shared: %{!static: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}} \
+%{static: %{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}}} \
+%{mnewlib: ecrti.o%s} %{!mnewlib: crti.o%s} \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define ENDFILE_GNU_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+%{mnewlib: ecrtn.o%s} %{!mnewlib: crtn.o%s}"
+
+#define LINK_START_GNU_SPEC ""
+
+#define LINK_OS_GNU_SPEC "-m elf32ppclinux %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+
+#define CPP_OS_GNU_SPEC "-D__unix__ -D__gnu_hurd__ -D__GNU__ \
+%{!undef: \
+ %{!ansi: -Dunix -D__unix}} \
+-Asystem=gnu -Asystem=unix -Asystem=posix %{pthread:-D_REENTRANT}"
+
+/* NetBSD support. */
+#define LIB_NETBSD_SPEC "\
+%{profile:-lgmon -lc_p} %{!profile:-lc}"
+
+#define STARTFILE_NETBSD_SPEC "\
+ncrti.o%s crt0.o%s \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define ENDFILE_NETBSD_SPEC "\
+%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+ncrtn.o%s"
+
+#define LINK_START_NETBSD_SPEC "\
+"
+
+#define LINK_OS_NETBSD_SPEC "\
+%{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.elf_so}}}"
+
+#define CPP_OS_NETBSD_SPEC "\
+-D__powerpc__ -D__NetBSD__ -D__KPRINTF_ATTRIBUTE__"
+
+/* OpenBSD support. */
+#ifndef LIB_OPENBSD_SPEC
+#define LIB_OPENBSD_SPEC "%{!shared:%{pthread:-lpthread%{p:_p}%{!p:%{pg:_p}}}} %{!shared:-lc%{p:_p}%{!p:%{pg:_p}}}"
+#endif
+
+#ifndef STARTFILE_OPENBSD_SPEC
+#define STARTFILE_OPENBSD_SPEC "\
+%{!shared: %{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}}} \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+#endif
+
+#ifndef ENDFILE_OPENBSD_SPEC
+#define ENDFILE_OPENBSD_SPEC "\
+%{!shared:crtend.o%s} %{shared:crtendS.o%s}"
+#endif
+
+#ifndef LINK_START_OPENBSD_SPEC
+#define LINK_START_OPENBSD_SPEC "-Ttext 0x400074"
+#endif
+
+#ifndef LINK_OS_OPENBSD_SPEC
+#define LINK_OS_OPENBSD_SPEC ""
+#endif
+
+#ifndef CPP_OS_OPENBSD_SPEC
+#define CPP_OS_OPENBSD_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_POSIX_THREADS}"
+#endif
+
+/* Define any extra SPECS that the compiler needs to generate. */
+/* Override rs6000.h definition. */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "crtsavres_default", CRTSAVRES_DEFAULT_SPEC }, \
+ { "lib_ads", LIB_ADS_SPEC }, \
+ { "lib_yellowknife", LIB_YELLOWKNIFE_SPEC }, \
+ { "lib_mvme", LIB_MVME_SPEC }, \
+ { "lib_sim", LIB_SIM_SPEC }, \
+ { "lib_freebsd", LIB_FREEBSD_SPEC }, \
+ { "lib_gnu", LIB_GNU_SPEC }, \
+ { "lib_linux", LIB_LINUX_SPEC }, \
+ { "lib_netbsd", LIB_NETBSD_SPEC }, \
+ { "lib_openbsd", LIB_OPENBSD_SPEC }, \
+ { "lib_default", LIB_DEFAULT_SPEC }, \
+ { "startfile_ads", STARTFILE_ADS_SPEC }, \
+ { "startfile_yellowknife", STARTFILE_YELLOWKNIFE_SPEC }, \
+ { "startfile_mvme", STARTFILE_MVME_SPEC }, \
+ { "startfile_sim", STARTFILE_SIM_SPEC }, \
+ { "startfile_freebsd", STARTFILE_FREEBSD_SPEC }, \
+ { "startfile_gnu", STARTFILE_GNU_SPEC }, \
+ { "startfile_linux", STARTFILE_LINUX_SPEC }, \
+ { "startfile_netbsd", STARTFILE_NETBSD_SPEC }, \
+ { "startfile_openbsd", STARTFILE_OPENBSD_SPEC }, \
+ { "startfile_default", STARTFILE_DEFAULT_SPEC }, \
+ { "endfile_ads", ENDFILE_ADS_SPEC }, \
+ { "endfile_yellowknife", ENDFILE_YELLOWKNIFE_SPEC }, \
+ { "endfile_mvme", ENDFILE_MVME_SPEC }, \
+ { "endfile_sim", ENDFILE_SIM_SPEC }, \
+ { "endfile_freebsd", ENDFILE_FREEBSD_SPEC }, \
+ { "endfile_gnu", ENDFILE_GNU_SPEC }, \
+ { "endfile_linux", ENDFILE_LINUX_SPEC }, \
+ { "endfile_netbsd", ENDFILE_NETBSD_SPEC }, \
+ { "endfile_openbsd", ENDFILE_OPENBSD_SPEC }, \
+ { "endfile_default", ENDFILE_DEFAULT_SPEC }, \
+ { "link_path", LINK_PATH_SPEC }, \
+ { "link_shlib", LINK_SHLIB_SPEC }, \
+ { "link_target", LINK_TARGET_SPEC }, \
+ { "link_start", LINK_START_SPEC }, \
+ { "link_start_ads", LINK_START_ADS_SPEC }, \
+ { "link_start_yellowknife", LINK_START_YELLOWKNIFE_SPEC }, \
+ { "link_start_mvme", LINK_START_MVME_SPEC }, \
+ { "link_start_sim", LINK_START_SIM_SPEC }, \
+ { "link_start_freebsd", LINK_START_FREEBSD_SPEC }, \
+ { "link_start_gnu", LINK_START_GNU_SPEC }, \
+ { "link_start_linux", LINK_START_LINUX_SPEC }, \
+ { "link_start_netbsd", LINK_START_NETBSD_SPEC }, \
+ { "link_start_openbsd", LINK_START_OPENBSD_SPEC }, \
+ { "link_start_default", LINK_START_DEFAULT_SPEC }, \
+ { "link_os", LINK_OS_SPEC }, \
+ { "link_os_ads", LINK_OS_ADS_SPEC }, \
+ { "link_os_yellowknife", LINK_OS_YELLOWKNIFE_SPEC }, \
+ { "link_os_mvme", LINK_OS_MVME_SPEC }, \
+ { "link_os_sim", LINK_OS_SIM_SPEC }, \
+ { "link_os_freebsd", LINK_OS_FREEBSD_SPEC }, \
+ { "link_os_linux", LINK_OS_LINUX_SPEC }, \
+ { "link_os_gnu", LINK_OS_GNU_SPEC }, \
+ { "link_os_netbsd", LINK_OS_NETBSD_SPEC }, \
+ { "link_os_openbsd", LINK_OS_OPENBSD_SPEC }, \
+ { "link_os_default", LINK_OS_DEFAULT_SPEC }, \
+ { "cc1_endian_big", CC1_ENDIAN_BIG_SPEC }, \
+ { "cc1_endian_little", CC1_ENDIAN_LITTLE_SPEC }, \
+ { "cc1_endian_default", CC1_ENDIAN_DEFAULT_SPEC }, \
+ { "cc1_secure_plt_default", CC1_SECURE_PLT_DEFAULT_SPEC }, \
+ { "cpp_os_ads", CPP_OS_ADS_SPEC }, \
+ { "cpp_os_yellowknife", CPP_OS_YELLOWKNIFE_SPEC }, \
+ { "cpp_os_mvme", CPP_OS_MVME_SPEC }, \
+ { "cpp_os_sim", CPP_OS_SIM_SPEC }, \
+ { "cpp_os_freebsd", CPP_OS_FREEBSD_SPEC }, \
+ { "cpp_os_gnu", CPP_OS_GNU_SPEC }, \
+ { "cpp_os_linux", CPP_OS_LINUX_SPEC }, \
+ { "cpp_os_netbsd", CPP_OS_NETBSD_SPEC }, \
+ { "cpp_os_openbsd", CPP_OS_OPENBSD_SPEC }, \
+ { "cpp_os_default", CPP_OS_DEFAULT_SPEC }, \
+ { "fbsd_dynamic_linker", FBSD_DYNAMIC_LINKER }, \
+ SUBSUBTARGET_EXTRA_SPECS
+
+#define SUBSUBTARGET_EXTRA_SPECS
+
+/* Define this macro as a C expression for the initializer of an
+ array of strings to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#define MULTILIB_DEFAULTS { "mbig", "mcall-sysv" }
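+
+/* For instance, a command line that specifies neither an endianness nor
+ a -mcall-* ABI is treated as "-mbig -mcall-sysv" when the driver
+ chooses a multilib directory. */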
+
+/* Define this macro if the code for function profiling should come
+ before the function prologue. Normally, the profiling code comes
+ after. */
+#define PROFILE_BEFORE_PROLOGUE 1
+
+/* Function name to call to do profiling. */
+#define RS6000_MCOUNT "_mcount"
+
+/* Define this macro (to a value of 1) if you want to support the
+ Win32 style pragmas `#pragma pack(push,<n>)' and `#pragma
+ pack(pop)'. The pack(push,<n>) pragma specifies the maximum
+ alignment (in bytes) of fields within a structure, in much the
+ same way as the `__aligned__' and `__packed__' `__attribute__'s
+ do. A pack value of zero resets the behavior to the default.
+ Successive invocations of this pragma cause the previous values to
+ be stacked, so that invocations of `#pragma pack(pop)' will return
+ to the previous value. */
+
+#define HANDLE_PRAGMA_PACK_PUSH_POP 1
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ ((flag_pic || TARGET_RELOCATABLE) \
+ ? (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4) \
+ : DW_EH_PE_absptr)
+
+#define DOUBLE_INT_ASM_OP "\t.quad\t"
+
+/* Generate entries in .fixup for relocatable addresses. */
+#define RELOCATABLE_NEEDS_FIXUP 1
+
+/* This target uses the sysv4.opt file. */
+#define TARGET_USES_SYSV4_OPT 1
diff --git a/gcc-4.4.3/gcc/config/rs6000/sysv4.opt b/gcc-4.4.3/gcc/config/rs6000/sysv4.opt
new file mode 100644
index 000000000..5de03b34a
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sysv4.opt
@@ -0,0 +1,144 @@
+; SYSV4 options for PPC port.
+;
+; Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
+; Contributed by Aldy Hernandez <aldy@quesejoda.com>.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT
+; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+; License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mcall-
+Target RejectNegative Joined
+Select ABI calling convention
+
+msdata=
+Target RejectNegative Joined
+Select method for sdata handling
+
+mtls-size=
+Target RejectNegative Joined
+Specify bit size of immediate TLS offsets
+
+mbit-align
+Target Report Mask(NO_BITFIELD_TYPE)
+Align to the base type of the bit-field
+
+mstrict-align
+Target Report Mask(STRICT_ALIGN)
+Don't assume that unaligned accesses are handled by the system
+
+mrelocatable
+Target Report Mask(RELOCATABLE)
+Produce code relocatable at runtime
+
+mrelocatable-lib
+Target
+Produce code relocatable at runtime
+
+mlittle-endian
+Target Report RejectNegative Mask(LITTLE_ENDIAN)
+Produce little endian code
+
+mlittle
+Target Report RejectNegative Mask(LITTLE_ENDIAN) MaskExists
+Produce little endian code
+
+mbig-endian
+Target Report RejectNegative InverseMask(LITTLE_ENDIAN)
+Produce big endian code
+
+mbig
+Target Report RejectNegative InverseMask(LITTLE_ENDIAN)
+Produce big endian code
+
+;; FIXME: This does nothing. What should be done?
+mno-toc
+Target RejectNegative
+no description yet
+
+mtoc
+Target RejectNegative
+no description yet
+
+mprototype
+Target Var(TARGET_PROTOTYPE)
+Assume all variable arg functions are prototyped
+
+;; FIXME: Does nothing.
+mno-traceback
+Target RejectNegative
+no description yet
+
+meabi
+Target Report Mask(EABI)
+Use EABI
+
+mbit-word
+Target Report Mask(NO_BITFIELD_WORD)
+Allow bit-fields to cross word boundaries
+
+mregnames
+Target Mask(REGNAMES)
+Use alternate register names
+
+;; FIXME: Does nothing.
+msdata
+Target
+no description yet
+
+msim
+Target RejectNegative
+Link with libsim.a, libc.a and sim-crt0.o
+
+mads
+Target RejectNegative
+Link with libads.a, libc.a and crt0.o
+
+myellowknife
+Target RejectNegative
+Link with libyk.a, libc.a and crt0.o
+
+mmvme
+Target RejectNegative
+Link with libmvme.a, libc.a and crt0.o
+
+memb
+Target RejectNegative
+Set the PPC_EMB bit in the ELF flags header
+
+mshlib
+Target RejectNegative
+no description yet
+
+m64
+Target Report RejectNegative Negative(m32) Mask(64BIT)
+Generate 64-bit code
+
+m32
+Target Report RejectNegative Negative(m64) InverseMask(64BIT)
+Generate 32-bit code
+
+mnewlib
+Target RejectNegative
+no description yet
+
+msecure-plt
+Target Report RejectNegative Var(secure_plt, 1)
+Generate code to use a non-exec PLT and GOT
+
+mbss-plt
+Target Report RejectNegative Var(secure_plt, 0)
+Generate code for old exec BSS PLT
diff --git a/gcc-4.4.3/gcc/config/rs6000/sysv4le.h b/gcc-4.4.3/gcc/config/rs6000/sysv4le.h
new file mode 100644
index 000000000..155977766
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/sysv4le.h
@@ -0,0 +1,36 @@
+/* Target definitions for GCC for a little endian PowerPC
+ running System V.4
+ Copyright (C) 1995, 2000, 2003, 2007 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_LITTLE_ENDIAN)
+
+#undef CC1_ENDIAN_DEFAULT_SPEC
+#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_little)"
+
+#undef LINK_TARGET_SPEC
+#define LINK_TARGET_SPEC "\
+%{mbig: --oformat elf32-powerpc } %{mbig-endian: --oformat elf32-powerpc } \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-linux: --oformat elf32-powerpc} \
+ }}}}"
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mlittle", "mcall-sysv" }
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-aix43 b/gcc-4.4.3/gcc/config/rs6000/t-aix43
new file mode 100644
index 000000000..6a73cdd03
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-aix43
@@ -0,0 +1,76 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
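+
+# Note: dp-bit.c is a plain copy of config/fp-bit.c (the double-precision
+# routines), while the generated fp-bit.c has "#define FLOAT" prepended
+# so the same source also builds the single-precision routines.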
+
+# Build the libraries for pthread and all of the
+# different processor models
+
+MULTILIB_OPTIONS = pthread \
+ mcpu=common/mcpu=power/mcpu=powerpc/maix64
+
+MULTILIB_DIRNAMES = pthread \
+ common power powerpc ppc64
+
+MULTILIB_MATCHES = mcpu?power=mcpu?power \
+ mcpu?power=mcpu?power2 \
+ mcpu?powerpc=mcpu?power3 \
+ mcpu?powerpc=mcpu?power4 \
+ mcpu?powerpc=mcpu?powerpc \
+ mcpu?power=mcpu?rios1 \
+ mcpu?power=mcpu?rios2 \
+ mcpu?power=mcpu?rsc \
+ mcpu?power=mcpu?rsc1 \
+ mcpu?powerpc=mcpu?rs64a \
+ mcpu?powerpc=mcpu?601 \
+ mcpu?powerpc=mcpu?602 \
+ mcpu?powerpc=mcpu?603 \
+ mcpu?powerpc=mcpu?603e \
+ mcpu?powerpc=mcpu?604 \
+ mcpu?powerpc=mcpu?604e \
+ mcpu?powerpc=mcpu?620 \
+ mcpu?powerpc=mcpu?630
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Build a shared libgcc library.
+SHLIB_EXT = .a
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
+ @multilib_flags@ @shlib_objs@ -lc \
+ `case @multilib_dir@ in \
+ *pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
+ *) echo -lc ;; esac` ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
+ rm -f @multilib_dir@/shr.o
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
+SHLIB_NM_FLAGS = -Bpg -X32_64
+
+# GCC 128-bit long double support routines.
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+TARGET_LIBGCC2_CFLAGS = -mlong-double-128
+
+# Both 32-bit and 64-bit objects in archives.
+AR_FLAGS_FOR_TARGET = -X32_64
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-aix52 b/gcc-4.4.3/gcc/config/rs6000/t-aix52
new file mode 100644
index 000000000..81ab90ed0
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-aix52
@@ -0,0 +1,56 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# Build the libraries for pthread and all of the
+# different processor models
+
+MULTILIB_OPTIONS = pthread maix64
+
+MULTILIB_DIRNAMES = pthread ppc64
+
+MULTILIB_MATCHES =
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Build a shared libgcc library.
+SHLIB_EXT = .a
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
+ @multilib_flags@ @shlib_objs@ -lc \
+ `case @multilib_dir@ in \
+ *pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
+ *) echo -lc ;; esac` ; \
+ rm -f @multilib_dir@/tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/shr.o ; \
+ mv @multilib_dir@/tmp-@shlib_base_name@.a \
+ @multilib_dir@/@shlib_base_name@.a ; \
+ rm -f @multilib_dir@/shr.o
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = \
+ $$(mkinstalldirs) $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@; \
+ $(INSTALL_DATA) @multilib_dir@/@shlib_base_name@.a \
+ $$(DESTDIR)$$(slibdir)@shlib_slibdir_qual@/
+SHLIB_LIBS = -lc `case @multilib_dir@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver $(srcdir)/config/rs6000/libgcc-ppc64.ver
+SHLIB_NM_FLAGS = -Bpg -X32_64
+
+# GCC 128-bit long double support routines.
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+TARGET_LIBGCC2_CFLAGS = -mlong-double-128
+
+# Both 32-bit and 64-bit objects in archives.
+AR_FLAGS_FOR_TARGET = -X32_64
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-darwin b/gcc-4.4.3/gcc/config/rs6000/t-darwin
new file mode 100644
index 000000000..0a31bd76d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-darwin
@@ -0,0 +1,36 @@
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm \
+ $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/darwin-64.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c \
+ $(srcdir)/config/rs6000/darwin-world.asm
+
+LIB2FUNCS_STATIC_EXTRA = \
+ $(srcdir)/config/rs6000/darwin-fpsave.asm \
+ $(srcdir)/config/rs6000/darwin-vecsave.asm
+
+DARWIN_EXTRA_CRT_BUILD_CFLAGS = -mlongcall -mmacosx-version-min=10.4
+
+# The .asm files above are designed to run on all processors,
+# even though they use AltiVec instructions. -Wa is used because
+# -force_cpusubtype_ALL doesn't work with -dynamiclib.
+#
+# -pipe is used because of an assembler bug, 4077127, which makes the
+# assembler mishandle the first # directive, so temporary file names
+# appear in the stabs output and the bootstrap fails. Using -pipe
+# works around this by avoiding temporary file names entirely.
+TARGET_LIBGCC2_CFLAGS = -Wa,-force_cpusubtype_ALL -pipe -mmacosx-version-min=10.4
+
+# Export the _xlq* symbols from darwin-ldouble.c.
+SHLIB_MAPFILES += $(srcdir)/config/rs6000/libgcc-ppc64.ver
+
+LIB2ADDEH += $(srcdir)/config/rs6000/darwin-fallback.c
+
+darwin-fpsave.o: $(srcdir)/config/rs6000/darwin-asm.h
+darwin-tramp.o: $(srcdir)/config/rs6000/darwin-asm.h
+
+# Explain how to build crt2.o
+$(T)crt2$(objext): $(srcdir)/config/darwin-crt2.c $(GCC_PASSES) \
+ $(TCONFIG_H) stmp-int-hdrs tsystem.h
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) \
+ $(DARWIN_EXTRA_CRT_BUILD_CFLAGS) \
+ -c $(srcdir)/config/darwin-crt2.c -o $(T)crt2$(objext)
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-darwin8 b/gcc-4.4.3/gcc/config/rs6000/t-darwin8
new file mode 100644
index 000000000..2f3bb32f8
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-darwin8
@@ -0,0 +1,3 @@
+# 64-bit libraries can only be built in Darwin 8.x or later.
+MULTILIB_OPTIONS = m64
+MULTILIB_DIRNAMES = ppc64
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-fprules b/gcc-4.4.3/gcc/config/rs6000/t-fprules
new file mode 100644
index 000000000..9d1f936c4
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-fprules
@@ -0,0 +1,19 @@
+MULTILIB_MATCHES_FLOAT = msoft-float=mcpu?401 \
+ msoft-float=mcpu?403 \
+ msoft-float=mcpu?405 \
+ msoft-float=mcpu?440 \
+ msoft-float=mcpu?464 \
+ msoft-float=mcpu?ec603e \
+ msoft-float=mcpu?801 \
+ msoft-float=mcpu?821 \
+ msoft-float=mcpu?823 \
+ msoft-float=mcpu?860
+
+# Build the libraries for both hard and soft floating point by default
+
+MULTILIB_OPTIONS = msoft-float
+MULTILIB_DIRNAMES = soft-float
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-fprules-fpbit b/gcc-4.4.3/gcc/config/rs6000/t-fprules-fpbit
new file mode 100644
index 000000000..a80c1cf4e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-fprules-fpbit
@@ -0,0 +1,11 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-fprules-softfp b/gcc-4.4.3/gcc/config/rs6000/t-fprules-softfp
new file mode 100644
index 000000000..10b271f03
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-fprules-softfp
@@ -0,0 +1,6 @@
+softfp_float_modes := sf df
+softfp_int_modes := si di
+softfp_extensions := sfdf
+softfp_truncations := dfsf
+softfp_machine_header := rs6000/sfp-machine.h
+softfp_exclude_libgcc2 := y
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-linux64 b/gcc-4.4.3/gcc/config/rs6000/t-linux64
new file mode 100644
index 000000000..2a2e7b0cd
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-linux64
@@ -0,0 +1,26 @@
+
+# rs6000/t-linux64
+
+LIB2FUNCS_EXTRA += tramp.S $(srcdir)/config/rs6000/ppc64-fp.c \
+ $(srcdir)/config/rs6000/darwin-ldouble.c
+LIB2FUNCS_EXTRA := $(sort $(LIB2FUNCS_EXTRA))
+
+TARGET_LIBGCC2_CFLAGS += -mno-minimal-toc
+
+# On Debian, Ubuntu and other derivative distributions, the 32-bit
+# libraries are found in /lib32 and /usr/lib32, and /lib64 and /usr/lib64
+# are symlinks to /lib and /usr/lib, while other distributions install
+# libraries into /lib64 and /usr/lib64. The LSB does not enforce the use
+# of /lib64 and /usr/lib64, and it says nothing about the 32-bit
+# libraries on those systems. Set MULTILIB_OSDIRNAMES according to what
+# is found on the target.
+
+MULTILIB_OPTIONS = m64/m32 msoft-float
+MULTILIB_DIRNAMES = 64 32 nof
+MULTILIB_EXTRA_OPTS = fPIC mstrict-align
+MULTILIB_EXCEPTIONS = m64/msoft-float
+MULTILIB_EXCLUSIONS = m64/!m32/msoft-float
+MULTILIB_OSDIRNAMES = ../lib64 $(if $(wildcard $(shell echo $(SYSTEM_HEADER_DIR))/../../usr/lib32),../lib32,../lib) nof
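+
+# Illustration: if <sysroot>/usr/lib32 exists (the Debian-style layout
+# probed by the wildcard above), the 32-bit multilib installs under
+# ../lib32; otherwise it falls back to ../lib. The 64-bit multilib
+# always uses ../lib64.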
+MULTILIB_MATCHES = $(MULTILIB_MATCHES_FLOAT)
+
+softfp_wrap_start := '\#ifndef __powerpc64__'
+softfp_wrap_end := '\#endif'
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-lynx b/gcc-4.4.3/gcc/config/rs6000/t-lynx
new file mode 100644
index 000000000..429f641ec
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-lynx
@@ -0,0 +1,38 @@
+LIB2FUNCS_EXTRA = tramp.S
+
+tramp.S: $(srcdir)/config/rs6000/tramp.asm
+ cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+MULTILIB_OPTIONS += msoft-float
+MULTILIB_DIRNAMES += soft-float
+
+MULTILIB_OPTIONS += maltivec
+MULTILIB_DIRNAMES += altivec
+
+MULTILIB_EXCEPTIONS = *msoft-float/*maltivec*
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o
+
+# If .sdata is enabled __CTOR_{LIST,END}__ go into .sdata instead of
+# .ctors.
+CRTSTUFF_T_CFLAGS = -mno-sdata
+
+# Compile crtbeginS.o and crtendS.o with pic.
+CRTSTUFF_T_CFLAGS_S = -fPIC -mno-sdata
+
+# Local Variables:
+# mode: makefile
+# End:
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-netbsd b/gcc-4.4.3/gcc/config/rs6000/t-netbsd
new file mode 100644
index 000000000..135748dae
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-netbsd
@@ -0,0 +1,72 @@
+# Support for NetBSD PowerPC ELF targets (SVR4 ABI).
+
+LIB2FUNCS_EXTRA = tramp.S
+
+LIB2FUNCS_STATIC_EXTRA = crtsavfpr.S crtresfpr.S \
+ crtsavgpr.S crtresgpr.S \
+ crtresxfpr.S crtresxgpr.S
+
+tramp.S: $(srcdir)/config/rs6000/tramp.asm
+ cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
+
+crtsavfpr.S: $(srcdir)/config/rs6000/crtsavfpr.asm
+ cat $(srcdir)/config/rs6000/crtsavfpr.asm >crtsavfpr.S
+
+crtresfpr.S: $(srcdir)/config/rs6000/crtresfpr.asm
+ cat $(srcdir)/config/rs6000/crtresfpr.asm >crtresfpr.S
+
+crtsavgpr.S: $(srcdir)/config/rs6000/crtsavgpr.asm
+ cat $(srcdir)/config/rs6000/crtsavgpr.asm >crtsavgpr.S
+
+crtresgpr.S: $(srcdir)/config/rs6000/crtresgpr.asm
+ cat $(srcdir)/config/rs6000/crtresgpr.asm >crtresgpr.S
+
+crtresxfpr.S: $(srcdir)/config/rs6000/crtresxfpr.asm
+ cat $(srcdir)/config/rs6000/crtresxfpr.asm >crtresxfpr.S
+
+crtresxgpr.S: $(srcdir)/config/rs6000/crtresxgpr.asm
+ cat $(srcdir)/config/rs6000/crtresxgpr.asm >crtresxgpr.S
+
+# It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata.
+CRTSTUFF_T_CFLAGS += -msdata=none
+CRTSTUFF_T_CFLAGS_S += -msdata=none
+
+# Switch synonyms
+MULTILIB_MATCHES_FLOAT = msoft-float=mcpu?401 \
+ msoft-float=mcpu?403 \
+ msoft-float=mcpu?405 \
+ msoft-float=mcpu?ec603e \
+ msoft-float=mcpu?801 \
+ msoft-float=mcpu?821 \
+ msoft-float=mcpu?823 \
+ msoft-float=mcpu?860
+
+MULTILIB_OPTIONS = msoft-float
+MULTILIB_DIRNAMES = soft-float
+MULTILIB_EXTRA_OPTS = fPIC mstrict-align
+MULTILIB_EXCEPTIONS =
+
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+EXTRA_MULTILIB_PARTS = crtbegin$(objext) crtend$(objext) \
+ crtbeginS$(objext) crtendS$(objext) crtbeginT$(objext)
+
+$(T)crtsavfpr$(objext): crtsavfpr.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavfpr.S -o $(T)crtsavfpr$(objext)
+
+$(T)crtresfpr$(objext): crtresfpr.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresfpr.S -o $(T)crtresfpr$(objext)
+
+$(T)crtsavgpr$(objext): crtsavgpr.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtsavgpr.S -o $(T)crtsavgpr$(objext)
+
+$(T)crtresgpr$(objext): crtresgpr.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresgpr.S -o $(T)crtresgpr$(objext)
+
+$(T)crtresxfpr$(objext): crtresxfpr.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxfpr.S -o $(T)crtresxfpr$(objext)
+
+$(T)crtresxgpr$(objext): crtresxgpr.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c crtresxgpr.S -o $(T)crtresxgpr$(objext)
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-newas b/gcc-4.4.3/gcc/config/rs6000/t-newas
new file mode 100644
index 000000000..eed66bf5f
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-newas
@@ -0,0 +1,37 @@
+# Build the libraries for both hard and soft floating point and all of the
+# different processor models
+
+MULTILIB_OPTIONS = msoft-float \
+ mcpu=common/mcpu=power/mcpu=powerpc
+
+MULTILIB_DIRNAMES = soft-float \
+ common power powerpc
+
+MULTILIB_MATCHES = $(MULTILIB_MATCHES_FLOAT) \
+ mcpu?power=mpower \
+ mcpu?power=mrios1 \
+ mcpu?power=mcpu?rios1 \
+ mcpu?power=mcpu?rsc \
+ mcpu?power=mcpu?rsc1 \
+ mcpu?power=mpower2 \
+ mcpu?power=mrios2 \
+ mcpu?power=mcpu?rios2 \
+ mcpu?powerpc=mcpu?601 \
+ mcpu?powerpc=mcpu?602 \
+ mcpu?powerpc=mcpu?603 \
+ mcpu?powerpc=mcpu?603e \
+ mcpu?powerpc=mcpu?604 \
+ mcpu?powerpc=mcpu?620 \
+ mcpu?powerpc=mcpu?403 \
+ mcpu?powerpc=mpowerpc \
+ mcpu?powerpc=mpowerpc-gpopt \
+ mcpu?powerpc=mpowerpc-gfxopt
+
+# GCC 128-bit long double support routines.
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-ldouble.c
+
+# AIX 3.2.x needs milli.exp for -mcpu=common
+EXTRA_PARTS = milli.exp
+$(T)milli.exp: $(srcdir)/config/rs6000/milli.exp
+ rm -f $(T)milli.exp
+ cp $(srcdir)/config/rs6000/milli.exp $(T)milli.exp
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-ppccomm b/gcc-4.4.3/gcc/config/rs6000/t-ppccomm
new file mode 100644
index 000000000..364557122
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-ppccomm
@@ -0,0 +1,56 @@
+# Common support for PowerPC ELF targets (both EABI and SVR4).
+
+LIB2FUNCS_EXTRA += tramp.S $(srcdir)/config/rs6000/darwin-ldouble.c
+
+# These can't end up in shared libgcc
+LIB2FUNCS_STATIC_EXTRA = eabi.S
+
+eabi.S: $(srcdir)/config/rs6000/eabi.asm
+ cat $(srcdir)/config/rs6000/eabi.asm > eabi.S
+
+tramp.S: $(srcdir)/config/rs6000/tramp.asm
+ cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
+
+# Switch synonyms
+MULTILIB_MATCHES_ENDIAN = mlittle=mlittle-endian mbig=mbig-endian
+MULTILIB_MATCHES_SYSV = mcall-sysv=mcall-sysv-eabi mcall-sysv=mcall-sysv-noeabi mcall-sysv=mcall-linux mcall-sysv=mcall-netbsd
+
+EXTRA_MULTILIB_PARTS = crtbegin$(objext) crtend$(objext) \
+ crtbeginS$(objext) crtendS$(objext) crtbeginT$(objext) \
+ ecrti$(objext) ecrtn$(objext) \
+ ncrti$(objext) ncrtn$(objext)
+
+# We build {e,n}crti.o and {e,n}crtn.o, which serve to add begin and
+# end labels to all of the special sections used when we link using gcc.
+
+# Assemble startup files.
+ecrti.S: $(srcdir)/config/rs6000/eabi-ci.asm
+ cat $(srcdir)/config/rs6000/eabi-ci.asm >ecrti.S
+
+ecrtn.S: $(srcdir)/config/rs6000/eabi-cn.asm
+ cat $(srcdir)/config/rs6000/eabi-cn.asm >ecrtn.S
+
+ncrti.S: $(srcdir)/config/rs6000/sol-ci.asm
+ cat $(srcdir)/config/rs6000/sol-ci.asm >ncrti.S
+
+ncrtn.S: $(srcdir)/config/rs6000/sol-cn.asm
+ cat $(srcdir)/config/rs6000/sol-cn.asm >ncrtn.S
+
+# Build multiple copies of ?crt{i,n}.o, one for each target switch.
+$(T)ecrti$(objext): ecrti.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ecrti.S -o $(T)ecrti$(objext)
+
+$(T)ecrtn$(objext): ecrtn.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ecrtn.S -o $(T)ecrtn$(objext)
+
+$(T)ncrti$(objext): ncrti.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrti.S -o $(T)ncrti$(objext)
+
+$(T)ncrtn$(objext): ncrtn.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrtn.S -o $(T)ncrtn$(objext)
+
+# It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata.
+CRTSTUFF_T_CFLAGS = -msdata=none
+# Make sure crt*.o are built with -fPIC even if configured with
+# --enable-shared --disable-multilib
+CRTSTUFF_T_CFLAGS_S = -fPIC -msdata=none
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-ppcendian b/gcc-4.4.3/gcc/config/rs6000/t-ppcendian
new file mode 100644
index 000000000..b6252ee0c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-ppcendian
@@ -0,0 +1,12 @@
+# Multilibs for powerpc embedded ELF targets with altivec.
+
+MULTILIB_OPTIONS = msoft-float \
+ mlittle/mbig
+
+MULTILIB_DIRNAMES = nof \
+ le be
+
+
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \
+ ${MULTILIB_MATCHES_ENDIAN} \
+ ${MULTILIB_MATCHES_SYSV}
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-ppcgas b/gcc-4.4.3/gcc/config/rs6000/t-ppcgas
new file mode 100644
index 000000000..120aef446
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-ppcgas
@@ -0,0 +1,14 @@
+# Multilibs for powerpc embedded ELF targets.
+
+MULTILIB_OPTIONS = msoft-float \
+ mlittle/mbig \
+ fleading-underscore
+
+MULTILIB_DIRNAMES = nof \
+ le be \
+ und
+
+MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align
+
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \
+ ${MULTILIB_MATCHES_ENDIAN}
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-ppcos b/gcc-4.4.3/gcc/config/rs6000/t-ppcos
new file mode 100644
index 000000000..819863bea
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-ppcos
@@ -0,0 +1,8 @@
+# Multilibs for a powerpc hosted ELF target (linux, SVR4)
+
+MULTILIB_OPTIONS = msoft-float
+MULTILIB_DIRNAMES = nof
+MULTILIB_EXTRA_OPTS = fPIC mstrict-align
+MULTILIB_EXCEPTIONS =
+
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-rs6000 b/gcc-4.4.3/gcc/config/rs6000/t-rs6000
new file mode 100644
index 000000000..30f021e80
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-rs6000
@@ -0,0 +1,18 @@
+# General rules that all rs6000/ targets must have.
+
+rs6000.o: $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
+ $(RTL_H) $(REGS_H) hard-reg-set.h \
+ real.h insn-config.h conditions.h insn-attr.h flags.h $(RECOG_H) \
+ $(OBSTACK_H) $(TREE_H) $(EXPR_H) $(OPTABS_H) except.h function.h \
+ output.h $(BASIC_BLOCK_H) $(INTEGRATE_H) toplev.h $(GGC_H) $(HASHTAB_H) \
+ $(TM_P_H) $(TARGET_H) $(TARGET_DEF_H) langhooks.h reload.h gt-rs6000.h \
+ cfglayout.h
+
+rs6000-c.o: $(srcdir)/config/rs6000/rs6000-c.c \
+ $(srcdir)/config/rs6000/rs6000-protos.h \
+ $(CONFIG_H) $(SYSTEM_H) $(TREE_H) $(CPPLIB_H) \
+ $(TM_P_H) c-pragma.h errors.h coretypes.h $(TM_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/rs6000/rs6000-c.c
+
+# The rs6000 backend doesn't cause warnings in these files.
+insn-conditions.o-warn =
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-rtems b/gcc-4.4.3/gcc/config/rs6000/t-rtems
new file mode 100644
index 000000000..c0fd8bf3b
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-rtems
@@ -0,0 +1,64 @@
+# Multilibs for powerpc RTEMS targets.
+
+MULTILIB_OPTIONS = \
+mcpu=403/mcpu=505/mcpu=601/mcpu=603e/mcpu=604/mcpu=860/mcpu=7400 \
+Dmpc8260 \
+msoft-float
+
+MULTILIB_DIRNAMES = \
+m403 m505 m601 m603e m604 m860 m7400 \
+mpc8260 \
+nof
+
+# MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
+MULTILIB_MATCHES =
+MULTILIB_MATCHES += ${MULTILIB_MATCHES_ENDIAN}
+MULTILIB_MATCHES += ${MULTILIB_MATCHES_SYSV}
+# Map 405 to 403
+MULTILIB_MATCHES += mcpu?403=mcpu?405
+# Map 602, 603e, 603 to 603e
+MULTILIB_MATCHES += mcpu?603e=mcpu?602
+MULTILIB_MATCHES += mcpu?603e=mcpu?603
+# Map 801, 821, 823 to 860
+MULTILIB_MATCHES += mcpu?860=mcpu?801
+MULTILIB_MATCHES += mcpu?860=mcpu?821
+MULTILIB_MATCHES += mcpu?860=mcpu?823
+# Map 7450 to 7400
+MULTILIB_MATCHES += mcpu?7400=mcpu?7450
+
+# Map 750 to .
+MULTILIB_MATCHES += mcpu?750=
+
+# Soft-float only, default implies msoft-float
+# NOTE: Must match with MULTILIB_MATCHES_FLOAT and MULTILIB_MATCHES
+MULTILIB_SOFTFLOAT_ONLY = \
+*mcpu=401/*msoft-float* \
+*mcpu=403/*msoft-float* \
+*mcpu=405/*msoft-float* \
+*mcpu=801/*msoft-float* \
+*mcpu=821/*msoft-float* \
+*mcpu=823/*msoft-float* \
+*mcpu=860/*msoft-float*
+
+# Hard-float only, take out msoft-float
+MULTILIB_HARDFLOAT_ONLY = \
+*mcpu=505/*msoft-float*
+
+MULTILIB_EXCEPTIONS =
+
+# Disallow -Dppc and -Dmpc without other options
+MULTILIB_EXCEPTIONS += Dppc* Dmpc*
+
+MULTILIB_EXCEPTIONS += \
+${MULTILIB_SOFTFLOAT_ONLY} \
+${MULTILIB_HARDFLOAT_ONLY}
+
+# Special rules
+# Take out all variants we don't want
+MULTILIB_EXCEPTIONS += *mcpu=403/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=505/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=601/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=604/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=750/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=860/Dmpc*
+MULTILIB_EXCEPTIONS += *mcpu=7400/Dmpc*
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-spe b/gcc-4.4.3/gcc/config/rs6000/t-spe
new file mode 100644
index 000000000..bd0b79593
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-spe
@@ -0,0 +1,68 @@
+# Multilibs for e500
+
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# What we really want are these variants:
+# -mcpu=7400
+# -mcpu=7400 -maltivec -mabi=altivec
+# -mcpu=7400 -msoft-float
+# -msoft-float
+# -mspe=no -mabi=no-spe -misel=no
+# so we'll need to create exceptions below.
+
+MULTILIB_OPTIONS = mcpu=7400 \
+ maltivec \
+ mabi=altivec \
+ msoft-float \
+ mspe=no \
+ mabi=no-spe \
+ misel=no \
+ mlittle
+
+MULTILIB_DIRNAMES = mpc7400 altivec abi-altivec \
+ nof no-spe no-abi-spe no-isel le
+
+MULTILIB_EXCEPTIONS = maltivec mabi=altivec mspe=no mabi=no-spe misel=no \
+ maltivec/mabi=altivec \
+ mcpu=7400/maltivec \
+ mcpu=7400/mabi=altivec \
+ *mcpu=7400/*mspe=no* \
+ *mcpu=7400/*mabi=no-spe* \
+ *mcpu=7400/*misel=no* \
+ *maltivec/*msoft-float* \
+ *maltivec/*mspe=no* \
+ *maltivec/*mabi=no-spe* \
+ *maltivec/*misel=no* \
+ *mabi=altivec/*msoft-float* \
+ *mabi=altivec/*mspe=no* \
+ *mabi=altivec/*mabi=no-spe* \
+ *mabi=altivec/*misel=no* \
+ *msoft-float/*mspe=no* \
+ *msoft-float/*mabi=no-spe* \
+ *msoft-float/*misel=no* \
+ mspe=no/mabi=no-spe \
+ mspe=no/misel=no \
+ mabi=no-spe/misel=no \
+ misel=no/mlittle \
+ mabi=no-spe/misel=no/mlittle \
+ mspe=no/mlittle \
+ mabi=spe/mlittle \
+ mcpu=7400/mabi=altivec/mlittle \
+ mcpu=7400/maltivec/mlittle \
+ mabi=no-spe/mlittle \
+ mspe=no/misel=no/mlittle \
+ mspe=no/mabi=no-spe/mlittle \
+ mabi=altivec/mlittle \
+ maltivec/mlittle \
+ maltivec/mabi=altivec/mlittle
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-vxworks b/gcc-4.4.3/gcc/config/rs6000/t-vxworks
new file mode 100644
index 000000000..f042b772c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-vxworks
@@ -0,0 +1,16 @@
+# Multilibs for VxWorks.
+
+# The base multilib is -mhard-float.
+MULTILIB_OPTIONS = mrtp fPIC msoft-float
+MULTILIB_DIRNAMES =
+MULTILIB_MATCHES = fPIC=fpic
+MULTILIB_EXCEPTIONS = fPIC*
+
+# This is set from the common config/t-vxworks but clobbered by t-ppccomm
+# on this target.
+EXTRA_MULTILIB_PARTS =
+
+# Similarly, LIB2FUNCS_EXTRA is set from config/t-vxworks and
+# t-ppccomm *adds* to it, but the common contents are useful to us.
+# In particular the base trampoline_setup bits are expected to be
+# provided there.
diff --git a/gcc-4.4.3/gcc/config/rs6000/t-vxworksae b/gcc-4.4.3/gcc/config/rs6000/t-vxworksae
new file mode 100644
index 000000000..5f682627e
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/t-vxworksae
@@ -0,0 +1,5 @@
+# Multilibs for VxWorks AE.
+
+MULTILIB_OPTIONS = mvthreads msoft-float
+MULTILIB_MATCHES =
+MULTILIB_EXCEPTIONS =
diff --git a/gcc-4.4.3/gcc/config/rs6000/tramp.asm b/gcc-4.4.3/gcc/config/rs6000/tramp.asm
new file mode 100644
index 000000000..133b98840
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/tramp.asm
@@ -0,0 +1,107 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000, 2007, 2008, 2009 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 3, or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * Under Section 7 of GPL version 3, you are granted additional
+ * permissions described in the GCC Runtime Library Exception, version
+ * 3.1, as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License and
+ * a copy of the GCC Runtime Library Exception along with this program;
+ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ * <http://www.gnu.org/licenses/>.
+ */
+
+/* Set up trampolines. */
+
+ .section ".text"
+#include "ppc-asm.h"
+#include "config.h"
+
+#ifndef __powerpc64__
+ .type trampoline_initial,@object
+ .align 2
+trampoline_initial:
+ mflr r0
+ bcl 20,31,1f
+.Lfunc = .-trampoline_initial
+ .long 0 /* will be replaced with function address */
+.Lchain = .-trampoline_initial
+ .long 0 /* will be replaced with static chain */
+1: mflr r11
+ mtlr r0
+ lwz r0,0(r11) /* function address */
+ lwz r11,4(r11) /* static chain */
+ mtctr r0
+ bctr
+
+trampoline_size = .-trampoline_initial
+ .size trampoline_initial,trampoline_size
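+
+/* Explanatory note: bcl 20,31,1f sets the link register to the address
+ of the word that follows it (.Lfunc), so the mflr r11 at label 1 lets
+ a copied trampoline locate its own function-address and static-chain
+ slots at run time without needing any relocation. */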
+
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
+
+FUNC_START(__trampoline_setup)
+ mflr r0 /* save return address */
+ bcl 20,31,.LCF0 /* load up __trampoline_initial into r7 */
+.LCF0:
+ mflr r11
+ addi r7,r11,trampoline_initial-4-.LCF0 /* trampoline address -4 */
+
+ li r8,trampoline_size /* verify that the trampoline is big enough */
+ cmpw cr1,r8,r4
+ srwi r4,r4,2 /* # words to move */
+ addi r9,r3,-4 /* adjust pointer for lwzu */
+ mtctr r4
+ blt cr1,.Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+.Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz .Lmove
+
+ /* Store correct function and static chain */
+ stw r5,.Lfunc(r3)
+ stw r6,.Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+.Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz .Lcache
+
+ /* Finally synchronize things & return */
+ sync
+ isync
+ blr
+
+.Labort:
+#if (defined __PIC__ || defined __pic__) && defined HAVE_AS_REL16
+ bcl 20,31,1f
+1: mflr r30
+ addis r30,r30,_GLOBAL_OFFSET_TABLE_-1b@ha
+ addi r30,r30,_GLOBAL_OFFSET_TABLE_-1b@l
+#endif
+ bl JUMP_TARGET(abort)
+FUNC_END(__trampoline_setup)
+
+#endif
diff --git a/gcc-4.4.3/gcc/config/rs6000/vec_types.h b/gcc-4.4.3/gcc/config/rs6000/vec_types.h
new file mode 100644
index 000000000..dca637d7c
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/vec_types.h
@@ -0,0 +1,52 @@
+/* Cell single token vector types
+ Copyright (C) 2007, 2009 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3 of the License, or (at your option)
+ any later version.
+
+ This file is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Single token vector data types for the PowerPC SIMD/Vector Multi-media
+ eXtension */
+
+#ifndef _VEC_TYPES_H_
+#define _VEC_TYPES_H_ 1
+
+#define qword __vector unsigned char
+
+#define vec_uchar16 __vector unsigned char
+#define vec_char16 __vector signed char
+#define vec_bchar16 __vector bool char
+
+#define vec_ushort8 __vector unsigned short
+#define vec_short8 __vector signed short
+#define vec_bshort8 __vector bool short
+
+#define vec_pixel8 __vector pixel
+
+#define vec_uint4 __vector unsigned int
+#define vec_int4 __vector signed int
+#define vec_bint4 __vector bool int
+
+#define vec_float4 __vector float
+
+#define vec_ullong2 __vector bool char
+#define vec_llong2 __vector bool short
+
+#define vec_double2 __vector bool int
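+
+/* Usage sketch (not part of the original header; assumes -maltivec):
+
+ vec_uint4 v = { 1, 2, 3, 4 };
+
+ declares one 128-bit vector of four unsigned ints; each macro above
+ simply spells an AltiVec vector type in Cell style. */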
+
+#endif /* _VEC_TYPES_H_ */
diff --git a/gcc-4.4.3/gcc/config/rs6000/vxworks.h b/gcc-4.4.3/gcc/config/rs6000/vxworks.h
new file mode 100644
index 000000000..cfd11eb6d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/vxworks.h
@@ -0,0 +1,139 @@
+/* Definitions of target machine for GNU compiler. Vxworks PowerPC version.
+ Copyright (C) 1996, 2000, 2002, 2003, 2004, 2005, 2007
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Note to future editors: VxWorks is mostly an EABI target. We do
+ not use rs6000/eabi.h because we would have to override most of
+ it anyway. However, if you change that file, consider making
+ analogous changes here too. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC VxWorks)");
+
+/* CPP predefined macros. */
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__ppc"); \
+ builtin_define ("__EABI__"); \
+ builtin_define ("__ELF__"); \
+ builtin_define ("__vxworks"); \
+ builtin_define ("__VXWORKS__"); \
+ if (!TARGET_SOFT_FLOAT) \
+ builtin_define ("__hardfp"); \
+ \
+ /* C89 namespace violation! */ \
+ builtin_define ("CPU_FAMILY=PPC"); \
+ } \
+ while (0)
+
+/* Only big endian PPC is supported by VxWorks. */
+#undef BYTES_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN 1
+
+/* We have to kill off the entire specs set created by rs6000/sysv4.h
+ and substitute our own set. The top level vxworks.h has done some
+ of this for us. */
+
+#undef SUBTARGET_EXTRA_SPECS
+#undef CPP_SPEC
+#undef CC1_SPEC
+#undef ASM_SPEC
+
+#define SUBTARGET_EXTRA_SPECS /* none needed */
+
+/* FIXME: The only reason we allow no -mcpu switch at all is that
+ config-ml.in insists on a "." multilib. */
+#define CPP_SPEC \
+"%{!DCPU=*: \
+ %{mcpu=403 : -DCPU=PPC403 ; \
+ mcpu=405 : -DCPU=PPC405 ; \
+ mcpu=440 : -DCPU=PPC440 ; \
+ mcpu=603 : -DCPU=PPC603 ; \
+ mcpu=604 : -DCPU=PPC604 ; \
+ mcpu=860 : -DCPU=PPC860 ; \
+ mcpu=8540: -DCPU=PPC85XX ; \
+ : -DCPU=PPC604 }}" \
+VXWORKS_ADDITIONAL_CPP_SPEC
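+
+/* Example: "gcc -mcpu=603" defines CPU=PPC603 during preprocessing, and
+ with no -mcpu option the default CPU=PPC604 is used; an explicit
+ -DCPU=* on the command line suppresses all of this via the !DCPU=*
+ guard. */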
+
+#define CC1_SPEC \
+"%{G*} %{mno-sdata:-msdata=none} %{msdata:-msdata=default} \
+ %{mlittle|mlittle-endian:-mstrict-align} \
+ %{profile: -p} \
+ %{fvec:-maltivec} %{fvec-eabi:-maltivec -mabi=altivec}"
+
+#define ASM_SPEC \
+"%(asm_cpu) \
+ %{,assembler|,assembler-with-cpp: %{mregnames} %{mno-regnames}} \
+ %{v:-v} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
+ %{mrelocatable} %{mrelocatable-lib} %{fpic:-K PIC} %{fPIC:-K PIC} -mbig"
+
+#undef LIB_SPEC
+#define LIB_SPEC VXWORKS_LIB_SPEC
+#undef LINK_SPEC
+#define LINK_SPEC VXWORKS_LINK_SPEC
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC VXWORKS_STARTFILE_SPEC
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC VXWORKS_ENDFILE_SPEC
+
+/* There is no default multilib. */
+#undef MULTILIB_DEFAULTS
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI | MASK_STRICT_ALIGN)
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC604
+
+/* Nor sdata, for kernel mode. We use this in
+ SUBSUBTARGET_OVERRIDE_OPTIONS, after rs6000_rtp has been initialized. */
+#undef SDATA_DEFAULT_SIZE
+#define SDATA_DEFAULT_SIZE (TARGET_VXWORKS_RTP ? 8 : 0)
+
+#undef STACK_BOUNDARY
+#define STACK_BOUNDARY (16*BITS_PER_UNIT)
+/* Override sysv4.h, reset to the default. */
+#undef PREFERRED_STACK_BOUNDARY
+
+/* Make -mcpu=8540 imply SPE. ISEL is automatically enabled; the
+ others must be done by hand. Handle -mrtp. Disable -fPIC
+ for -mrtp, since the VxWorks PIC model is not compatible with it. */
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+ do { \
+ if (TARGET_E500) \
+ { \
+ rs6000_spe = 1; \
+ rs6000_spe_abi = 1; \
+ rs6000_float_gprs = 1; \
+ } \
+ \
+ if (!g_switch_set) \
+ g_switch_value = SDATA_DEFAULT_SIZE; \
+ VXWORKS_OVERRIDE_OPTIONS; \
+ } while (0)
+
+/* No _mcount profiling on VxWorks. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE,LABELNO) VXWORKS_FUNCTION_PROFILER(FILE,LABELNO)
diff --git a/gcc-4.4.3/gcc/config/rs6000/vxworksae.h b/gcc-4.4.3/gcc/config/rs6000/vxworksae.h
new file mode 100644
index 000000000..dd95bb1e4
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/vxworksae.h
@@ -0,0 +1,23 @@
+/* PowerPC VxWorks AE target definitions for GNU compiler.
+ Copyright (C) 2005, 2007 Free Software Foundation, Inc.
+ Contributed by CodeSourcery, LLC.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC VxWorks AE)");
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/x-aix b/gcc-4.4.3/gcc/config/rs6000/x-aix
new file mode 100644
index 000000000..11ccb932d
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/x-aix
@@ -0,0 +1,6 @@
+# genautomata requires more than 256MB of data
+build/genautomata : override LDFLAGS += -Wl,-bmaxdata:0x20000000
+
+# jc1 requires more than 256MB of data
+jc1 : override LDFLAGS += -Wl,-bmaxdata:0x20000000
+
diff --git a/gcc-4.4.3/gcc/config/rs6000/x-darwin b/gcc-4.4.3/gcc/config/rs6000/x-darwin
new file mode 100644
index 000000000..033ab6bf5
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/x-darwin
@@ -0,0 +1,4 @@
+host-ppc-darwin.o : $(srcdir)/config/rs6000/host-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ config/host-darwin.h $(DIAGNOSTIC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
diff --git a/gcc-4.4.3/gcc/config/rs6000/x-darwin64 b/gcc-4.4.3/gcc/config/rs6000/x-darwin64
new file mode 100644
index 000000000..3cb423db3
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/x-darwin64
@@ -0,0 +1,4 @@
+host-ppc64-darwin.o : $(srcdir)/config/rs6000/host-ppc64-darwin.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h hosthooks.h $(HOSTHOOKS_DEF_H) toplev.h \
+ config/host-darwin.h $(DIAGNOSTIC_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $< -o $@
diff --git a/gcc-4.4.3/gcc/config/rs6000/x-linux-relax b/gcc-4.4.3/gcc/config/rs6000/x-linux-relax
new file mode 100644
index 000000000..2743a94e4
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/x-linux-relax
@@ -0,0 +1,2 @@
+# At -O0 cc1 etc. are too large and -Wl,--relax is needed
+$(COMPILERS) : override LDFLAGS += -Wl,--relax
diff --git a/gcc-4.4.3/gcc/config/rs6000/x-rs6000 b/gcc-4.4.3/gcc/config/rs6000/x-rs6000
new file mode 100644
index 000000000..8246c3776
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/x-rs6000
@@ -0,0 +1,3 @@
+driver-rs6000.o : $(srcdir)/config/rs6000/driver-rs6000.c \
+ $(CONFIG_H) $(SYSTEM_H) $(TM_H) coretypes.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
diff --git a/gcc-4.4.3/gcc/config/rs6000/xcoff.h b/gcc-4.4.3/gcc/config/rs6000/xcoff.h
new file mode 100644
index 000000000..76bbf4044
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/xcoff.h
@@ -0,0 +1,334 @@
+/* Definitions of target machine for GNU compiler,
+ for some generic XCOFF file format
+ Copyright (C) 2001, 2002, 2003, 2004, 2007, 2008
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#define TARGET_OBJECT_FORMAT OBJECT_XCOFF
+
+/* The RS/6000 uses the XCOFF format. */
+#define XCOFF_DEBUGGING_INFO 1
+
+/* Define if the object format being used is COFF or a superset. */
+#define OBJECT_FORMAT_COFF
+
+/* Define the magic numbers that we recognize as COFF.
+
+ AIX 4.3 adds U803XTOCMAGIC (0757) for 64-bit objects and AIX V5 adds
+ U64_TOCMAGIC (0767), but collect2.c does not include files in the
+ correct order to conditionally define the symbolic name in this macro.
+
+ The AIX linker accepts import/export files as object files,
+ so accept "#!" (0x2321) magic number. */
+#define MY_ISCOFF(magic) \
+ ((magic) == U802WRMAGIC || (magic) == U802ROMAGIC \
+ || (magic) == U802TOCMAGIC || (magic) == 0757 || (magic) == 0767 \
+ || (magic) == 0x2321)
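+
+/* A minimal usage sketch (editor's illustration; "f" and "hdr" are
+   hypothetical, this is not code from collect2.c):
+
+     struct filehdr hdr;
+     if (fread (&hdr, sizeof hdr, 1, f) == 1 && MY_ISCOFF (hdr.f_magic))
+       ... treat the file as an object or import/export file ...  */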
+
+/* We don't have GAS for the RS/6000 yet, so don't write out special
+ .stabs in cc1plus. */
+
+#define FASCIST_ASSEMBLER
+
+/* We define this to prevent the name mangler from putting dollar signs into
+ function names. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* We define this to 0 so that gcc will never accept a dollar sign in a
+ variable name. This is needed because the AIX assembler will not accept
+ dollar signs. */
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+/* The AIX .align pseudo-op accepts values from 0 to 12, corresponding
+   to log base 2 of the alignment in bytes; 12 = 4096 bytes = 32768 bits. */
+
+#define MAX_OFILE_ALIGNMENT 32768
+
+/* Default alignment factor for csect directives, chosen to honor
+ BIGGEST_ALIGNMENT. */
+#define XCOFF_CSECT_DEFAULT_ALIGNMENT_STR "4"
+
+/* Return nonzero if this entry is to be written into the constant
+ pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF
+ or a CONST containing one of them. If -mfp-in-toc (the default),
+ we also do this for floating-point constants. We actually can only
+ do this if the FP formats of the target and host machines are the
+ same, but we can't check that since not every file that uses
+ GO_IF_LEGITIMATE_ADDRESS_P includes real.h. We also do this when
+ we can write the entry into the TOC and the entry is not larger
+ than a TOC entry. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \
+ (TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF \
+ || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST_INT \
+ && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
+ || (GET_CODE (X) == CONST_DOUBLE \
+ && (TARGET_MINIMAL_TOC \
+ || (SCALAR_FLOAT_MODE_P (GET_MODE (X)) \
+ && ! TARGET_NO_FP_IN_TOC)))))
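+
+/* Examples of RTXs accepted by the predicate above (editor's
+   illustration, assuming TARGET_TOC and the default -mfp-in-toc):
+
+     (symbol_ref "x")
+     (const (plus (symbol_ref "x") (const_int 4)))
+     (label_ref ...)
+     (const_int 42)      -- if MODE is no wider than Pmode
+     (const_double ...)  -- scalar float constants  */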
+
+#define TARGET_ASM_OUTPUT_ANCHOR rs6000_xcoff_asm_output_anchor
+#define TARGET_ASM_GLOBALIZE_LABEL rs6000_xcoff_asm_globalize_label
+#define TARGET_ASM_INIT_SECTIONS rs6000_xcoff_asm_init_sections
+#define TARGET_ASM_RELOC_RW_MASK rs6000_xcoff_reloc_rw_mask
+#define TARGET_ASM_NAMED_SECTION rs6000_xcoff_asm_named_section
+#define TARGET_ASM_SELECT_SECTION rs6000_xcoff_select_section
+#define TARGET_ASM_SELECT_RTX_SECTION rs6000_xcoff_select_rtx_section
+#define TARGET_ASM_UNIQUE_SECTION rs6000_xcoff_unique_section
+#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
+#define TARGET_STRIP_NAME_ENCODING rs6000_xcoff_strip_name_encoding
+#define TARGET_SECTION_TYPE_FLAGS rs6000_xcoff_section_type_flags
+
+/* FP save and restore routines. */
+#define SAVE_FP_PREFIX "._savef"
+#define SAVE_FP_SUFFIX ""
+#define RESTORE_FP_PREFIX "._restf"
+#define RESTORE_FP_SUFFIX ""
+
+/* Function name to call to do profiling. */
+#undef RS6000_MCOUNT
+#define RS6000_MCOUNT ".__mcount"
+
+/* This outputs NAME to FILE up to the first null or '['. */
+
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ assemble_name ((FILE), (*targetm.strip_name_encoding) (NAME))
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { RS6000_OUTPUT_BASENAME (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP "\t.globl "
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START rs6000_xcoff_file_start
+#define TARGET_ASM_FILE_END rs6000_xcoff_file_end
+#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+#define TARGET_ASM_FILE_START_FILE_DIRECTIVE false
+
+/* This macro produces the initial definition of a function name.
+ On the RS/6000, we need to place an extra '.' in the function name and
+ output the function descriptor.
+ Dollar signs are converted to underscores.
+
+ The csect for the function will have already been created when
+ text_section was selected. We do have to go back to that csect, however.
+
+ The third and fourth parameters to the .function pseudo-op (16 and 044)
+ are placeholders which no longer have any use. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
+{ char *buffer = (char *) alloca (strlen (NAME) + 1); \
+ char *p; \
+ int dollar_inside = 0; \
+ strcpy (buffer, NAME); \
+ p = strchr (buffer, '$'); \
+ while (p) { \
+ *p = '_'; \
+ dollar_inside++; \
+ p = strchr (p + 1, '$'); \
+ } \
+ if (TREE_PUBLIC (DECL)) \
+ { \
+      if (!RS6000_WEAK || !DECL_WEAK (DECL))			\
+ { \
+ if (dollar_inside) { \
+ fprintf(FILE, "\t.rename .%s,\".%s\"\n", buffer, NAME); \
+ fprintf(FILE, "\t.rename %s,\"%s\"\n", buffer, NAME); \
+ } \
+ fputs ("\t.globl .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ else \
+ { \
+ if (dollar_inside) { \
+ fprintf(FILE, "\t.rename .%s,\".%s\"\n", buffer, NAME); \
+ fprintf(FILE, "\t.rename %s,\"%s\"\n", buffer, NAME); \
+ } \
+ fputs ("\t.lglobl .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ putc ('\n', FILE); \
+ } \
+ fputs ("\t.csect ", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ fputs (":\n", FILE); \
+ fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ fputs (", TOC[tc0], 0\n", FILE); \
+ in_section = NULL; \
+ switch_to_section (function_section (DECL)); \
+ putc ('.', FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ fputs (":\n", FILE); \
+ if (write_symbols != NO_DEBUG) \
+ xcoffout_declare_function (FILE, DECL, buffer); \
+}
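+
+/* For a public 32-bit function "foo", the macro above emits roughly
+   the following (editor's illustration, not verbatim output):
+
+       .globl .foo
+       .csect foo[DS]
+   foo:
+       .long .foo, TOC[tc0], 0
+       .csect .text[PR]
+   .foo:
+
+   i.e. "foo" names the function descriptor and ".foo" the code entry
+   point.  */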
+
+/* Output a reference to SYM on FILE. */
+
+#define ASM_OUTPUT_SYMBOL_REF(FILE, SYM) \
+ rs6000_output_symbol_ref (FILE, SYM)
+
+/* This says how to output an external.
+ Dollar signs are converted to underscores. */
+
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+{ char *buffer = (char *) alloca (strlen (NAME) + 1); \
+ char *p; \
+ rtx _symref = XEXP (DECL_RTL (DECL), 0); \
+ int dollar_inside = 0; \
+ strcpy (buffer, NAME); \
+ p = strchr (buffer, '$'); \
+ while (p) { \
+ *p = '_'; \
+ dollar_inside++; \
+ p = strchr (p + 1, '$'); \
+ } \
+ if (dollar_inside) { \
+ fputs ("\t.extern .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, buffer); \
+ putc ('\n', FILE); \
+ fprintf(FILE, "\t.rename .%s,\".%s\"\n", buffer, NAME); \
+ } \
+ if ((TREE_CODE (DECL) == VAR_DECL \
+ || TREE_CODE (DECL) == FUNCTION_DECL) \
+ && (NAME)[strlen (NAME) - 1] != ']') \
+ { \
+ XSTR (_symref, 0) = concat (XSTR (_symref, 0), \
+ (TREE_CODE (DECL) == FUNCTION_DECL \
+ ? "[DS]" : "[RW]"), \
+ NULL); \
+ } \
+}
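+
+/* Illustrative effect (editor's note): an external function "bar" has
+   "[DS]" appended to its symbol_ref, so references resolve to the
+   function descriptor csect; external data gets "[RW]" instead.  */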
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+ asm_fprintf ((FILE), "%U%s", rs6000_xcoff_strip_dollar (NAME));
+
+/* This is how to output an internal label prefix. rs6000.c uses this
+ when generating traceback tables. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ fprintf (FILE, "%s..", PREFIX)
+
+/* This is how to output a label for a jump table. Arguments are the same as
+ for (*targetm.asm_out.internal_label), except the insn for the jump table is
+ passed. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 2); (*targetm.asm_out.internal_label) (FILE, PREFIX, NUM); }
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s..%u", rs6000_xcoff_strip_dollar (PREFIX), (unsigned) (NUM))
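+
+/* Example (editor's illustration): PREFIX "L" and NUM 42 yield the
+   string "*L..42"; the leading '*' tells assemble_name to emit the
+   rest verbatim, without the user-label prefix.  */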
+
+/* This is how to output an assembler line to define N characters starting
+ at P to FILE. */
+
+#define ASM_OUTPUT_ASCII(FILE, P, N) output_ascii ((FILE), (P), (N))
+
+/* This is how to advance the location counter by SIZE bytes. */
+
+#define SKIP_ASM_OP "\t.space "
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "%s"HOST_WIDE_INT_PRINT_UNSIGNED"\n", SKIP_ASM_OP, (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define COMMON_ASM_OP "\t.comm "
+
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+ do { fputs (COMMON_ASM_OP, (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if ((ALIGN) > 32) \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n", (SIZE), \
+ exact_log2 ((ALIGN) / BITS_PER_UNIT)); \
+ else if ((SIZE) > 4) \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",3\n", (SIZE)); \
+ else \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED"\n", (SIZE)); \
+ } while (0)
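+
+/* Sample output (editor's illustration): a 64-byte common symbol
+   "buf" with a 128-bit alignment request produces
+
+       .comm buf,64,4
+
+   since exact_log2 (128 / 8) == 4; requests of 32 bits or less use
+   the ",3" (doubleword) or plain forms above instead.  */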
+
+/* This says how to output an assembler line
+ to define a local common symbol.
+   Alignment cannot be specified, but we can try to maintain the
+   alignment left by a preceding TOC section if that section was
+   aligned for 64-bit mode. */
+
+#define LOCAL_COMMON_ASM_OP "\t.lcomm "
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+ do { fputs (LOCAL_COMMON_ASM_OP, (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fprintf ((FILE), ","HOST_WIDE_INT_PRINT_UNSIGNED",%s\n", \
+ (TARGET_32BIT ? (SIZE) : (ROUNDED)), \
+ xcoff_bss_section_name); \
+ } while (0)
+
+/* This is how we tell the assembler that two symbols have the same value. */
+#define SET_ASM_OP "\t.set "
+
+/* This is how we tell the assembler to equate two values. */
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "%s", SET_ASM_OP); \
+ RS6000_OUTPUT_BASENAME (FILE, LABEL1); \
+ fprintf (FILE, ","); \
+ RS6000_OUTPUT_BASENAME (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
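+
+/* Example (editor's illustration): equating "alias" with "target"
+   emits ".set alias,target".  */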
+
+/* Used by rs6000_assemble_integer, among others. */
+#define DOUBLE_INT_ASM_OP "\t.llong\t"
+
+/* Output before instructions. */
+#define TEXT_SECTION_ASM_OP "\t.csect .text[PR]"
+
+/* Output before writable data. */
+#define DATA_SECTION_ASM_OP \
+ "\t.csect .data[RW]," XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
+
+
+/* Define to place DWARF2 unwind info in the data section rather
+   than in a separate .eh_frame section.  We do this because the AIX
+   linker would otherwise garbage collect the .eh_frame section. */
+#define EH_FRAME_IN_DATA_SECTION 1
diff --git a/gcc-4.4.3/gcc/config/rs6000/xfpu.h b/gcc-4.4.3/gcc/config/rs6000/xfpu.h
new file mode 100644
index 000000000..af6311636
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/xfpu.h
@@ -0,0 +1,26 @@
+/* Definitions for Xilinx PowerPC 405/440 APU.
+
+ Copyright (C) 2008 Free Software Foundation, Inc.
+ Contributed by Michael Eager (eager@eagercon.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+/* Undefine definitions from rs6000.h. */
+#undef TARGET_XILINX_FPU
+
+#define TARGET_XILINX_FPU (rs6000_xilinx_fpu)
diff --git a/gcc-4.4.3/gcc/config/rs6000/xfpu.md b/gcc-4.4.3/gcc/config/rs6000/xfpu.md
new file mode 100644
index 000000000..25c449a51
--- /dev/null
+++ b/gcc-4.4.3/gcc/config/rs6000/xfpu.md
@@ -0,0 +1,140 @@
+;; Scheduling description for the Xilinx PowerPC 405 APU Floating Point Unit.
+;; Copyright (C) 2008 Free Software Foundation, Inc.
+;; Contributed by Michael Eager (eager@eagercon.com).
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;----------------------------------------------------
+;; Xilinx APU FPU Pipeline Description
+;;
+;; - The 'type' and 'fp_type' attrs should definitely be cleaned up
+;;   at some point in the future.  ddiv, sdiv, dmul, smul, etc. are
+;;   quite confusing; consistent fp* attrs should be used instead.
+;;   'fp_type' should also go away, leaving us with only 'fp'.
+;;
+;;----------------------------------------------------
+
+;; -------------------------------------------------------------------------
+;; Latencies
+;; Latest latency figures (all in FCB cycles).  The PowerPC-to-FPU
+;; frequency ratio is assumed to be 1/2 (the most common deployment).
+;; Add 2 PPC cycles for register-file access plus writeback, and
+;; 2 PPC cycles for issue (from the PPC).
+;; SP DP
+;; Loads: 4 6
+;; Stores: 1 2 (from availability of data)
+;; Move/Abs/Neg: 1 1
+;; Add/Subtract: 5 7
+;; Multiply: 4 11
+;; Multiply-add: 10 19
+;; Convert (any): 4 6
+;; Divide/Sqrt: 27 56
+;; Compares: 1 2
+;;
+;; Bypasses are needed to model the forwarding capability of the FPU;
+;; add these at some future time (see the sketch below).
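+;;
+;; A hedged sketch of what such a bypass could look like (editor's
+;; illustration; the latency value is invented for the example):
+;;
+;;   (define_bypass 11 "fp-mul-s" "fp-addsub-s")
+;;
+;; i.e. a single-precision multiply result forwarded directly into a
+;; single-precision add/subtract would be available after 11 cycles
+;; instead of the full 12-cycle "fp-mul-s" latency.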
+;; -------------------------------------------------------------------------
+(define_automaton "Xfpu")
+(define_cpu_unit "Xfpu_issue,Xfpu_addsub,Xfpu_mul,Xfpu_div,Xfpu_sqrt" "Xfpu")
+
+
+(define_insn_reservation "fp-default" 2
+ (and (and
+ (eq_attr "type" "fp")
+ (eq_attr "fp_type" "fp_default"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2")
+
+(define_insn_reservation "fp-compare" 6
+ (and (eq_attr "type" "fpcompare") ;; Inconsistent naming
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_addsub")
+
+(define_insn_reservation "fp-addsub-s" 14
+ (and (and
+ (eq_attr "type" "fp")
+ (eq_attr "fp_type" "fp_addsub_s"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_addsub")
+
+(define_insn_reservation "fp-addsub-d" 18
+ (and (and
+ (eq_attr "type" "fp")
+ (eq_attr "fp_type" "fp_addsub_d"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_addsub")
+
+(define_insn_reservation "fp-mul-s" 12
+ (and (and
+ (eq_attr "type" "fp")
+ (eq_attr "fp_type" "fp_mul_s"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_mul")
+
+(define_insn_reservation "fp-mul-d" 16 ;; Actually 28. Long latencies are killing the automaton formation. Need to figure out why.
+ (and (and
+ (eq_attr "type" "fp")
+ (eq_attr "fp_type" "fp_mul_d"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_mul")
+
+(define_insn_reservation "fp-div-s" 24 ;; Actually 34
+ (and (eq_attr "type" "sdiv") ;; Inconsistent attr naming
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_div*10") ;; Unpipelined
+
+(define_insn_reservation "fp-div-d" 34 ;; Actually 116
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc405")) ;; Inconsistent attr naming
+ "Xfpu_issue*2,Xfpu_div*10") ;; Unpipelined
+
+(define_insn_reservation "fp-maddsub-s" 24
+ (and (and
+ (eq_attr "type" "fp")
+ (eq_attr "fp_type" "fp_maddsub_s"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_mul,nothing*7,Xfpu_addsub")
+
+(define_insn_reservation "fp-maddsub-d" 34 ;; Actually 42
+ (and (and
+ (eq_attr "type" "dmul") ;; Inconsistent attr naming
+ (eq_attr "fp_type" "fp_maddsub_d"))
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_mul,nothing*7,Xfpu_addsub")
+
+(define_insn_reservation "fp-load" 10 ;; FIXME. Is double/single precision the same ?
+ (and (eq_attr "type" "fpload, fpload_ux, fpload_u")
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*10")
+
+(define_insn_reservation "fp-store" 4
+ (and (eq_attr "type" "fpstore, fpstore_ux, fpstore_u")
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*4")
+
+(define_insn_reservation "fp-sqrt-s" 24 ;; Actually 56
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_sqrt*10") ;; Unpipelined
+
+
+(define_insn_reservation "fp-sqrt-d" 34 ;; Actually 116
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc405"))
+ "Xfpu_issue*2,Xfpu_sqrt*10") ;; Unpipelined
+