aboutsummaryrefslogtreecommitdiffstats
path: root/gcc-4.9/libgcc/config/cris
diff options
context:
space:
mode:
Diffstat (limited to 'gcc-4.9/libgcc/config/cris')
-rw-r--r--gcc-4.9/libgcc/config/cris/arit.c313
-rw-r--r--gcc-4.9/libgcc/config/cris/libgcc-glibc.ver7
-rw-r--r--gcc-4.9/libgcc/config/cris/mulsi3.S269
-rw-r--r--gcc-4.9/libgcc/config/cris/sfp-machine.h81
-rw-r--r--gcc-4.9/libgcc/config/cris/t-cris10
-rw-r--r--gcc-4.9/libgcc/config/cris/t-elfmulti3
-rw-r--r--gcc-4.9/libgcc/config/cris/t-linux2
-rw-r--r--gcc-4.9/libgcc/config/cris/umulsidi3.S289
8 files changed, 974 insertions, 0 deletions
diff --git a/gcc-4.9/libgcc/config/cris/arit.c b/gcc-4.9/libgcc/config/cris/arit.c
new file mode 100644
index 000000000..6934f5ded
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/arit.c
@@ -0,0 +1,313 @@
+/* Signed and unsigned multiplication and division and modulus for CRIS.
+ Contributed by Axis Communications.
+ Written by Hans-Peter Nilsson <hp@axis.se>, circa 1992.
+
+ Copyright (C) 1998-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+
+/* Note that we provide prototypes for all "const" functions, to attach
+ the const attribute. This is necessary in 2.7.2 - adding the
+ attribute to the function *definition* is a syntax error.
+ This did not work with e.g. 2.1; back then, the return type had to
+ be "const". */
+
+#include "config.h"
+
+/* LZ (v): count leading zeros of a nonzero value.  Only defined for
+   arch v3 and up; when LZ is not defined, the code below falls back
+   to explicit shift loops instead.  */
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 3
+#define LZ(v) __builtin_clz (v)
+#endif
+
+/* SIGNMULT (s, a): a negated iff s is negative, i.e. a multiplied by
+   the sign of s (s is only ever +1 or -1 here).  */
+/* In (at least) the 4.7 series, GCC doesn't automatically choose the
+ most optimal strategy, possibly related to insufficient modelling of
+ delay-slot costs. */
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
+#define SIGNMULT(s, a) ((s) * (a)) /* Cheap multiplication, better than branch. */
+#else
+#define SIGNMULT(s, a) ((s) < 0 ? -(a) : (a)) /* Branches are still better. */
+#endif
+
+#if defined (L_udivsi3) || defined (L_divsi3) || defined (L_umodsi3) \
+ || defined (L_modsi3)
+/* Result type of divmod worker function. */
+struct quot_rem
+ {
+ long quot; /* Quotient of the division. */
+ long rem; /* Remainder of the division. */
+ };
+
+/* This is the worker function for div and mod. It is inlined into the
+ respective library function. Parameter A must have bit 31 == 0. */
+
+static __inline__ struct quot_rem
+do_31div (unsigned long a, unsigned long b)
+ __attribute__ ((__const__, __always_inline__));
+
+static __inline__ struct quot_rem
+do_31div (unsigned long a, unsigned long b)
+{
+ /* Adjust operands and result if a is 31 bits. */
+ long extra = 0;
+ int quot_digits = 0;
+
+ /* Division by zero: return all-ones for both quotient and
+ remainder rather than trapping. */
+ if (b == 0)
+ {
+ struct quot_rem ret;
+ ret.quot = 0xffffffff;
+ ret.rem = 0xffffffff;
+ return ret;
+ }
+
+ /* Trivial case: quotient 0, remainder a. */
+ if (a < b)
+ return (struct quot_rem) { 0, a };
+
+ /* Compute the number of quotient digits as the distance between
+ the topmost set bits of a and b, then left-align b with a. */
+#ifdef LZ
+ if (b <= a)
+ {
+ quot_digits = LZ (b) - LZ (a);
+ quot_digits += (a >= (b << quot_digits));
+ b <<= quot_digits;
+ }
+#else
+ while (b <= a)
+ {
+ b <<= 1;
+ quot_digits++;
+ }
+#endif
+
+ /* Is a 31 bits? Note that bit 31 is handled by the caller. */
+ if (a & 0x40000000)
+ {
+ /* Then make b:s highest bit max 0x40000000, because it must have
+ been 0x80000000 to be 1 bit higher than a. */
+ b >>= 1;
+
+ /* Adjust a to be maximum 0x3fffffff, i.e. two upper bits zero. */
+ if (a >= b)
+ {
+ a -= b;
+ extra = 1 << (quot_digits - 1);
+ }
+ else
+ {
+ a -= b >> 1;
+
+ /* Remember that we adjusted a by subtracting b * 2 ** Something. */
+ extra = 1 << quot_digits;
+ }
+
+ /* The number of quotient digits will be one less, because
+ we just adjusted b. */
+ quot_digits--;
+ }
+
+ /* Now do the division part. */
+
+ /* Subtract b and add ones to the right when a >= b
+ i.e. "a - (b - 1) == (a - b) + 1". */
+ b--;
+
+/* Emit one CRIS "dstep" division step on a, with b as the step operand;
+ each step produces one quotient bit at the low end of a. */
+#define DS __asm__ ("dstep %2,%0" : "=r" (a) : "0" (a), "r" (b))
+
+ /* Intentional fall-through (no breaks): execute exactly
+ quot_digits division steps. */
+ switch (quot_digits)
+ {
+ case 32: DS; case 31: DS; case 30: DS; case 29: DS;
+ case 28: DS; case 27: DS; case 26: DS; case 25: DS;
+ case 24: DS; case 23: DS; case 22: DS; case 21: DS;
+ case 20: DS; case 19: DS; case 18: DS; case 17: DS;
+ case 16: DS; case 15: DS; case 14: DS; case 13: DS;
+ case 12: DS; case 11: DS; case 10: DS; case 9: DS;
+ case 8: DS; case 7: DS; case 6: DS; case 5: DS;
+ case 4: DS; case 3: DS; case 2: DS; case 1: DS;
+ case 0:;
+ }
+
+ {
+ struct quot_rem ret;
+ ret.quot = (a & ((1 << quot_digits) - 1)) + extra; /* Low bits hold the quotient. */
+ ret.rem = a >> quot_digits; /* High bits hold the remainder. */
+ return ret;
+ }
+}
+
+#ifdef L_udivsi3
+/* Unsigned 32-bit division.  Reduces the dividend below 2^31 so the
+ 31-bit worker do_31div can perform the bulk of the work; EXTRA
+ accounts for the quotient bits removed by the reduction. */
+unsigned long
+__Udiv (unsigned long a, unsigned long b) __attribute__ ((__const__));
+
+unsigned long
+__Udiv (unsigned long a, unsigned long b)
+{
+ long extra = 0;
+
+ /* Adjust operands and result, if a and/or b is 32 bits. */
+ /* Effectively: b & 0x80000000. */
+ if ((long) b < 0)
+ /* With bit 31 set in b, the quotient can only be 0 or 1. */
+ return a >= b;
+
+ /* Effectively: a & 0x80000000. */
+ if ((long) a < 0)
+ {
+ int tmp = 0;
+
+ if (b == 0)
+ return 0xffffffff; /* Division by zero: all-ones, as in do_31div. */
+#ifdef LZ
+ tmp = LZ (b);
+#else
+ /* Find the shift count that left-aligns b's topmost set bit. */
+ for (tmp = 31; (((long) b & (1 << tmp)) == 0); tmp--)
+ ;
+
+ tmp = 31 - tmp;
+#endif
+
+ /* Subtract the largest b * 2^k that fits in a, recording that
+ multiple in EXTRA so it can be added back to the quotient. */
+ if ((b << tmp) > a)
+ {
+ extra = 1 << (tmp-1);
+ a -= b << (tmp - 1);
+ }
+ else
+ {
+ extra = 1 << tmp;
+ a -= b << tmp;
+ }
+ }
+
+ return do_31div (a, b).quot+extra;
+}
+#endif /* L_udivsi3 */
+
+#ifdef L_divsi3
+/* Signed 32-bit division: divide the magnitudes via do_31div and
+ apply the result sign afterwards (negative iff exactly one operand
+ is negative). */
+long
+__Div (long a, long b) __attribute__ ((__const__));
+
+long
+__Div (long a, long b)
+{
+ long extra = 0;
+ long sign = (b < 0) ? -1 : 1;
+ long res;
+
+ /* We need to handle a == -2147483648 as expected and must while
+ doing that avoid producing a sequence like "abs (a) < 0" as GCC
+ may optimize out the test. That sequence may not be obvious as
+ we call inline functions. Testing for a being negative and
+ handling (presumably much rarer than positive) enables us to get
+ a bit of optimization for an (accumulated) reduction of the
+ penalty of the 0x80000000 special-case. */
+ if (a < 0)
+ {
+ sign = -sign;
+
+ if ((a & 0x7fffffff) == 0)
+ {
+ /* We're at 0x80000000. Tread carefully. */
+ /* Knock one |b| off the magnitude (accounted for in EXTRA)
+ so that the negation below cannot overflow. */
+ a -= SIGNMULT (sign, b);
+ extra = sign;
+ }
+ a = -a;
+ }
+
+ res = do_31div (a, __builtin_labs (b)).quot;
+ return SIGNMULT (sign, res) + extra;
+}
+#endif /* L_divsi3 */
+
+
+#ifdef L_umodsi3
+/* Unsigned 32-bit modulus.  Reduces the dividend below 2^31 by
+ subtracting multiples of b (which leaves the remainder unchanged),
+ then lets do_31div produce the remainder. */
+unsigned long
+__Umod (unsigned long a, unsigned long b) __attribute__ ((__const__));
+
+unsigned long
+__Umod (unsigned long a, unsigned long b)
+{
+ /* Adjust operands and result if a and/or b is 32 bits. */
+ if ((long) b < 0)
+ /* With bit 31 set in b, at most one subtraction is needed. */
+ return a >= b ? a - b : a;
+
+ if ((long) a < 0)
+ {
+ int tmp = 0;
+
+ if (b == 0)
+ return a; /* Modulo by zero: return the dividend. */
+#ifdef LZ
+ tmp = LZ (b);
+#else
+ /* Find the shift count that left-aligns b's topmost set bit. */
+ for (tmp = 31; (((long) b & (1 << tmp)) == 0); tmp--)
+ ;
+ tmp = 31 - tmp;
+#endif
+
+ /* Subtract the largest shifted multiple of b that fits in a;
+ no bookkeeping needed since only the remainder is wanted. */
+ if ((b << tmp) > a)
+ {
+ a -= b << (tmp - 1);
+ }
+ else
+ {
+ a -= b << tmp;
+ }
+ }
+
+ return do_31div (a, b).rem;
+}
+#endif /* L_umodsi3 */
+
+#ifdef L_modsi3
+/* Signed 32-bit modulus; the result takes the sign of the dividend a
+ (see the final SIGNMULT, where sign is derived from a alone). */
+long
+__Mod (long a, long b) __attribute__ ((__const__));
+
+long
+__Mod (long a, long b)
+{
+ long sign = 1;
+ long res;
+
+ /* We need to handle a == -2147483648 as expected and must while
+ doing that avoid producing a sequence like "abs (a) < 0" as GCC
+ may optimize out the test. That sequence may not be obvious as
+ we call inline functions. Testing for a being negative and
+ handling (presumably much rarer than positive) enables us to get
+ a bit of optimization for an (accumulated) reduction of the
+ penalty of the 0x80000000 special-case. */
+ if (a < 0)
+ {
+ sign = -1;
+ if ((a & 0x7fffffff) == 0)
+ /* We're at 0x80000000. Tread carefully. */
+ /* Shift a one |b| toward zero so the negation below cannot
+ overflow; the remainder is unchanged by this. */
+ a += __builtin_labs (b);
+ a = -a;
+ }
+
+ res = do_31div (a, __builtin_labs (b)).rem;
+ return SIGNMULT (sign, res);
+}
+#endif /* L_modsi3 */
+#endif /* L_udivsi3 || L_divsi3 || L_umodsi3 || L_modsi3 */
+
+/*
+ * Local variables:
+ * eval: (c-set-style "gnu")
+ * indent-tabs-mode: t
+ * End:
+ */
diff --git a/gcc-4.9/libgcc/config/cris/libgcc-glibc.ver b/gcc-4.9/libgcc/config/cris/libgcc-glibc.ver
new file mode 100644
index 000000000..e35de8310
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/libgcc-glibc.ver
@@ -0,0 +1,7 @@
+GCC_4.3 {
+ __Mul
+ __Div
+ __Udiv
+ __Mod
+ __Umod
+}
diff --git a/gcc-4.9/libgcc/config/cris/mulsi3.S b/gcc-4.9/libgcc/config/cris/mulsi3.S
new file mode 100644
index 000000000..240895df0
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/mulsi3.S
@@ -0,0 +1,269 @@
+;; Copyright (C) 2001-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; Under Section 7 of GPL version 3, you are granted additional
+;; permissions described in the GCC Runtime Library Exception, version
+;; 3.1, as published by the Free Software Foundation.
+;;
+;; You should have received a copy of the GNU General Public License and
+;; a copy of the GCC Runtime Library Exception along with this program;
+;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; This code used to be expanded through interesting expansions in
+;; the machine description, compiled from this code:
+;;
+;; #ifdef L_mulsi3
+;; long __Mul (unsigned long a, unsigned long b) __attribute__ ((__const__));
+;;
+;; /* This must be compiled with the -mexpand-mul flag, to synthesize the
+;; multiplication from the mstep instructions. The check for
+;; smaller-size multiplication pays off in the order of .5-10%;
+;; estimated median 1%, depending on application.
+;; FIXME: It can be further optimized if we go to assembler code, as
+;; gcc 2.7.2 adds a few unnecessary instructions and does not put the
+;; basic blocks in optimal order. */
+;; long
+;; __Mul (unsigned long a, unsigned long b)
+;; {
+;; #if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
+;; /* In case other code is compiled without -march=v10, they will
+;; contain calls to __Mul, regardless of flags at link-time. The
+;; "else"-code below will work, but is unnecessarily slow. This
+;; sometimes cuts a few minutes off from simulation time by just
+;; returning a "mulu.d". */
+;; return a * b;
+;; #else
+;; unsigned long min;
+;;
+;; /* Get minimum via the bound insn. */
+;; min = a < b ? a : b;
+;;
+;; /* Can we omit computation of the high part? */
+;; if (min > 65535)
+;; /* No. Perform full multiplication. */
+;; return a * b;
+;; else
+;; {
+;; /* Check if both operands are within 16 bits. */
+;; unsigned long max;
+;;
+;; /* Get maximum, by knowing the minimum.
+;; This will partition a and b into max and min.
+;; This is not currently something GCC understands,
+;; so do this trick by asm. */
+;; __asm__ ("xor %1,%0\n\txor %2,%0"
+;; : "=r" (max)
+;; : "r" (b), "r" (a), "0" (min));
+;;
+;; if (max > 65535)
+;; /* Make GCC understand that only the low part of "min" will be
+;; used. */
+;; return max * (unsigned short) min;
+;; else
+;; /* Only the low parts of both operands are necessary. */
+;; return ((unsigned short) max) * (unsigned short) min;
+;; }
+;; #endif /* not __CRIS_arch_version >= 10 */
+;; }
+;; #endif /* L_mulsi3 */
+;;
+;; That approach was abandoned since the caveats outweighed the
+;; benefits. The expand-multiplication machinery is also removed, so you
+;; can't do this anymore.
+;;
+;; For doubters of there being any benefits, here are some: insensitivity to:
+;; - ABI changes (mostly for experimentation)
+;; - assembler syntax differences (mostly debug format).
+;; - insn scheduling issues.
+;; Most ABI experiments will presumably happen with arches with mul insns,
+;; so that argument doesn't really hold anymore, and it's unlikely there
+;; being new arch variants needing insn scheduling and not having mul
+;; insns.
+
+;; ELF and a.out have different syntax for local labels: the "wrong"
+;; one may not be omitted from the object.
+#undef L
+#ifdef __AOUT__
+# define L(x) x
+#else
+# define L(x) .x
+#endif
+
+ .global ___Mul
+ .type ___Mul,@function
+;; __Mul: 32x32->32-bit multiplication (low half only, hence
+;; sign-agnostic).  Operands arrive in $r10 and $r11; the product is
+;; returned in $r10.  $r9, $r12 and $r13 are used as scratch.
+___Mul:
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
+;; Can't have the mulu.d last on a cache-line (in the delay-slot of the
+;; "ret"), due to hardware bug. See documentation for -mmul-bug-workaround.
+;; Not worthwhile to conditionalize here.
+ .p2alignw 2,0x050f
+ mulu.d $r11,$r10
+ ret
+ nop
+#else
+;; See if we can avoid multiplying some of the parts, knowing
+;; they're zero.
+
+;; $r9 = min ($r10, $r11) via "bound"; if even the smaller operand
+;; needs more than 16 bits, fall through to the full multiplication.
+;; (The insn after each branch executes in its delay slot.)
+ move.d $r11,$r9
+ bound.d $r10,$r9
+ cmpu.w 65535,$r9
+ bls L(L3)
+ move.d $r10,$r12
+
+;; Nope, have to do all the parts of a 32-bit multiplication.
+;; See head comment in optabs.c:expand_doubleword_mult.
+;; Each run of 16 "mstep" insns below forms one 16x16->32 partial
+;; product.
+
+ move.d $r10,$r13
+ movu.w $r11,$r9 ; ab*cd = (a*d + b*c)<<16 + b*d
+ lslq 16,$r13
+ mstep $r9,$r13 ; d*b
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ clear.w $r10
+ test.d $r10
+ mstep $r9,$r10 ; d*a
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ movu.w $r12,$r12
+ clear.w $r11
+ move.d $r11,$r9 ; Doubles as a "test.d" preparing for the mstep.
+ mstep $r12,$r9 ; b*c
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ mstep $r12,$r9
+ add.w $r9,$r10
+ lslq 16,$r10
+ ret
+ add.d $r13,$r10
+
+L(L3):
+;; Form the maximum in $r10, by knowing the minimum, $r9.
+;; (We don't know which one of $r10 or $r11 it is.)
+;; Check if the largest operand is still just 16 bits.
+
+ xor $r9,$r10
+ xor $r11,$r10
+ cmpu.w 65535,$r10
+ bls L(L5)
+ movu.w $r9,$r13
+
+;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but c==0
+;; so we only need (a*d)<<16 + b*d with d = $r13, ab = $r10.
+;; We drop the upper part of (a*d)<<16 as we're only doing a
+;; 32-bit-result multiplication.
+
+ move.d $r10,$r9
+ lslq 16,$r9
+ mstep $r13,$r9 ; b*d
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ clear.w $r10
+ test.d $r10
+ mstep $r13,$r10 ; a*d
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ lslq 16,$r10
+ ret
+ add.d $r9,$r10
+
+L(L5):
+;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but a and c==0
+;; so b*d (with b=$r13, a=$r10) it is.
+
+ lslq 16,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ ret
+ mstep $r13,$r10 ; Last step in the ret delay slot.
+#endif
+L(Lfe1):
+ .size ___Mul,L(Lfe1)-___Mul
diff --git a/gcc-4.9/libgcc/config/cris/sfp-machine.h b/gcc-4.9/libgcc/config/cris/sfp-machine.h
new file mode 100644
index 000000000..abef282e0
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/sfp-machine.h
@@ -0,0 +1,81 @@
+/* Soft-FP definitions for CRIS.
+ Copyright (C) 2013-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* soft-fp works in 32-bit words on this 32-bit target: one word per
+ single, two per double (see the *_D macros below). */
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+/* The type of the result of a floating point comparison. This must
+ match `__libgcc_cmp_return__' in GCC for the target. */
+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
+#define CMPtype __gcc_CMPtype
+
+/* FIXME: none of the *MEAT* macros have actually been benchmarked to be
+ better than any other choice for any CRIS variant. */
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+
+/* Default generated NaN: positive sign, all fraction bits set; NaN
+ payloads of operands are preserved (_FP_KEEPNANFRACP == 1). */
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_QNANNEGATEDP 0
+#define _FP_KEEPNANFRACP 1
+
+/* Someone please check this. */
+/* If X alone has the quiet bit set, propagate Y, otherwise X.
+ NOTE(review): this prefers the operand without the quiet bit when
+ exactly one has it -- confirm that is the intended choice. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+/* Not checked. */
+#define _FP_TININESS_AFTER_ROUNDING 0
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+/* CRIS is little-endian. */
+# define __BYTE_ORDER __LITTLE_ENDIAN
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
diff --git a/gcc-4.9/libgcc/config/cris/t-cris b/gcc-4.9/libgcc/config/cris/t-cris
new file mode 100644
index 000000000..b582974a4
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/t-cris
@@ -0,0 +1,10 @@
+LIB2ADD = _udivsi3.c _divsi3.c _umodsi3.c _modsi3.c
+
+# The fixed-point arithmetic code is in one file, arit.c,
+# similar to libgcc2.c (or the old libgcc1.c). We need to
+# "split it up" with one file per define.
+# Each generated _NAME.c contains just "#define L_NAME" followed by
+# an include of arit.c, so only that one function is compiled in
+# (arit.c guards each function with #ifdef L_NAME).
+$(LIB2ADD): $(srcdir)/config/cris/arit.c
+ name=`echo $@ | sed -e 's,.*/,,' | sed -e 's,.c$$,,'`; \
+ echo "#define L$$name" > tmp-$@ \
+ && echo '#include "$<"' >> tmp-$@ \
+ && mv -f tmp-$@ $@
diff --git a/gcc-4.9/libgcc/config/cris/t-elfmulti b/gcc-4.9/libgcc/config/cris/t-elfmulti
new file mode 100644
index 000000000..308ef5105
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/t-elfmulti
@@ -0,0 +1,3 @@
+LIB2ADD_ST = $(srcdir)/config/cris/mulsi3.S $(srcdir)/config/cris/umulsidi3.S
+
+CRTSTUFF_T_CFLAGS = -moverride-best-lib-options
diff --git a/gcc-4.9/libgcc/config/cris/t-linux b/gcc-4.9/libgcc/config/cris/t-linux
new file mode 100644
index 000000000..8c7f4d442
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/t-linux
@@ -0,0 +1,2 @@
+# Override t-linux default.
+SHLIB_MAPFILES = libgcc-std.ver $(srcdir)/config/cris/libgcc-glibc.ver
diff --git a/gcc-4.9/libgcc/config/cris/umulsidi3.S b/gcc-4.9/libgcc/config/cris/umulsidi3.S
new file mode 100644
index 000000000..b5f011cab
--- /dev/null
+++ b/gcc-4.9/libgcc/config/cris/umulsidi3.S
@@ -0,0 +1,289 @@
+;; Copyright (C) 2001-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify it under
+;; the terms of the GNU General Public License as published by the Free
+;; Software Foundation; either version 3, or (at your option) any later
+;; version.
+;;
+;; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+;; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+;; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+;; for more details.
+;;
+;; Under Section 7 of GPL version 3, you are granted additional
+;; permissions described in the GCC Runtime Library Exception, version
+;; 3.1, as published by the Free Software Foundation.
+;;
+;; You should have received a copy of the GNU General Public License and
+;; a copy of the GCC Runtime Library Exception along with this program;
+;; see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; This code is derived from mulsi3.S, observing that the mstep*16-based
+;; multiplications there, from which it is formed, are actually
+;; zero-extending; in gcc-speak "umulhisi3". The difference to *this*
+;; function is just a missing top mstep*16 sequence and shifts and 64-bit
+;; additions for the high part. Compared to an implementation based on
+;; calling __Mul four times (see default implementation of umul_ppmm in
+;; longlong.h), this will complete in a time between a fourth and a third
+;; of that, assuming the value-based optimizations don't strike. If they
+;; all strike there (very often) but none here, we still win, though by a
+;; lesser margin, due to lesser total overhead.
+
+/* ELF local-label syntax: prefix with a dot. */
+#define L(x) .x
+/* Two-level expansion so macro arguments are expanded before ## pastes
+ them together. */
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+
+/* SYM (x): apply the target's user label prefix, if any, to symbol x. */
+#ifdef __USER_LABEL_PREFIX__
+# define SYM(x) CONCAT1 (__USER_LABEL_PREFIX__, x)
+#else
+# define SYM(x) x
+#endif
+
+ .global SYM(__umulsidi3)
+ .type SYM(__umulsidi3),@function
+;; __umulsidi3: unsigned 32x32->64-bit multiplication.  Operands
+;; arrive in $r10 and $r11; the low 32 bits of the product are
+;; returned in $r10 and the high 32 bits in $r11 (taken from $mof on
+;; v10 and up).  $r9, $r12 and $r13 are used as scratch.
+SYM(__umulsidi3):
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 10
+;; Can't have the mulu.d last on a cache-line, due to a hardware bug. See
+;; the documentation for -mmul-bug-workaround.
+;; Not worthwhile to conditionalize here.
+ .p2alignw 2,0x050f
+ mulu.d $r11,$r10
+ ret
+ move $mof,$r11
+#else
+;; $r9 = min ($r10, $r11) via "bound"; if even the smaller operand
+;; needs more than 16 bits, do the full four-part multiplication.
+;; (The insn after each branch executes in its delay slot.)
+ move.d $r11,$r9
+ bound.d $r10,$r9
+ cmpu.w 65535,$r9
+ bls L(L3)
+ move.d $r10,$r12
+
+ move.d $r10,$r13
+ movu.w $r11,$r9 ; ab*cd = (a*c)<<32 (a*d + b*c)<<16 + b*d
+
+;; We're called for floating point numbers very often with the "low" 16
+;; bits zero, so it's worthwhile to optimize for that.
+
+ beq L(L6) ; d == 0?
+ lslq 16,$r13
+
+ beq L(L7) ; b == 0?
+ clear.w $r10
+
+ mstep $r9,$r13 ; d*b
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+ mstep $r9,$r13
+
+L(L7):
+ test.d $r10
+ mstep $r9,$r10 ; d*a
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+ mstep $r9,$r10
+
+;; d*a in $r10, d*b in $r13, ab in $r12 and cd in $r11
+;; $r9 = d, need to do b*c and a*c; we can drop d.
+;; so $r9 is up for use and we can shift down $r11 as the mstep
+;; source for the next mstep-part.
+
+L(L8):
+ lsrq 16,$r11
+ move.d $r12,$r9
+ lslq 16,$r9
+ beq L(L9) ; b == 0?
+ mstep $r11,$r9
+
+ mstep $r11,$r9 ; b*c
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+ mstep $r11,$r9
+L(L9):
+
+;; d*a in $r10, d*b in $r13, c*b in $r9, ab in $r12 and c in $r11,
+;; need to do a*c. We want that to end up in $r11, so we shift up $r11 to
+;; now use as the destination operand. We'd need a test insn to update N
+;; to do it the other way round.
+
+ lsrq 16,$r12
+ lslq 16,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+ mstep $r12,$r11
+
+;; d*a in $r10, d*b in $r13, c*b in $r9, a*c in $r11 ($r12 free).
+;; Need (a*d + b*c)<<16 + b*d into $r10 and
+;; a*c + (a*d + b*c)>>16 plus carry from the additions into $r11.
+
+ add.d $r9,$r10 ; (a*d + b*c) - may produce a carry.
+ scs $r12 ; The carry corresponds to bit 16 of $r11.
+ lslq 16,$r12
+ add.d $r12,$r11 ; $r11 = a*c + carry from (a*d + b*c).
+
+#if defined (__CRIS_arch_version) && __CRIS_arch_version >= 8
+ swapw $r10
+ addu.w $r10,$r11 ; $r11 = a*c + (a*d + b*c) >> 16 including carry.
+ clear.w $r10 ; $r10 = (a*d + b*c) << 16
+#else
+ move.d $r10,$r9
+ lsrq 16,$r9
+ add.d $r9,$r11 ; $r11 = a*c + (a*d + b*c) >> 16 including carry.
+ lslq 16,$r10 ; $r10 = (a*d + b*c) << 16
+#endif
+ add.d $r13,$r10 ; $r10 = (a*d + b*c) << 16 + b*d - may produce a carry.
+ scs $r9
+ ret
+ add.d $r9,$r11 ; Last carry added to the high-order 32 bits.
+
+;; d == 0: both d*b and d*a are zero; join the common path at L(L8)
+;; with those partial products cleared.
+L(L6):
+ clear.d $r13
+ ba L(L8)
+ clear.d $r10
+
+;; Whole product is zero; return 0:0.
+L(L11):
+ clear.d $r10
+ ret
+ clear.d $r11
+
+L(L3):
+;; Form the maximum in $r10, by knowing the minimum, $r9.
+;; (We don't know which one of $r10 or $r11 it is.)
+;; Check if the largest operand is still just 16 bits.
+
+ xor $r9,$r10
+ xor $r11,$r10
+ cmpu.w 65535,$r10
+ bls L(L5)
+ movu.w $r9,$r13
+
+;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but c==0
+;; so we only need (a*d)<<16 + b*d with d = $r13, ab = $r10.
+;; Remember that the upper part of (a*d)<<16 goes into the lower part
+;; of $r11 and there may be a carry from adding the low 32 parts.
+ beq L(L11) ; d == 0?
+ move.d $r10,$r9
+
+ lslq 16,$r9
+ beq L(L10) ; b == 0?
+ clear.w $r10
+
+ mstep $r13,$r9 ; b*d
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+ mstep $r13,$r9
+L(L10):
+ test.d $r10
+ mstep $r13,$r10 ; a*d
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ move.d $r10,$r11
+ lsrq 16,$r11
+ lslq 16,$r10
+ add.d $r9,$r10
+ scs $r12
+ ret
+ add.d $r12,$r11
+
+L(L5):
+;; We have ab*cd = (a*c)<<32 + (a*d + b*c)<<16 + b*d, but a and c==0
+;; so b*d (with min=b=$r13, max=d=$r10) it is. As it won't overflow the
+;; 32-bit part, just set $r11 to 0.
+
+ lslq 16,$r10
+ clear.d $r11
+
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ mstep $r13,$r10
+ ret
+ mstep $r13,$r10 ; Last step in the ret delay slot.
+#endif
+L(Lfe1):
+ .size SYM(__umulsidi3),L(Lfe1)-SYM(__umulsidi3)