author     Ben Cheng <bccheng@google.com>  2014-03-25 22:37:19 -0700
committer  Ben Cheng <bccheng@google.com>  2014-03-25 22:37:19 -0700
commit     1bc5aee63eb72b341f506ad058502cd0361f0d10 (patch)
tree       c607e8252f3405424ff15bc2d00aa38dadbb2518 /gcc-4.9/gcc/config/avr
parent     283a0bf58fcf333c58a2a92c3ebbc41fb9eb1fdb (diff)
Initial checkin of GCC 4.9.0 from trunk (r208799).
Change-Id: I48a3c08bb98542aa215912a75f03c0890e497dba
Diffstat (limited to 'gcc-4.9/gcc/config/avr')
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-arch.h            156
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-c.c               402
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-devices.c         114
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-dimode.md         479
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-fixed.md          497
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-log.c             351
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-mcus.def          323
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-modes.def          33
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-protos.h          164
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-stdint.h           66
-rw-r--r--  gcc-4.9/gcc/config/avr/avr-tables.opt        766
-rw-r--r--  gcc-4.9/gcc/config/avr/avr.c               12522
-rw-r--r--  gcc-4.9/gcc/config/avr/avr.h                 606
-rw-r--r--  gcc-4.9/gcc/config/avr/avr.md               6358
-rw-r--r--  gcc-4.9/gcc/config/avr/avr.opt                84
-rw-r--r--  gcc-4.9/gcc/config/avr/avrlibc.h              30
-rw-r--r--  gcc-4.9/gcc/config/avr/builtins.def          169
-rw-r--r--  gcc-4.9/gcc/config/avr/constraints.md        238
-rw-r--r--  gcc-4.9/gcc/config/avr/driver-avr.c          150
-rw-r--r--  gcc-4.9/gcc/config/avr/elf.h                  41
-rw-r--r--  gcc-4.9/gcc/config/avr/gen-avr-mmcu-texi.c   144
-rw-r--r--  gcc-4.9/gcc/config/avr/genmultilib.awk       216
-rwxr-xr-x  gcc-4.9/gcc/config/avr/genopt.sh              59
-rw-r--r--  gcc-4.9/gcc/config/avr/predicates.md         275
-rw-r--r--  gcc-4.9/gcc/config/avr/rtems.h                27
-rw-r--r--  gcc-4.9/gcc/config/avr/stdfix.h              236
-rw-r--r--  gcc-4.9/gcc/config/avr/t-avr                  83
-rw-r--r--  gcc-4.9/gcc/config/avr/t-multilib            269
-rw-r--r--  gcc-4.9/gcc/config/avr/t-rtems                 3
29 files changed, 24861 insertions, 0 deletions
diff --git a/gcc-4.9/gcc/config/avr/avr-arch.h b/gcc-4.9/gcc/config/avr/avr-arch.h
new file mode 100644
index 000000000..6357e997c
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-arch.h
@@ -0,0 +1,156 @@
+/* Definitions of types that are used to store AVR architecture and
+ device information.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+ Contributed by Georg-Johann Lay (avr@gjlay.de)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+
+/* This enum supplies indices into the avr_arch_types[] table below. */
+
+enum avr_arch
+{
+ ARCH_UNKNOWN,
+ ARCH_AVR1,
+ ARCH_AVR2,
+ ARCH_AVR25,
+ ARCH_AVR3,
+ ARCH_AVR31,
+ ARCH_AVR35,
+ ARCH_AVR4,
+ ARCH_AVR5,
+ ARCH_AVR51,
+ ARCH_AVR6,
+ ARCH_AVRXMEGA2,
+ ARCH_AVRXMEGA4,
+ ARCH_AVRXMEGA5,
+ ARCH_AVRXMEGA6,
+ ARCH_AVRXMEGA7
+};
+
+
+/* Architecture-specific properties. */
+
+typedef struct
+{
+ /* Assembler only. */
+ int asm_only;
+
+ /* Core has 'MUL*' instructions. */
+ int have_mul;
+
+ /* Core has 'CALL' and 'JMP' instructions. */
+ int have_jmp_call;
+
+ /* Core has 'MOVW' and 'LPM Rx,Z' instructions. */
+ int have_movw_lpmx;
+
+ /* Core has 'ELPM' instructions. */
+ int have_elpm;
+
+ /* Core has 'ELPM Rx,Z' instructions. */
+ int have_elpmx;
+
+ /* Core has 'EICALL' and 'EIJMP' instructions. */
+ int have_eijmp_eicall;
+
+ /* This is an XMEGA core. */
+ int xmega_p;
+
+ /* This core has the RAMPD special function register
+ and thus also the RAMPX, RAMPY and RAMPZ registers. */
+ int have_rampd;
+
+ /* Default start of data section address for architecture. */
+ int default_data_section_start;
+
+ /* Offset between SFR address and RAM address:
+ SFR-address = RAM-address - sfr_offset */
+ int sfr_offset;
+
+ /* Architecture id to built-in define __AVR_ARCH__ (NULL -> no macro) */
+ const char *const macro;
+
+ /* Architecture name. */
+ const char *const arch_name;
+} avr_arch_t;
+
+
+/* Device-specific properties. */
+
+typedef struct
+{
+ /* Device name. */
+ const char *const name;
+
+ /* Index in avr_arch_types[]. */
+ enum avr_arch arch;
+
+ /* Must lie outside user's namespace. NULL == no macro. */
+ const char *const macro;
+
+ /* Stack pointer is 8 bits wide. */
+ int short_sp;
+
+ /* Some AVR devices have a core erratum when skipping a 2-word instruction.
+ Skip instructions are: SBRC, SBRS, SBIC, SBIS, CPSE.
+ Problems will occur with the return address if an IRQ executes during
+ the skip sequence.
+
+ A support ticket from Atmel returned the following information:
+
+ Subject: (ATTicket:644469) On AVR skip-bug core Erratum
+ From: avr@atmel.com Date: 2011-07-27
+ (Please keep the subject when replying to this mail)
+
+ This errata exists only in AT90S8515 and ATmega103 devices.
+
+ For information please refer the following respective errata links
+ http://www.atmel.com/dyn/resources/prod_documents/doc2494.pdf
+ http://www.atmel.com/dyn/resources/prod_documents/doc1436.pdf */
+
+ /* Core Erratum: Must not skip 2-word instruction. */
+ int errata_skip;
+
+ /* Start of data section. */
+ int data_section_start;
+
+ /* Number of 64k segments in the flash. */
+ int n_flash;
+
+ /* Name of device library. */
+ const char *const library_name;
+} avr_mcu_t;
+
+/* Map architecture to its texinfo string. */
+
+typedef struct
+{
+ /* Architecture ID. */
+ enum avr_arch arch;
+
+ /* texinfo source to describe the architecture. */
+ const char *texinfo;
+} avr_arch_info_t;
+
+/* Preprocessor macros to define depending on MCU type. */
+
+extern const avr_arch_t avr_arch_types[];
+extern const avr_arch_t *avr_current_arch;
+
+extern const avr_mcu_t avr_mcu_types[];
+extern const avr_mcu_t *avr_current_device;
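These two tables are the back end's device database: -mmcu=<name> is resolved
against avr_mcu_types[], and the entry's arch index then selects a row of
avr_arch_types[]. A minimal sketch of such a lookup -- an illustration using
only the declarations above, not code from this commit:

    #include <string.h>
    #include "avr-arch.h"

    /* Walk the NULL-terminated device table (see the sentinel entry at the
       end of avr-devices.c below) and return the record for NAME, or NULL
       if the device is unknown.  */
    static const avr_mcu_t *
    find_mcu (const char *name)
    {
      const avr_mcu_t *mcu;

      for (mcu = avr_mcu_types; mcu->name; mcu++)
        if (0 == strcmp (mcu->name, name))
          return mcu;

      return NULL;
    }

    /* E.g. find_mcu ("atmega128")->arch indexes avr_arch_types[].  */
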
diff --git a/gcc-4.9/gcc/config/avr/avr-c.c b/gcc-4.9/gcc/config/avr/avr-c.c
new file mode 100644
index 000000000..101d28092
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-c.c
@@ -0,0 +1,402 @@
+/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by Anatoly Sokolov (aesok@post.ru)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Not included in avr.c since this requires C front end. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "tm_p.h"
+#include "cpplib.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "target.h"
+#include "c-family/c-common.h"
+#include "langhooks.h"
+
+
+/* IDs for all the AVR builtins. */
+
+enum avr_builtin_id
+ {
+#define DEF_BUILTIN(NAME, N_ARGS, TYPE, CODE, LIBNAME) \
+ AVR_BUILTIN_ ## NAME,
+#include "builtins.def"
+#undef DEF_BUILTIN
+
+ AVR_BUILTIN_COUNT
+ };
+
+
+/* Implement `TARGET_RESOLVE_OVERLOADED_BUILTIN'. */
+
+static tree
+avr_resolve_overloaded_builtin (unsigned int iloc, tree fndecl, void *vargs)
+{
+ tree type0, type1, fold = NULL_TREE;
+ enum avr_builtin_id id = AVR_BUILTIN_COUNT;
+ location_t loc = (location_t) iloc;
+ vec<tree, va_gc> &args = * (vec<tree, va_gc>*) vargs;
+
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ default:
+ break;
+
+ case AVR_BUILTIN_ABSFX:
+ if (args.length() != 1)
+ {
+ error_at (loc, "%qs expects 1 argument but %d given",
+ "absfx", (int) args.length());
+
+ fold = error_mark_node;
+ break;
+ }
+
+ type0 = TREE_TYPE (args[0]);
+
+ if (!FIXED_POINT_TYPE_P (type0))
+ {
+ error_at (loc, "%qs expects a fixed-point value as argument",
+ "absfx");
+
+ fold = error_mark_node;
+ }
+
+ switch (TYPE_MODE (type0))
+ {
+ case QQmode: id = AVR_BUILTIN_ABSHR; break;
+ case HQmode: id = AVR_BUILTIN_ABSR; break;
+ case SQmode: id = AVR_BUILTIN_ABSLR; break;
+ case DQmode: id = AVR_BUILTIN_ABSLLR; break;
+
+ case HAmode: id = AVR_BUILTIN_ABSHK; break;
+ case SAmode: id = AVR_BUILTIN_ABSK; break;
+ case DAmode: id = AVR_BUILTIN_ABSLK; break;
+ case TAmode: id = AVR_BUILTIN_ABSLLK; break;
+
+ case UQQmode:
+ case UHQmode:
+ case USQmode:
+ case UDQmode:
+ case UHAmode:
+ case USAmode:
+ case UDAmode:
+ case UTAmode:
+ warning_at (loc, 0, "using %qs with unsigned type has no effect",
+ "absfx");
+ return args[0];
+
+ default:
+ error_at (loc, "no matching fixed-point overload found for %qs",
+ "absfx");
+
+ fold = error_mark_node;
+ break;
+ }
+
+ fold = targetm.builtin_decl (id, true);
+
+ if (fold != error_mark_node)
+ fold = build_function_call_vec (loc, vNULL, fold, &args, NULL);
+
+ break; // absfx
+
+ case AVR_BUILTIN_ROUNDFX:
+ if (args.length() != 2)
+ {
+ error_at (loc, "%qs expects 2 arguments but %d given",
+ "roundfx", (int) args.length());
+
+ fold = error_mark_node;
+ break;
+ }
+
+ type0 = TREE_TYPE (args[0]);
+ type1 = TREE_TYPE (args[1]);
+
+ if (!FIXED_POINT_TYPE_P (type0))
+ {
+ error_at (loc, "%qs expects a fixed-point value as first argument",
+ "roundfx");
+
+ fold = error_mark_node;
+ }
+
+ if (!INTEGRAL_TYPE_P (type1))
+ {
+ error_at (loc, "%qs expects an integer value as second argument",
+ "roundfx");
+
+ fold = error_mark_node;
+ }
+
+ switch (TYPE_MODE (type0))
+ {
+ case QQmode: id = AVR_BUILTIN_ROUNDHR; break;
+ case HQmode: id = AVR_BUILTIN_ROUNDR; break;
+ case SQmode: id = AVR_BUILTIN_ROUNDLR; break;
+ case DQmode: id = AVR_BUILTIN_ROUNDLLR; break;
+
+ case UQQmode: id = AVR_BUILTIN_ROUNDUHR; break;
+ case UHQmode: id = AVR_BUILTIN_ROUNDUR; break;
+ case USQmode: id = AVR_BUILTIN_ROUNDULR; break;
+ case UDQmode: id = AVR_BUILTIN_ROUNDULLR; break;
+
+ case HAmode: id = AVR_BUILTIN_ROUNDHK; break;
+ case SAmode: id = AVR_BUILTIN_ROUNDK; break;
+ case DAmode: id = AVR_BUILTIN_ROUNDLK; break;
+ case TAmode: id = AVR_BUILTIN_ROUNDLLK; break;
+
+ case UHAmode: id = AVR_BUILTIN_ROUNDUHK; break;
+ case USAmode: id = AVR_BUILTIN_ROUNDUK; break;
+ case UDAmode: id = AVR_BUILTIN_ROUNDULK; break;
+ case UTAmode: id = AVR_BUILTIN_ROUNDULLK; break;
+
+ default:
+ error_at (loc, "no matching fixed-point overload found for %qs",
+ "roundfx");
+
+ fold = error_mark_node;
+ break;
+ }
+
+ fold = targetm.builtin_decl (id, true);
+
+ if (fold != error_mark_node)
+ fold = build_function_call_vec (loc, vNULL, fold, &args, NULL);
+
+ break; // roundfx
+
+ case AVR_BUILTIN_COUNTLSFX:
+ if (args.length() != 1)
+ {
+ error_at (loc, "%qs expects 1 argument but %d given",
+ "countlsfx", (int) args.length());
+
+ fold = error_mark_node;
+ break;
+ }
+
+ type0 = TREE_TYPE (args[0]);
+
+ if (!FIXED_POINT_TYPE_P (type0))
+ {
+ error_at (loc, "%qs expects a fixed-point value as first argument",
+ "countlsfx");
+
+ fold = error_mark_node;
+ }
+
+ switch (TYPE_MODE (type0))
+ {
+ case QQmode: id = AVR_BUILTIN_COUNTLSHR; break;
+ case HQmode: id = AVR_BUILTIN_COUNTLSR; break;
+ case SQmode: id = AVR_BUILTIN_COUNTLSLR; break;
+ case DQmode: id = AVR_BUILTIN_COUNTLSLLR; break;
+
+ case UQQmode: id = AVR_BUILTIN_COUNTLSUHR; break;
+ case UHQmode: id = AVR_BUILTIN_COUNTLSUR; break;
+ case USQmode: id = AVR_BUILTIN_COUNTLSULR; break;
+ case UDQmode: id = AVR_BUILTIN_COUNTLSULLR; break;
+
+ case HAmode: id = AVR_BUILTIN_COUNTLSHK; break;
+ case SAmode: id = AVR_BUILTIN_COUNTLSK; break;
+ case DAmode: id = AVR_BUILTIN_COUNTLSLK; break;
+ case TAmode: id = AVR_BUILTIN_COUNTLSLLK; break;
+
+ case UHAmode: id = AVR_BUILTIN_COUNTLSUHK; break;
+ case USAmode: id = AVR_BUILTIN_COUNTLSUK; break;
+ case UDAmode: id = AVR_BUILTIN_COUNTLSULK; break;
+ case UTAmode: id = AVR_BUILTIN_COUNTLSULLK; break;
+
+ default:
+ error_at (loc, "no matching fixed-point overload found for %qs",
+ "countlsfx");
+
+ fold = error_mark_node;
+ break;
+ }
+
+ fold = targetm.builtin_decl (id, true);
+
+ if (fold != error_mark_node)
+ fold = build_function_call_vec (loc, vNULL, fold, &args, NULL);
+
+ break; // countlsfx
+ }
+
+ return fold;
+}
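From the user's side, the resolver above is what makes the type-generic
TR 18037 functions work: a call to absfx is replaced by the mode-specific
builtin chosen in the switch. A hedged sketch (assuming avr/stdfix.h maps
absfx to __builtin_avr_absfx, which this function then dispatches):

    #include <stdfix.h>

    _Accum
    magnitude (_Accum x)
    {
      /* _Accum has SAmode on AVR, so per the switch above this call
         resolves to AVR_BUILTIN_ABSK, i.e. __builtin_avr_absk.  */
      return absfx (x);
    }
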
+
+
+/* Implement `REGISTER_TARGET_PRAGMAS'. */
+
+void
+avr_register_target_pragmas (void)
+{
+ int i;
+
+ gcc_assert (ADDR_SPACE_GENERIC == ADDR_SPACE_RAM);
+
+ /* Register address spaces. The order must be the same as in the respective
+ enum from avr.h (or designated initializers must be used in avr.c). */
+
+ for (i = 0; i < ADDR_SPACE_COUNT; i++)
+ {
+ gcc_assert (i == avr_addrspace[i].id);
+
+ if (!ADDR_SPACE_GENERIC_P (i))
+ c_register_addr_space (avr_addrspace[i].name, avr_addrspace[i].id);
+ }
+
+ targetm.resolve_overloaded_builtin = avr_resolve_overloaded_builtin;
+}
+
+
+/* Transform LO into uppercase and write the result to UP.
+ You must provide enough space for UP. Return UP. */
+
+static char*
+avr_toupper (char *up, const char *lo)
+{
+ char *up0 = up;
+
+ for (; *lo; lo++, up++)
+ *up = TOUPPER (*lo);
+
+ *up = '\0';
+
+ return up0;
+}
+
+/* Worker function for TARGET_CPU_CPP_BUILTINS. */
+
+void
+avr_cpu_cpp_builtins (struct cpp_reader *pfile)
+{
+ int i;
+
+ builtin_define_std ("AVR");
+
+ if (avr_current_arch->macro)
+ cpp_define_formatted (pfile, "__AVR_ARCH__=%s", avr_current_arch->macro);
+ if (avr_current_device->macro)
+ cpp_define (pfile, avr_current_device->macro);
+ if (AVR_HAVE_RAMPD) cpp_define (pfile, "__AVR_HAVE_RAMPD__");
+ if (AVR_HAVE_RAMPX) cpp_define (pfile, "__AVR_HAVE_RAMPX__");
+ if (AVR_HAVE_RAMPY) cpp_define (pfile, "__AVR_HAVE_RAMPY__");
+ if (AVR_HAVE_RAMPZ) cpp_define (pfile, "__AVR_HAVE_RAMPZ__");
+ if (AVR_HAVE_ELPM) cpp_define (pfile, "__AVR_HAVE_ELPM__");
+ if (AVR_HAVE_ELPMX) cpp_define (pfile, "__AVR_HAVE_ELPMX__");
+ if (AVR_HAVE_MOVW) cpp_define (pfile, "__AVR_HAVE_MOVW__");
+ if (AVR_HAVE_LPMX) cpp_define (pfile, "__AVR_HAVE_LPMX__");
+
+ if (avr_current_arch->asm_only)
+ cpp_define (pfile, "__AVR_ASM_ONLY__");
+ if (AVR_HAVE_MUL)
+ {
+ cpp_define (pfile, "__AVR_ENHANCED__");
+ cpp_define (pfile, "__AVR_HAVE_MUL__");
+ }
+ if (avr_current_arch->have_jmp_call)
+ {
+ cpp_define (pfile, "__AVR_MEGA__");
+ cpp_define (pfile, "__AVR_HAVE_JMP_CALL__");
+ }
+ if (AVR_XMEGA)
+ cpp_define (pfile, "__AVR_XMEGA__");
+ if (avr_current_arch->have_eijmp_eicall)
+ {
+ cpp_define (pfile, "__AVR_HAVE_EIJMP_EICALL__");
+ cpp_define (pfile, "__AVR_3_BYTE_PC__");
+ }
+ else
+ {
+ cpp_define (pfile, "__AVR_2_BYTE_PC__");
+ }
+
+ if (AVR_HAVE_8BIT_SP)
+ cpp_define (pfile, "__AVR_HAVE_8BIT_SP__");
+ else
+ cpp_define (pfile, "__AVR_HAVE_16BIT_SP__");
+
+ if (avr_sp8)
+ cpp_define (pfile, "__AVR_SP8__");
+
+ if (AVR_HAVE_SPH)
+ cpp_define (pfile, "__AVR_HAVE_SPH__");
+
+ if (TARGET_NO_INTERRUPTS)
+ cpp_define (pfile, "__NO_INTERRUPTS__");
+
+ if (avr_current_device->errata_skip)
+ {
+ cpp_define (pfile, "__AVR_ERRATA_SKIP__");
+
+ if (avr_current_arch->have_jmp_call)
+ cpp_define (pfile, "__AVR_ERRATA_SKIP_JMP_CALL__");
+ }
+
+ cpp_define_formatted (pfile, "__AVR_SFR_OFFSET__=0x%x",
+ avr_current_arch->sfr_offset);
+
+#ifdef WITH_AVRLIBC
+ cpp_define (pfile, "__WITH_AVRLIBC__");
+#endif /* WITH_AVRLIBC */
+
+ /* Define builtin macros so that the user can easily query whether
+ (and which) non-generic address spaces are supported.
+ This is only supported for C. For C++, a language extension is needed
+ (as mentioned in ISO/IEC DTR 18037; Annex F.2) which is not
+ implemented in GCC so far. */
+
+ if (!strcmp (lang_hooks.name, "GNU C"))
+ {
+ for (i = 0; i < ADDR_SPACE_COUNT; i++)
+ if (!ADDR_SPACE_GENERIC_P (i)
+ /* Only supply __FLASH<n> macro if the address space is reasonable
+ for this target. The address space qualifier itself is still
+ supported, but using it will throw an error. */
+ && avr_addrspace[i].segment < avr_current_device->n_flash)
+ {
+ const char *name = avr_addrspace[i].name;
+ char *Name = (char*) alloca (1 + strlen (name));
+
+ cpp_define (pfile, avr_toupper (Name, name));
+ }
+ }
+
+ /* Define builtin macros so that the user can easily query whether or
+ not a specific builtin is available. */
+
+#define DEF_BUILTIN(NAME, N_ARGS, TYPE, CODE, LIBNAME) \
+ cpp_define (pfile, "__BUILTIN_AVR_" #NAME);
+#include "builtins.def"
+#undef DEF_BUILTIN
+
+ /* Builtin macros for the __int24 and __uint24 type. */
+
+ cpp_define_formatted (pfile, "__INT24_MAX__=8388607%s",
+ INT_TYPE_SIZE == 8 ? "LL" : "L");
+ cpp_define (pfile, "__INT24_MIN__=(-__INT24_MAX__-1)");
+ cpp_define_formatted (pfile, "__UINT24_MAX__=16777215%s",
+ INT_TYPE_SIZE == 8 ? "ULL" : "UL");
+}
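Everything avr_cpu_cpp_builtins defines is meant for feature tests in user
code and in AVR-LibC headers. A small sketch of what such tests look like
(macro names taken from the function above; the code around them is
illustrative only):

    #ifdef __AVR_HAVE_MUL__
      /* Hardware multiplier available (also implies __AVR_ENHANCED__).  */
    #endif

    #if defined __AVR_3_BYTE_PC__
      /* EIJMP/EICALL devices push 3-byte return addresses.  */
    #elif defined __AVR_2_BYTE_PC__
      /* Everything else uses 2-byte return addresses.  */
    #endif

    #ifdef __BUILTIN_AVR_ABSK
      /* The __builtin_avr_absk builtin exists in this compiler.  */
    #endif
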
diff --git a/gcc-4.9/gcc/config/avr/avr-devices.c b/gcc-4.9/gcc/config/avr/avr-devices.c
new file mode 100644
index 000000000..177f1961f
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-devices.c
@@ -0,0 +1,114 @@
+/* Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by Anatoly Sokolov (aesok@post.ru)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef IN_GEN_AVR_MMCU_TEXI
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#endif /* IN_GEN_AVR_MMCU_TEXI */
+
+/* List of all known AVR MCU architectures.
+ Ordered as in enum avr_arch from avr-arch.h. */
+
+const avr_arch_t
+avr_arch_types[] =
+{
+ /* unknown device specified */
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0060, 32, NULL, "avr2" },
+ /*
+ A M J LM E E E X R d S S O A
+ S U M PO L L I M A a t F ff r
+ M L P MV P P J E M t a R s c
+ XW M M M G P a r e h
+ X P A D t t ID */
+ { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0x0060, 32, "1", "avr1" },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x0060, 32, "2", "avr2" },
+ { 0, 0, 0, 1, 0, 0, 0, 0, 0, 0x0060, 32, "25", "avr25" },
+ { 0, 0, 1, 0, 0, 0, 0, 0, 0, 0x0060, 32, "3", "avr3" },
+ { 0, 0, 1, 0, 1, 0, 0, 0, 0, 0x0060, 32, "31", "avr31" },
+ { 0, 0, 1, 1, 0, 0, 0, 0, 0, 0x0060, 32, "35", "avr35" },
+ { 0, 1, 0, 1, 0, 0, 0, 0, 0, 0x0060, 32, "4", "avr4" },
+ { 0, 1, 1, 1, 0, 0, 0, 0, 0, 0x0060, 32, "5", "avr5" },
+ { 0, 1, 1, 1, 1, 1, 0, 0, 0, 0x0060, 32, "51", "avr51" },
+ { 0, 1, 1, 1, 1, 1, 1, 0, 0, 0x0060, 32, "6", "avr6" },
+
+ { 0, 1, 1, 1, 0, 0, 0, 1, 0, 0x2000, 0, "102", "avrxmega2" },
+ { 0, 1, 1, 1, 1, 1, 0, 1, 0, 0x2000, 0, "104", "avrxmega4" },
+ { 0, 1, 1, 1, 1, 1, 0, 1, 1, 0x2000, 0, "105", "avrxmega5" },
+ { 0, 1, 1, 1, 1, 1, 1, 1, 0, 0x2000, 0, "106", "avrxmega6" },
+ { 0, 1, 1, 1, 1, 1, 1, 1, 1, 0x2000, 0, "107", "avrxmega7" }
+};
+
+const avr_arch_info_t
+avr_texinfo[] =
+{
+ { ARCH_AVR1,
+ "This ISA is implemented by the minimal AVR core and supported "
+ "for assembler only." },
+ { ARCH_AVR2,
+ "``Classic'' devices with up to 8@tie{}KiB of program memory." },
+ { ARCH_AVR25,
+ "``Classic'' devices with up to 8@tie{}KiB of program memory and with "
+ "the @code{MOVW} instruction." },
+ { ARCH_AVR3,
+ "``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of "
+ " program memory." },
+ { ARCH_AVR31,
+ "``Classic'' devices with 128@tie{}KiB of program memory." },
+ { ARCH_AVR35,
+ "``Classic'' devices with 16@tie{}KiB up to 64@tie{}KiB of "
+ "program memory and with the @code{MOVW} instruction." },
+ { ARCH_AVR4,
+ "``Enhanced'' devices with up to 8@tie{}KiB of program memory." },
+ { ARCH_AVR5,
+ "``Enhanced'' devices with 16@tie{}KiB up to 64@tie{}KiB of "
+ "program memory." },
+ { ARCH_AVR51,
+ "``Enhanced'' devices with 128@tie{}KiB of program memory." },
+ { ARCH_AVR6,
+ "``Enhanced'' devices with 3-byte PC, i.e.@: with more than 128@tie{}KiB "
+ "of program memory." },
+ { ARCH_AVRXMEGA2,
+ "``XMEGA'' devices with more than 8@tie{}KiB and up to 64@tie{}KiB "
+ "of program memory." },
+ { ARCH_AVRXMEGA4,
+ "``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB "
+ "of program memory." },
+ { ARCH_AVRXMEGA5,
+ "``XMEGA'' devices with more than 64@tie{}KiB and up to 128@tie{}KiB "
+ "of program memory and more than 64@tie{}KiB of RAM." },
+ { ARCH_AVRXMEGA6,
+ "``XMEGA'' devices with more than 128@tie{}KiB of program memory." },
+ { ARCH_AVRXMEGA7,
+ "``XMEGA'' devices with more than 128@tie{}KiB of program memory "
+ "and more than 64@tie{}KiB of RAM." }
+};
+
+const avr_mcu_t
+avr_mcu_types[] =
+{
+#define AVR_MCU(NAME, ARCH, MACRO, SP8, ERR_SKIP, DATA_SEC, N_FLASH, LIBNAME)\
+ { NAME, ARCH, MACRO, SP8, ERR_SKIP, DATA_SEC, N_FLASH, LIBNAME },
+#include "avr-mcus.def"
+#undef AVR_MCU
+ /* End of list. */
+ { NULL, ARCH_UNKNOWN, NULL, 0, 0, 0, 0, NULL }
+};
+
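The AVR_MCU #include above is the X-macro idiom: avr-mcus.def (the start of
which closes this change) supplies one AVR_MCU (...) line per device, and
every includer temporarily defines AVR_MCU to extract the fields it needs --
the same trick builtins.def plays for the AVR_BUILTIN_* enum in avr-c.c
above. A self-contained sketch with a fictitious two-entry list:

    /* Stand-in for a .def file; not the real avr-mcus.def.  */
    #define MCU_LIST              \
      AVR_MCU ("attiny13",  25)   \
      AVR_MCU ("atmega128", 51)

    /* One expansion builds a table ...  */
    #define AVR_MCU(NAME, ARCH) { NAME, ARCH },
    static const struct { const char *name; int arch; } mcus[] =
    {
      MCU_LIST
      { 0, 0 }   /* sentinel, like the ARCH_UNKNOWN entry above */
    };
    #undef AVR_MCU

    /* ... another expansion of the same list could build an enum of IDs.  */
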
diff --git a/gcc-4.9/gcc/config/avr/avr-dimode.md b/gcc-4.9/gcc/config/avr/avr-dimode.md
new file mode 100644
index 000000000..639810518
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-dimode.md
@@ -0,0 +1,479 @@
+;; Machine description for GNU compiler,
+;; for Atmel AVR micro controllers.
+;; Copyright (C) 1998-2014 Free Software Foundation, Inc.
+;; Contributed by Georg Lay (avr@gjlay.de)
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; The purpose of this file is to provide a light-weight DImode
+;; implementation for AVR. The trouble with DImode is that tree -> RTL
+;; lowering leads to really unpleasant code for operations that don't
+;; work byte-wise like NEG, PLUS, MINUS, etc. Defining optabs entries for
+;; them won't help because the optab machinery assumes these operations
+;; are cheap and does not check if a libgcc implementation is available.
+;;
+;; The DImode insns are all straightforward -- except movdi. The approach
+;; of this implementation is to provide DImode insns without the burden of
+;; introducing movdi.
+;;
+;; The caveat is that if there are insns for some mode, there must also be a
+;; respective move insn that describes reloads. Therefore, this
+;; implementation uses an accumulator-based model with two hard-coded,
+;; accumulator-like registers
+;;
+;; A[] = reg:DI 18
+;; B[] = reg:DI 10
+;;
+;; so that no DImode insn contains pseudos or needs reloading.
+
+(define_constants
+ [(ACC_A 18)
+ (ACC_B 10)])
+
+;; Supported modes that are 8 bytes wide
+(define_mode_iterator ALL8 [DI DQ UDQ DA UDA TA UTA])
+
+(define_mode_iterator ALL8U [UDQ UDA UTA])
+(define_mode_iterator ALL8S [ DQ DA TA])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Addition
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; "adddi3"
+;; "adddq3" "addudq3"
+;; "addda3" "adduda3"
+;; "addta3" "adduta3"
+(define_expand "add<mode>3"
+ [(parallel [(match_operand:ALL8 0 "general_operand" "")
+ (match_operand:ALL8 1 "general_operand" "")
+ (match_operand:ALL8 2 "general_operand" "")])]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+
+ if (DImode == <MODE>mode
+ && s8_operand (operands[2], VOIDmode))
+ {
+ emit_move_insn (gen_rtx_REG (QImode, REG_X), operands[2]);
+ emit_insn (gen_adddi3_const8_insn ());
+ }
+ else if (const_operand (operands[2], GET_MODE (operands[2])))
+ {
+ emit_insn (gen_add<mode>3_const_insn (operands[2]));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (<MODE>mode, ACC_B), operands[2]);
+ emit_insn (gen_add<mode>3_insn ());
+ }
+
+ emit_move_insn (operands[0], acc_a);
+ DONE;
+ })
+
+;; "adddi3_insn"
+;; "adddq3_insn" "addudq3_insn"
+;; "addda3_insn" "adduda3_insn"
+;; "addta3_insn" "adduta3_insn"
+(define_insn "add<mode>3_insn"
+ [(set (reg:ALL8 ACC_A)
+ (plus:ALL8 (reg:ALL8 ACC_A)
+ (reg:ALL8 ACC_B)))]
+ "avr_have_dimode"
+ "%~call __adddi3"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
+
+(define_insn "adddi3_const8_insn"
+ [(set (reg:DI ACC_A)
+ (plus:DI (reg:DI ACC_A)
+ (sign_extend:DI (reg:QI REG_X))))]
+ "avr_have_dimode"
+ "%~call __adddi3_s8"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
+
+;; "adddi3_const_insn"
+;; "adddq3_const_insn" "addudq3_const_insn"
+;; "addda3_const_insn" "adduda3_const_insn"
+;; "addta3_const_insn" "adduta3_const_insn"
+(define_insn "add<mode>3_const_insn"
+ [(set (reg:ALL8 ACC_A)
+ (plus:ALL8 (reg:ALL8 ACC_A)
+ (match_operand:ALL8 0 "const_operand" "n Ynn")))]
+ "avr_have_dimode
+ && !s8_operand (operands[0], VOIDmode)"
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "adjust_len" "plus")
+ (set_attr "cc" "clobber")])
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Subtraction
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+;; "subdi3"
+;; "subdq3" "subudq3"
+;; "subda3" "subuda3"
+;; "subta3" "subuta3"
+(define_expand "sub<mode>3"
+ [(parallel [(match_operand:ALL8 0 "general_operand" "")
+ (match_operand:ALL8 1 "general_operand" "")
+ (match_operand:ALL8 2 "general_operand" "")])]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+
+ if (const_operand (operands[2], GET_MODE (operands[2])))
+ {
+ emit_insn (gen_sub<mode>3_const_insn (operands[2]));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (<MODE>mode, ACC_B), operands[2]);
+ emit_insn (gen_sub<mode>3_insn ());
+ }
+
+ emit_move_insn (operands[0], acc_a);
+ DONE;
+ })
+
+;; "subdi3_insn"
+;; "subdq3_insn" "subudq3_insn"
+;; "subda3_insn" "subuda3_insn"
+;; "subta3_insn" "subuta3_insn"
+(define_insn "sub<mode>3_insn"
+ [(set (reg:ALL8 ACC_A)
+ (minus:ALL8 (reg:ALL8 ACC_A)
+ (reg:ALL8 ACC_B)))]
+ "avr_have_dimode"
+ "%~call __subdi3"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "set_czn")])
+
+;; "subdi3_const_insn"
+;; "subdq3_const_insn" "subudq3_const_insn"
+;; "subda3_const_insn" "subuda3_const_insn"
+;; "subta3_const_insn" "subuta3_const_insn"
+(define_insn "sub<mode>3_const_insn"
+ [(set (reg:ALL8 ACC_A)
+ (minus:ALL8 (reg:ALL8 ACC_A)
+ (match_operand:ALL8 0 "const_operand" "n Ynn")))]
+ "avr_have_dimode"
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "adjust_len" "plus")
+ (set_attr "cc" "clobber")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Signed Saturating Addition and Subtraction
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "<code_stdname><mode>3"
+ [(set (match_operand:ALL8S 0 "general_operand" "")
+ (ss_addsub:ALL8S (match_operand:ALL8S 1 "general_operand" "")
+ (match_operand:ALL8S 2 "general_operand" "")))]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+
+ if (const_operand (operands[2], GET_MODE (operands[2])))
+ {
+ emit_insn (gen_<code_stdname><mode>3_const_insn (operands[2]));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (<MODE>mode, ACC_B), operands[2]);
+ emit_insn (gen_<code_stdname><mode>3_insn ());
+ }
+
+ emit_move_insn (operands[0], acc_a);
+ DONE;
+ })
+
+(define_insn "<code_stdname><mode>3_insn"
+ [(set (reg:ALL8S ACC_A)
+ (ss_addsub:ALL8S (reg:ALL8S ACC_A)
+ (reg:ALL8S ACC_B)))]
+ "avr_have_dimode"
+ "%~call __<code_stdname><mode>3"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
+
+(define_insn "<code_stdname><mode>3_const_insn"
+ [(set (reg:ALL8S ACC_A)
+ (ss_addsub:ALL8S (reg:ALL8S ACC_A)
+ (match_operand:ALL8S 0 "const_operand" "n Ynn")))]
+ "avr_have_dimode"
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "adjust_len" "plus")
+ (set_attr "cc" "clobber")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Unsigned Saturating Addition and Subtraction
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "<code_stdname><mode>3"
+ [(set (match_operand:ALL8U 0 "general_operand" "")
+ (us_addsub:ALL8U (match_operand:ALL8U 1 "general_operand" "")
+ (match_operand:ALL8U 2 "general_operand" "")))]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+
+ if (const_operand (operands[2], GET_MODE (operands[2])))
+ {
+ emit_insn (gen_<code_stdname><mode>3_const_insn (operands[2]));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (<MODE>mode, ACC_B), operands[2]);
+ emit_insn (gen_<code_stdname><mode>3_insn ());
+ }
+
+ emit_move_insn (operands[0], acc_a);
+ DONE;
+ })
+
+(define_insn "<code_stdname><mode>3_insn"
+ [(set (reg:ALL8U ACC_A)
+ (us_addsub:ALL8U (reg:ALL8U ACC_A)
+ (reg:ALL8U ACC_B)))]
+ "avr_have_dimode"
+ "%~call __<code_stdname><mode>3"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
+
+(define_insn "<code_stdname><mode>3_const_insn"
+ [(set (reg:ALL8U ACC_A)
+ (us_addsub:ALL8U (reg:ALL8U ACC_A)
+ (match_operand:ALL8U 0 "const_operand" "n Ynn")))]
+ "avr_have_dimode"
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "adjust_len" "plus")
+ (set_attr "cc" "clobber")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Negation
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "negdi2"
+ [(parallel [(match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" "")])]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (DImode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+ emit_insn (gen_negdi2_insn ());
+ emit_move_insn (operands[0], acc_a);
+ DONE;
+ })
+
+(define_insn "negdi2_insn"
+ [(set (reg:DI ACC_A)
+ (neg:DI (reg:DI ACC_A)))]
+ "avr_have_dimode"
+ "%~call __negdi2"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Comparison
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_expand "conditional_jump"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ "avr_have_dimode")
+
+;; "cbranchdi4"
+;; "cbranchdq4" "cbranchudq4"
+;; "cbranchda4" "cbranchuda4"
+;; "cbranchta4" "cbranchuta4"
+(define_expand "cbranch<mode>4"
+ [(parallel [(match_operand:ALL8 1 "register_operand" "")
+ (match_operand:ALL8 2 "nonmemory_operand" "")
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))])]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+
+ if (s8_operand (operands[2], VOIDmode))
+ {
+ emit_move_insn (gen_rtx_REG (QImode, REG_X), operands[2]);
+ emit_insn (gen_compare_const8_di2 ());
+ }
+ else if (const_operand (operands[2], GET_MODE (operands[2])))
+ {
+ emit_insn (gen_compare_const_<mode>2 (operands[2]));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (<MODE>mode, ACC_B), operands[2]);
+ emit_insn (gen_compare_<mode>2 ());
+ }
+
+ emit_jump_insn (gen_conditional_jump (operands[0], operands[3]));
+ DONE;
+ })
+
+;; "compare_di2"
+;; "compare_dq2" "compare_udq2"
+;; "compare_da2" "compare_uda2"
+;; "compare_ta2" "compare_uta2"
+(define_insn "compare_<mode>2"
+ [(set (cc0)
+ (compare (reg:ALL8 ACC_A)
+ (reg:ALL8 ACC_B)))]
+ "avr_have_dimode"
+ "%~call __cmpdi2"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "compare")])
+
+(define_insn "compare_const8_di2"
+ [(set (cc0)
+ (compare (reg:DI ACC_A)
+ (sign_extend:DI (reg:QI REG_X))))]
+ "avr_have_dimode"
+ "%~call __cmpdi2_s8"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "compare")])
+
+;; "compare_const_di2"
+;; "compare_const_dq2" "compare_const_udq2"
+;; "compare_const_da2" "compare_const_uda2"
+;; "compare_const_ta2" "compare_const_uta2"
+(define_insn "compare_const_<mode>2"
+ [(set (cc0)
+ (compare (reg:ALL8 ACC_A)
+ (match_operand:ALL8 0 "const_operand" "n Ynn")))
+ (clobber (match_scratch:QI 1 "=&d"))]
+ "avr_have_dimode
+ && !s8_operand (operands[0], VOIDmode)"
+ {
+ return avr_out_compare64 (insn, operands, NULL);
+ }
+ [(set_attr "adjust_len" "compare64")
+ (set_attr "cc" "compare")])
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; Shifts and Rotate
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_code_iterator di_shifts
+ [ashift ashiftrt lshiftrt rotate])
+
+;; Shift functions from libgcc are called without defining these insns,
+;; but with them we can describe their reduced register footprint.
+
+;; "ashldi3" "ashrdi3" "lshrdi3" "rotldi3"
+;; "ashldq3" "ashrdq3" "lshrdq3" "rotldq3"
+;; "ashlda3" "ashrda3" "lshrda3" "rotlda3"
+;; "ashlta3" "ashrta3" "lshrta3" "rotlta3"
+;; "ashludq3" "ashrudq3" "lshrudq3" "rotludq3"
+;; "ashluda3" "ashruda3" "lshruda3" "rotluda3"
+;; "ashluta3" "ashruta3" "lshruta3" "rotluta3"
+(define_expand "<code_stdname><mode>3"
+ [(parallel [(match_operand:ALL8 0 "general_operand" "")
+ (di_shifts:ALL8 (match_operand:ALL8 1 "general_operand" "")
+ (match_operand:QI 2 "general_operand" ""))])]
+ "avr_have_dimode"
+ {
+ rtx acc_a = gen_rtx_REG (<MODE>mode, ACC_A);
+
+ emit_move_insn (acc_a, operands[1]);
+ emit_move_insn (gen_rtx_REG (QImode, 16), operands[2]);
+ emit_insn (gen_<code_stdname><mode>3_insn ());
+ emit_move_insn (operands[0], acc_a);
+ DONE;
+ })
+
+;; "ashldi3_insn" "ashrdi3_insn" "lshrdi3_insn" "rotldi3_insn"
+;; "ashldq3_insn" "ashrdq3_insn" "lshrdq3_insn" "rotldq3_insn"
+;; "ashlda3_insn" "ashrda3_insn" "lshrda3_insn" "rotlda3_insn"
+;; "ashlta3_insn" "ashrta3_insn" "lshrta3_insn" "rotlta3_insn"
+;; "ashludq3_insn" "ashrudq3_insn" "lshrudq3_insn" "rotludq3_insn"
+;; "ashluda3_insn" "ashruda3_insn" "lshruda3_insn" "rotluda3_insn"
+;; "ashluta3_insn" "ashruta3_insn" "lshruta3_insn" "rotluta3_insn"
+(define_insn "<code_stdname><mode>3_insn"
+ [(set (reg:ALL8 ACC_A)
+ (di_shifts:ALL8 (reg:ALL8 ACC_A)
+ (reg:QI 16)))]
+ "avr_have_dimode"
+ "%~call __<code_stdname>di3"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
+
+;; "umulsidi3"
+;; "mulsidi3"
+(define_expand "<extend_u>mulsidi3"
+ [(parallel [(match_operand:DI 0 "register_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ ;; Just to mention the iterator
+ (clobber (any_extend:SI (match_dup 1)))])]
+ "avr_have_dimode"
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 22), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 18), operands[2]);
+ emit_insn (gen_<extend_u>mulsidi3_insn());
+ // Use emit_move_insn and not open-coded expand because of missing movdi
+ emit_move_insn (operands[0], gen_rtx_REG (DImode, ACC_A));
+ DONE;
+ })
+
+;; "umulsidi3_insn"
+;; "mulsidi3_insn"
+(define_insn "<extend_u>mulsidi3_insn"
+ [(set (reg:DI ACC_A)
+ (mult:DI (any_extend:DI (reg:SI 18))
+ (any_extend:DI (reg:SI 22))))
+ (clobber (reg:HI REG_X))
+ (clobber (reg:HI REG_Z))]
+ "avr_have_dimode"
+ "%~call __<extend_u>mulsidi3"
+ [(set_attr "adjust_len" "call")
+ (set_attr "cc" "clobber")])
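At the C level the accumulator scheme is invisible; it only shapes the code
the compiler emits. A sketch of the expected lowering for 64-bit addition
(register assignments inferred from the insns above, not from generated
output):

    /* With the expanders above, this should become roughly:
         a  -> accumulator A (reg:DI 18, i.e. R18..R25)
         b  -> B (reg:DI 10, i.e. R10..R17)
         %~call __adddi3
         A  -> result
       If b were a constant fitting into a signed 8-bit value, it would
       instead go to R26 (REG_X) and __adddi3_s8 would be called.  */
    long long
    add64 (long long a, long long b)
    {
      return a + b;
    }
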
diff --git a/gcc-4.9/gcc/config/avr/avr-fixed.md b/gcc-4.9/gcc/config/avr/avr-fixed.md
new file mode 100644
index 000000000..1652415b1
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-fixed.md
@@ -0,0 +1,497 @@
+;; This file contains instructions that support fixed-point operations
+;; for Atmel AVR micro controllers.
+;; Copyright (C) 2012-2014 Free Software Foundation, Inc.
+;;
+;; Contributed by Sean D'Epagnier (sean@depagnier.com)
+;; Georg-Johann Lay (avr@gjlay.de)
+
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_mode_iterator ALL1Q [QQ UQQ])
+(define_mode_iterator ALL2Q [HQ UHQ])
+(define_mode_iterator ALL2A [HA UHA])
+(define_mode_iterator ALL4A [SA USA])
+(define_mode_iterator ALL2QA [HQ UHQ HA UHA])
+(define_mode_iterator ALL4QA [SQ USQ SA USA])
+(define_mode_iterator ALL124QA [ QQ HQ HA SA SQ
+ UQQ UHQ UHA USA USQ])
+
+(define_mode_iterator ALL2S [HQ HA])
+(define_mode_iterator ALL4S [SA SQ])
+(define_mode_iterator ALL24S [ HQ HA SA SQ])
+(define_mode_iterator ALL124S [ QQ HQ HA SA SQ])
+(define_mode_iterator ALL124U [UQQ UHQ UHA USA USQ])
+
+;;; Conversions
+
+(define_mode_iterator FIXED_A
+ [QQ UQQ
+ HQ UHQ HA UHA
+ SQ USQ SA USA
+ DQ UDQ DA UDA
+ TA UTA
+ QI HI SI DI])
+
+;; Same, so that we can build cross products.
+
+(define_mode_iterator FIXED_B
+ [QQ UQQ
+ HQ UHQ HA UHA
+ SQ USQ SA USA
+ DQ UDQ DA UDA
+ TA UTA
+ QI HI SI DI])
+
+(define_insn "fract<FIXED_B:mode><FIXED_A:mode>2"
+ [(set (match_operand:FIXED_A 0 "register_operand" "=r")
+ (fract_convert:FIXED_A
+ (match_operand:FIXED_B 1 "register_operand" "r")))]
+ "<FIXED_B:MODE>mode != <FIXED_A:MODE>mode"
+ {
+ return avr_out_fract (insn, operands, true, NULL);
+ }
+ [(set_attr "cc" "clobber")
+ (set_attr "adjust_len" "sfract")])
+
+(define_insn "fractuns<FIXED_B:mode><FIXED_A:mode>2"
+ [(set (match_operand:FIXED_A 0 "register_operand" "=r")
+ (unsigned_fract_convert:FIXED_A
+ (match_operand:FIXED_B 1 "register_operand" "r")))]
+ "<FIXED_B:MODE>mode != <FIXED_A:MODE>mode"
+ {
+ return avr_out_fract (insn, operands, false, NULL);
+ }
+ [(set_attr "cc" "clobber")
+ (set_attr "adjust_len" "ufract")])
+
+;******************************************************************************
+;** Saturated Addition and Subtraction
+;******************************************************************************
+
+;; Fixme: It would be nice if we could expand the 32-bit versions to a
+;; transparent libgcc call if $2 is a REG. The problem is that it is
+;; not possible to describe that addition is commutative.
+;; And defining register classes/constraints for the involved hard
+;; registers and letting IRA do the work yields unacceptably bloated code.
+;; Thus, we have to live with the up to 11 instructions that are output
+;; for these 32-bit saturated operations.
+
+;; "ssaddqq3" "ssaddhq3" "ssaddha3" "ssaddsq3" "ssaddsa3"
+;; "sssubqq3" "sssubhq3" "sssubha3" "sssubsq3" "sssubsa3"
+(define_insn "<code_stdname><mode>3"
+ [(set (match_operand:ALL124S 0 "register_operand" "=??d,d")
+ (ss_addsub:ALL124S (match_operand:ALL124S 1 "register_operand" "<abelian>0,0")
+ (match_operand:ALL124S 2 "nonmemory_operand" "r,Ynn")))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "cc" "clobber")
+ (set_attr "adjust_len" "plus")])
+
+;; "usadduqq3" "usadduhq3" "usadduha3" "usaddusq3" "usaddusa3"
+;; "ussubuqq3" "ussubuhq3" "ussubuha3" "ussubusq3" "ussubusa3"
+(define_insn "<code_stdname><mode>3"
+ [(set (match_operand:ALL124U 0 "register_operand" "=??r,d")
+ (us_addsub:ALL124U (match_operand:ALL124U 1 "register_operand" "<abelian>0,0")
+ (match_operand:ALL124U 2 "nonmemory_operand" "r,Ynn")))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "cc" "clobber")
+ (set_attr "adjust_len" "plus")])
+
+;******************************************************************************
+;** Saturated Negation and Absolute Value
+;******************************************************************************
+
+;; Fixme: This will always result in 0. It is unclear why simplify-rtx.c
+;; says "unknown" on how to optimize this. A libgcc call would be in order,
+;; but the performance is *PLAIN* *HORROR* because the optimizers don't
+;; manage to optimize out the MEMCPY that's sprinkled all over fixed-bit.c.
+
+(define_expand "usneg<mode>2"
+ [(parallel [(match_operand:ALL124U 0 "register_operand" "")
+ (match_operand:ALL124U 1 "nonmemory_operand" "")])]
+ ""
+ {
+ emit_move_insn (operands[0], CONST0_RTX (<MODE>mode));
+ DONE;
+ })
+
+(define_insn "ssnegqq2"
+ [(set (match_operand:QQ 0 "register_operand" "=r")
+ (ss_neg:QQ (match_operand:QQ 1 "register_operand" "0")))]
+ ""
+ "neg %0\;brvc 0f\;dec %0\;0:"
+ [(set_attr "cc" "clobber")
+ (set_attr "length" "3")])
+
+(define_insn "ssabsqq2"
+ [(set (match_operand:QQ 0 "register_operand" "=r")
+ (ss_abs:QQ (match_operand:QQ 1 "register_operand" "0")))]
+ ""
+ "sbrc %0,7\;neg %0\;sbrc %0,7\;dec %0"
+ [(set_attr "cc" "clobber")
+ (set_attr "length" "4")])
+
+;; "ssneghq2" "ssnegha2" "ssnegsq2" "ssnegsa2"
+;; "ssabshq2" "ssabsha2" "ssabssq2" "ssabssa2"
+(define_expand "<code_stdname><mode>2"
+ [(set (match_dup 2)
+ (match_operand:ALL24S 1 "register_operand" ""))
+ (set (match_dup 2)
+ (ss_abs_neg:ALL24S (match_dup 2)))
+ (set (match_operand:ALL24S 0 "register_operand" "")
+ (match_dup 2))]
+ ""
+ {
+ operands[2] = gen_rtx_REG (<MODE>mode, 26 - GET_MODE_SIZE (<MODE>mode));
+ })
+
+;; "*ssneghq2" "*ssnegha2"
+;; "*ssabshq2" "*ssabsha2"
+(define_insn "*<code_stdname><mode>2"
+ [(set (reg:ALL2S 24)
+ (ss_abs_neg:ALL2S (reg:ALL2S 24)))]
+ ""
+ "%~call __<code_stdname>_2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "*ssnegsq2" "*ssnegsa2"
+;; "*ssabssq2" "*ssabssa2"
+(define_insn "*<code_stdname><mode>2"
+ [(set (reg:ALL4S 22)
+ (ss_abs_neg:ALL4S (reg:ALL4S 22)))]
+ ""
+ "%~call __<code_stdname>_4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;******************************************************************************
+; mul
+
+;; "mulqq3" "muluqq3"
+(define_expand "mul<mode>3"
+ [(parallel [(match_operand:ALL1Q 0 "register_operand" "")
+ (match_operand:ALL1Q 1 "register_operand" "")
+ (match_operand:ALL1Q 2 "register_operand" "")])]
+ ""
+ {
+ emit_insn (AVR_HAVE_MUL
+ ? gen_mul<mode>3_enh (operands[0], operands[1], operands[2])
+ : gen_mul<mode>3_nomul (operands[0], operands[1], operands[2]));
+ DONE;
+ })
+
+(define_insn "mulqq3_enh"
+ [(set (match_operand:QQ 0 "register_operand" "=r")
+ (mult:QQ (match_operand:QQ 1 "register_operand" "a")
+ (match_operand:QQ 2 "register_operand" "a")))]
+ "AVR_HAVE_MUL"
+ "fmuls %1,%2\;dec r1\;brvs 0f\;inc r1\;0:\;mov %0,r1\;clr __zero_reg__"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+(define_insn "muluqq3_enh"
+ [(set (match_operand:UQQ 0 "register_operand" "=r")
+ (mult:UQQ (match_operand:UQQ 1 "register_operand" "r")
+ (match_operand:UQQ 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2\;mov %0,r1\;clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulqq3_nomul"
+ [(set (reg:QQ 24)
+ (match_operand:QQ 1 "register_operand" ""))
+ (set (reg:QQ 25)
+ (match_operand:QQ 2 "register_operand" ""))
+ ;; "*mulqq3.call"
+ (parallel [(set (reg:QQ 23)
+ (mult:QQ (reg:QQ 24)
+ (reg:QQ 25)))
+ (clobber (reg:QI 22))
+ (clobber (reg:HI 24))])
+ (set (match_operand:QQ 0 "register_operand" "")
+ (reg:QQ 23))]
+ "!AVR_HAVE_MUL")
+
+(define_expand "muluqq3_nomul"
+ [(set (reg:UQQ 22)
+ (match_operand:UQQ 1 "register_operand" ""))
+ (set (reg:UQQ 24)
+ (match_operand:UQQ 2 "register_operand" ""))
+ ;; "*umulqihi3.call"
+ (parallel [(set (reg:HI 24)
+ (mult:HI (zero_extend:HI (reg:QI 22))
+ (zero_extend:HI (reg:QI 24))))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))])
+ (set (match_operand:UQQ 0 "register_operand" "")
+ (reg:UQQ 25))]
+ "!AVR_HAVE_MUL")
+
+(define_insn "*mulqq3.call"
+ [(set (reg:QQ 23)
+ (mult:QQ (reg:QQ 24)
+ (reg:QQ 25)))
+ (clobber (reg:QI 22))
+ (clobber (reg:HI 24))]
+ "!AVR_HAVE_MUL"
+ "%~call __mulqq3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; "mulhq3" "muluhq3"
+;; "mulha3" "muluha3"
+(define_expand "mul<mode>3"
+ [(set (reg:ALL2QA 18)
+ (match_operand:ALL2QA 1 "register_operand" ""))
+ (set (reg:ALL2QA 26)
+ (match_operand:ALL2QA 2 "register_operand" ""))
+ ;; "*mulhq3.call.enh"
+ (parallel [(set (reg:ALL2QA 24)
+ (mult:ALL2QA (reg:ALL2QA 18)
+ (reg:ALL2QA 26)))
+ (clobber (reg:HI 22))])
+ (set (match_operand:ALL2QA 0 "register_operand" "")
+ (reg:ALL2QA 24))]
+ "AVR_HAVE_MUL")
+
+;; "*mulhq3.call" "*muluhq3.call"
+;; "*mulha3.call" "*muluha3.call"
+(define_insn "*mul<mode>3.call"
+ [(set (reg:ALL2QA 24)
+ (mult:ALL2QA (reg:ALL2QA 18)
+ (reg:ALL2QA 26)))
+ (clobber (reg:HI 22))]
+ "AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; On the enhanced core, don't clobber either input and use a separate output
+
+;; "mulsa3" "mulusa3"
+(define_expand "mul<mode>3"
+ [(set (reg:ALL4A 16)
+ (match_operand:ALL4A 1 "register_operand" ""))
+ (set (reg:ALL4A 20)
+ (match_operand:ALL4A 2 "register_operand" ""))
+ (set (reg:ALL4A 24)
+ (mult:ALL4A (reg:ALL4A 16)
+ (reg:ALL4A 20)))
+ (set (match_operand:ALL4A 0 "register_operand" "")
+ (reg:ALL4A 24))]
+ "AVR_HAVE_MUL")
+
+;; "*mulsa3.call" "*mulusa3.call"
+(define_insn "*mul<mode>3.call"
+ [(set (reg:ALL4A 24)
+ (mult:ALL4A (reg:ALL4A 16)
+ (reg:ALL4A 20)))]
+ "AVR_HAVE_MUL"
+ "%~call __mul<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
+; div
+
+(define_code_iterator usdiv [udiv div])
+
+;; "divqq3" "udivuqq3"
+(define_expand "<code><mode>3"
+ [(set (reg:ALL1Q 25)
+ (match_operand:ALL1Q 1 "register_operand" ""))
+ (set (reg:ALL1Q 22)
+ (match_operand:ALL1Q 2 "register_operand" ""))
+ (parallel [(set (reg:ALL1Q 24)
+ (usdiv:ALL1Q (reg:ALL1Q 25)
+ (reg:ALL1Q 22)))
+ (clobber (reg:QI 25))])
+ (set (match_operand:ALL1Q 0 "register_operand" "")
+ (reg:ALL1Q 24))])
+
+;; "*divqq3.call" "*udivuqq3.call"
+(define_insn "*<code><mode>3.call"
+ [(set (reg:ALL1Q 24)
+ (usdiv:ALL1Q (reg:ALL1Q 25)
+ (reg:ALL1Q 22)))
+ (clobber (reg:QI 25))]
+ ""
+ "%~call __<code><mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "divhq3" "udivuhq3"
+;; "divha3" "udivuha3"
+(define_expand "<code><mode>3"
+ [(set (reg:ALL2QA 26)
+ (match_operand:ALL2QA 1 "register_operand" ""))
+ (set (reg:ALL2QA 22)
+ (match_operand:ALL2QA 2 "register_operand" ""))
+ (parallel [(set (reg:ALL2QA 24)
+ (usdiv:ALL2QA (reg:ALL2QA 26)
+ (reg:ALL2QA 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))])
+ (set (match_operand:ALL2QA 0 "register_operand" "")
+ (reg:ALL2QA 24))])
+
+;; "*divhq3.call" "*udivuhq3.call"
+;; "*divha3.call" "*udivuha3.call"
+(define_insn "*<code><mode>3.call"
+ [(set (reg:ALL2QA 24)
+ (usdiv:ALL2QA (reg:ALL2QA 26)
+ (reg:ALL2QA 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __<code><mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; Note the first parameter gets passed in already offset by 2 bytes
+
+;; "divsa3" "udivusa3"
+(define_expand "<code><mode>3"
+ [(set (reg:ALL4A 24)
+ (match_operand:ALL4A 1 "register_operand" ""))
+ (set (reg:ALL4A 18)
+ (match_operand:ALL4A 2 "register_operand" ""))
+ (parallel [(set (reg:ALL4A 22)
+ (usdiv:ALL4A (reg:ALL4A 24)
+ (reg:ALL4A 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_operand:ALL4A 0 "register_operand" "")
+ (reg:ALL4A 22))])
+
+;; "*divsa3.call" "*udivusa3.call"
+(define_insn "*<code><mode>3.call"
+ [(set (reg:ALL4A 22)
+ (usdiv:ALL4A (reg:ALL4A 24)
+ (reg:ALL4A 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __<code><mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;******************************************************************************
+;** Rounding
+;******************************************************************************
+
+;; "roundqq3" "rounduqq3"
+;; "roundhq3" "rounduhq3" "roundha3" "rounduha3"
+;; "roundsq3" "roundusq3" "roundsa3" "roundusa3"
+(define_expand "round<mode>3"
+ [(set (match_dup 4)
+ (match_operand:ALL124QA 1 "register_operand" ""))
+ (set (reg:QI 24)
+ (match_dup 5))
+ (parallel [(set (match_dup 3)
+ (unspec:ALL124QA [(match_dup 4)
+ (reg:QI 24)] UNSPEC_ROUND))
+ (clobber (match_dup 4))])
+ (set (match_operand:ALL124QA 0 "register_operand" "")
+ (match_dup 3))
+ (use (match_operand:HI 2 "nonmemory_operand" ""))]
+ ""
+ {
+ if (CONST_INT_P (operands[2])
+ && !(optimize_size
+ && 4 == GET_MODE_SIZE (<MODE>mode)))
+ {
+ emit_insn (gen_round<mode>3_const (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+
+ // Input and output of the libgcc function
+ const unsigned int regno_in[] = { -1, 22, 22, -1, 18 };
+ const unsigned int regno_out[] = { -1, 24, 24, -1, 22 };
+
+ operands[3] = gen_rtx_REG (<MODE>mode, regno_out[(size_t) GET_MODE_SIZE (<MODE>mode)]);
+ operands[4] = gen_rtx_REG (<MODE>mode, regno_in[(size_t) GET_MODE_SIZE (<MODE>mode)]);
+ operands[5] = simplify_gen_subreg (QImode, force_reg (HImode, operands[2]), HImode, 0);
+    // $2 is no longer needed, but is still referenced to satisfy the expander.
+ operands[2] = const0_rtx;
+ })
+
+;; Expand rounding with known rounding points inline so that the addend / mask
+;; will be consumed by operations with immediate operands and there is no
+;; need for a shift with a variable offset.
+
+;; "roundqq3_const" "rounduqq3_const"
+;; "roundhq3_const" "rounduhq3_const" "roundha3_const" "rounduha3_const"
+;; "roundsq3_const" "roundusq3_const" "roundsa3_const" "roundusa3_const"
+(define_insn "round<mode>3_const"
+ [(set (match_operand:ALL124QA 0 "register_operand" "=d")
+ (unspec:ALL124QA [(match_operand:ALL124QA 1 "register_operand" "0")
+ (match_operand:HI 2 "const_int_operand" "n")
+ (const_int 0)]
+ UNSPEC_ROUND))]
+ ""
+ {
+ return avr_out_round (insn, operands);
+ }
+ [(set_attr "cc" "clobber")
+ (set_attr "adjust_len" "round")])
+
+
+;; "*roundqq3.libgcc" "*rounduqq3.libgcc"
+(define_insn "*round<mode>3.libgcc"
+ [(set (reg:ALL1Q 24)
+ (unspec:ALL1Q [(reg:ALL1Q 22)
+ (reg:QI 24)] UNSPEC_ROUND))
+ (clobber (reg:ALL1Q 22))]
+ ""
+ "%~call __round<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "*roundhq3.libgcc" "*rounduhq3.libgcc"
+;; "*roundha3.libgcc" "*rounduha3.libgcc"
+(define_insn "*round<mode>3.libgcc"
+ [(set (reg:ALL2QA 24)
+ (unspec:ALL2QA [(reg:ALL2QA 22)
+ (reg:QI 24)] UNSPEC_ROUND))
+ (clobber (reg:ALL2QA 22))]
+ ""
+ "%~call __round<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "*roundsq3.libgcc" "*roundusq3.libgcc"
+;; "*roundsa3.libgcc" "*roundusa3.libgcc"
+(define_insn "*round<mode>3.libgcc"
+ [(set (reg:ALL4QA 22)
+ (unspec:ALL4QA [(reg:ALL4QA 18)
+ (reg:QI 24)] UNSPEC_ROUND))
+ (clobber (reg:ALL4QA 18))]
+ ""
+ "%~call __round<mode>3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
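The rounding expander above deliberately splits constant and variable
rounding points: constants go through round<mode>3_const and become inline
add/mask sequences, everything else falls back to the libgcc routines. Seen
from source code, both paths sit behind the TR 18037 roundfx function -- a
hedged sketch:

    #include <stdfix.h>

    _Accum
    quantize (_Accum x)
    {
      /* Constant rounding point: round<mode>3_const, no libgcc call.  */
      return roundfx (x, 3);
    }

    _Accum
    quantize_var (_Accum x, int n)
    {
      /* Variable rounding point: routed through *roundsa3.libgcc,
         i.e. a __roundsa3 call (inferred from the patterns above).  */
      return roundfx (x, n);
    }
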
diff --git a/gcc-4.9/gcc/config/avr/avr-log.c b/gcc-4.9/gcc/config/avr/avr-log.c
new file mode 100644
index 000000000..8e27cec6d
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-log.c
@@ -0,0 +1,351 @@
+/* Subroutines for log output for Atmel AVR back end.
+ Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Georg-Johann Lay (avr@gjlay.de)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "print-tree.h"
+#include "output.h"
+#include "input.h"
+#include "function.h"
+#include "tm_p.h"
+#include "tree-pass.h" /* for current_pass */
+
+/* This file supplies some functions for AVR back-end developers
+ with a printf-like interface. The functions are called through
+ macros avr_edump or avr_fdump from avr-protos.h:
+
+ avr_edump (const char *fmt, ...);
+
+ avr_fdump (FILE *stream, const char *fmt, ...);
+
+ avr_edump (fmt, ...) is a shortcut for avr_fdump (stderr, fmt, ...)
+
+ == known %-codes ==
+
+ b: bool
+ r: rtx
+ t: tree
+ T: tree (brief)
+ C: enum rtx_code
+ m: enum machine_mode
+ R: enum reg_class
+ L: insn list
+ H: location_t
+
+ == no arguments ==
+
+ A: call abort()
+ f: current_function_name()
+ F: caller (via __FUNCTION__)
+ P: Pass name and number
+ ?: Print caller, current function and pass info
+ !: Ditto, but only print if in a pass with static pass number,
+ else return.
+
+ == same as printf ==
+
+ %: %
+ c: char
+ s: string
+ d: int (decimal)
+ x: int (hex)
+*/
+
+/* Set according to -mlog= option. */
+avr_log_t avr_log;
+
+/* The caller, as given by __FUNCTION__. */
+static const char *avr_log_caller = "?";
+
+/* The worker function implementing the %-codes */
+static void avr_log_vadump (FILE*, const char*, va_list);
+
+/* As we have no variadic macros, avr_edump maps to a call to
+ avr_log_set_caller_e which saves __FUNCTION__ to avr_log_caller and
+ returns a function pointer to avr_log_fdump_e. avr_log_fdump_e
+ gets the printf-like arguments and calls avr_log_vadump, the
+ worker function. avr_fdump works the same way. */
+
+/* Provide avr_log_fdump_e/f so that avr_log_set_caller_e/_f can return
+ their address. */
+
+static int
+avr_log_fdump_e (const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start (ap, fmt);
+ avr_log_vadump (stderr, fmt, ap);
+ va_end (ap);
+
+ return 1;
+}
+
+static int
+avr_log_fdump_f (FILE *stream, const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start (ap, fmt);
+ if (stream)
+ avr_log_vadump (stream, fmt, ap);
+ va_end (ap);
+
+ return 1;
+}
+
+/* Macros avr_edump/avr_fdump map to calls of the following two functions,
+ respectively. You don't need to call them directly. */
+
+int (*
+avr_log_set_caller_e (const char *caller)
+ )(const char*, ...)
+{
+ avr_log_caller = caller;
+
+ return avr_log_fdump_e;
+}
+
+int (*
+avr_log_set_caller_f (const char *caller)
+ )(FILE*, const char*, ...)
+{
+ avr_log_caller = caller;
+
+ return avr_log_fdump_f;
+}
+
+
+/* Worker function implementing the %-codes and forwarding to
+ respective print/dump function. */
+
+static void
+avr_log_vadump (FILE *file, const char *fmt, va_list ap)
+{
+ char bs[3] = {'\\', '?', '\0'};
+
+ while (*fmt)
+ {
+ switch (*fmt++)
+ {
+ default:
+ fputc (*(fmt-1), file);
+ break;
+
+ case '\\':
+ bs[1] = *fmt++;
+ fputs (bs, file);
+ break;
+
+ case '%':
+ switch (*fmt++)
+ {
+ case '%':
+ fputc ('%', file);
+ break;
+
+ case 't':
+ {
+ tree t = va_arg (ap, tree);
+ if (NULL_TREE == t)
+ fprintf (file, "<NULL-TREE>");
+ else
+ {
+ if (stderr == file)
+ debug_tree (t);
+ else
+ {
+ print_node (file, "", t, 0);
+ putc ('\n', file);
+ }
+ }
+ break;
+ }
+
+ case 'T':
+ print_node_brief (file, "", va_arg (ap, tree), 3);
+ break;
+
+ case 'd':
+ fprintf (file, "%d", va_arg (ap, int));
+ break;
+
+ case 'x':
+ fprintf (file, "%x", va_arg (ap, int));
+ break;
+
+ case 'b':
+ fprintf (file, "%s", va_arg (ap, int) ? "true" : "false");
+ break;
+
+ case 'c':
+ fputc (va_arg (ap, int), file);
+ break;
+
+ case 'r':
+ print_inline_rtx (file, va_arg (ap, rtx), 0);
+ break;
+
+ case 'L':
+ {
+ rtx insn = va_arg (ap, rtx);
+
+ while (insn)
+ {
+ print_inline_rtx (file, insn, 0);
+ fprintf (file, "\n");
+ insn = NEXT_INSN (insn);
+ }
+ break;
+ }
+
+ case 'f':
+ if (cfun && cfun->decl)
+ fputs (current_function_name(), file);
+ break;
+
+ case 's':
+ {
+ const char *str = va_arg (ap, char*);
+ fputs (str ? str : "(null)", file);
+ }
+ break;
+
+ case 'm':
+ fputs (GET_MODE_NAME ((enum machine_mode) va_arg (ap, int)),
+ file);
+ break;
+
+ case 'C':
+ fputs (rtx_name[va_arg (ap, int)], file);
+ break;
+
+ case 'R':
+ fputs (reg_class_names[va_arg (ap, int)], file);
+ break;
+
+ case 'F':
+ fputs (avr_log_caller, file);
+ break;
+
+ case 'H':
+ {
+ location_t loc = va_arg (ap, location_t);
+
+ if (BUILTINS_LOCATION == loc)
+ fprintf (file, "<BUILTIN-LOCATION>");
+ else if (UNKNOWN_LOCATION == loc)
+ fprintf (file, "<UNKNOWN-LOCATION>");
+ else
+ fprintf (file, "%s:%d",
+ LOCATION_FILE (loc), LOCATION_LINE (loc));
+
+ break;
+ }
+
+ case '!':
+ if (!current_pass)
+ return;
+ /* FALLTHRU */
+
+ case '?':
+ avr_log_fdump_f (file, "%F[%f:%P]");
+ break;
+
+ case 'P':
+ if (current_pass)
+ fprintf (file, "%s(%d)",
+ current_pass->name,
+ current_pass->static_pass_number);
+ else
+ fprintf (file, "pass=?");
+
+ break;
+
+ case 'A':
+ fflush (file);
+ abort();
+
+ default:
+ /* Unknown %-code: Stop printing */
+
+ fprintf (file, "??? %%%c ???%s\n", *(fmt-1), fmt);
+ fmt = "";
+
+ break;
+ }
+ break; /* % */
+ }
+ }
+
+ fflush (file);
+}
+
+
+/* Called from avr.c:avr_option_override().
+ Parse argument of -mlog= and set respective fields in avr_log. */
+
+void
+avr_log_set_avr_log (void)
+{
+ bool all = TARGET_ALL_DEBUG != 0;
+
+ if (all || avr_log_details)
+ {
+      /* Adding ',' at the beginning and end of the string makes searching easier. */
+
+ char *str = (char*) alloca (3 + strlen (avr_log_details));
+ bool info;
+
+ str[0] = ',';
+ strcat (stpcpy (str+1, avr_log_details), ",");
+
+ all |= NULL != strstr (str, ",all,");
+ info = NULL != strstr (str, ",?,");
+
+ if (info)
+ fprintf (stderr, "\n-mlog=");
+
+#define SET_DUMP_DETAIL(S) \
+ do { \
+ avr_log.S = (all || NULL != strstr (str, "," #S ",")); \
+ if (info) \
+ fprintf (stderr, #S ","); \
+ } while (0)
+
+ SET_DUMP_DETAIL (address_cost);
+ SET_DUMP_DETAIL (builtin);
+ SET_DUMP_DETAIL (constraints);
+ SET_DUMP_DETAIL (legitimate_address_p);
+ SET_DUMP_DETAIL (legitimize_address);
+ SET_DUMP_DETAIL (legitimize_reload_address);
+ SET_DUMP_DETAIL (progmem);
+ SET_DUMP_DETAIL (rtx_costs);
+
+#undef SET_DUMP_DETAIL
+
+ if (info)
+ fprintf (stderr, "?\n\n");
+ }
+}
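+
+/* Illustration (hypothetical command lines):
+
+       avr-gcc -mlog=rtx_costs,progmem ...  sets avr_log.rtx_costs and
+                                            avr_log.progmem,
+       avr-gcc -mlog=all ...                sets all of the details above,
+       avr-gcc -mlog=? ...                  prints the known keywords.  */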
diff --git a/gcc-4.9/gcc/config/avr/avr-mcus.def b/gcc-4.9/gcc/config/avr/avr-mcus.def
new file mode 100644
index 000000000..d068f5e80
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-mcus.def
@@ -0,0 +1,323 @@
+/* AVR MCUs.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* List of all known AVR MCU types. If updated, cd to $(builddir)/gcc and run
+
+ $ make avr-mcus
+
+ This will regenerate / update the following source files:
+
+ - $(srcdir)/config/avr/t-multilib
+ - $(srcdir)/config/avr/avr-tables.opt
+ - $(srcdir)/doc/avr-mmcu.texi
+
+   After that, rebuild everything and check in the new sources to the repo.
+ The device list below has to be kept in sync with AVR-LibC.
+
+
+ Before including this file, define a macro:
+
+ AVR_MCU (NAME, ARCH, MACRO, SHORT_SP, ERRATA_SKIP, DATA_SEC, N_FLASH,
+ LIBRARY_NAME)
+
+ where the arguments are the fields of avr_mcu_t:
+
+ NAME Accept -mmcu=<NAME>
+
+ ARCH Specifies the multilib variant together with SHORT_SP
+
+ MACRO If NULL, this is a core and not a device. If non-NULL,
+ supply respective built-in macro.
+
+ SHORT_SP The device / multilib has an 8-bit stack pointer (no SPH).
+
+ ERRATA_SKIP Apply work-around for the "skip 32-bit instruction"
+                 silicon bug: Don't skip 32-bit instructions.
+
+ DATA_SEC First address of SRAM, used in -Tdata= by the driver.
+
+ N_FLASH Number of 64 KiB flash segments, rounded up.
+
+   LIBRARY_NAME  Used by the driver to link startup code from avr-libc
+                 in the form of crt<LIBRARY_NAME>.o
+
+ "avr2" must be first for the "0" default to work as intended. */
+
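+/* Illustration (hypothetical includer): an X-macro expansion collecting
+   all device names could look like
+
+       #define AVR_MCU(NAME, ARCH, MACRO, SHORT_SP, ERRATA_SKIP,  \
+                       DATA_SEC, N_FLASH, LIBRARY_NAME)  NAME,
+       static const char *const mcu_name[] = {
+       #include "avr-mcus.def"
+       };
+       #undef AVR_MCU
+
+   which expands every AVR_MCU entry below to its NAME string.  */
+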
+/* Classic, <= 8K. */
+AVR_MCU ("avr2", ARCH_AVR2, NULL, 0, 1, 0x0060, 6, "s8515")
+AVR_MCU ("at90s2313", ARCH_AVR2, "__AVR_AT90S2313__", 1, 0, 0x0060, 1, "s2313")
+AVR_MCU ("at90s2323", ARCH_AVR2, "__AVR_AT90S2323__", 1, 0, 0x0060, 1, "s2323")
+AVR_MCU ("at90s2333", ARCH_AVR2, "__AVR_AT90S2333__", 1, 0, 0x0060, 1, "s2333")
+AVR_MCU ("at90s2343", ARCH_AVR2, "__AVR_AT90S2343__", 1, 0, 0x0060, 1, "s2343")
+AVR_MCU ("attiny22", ARCH_AVR2, "__AVR_ATtiny22__", 1, 0, 0x0060, 1, "tn22")
+AVR_MCU ("attiny26", ARCH_AVR2, "__AVR_ATtiny26__", 1, 0, 0x0060, 1, "tn26")
+AVR_MCU ("at90s4414", ARCH_AVR2, "__AVR_AT90S4414__", 0, 0, 0x0060, 1, "s4414")
+AVR_MCU ("at90s4433", ARCH_AVR2, "__AVR_AT90S4433__", 1, 0, 0x0060, 1, "s4433")
+AVR_MCU ("at90s4434", ARCH_AVR2, "__AVR_AT90S4434__", 0, 0, 0x0060, 1, "s4434")
+AVR_MCU ("at90s8515", ARCH_AVR2, "__AVR_AT90S8515__", 0, 1, 0x0060, 1, "s8515")
+AVR_MCU ("at90c8534", ARCH_AVR2, "__AVR_AT90C8534__", 0, 0, 0x0060, 1, "c8534")
+AVR_MCU ("at90s8535", ARCH_AVR2, "__AVR_AT90S8535__", 0, 0, 0x0060, 1, "s8535")
+/* Classic + MOVW, <= 8K. */
+AVR_MCU ("avr25", ARCH_AVR25, NULL, 0, 0, 0x0060, 1, "tn85")
+AVR_MCU ("ata6289", ARCH_AVR25, "__AVR_ATA6289__", 0, 0, 0x0100, 1, "a6289")
+AVR_MCU ("ata5272", ARCH_AVR25, "__AVR_ATA5272__", 0, 0, 0x0100, 1, "a5272")
+AVR_MCU ("attiny13", ARCH_AVR25, "__AVR_ATtiny13__", 1, 0, 0x0060, 1, "tn13")
+AVR_MCU ("attiny13a", ARCH_AVR25, "__AVR_ATtiny13A__", 1, 0, 0x0060, 1, "tn13a")
+AVR_MCU ("attiny2313", ARCH_AVR25, "__AVR_ATtiny2313__", 1, 0, 0x0060, 1, "tn2313")
+AVR_MCU ("attiny2313a", ARCH_AVR25, "__AVR_ATtiny2313A__", 1, 0, 0x0060, 1, "tn2313a")
+AVR_MCU ("attiny24", ARCH_AVR25, "__AVR_ATtiny24__", 1, 0, 0x0060, 1, "tn24")
+AVR_MCU ("attiny24a", ARCH_AVR25, "__AVR_ATtiny24A__", 1, 0, 0x0060, 1, "tn24a")
+AVR_MCU ("attiny4313", ARCH_AVR25, "__AVR_ATtiny4313__", 0, 0, 0x0060, 1, "tn4313")
+AVR_MCU ("attiny44", ARCH_AVR25, "__AVR_ATtiny44__", 0, 0, 0x0060, 1, "tn44")
+AVR_MCU ("attiny44a", ARCH_AVR25, "__AVR_ATtiny44A__", 0, 0, 0x0060, 1, "tn44a")
+AVR_MCU ("attiny84", ARCH_AVR25, "__AVR_ATtiny84__", 0, 0, 0x0060, 1, "tn84")
+AVR_MCU ("attiny84a", ARCH_AVR25, "__AVR_ATtiny84A__", 0, 0, 0x0060, 1, "tn84")
+AVR_MCU ("attiny25", ARCH_AVR25, "__AVR_ATtiny25__", 1, 0, 0x0060, 1, "tn25")
+AVR_MCU ("attiny45", ARCH_AVR25, "__AVR_ATtiny45__", 0, 0, 0x0060, 1, "tn45")
+AVR_MCU ("attiny85", ARCH_AVR25, "__AVR_ATtiny85__", 0, 0, 0x0060, 1, "tn85")
+AVR_MCU ("attiny261", ARCH_AVR25, "__AVR_ATtiny261__", 1, 0, 0x0060, 1, "tn261")
+AVR_MCU ("attiny261a", ARCH_AVR25, "__AVR_ATtiny261A__", 1, 0, 0x0060, 1, "tn261a")
+AVR_MCU ("attiny461", ARCH_AVR25, "__AVR_ATtiny461__", 0, 0, 0x0060, 1, "tn461")
+AVR_MCU ("attiny461a", ARCH_AVR25, "__AVR_ATtiny461A__", 0, 0, 0x0060, 1, "tn461a")
+AVR_MCU ("attiny861", ARCH_AVR25, "__AVR_ATtiny861__", 0, 0, 0x0060, 1, "tn861")
+AVR_MCU ("attiny861a", ARCH_AVR25, "__AVR_ATtiny861A__", 0, 0, 0x0060, 1, "tn861a")
+AVR_MCU ("attiny43u", ARCH_AVR25, "__AVR_ATtiny43U__", 0, 0, 0x0060, 1, "tn43u")
+AVR_MCU ("attiny87", ARCH_AVR25, "__AVR_ATtiny87__", 0, 0, 0x0100, 1, "tn87")
+AVR_MCU ("attiny48", ARCH_AVR25, "__AVR_ATtiny48__", 0, 0, 0x0100, 1, "tn48")
+AVR_MCU ("attiny88", ARCH_AVR25, "__AVR_ATtiny88__", 0, 0, 0x0100, 1, "tn88")
+AVR_MCU ("at86rf401", ARCH_AVR25, "__AVR_AT86RF401__", 0, 0, 0x0060, 1, "86401")
+/* Classic, > 8K, <= 64K. */
+AVR_MCU ("avr3", ARCH_AVR3, NULL, 0, 0, 0x0060, 1, "43355")
+AVR_MCU ("at43usb355", ARCH_AVR3, "__AVR_AT43USB355__", 0, 0, 0x0060, 1, "43355")
+AVR_MCU ("at76c711", ARCH_AVR3, "__AVR_AT76C711__", 0, 0, 0x0060, 1, "76711")
+/* Classic, == 128K. */
+AVR_MCU ("avr31", ARCH_AVR31, NULL, 0, 1, 0x0060, 2, "m103")
+AVR_MCU ("atmega103", ARCH_AVR31, "__AVR_ATmega103__", 0, 1, 0x0060, 2, "m103")
+AVR_MCU ("at43usb320", ARCH_AVR31, "__AVR_AT43USB320__", 0, 0, 0x0060, 2, "43320")
+/* Classic + MOVW + JMP/CALL. */
+AVR_MCU ("avr35", ARCH_AVR35, NULL, 0, 0, 0x0100, 1, "usb162")
+AVR_MCU ("ata5505", ARCH_AVR35, "__AVR_ATA5505__", 0, 0, 0x0100, 1, "a5505")
+AVR_MCU ("at90usb82", ARCH_AVR35, "__AVR_AT90USB82__", 0, 0, 0x0100, 1, "usb82")
+AVR_MCU ("at90usb162", ARCH_AVR35, "__AVR_AT90USB162__", 0, 0, 0x0100, 1, "usb162")
+AVR_MCU ("atmega8u2", ARCH_AVR35, "__AVR_ATmega8U2__", 0, 0, 0x0100, 1, "m8u2")
+AVR_MCU ("atmega16u2", ARCH_AVR35, "__AVR_ATmega16U2__", 0, 0, 0x0100, 1, "m16u2")
+AVR_MCU ("atmega32u2", ARCH_AVR35, "__AVR_ATmega32U2__", 0, 0, 0x0100, 1, "m32u2")
+AVR_MCU ("attiny167", ARCH_AVR35, "__AVR_ATtiny167__", 0, 0, 0x0100, 1, "tn167")
+AVR_MCU ("attiny1634", ARCH_AVR35, "__AVR_ATtiny1634__", 0, 0, 0x0100, 1, "tn1634")
+/* Enhanced, <= 8K. */
+AVR_MCU ("avr4", ARCH_AVR4, NULL, 0, 0, 0x0060, 1, "m8")
+AVR_MCU ("ata6285", ARCH_AVR4, "__AVR_ATA6285__", 0, 0, 0x0100, 1, "a6285")
+AVR_MCU ("ata6286", ARCH_AVR4, "__AVR_ATA6286__", 0, 0, 0x0100, 1, "a6286")
+AVR_MCU ("atmega8", ARCH_AVR4, "__AVR_ATmega8__", 0, 0, 0x0060, 1, "m8")
+AVR_MCU ("atmega8a", ARCH_AVR4, "__AVR_ATmega8A__", 0, 0, 0x0060, 1, "m8a")
+AVR_MCU ("atmega48", ARCH_AVR4, "__AVR_ATmega48__", 0, 0, 0x0100, 1, "m48")
+AVR_MCU ("atmega48a", ARCH_AVR4, "__AVR_ATmega48A__", 0, 0, 0x0100, 1, "m48a")
+AVR_MCU ("atmega48p", ARCH_AVR4, "__AVR_ATmega48P__", 0, 0, 0x0100, 1, "m48p")
+AVR_MCU ("atmega48pa", ARCH_AVR4, "__AVR_ATmega48PA__", 0, 0, 0x0100, 1, "m48pa")
+AVR_MCU ("atmega88", ARCH_AVR4, "__AVR_ATmega88__", 0, 0, 0x0100, 1, "m88")
+AVR_MCU ("atmega88a", ARCH_AVR4, "__AVR_ATmega88A__", 0, 0, 0x0100, 1, "m88a")
+AVR_MCU ("atmega88p", ARCH_AVR4, "__AVR_ATmega88P__", 0, 0, 0x0100, 1, "m88p")
+AVR_MCU ("atmega88pa", ARCH_AVR4, "__AVR_ATmega88PA__", 0, 0, 0x0100, 1, "m88pa")
+AVR_MCU ("atmega8515", ARCH_AVR4, "__AVR_ATmega8515__", 0, 0, 0x0060, 1, "m8515")
+AVR_MCU ("atmega8535", ARCH_AVR4, "__AVR_ATmega8535__", 0, 0, 0x0060, 1, "m8535")
+AVR_MCU ("atmega8hva", ARCH_AVR4, "__AVR_ATmega8HVA__", 0, 0, 0x0100, 1, "m8hva")
+AVR_MCU ("at90pwm1", ARCH_AVR4, "__AVR_AT90PWM1__", 0, 0, 0x0100, 1, "90pwm1")
+AVR_MCU ("at90pwm2", ARCH_AVR4, "__AVR_AT90PWM2__", 0, 0, 0x0100, 1, "90pwm2")
+AVR_MCU ("at90pwm2b", ARCH_AVR4, "__AVR_AT90PWM2B__", 0, 0, 0x0100, 1, "90pwm2b")
+AVR_MCU ("at90pwm3", ARCH_AVR4, "__AVR_AT90PWM3__", 0, 0, 0x0100, 1, "90pwm3")
+AVR_MCU ("at90pwm3b", ARCH_AVR4, "__AVR_AT90PWM3B__", 0, 0, 0x0100, 1, "90pwm3b")
+AVR_MCU ("at90pwm81", ARCH_AVR4, "__AVR_AT90PWM81__", 0, 0, 0x0100, 1, "90pwm81")
+/* Enhanced, > 8K, <= 64K. */
+AVR_MCU ("avr5", ARCH_AVR5, NULL, 0, 0, 0x0060, 1, "m16")
+AVR_MCU ("ata5790", ARCH_AVR5, "__AVR_ATA5790__", 0, 0, 0x0100, 1, "a5790")
+AVR_MCU ("ata5790n", ARCH_AVR5, "__AVR_ATA5790N__", 0, 0, 0x0100, 1, "a5790n")
+AVR_MCU ("ata5795", ARCH_AVR5, "__AVR_ATA5795__", 0, 0, 0x0100, 1, "a5795")
+AVR_MCU ("atmega16", ARCH_AVR5, "__AVR_ATmega16__", 0, 0, 0x0060, 1, "m16")
+AVR_MCU ("atmega16a", ARCH_AVR5, "__AVR_ATmega16A__", 0, 0, 0x0060, 1, "m16a")
+AVR_MCU ("atmega161", ARCH_AVR5, "__AVR_ATmega161__", 0, 0, 0x0060, 1, "m161")
+AVR_MCU ("atmega162", ARCH_AVR5, "__AVR_ATmega162__", 0, 0, 0x0100, 1, "m162")
+AVR_MCU ("atmega163", ARCH_AVR5, "__AVR_ATmega163__", 0, 0, 0x0060, 1, "m163")
+AVR_MCU ("atmega164a", ARCH_AVR5, "__AVR_ATmega164A__", 0, 0, 0x0100, 1, "m164a")
+AVR_MCU ("atmega164p", ARCH_AVR5, "__AVR_ATmega164P__", 0, 0, 0x0100, 1, "m164p")
+AVR_MCU ("atmega164pa", ARCH_AVR5, "__AVR_ATmega164PA__", 0, 0, 0x0100, 1, "m164pa")
+AVR_MCU ("atmega165", ARCH_AVR5, "__AVR_ATmega165__", 0, 0, 0x0100, 1, "m165")
+AVR_MCU ("atmega165a", ARCH_AVR5, "__AVR_ATmega165A__", 0, 0, 0x0100, 1, "m165a")
+AVR_MCU ("atmega165p", ARCH_AVR5, "__AVR_ATmega165P__", 0, 0, 0x0100, 1, "m165p")
+AVR_MCU ("atmega165pa", ARCH_AVR5, "__AVR_ATmega165PA__", 0, 0, 0x0100, 1, "m165pa")
+AVR_MCU ("atmega168", ARCH_AVR5, "__AVR_ATmega168__", 0, 0, 0x0100, 1, "m168")
+AVR_MCU ("atmega168a", ARCH_AVR5, "__AVR_ATmega168A__", 0, 0, 0x0100, 1, "m168a")
+AVR_MCU ("atmega168p", ARCH_AVR5, "__AVR_ATmega168P__", 0, 0, 0x0100, 1, "m168p")
+AVR_MCU ("atmega168pa", ARCH_AVR5, "__AVR_ATmega168PA__", 0, 0, 0x0100, 1, "m168pa")
+AVR_MCU ("atmega169", ARCH_AVR5, "__AVR_ATmega169__", 0, 0, 0x0100, 1, "m169")
+AVR_MCU ("atmega169a", ARCH_AVR5, "__AVR_ATmega169A__", 0, 0, 0x0100, 1, "m169a")
+AVR_MCU ("atmega169p", ARCH_AVR5, "__AVR_ATmega169P__", 0, 0, 0x0100, 1, "m169p")
+AVR_MCU ("atmega169pa", ARCH_AVR5, "__AVR_ATmega169PA__", 0, 0, 0x0100, 1, "m169pa")
+AVR_MCU ("atmega16hvb", ARCH_AVR5, "__AVR_ATmega16HVB__", 0, 0, 0x0100, 1, "m16hvb")
+AVR_MCU ("atmega16hvbrevb", ARCH_AVR5, "__AVR_ATmega16HVBREVB__", 0, 0, 0x0100, 1, "m16hvbrevb")
+AVR_MCU ("atmega16m1", ARCH_AVR5, "__AVR_ATmega16M1__", 0, 0, 0x0100, 1, "m16m1")
+AVR_MCU ("atmega16u4", ARCH_AVR5, "__AVR_ATmega16U4__", 0, 0, 0x0100, 1, "m16u4")
+AVR_MCU ("atmega26hvg", ARCH_AVR5, "__AVR_ATmega26HVG__", 0, 0, 0x0100, 1, "m26hvg")
+AVR_MCU ("atmega32a", ARCH_AVR5, "__AVR_ATmega32A__", 0, 0, 0x0060, 1, "m32a")
+AVR_MCU ("atmega32", ARCH_AVR5, "__AVR_ATmega32__", 0, 0, 0x0060, 1, "m32")
+AVR_MCU ("atmega323", ARCH_AVR5, "__AVR_ATmega323__", 0, 0, 0x0060, 1, "m323")
+AVR_MCU ("atmega324a", ARCH_AVR5, "__AVR_ATmega324A__", 0, 0, 0x0100, 1, "m324a")
+AVR_MCU ("atmega324p", ARCH_AVR5, "__AVR_ATmega324P__", 0, 0, 0x0100, 1, "m324p")
+AVR_MCU ("atmega324pa", ARCH_AVR5, "__AVR_ATmega324PA__", 0, 0, 0x0100, 1, "m324pa")
+AVR_MCU ("atmega325", ARCH_AVR5, "__AVR_ATmega325__", 0, 0, 0x0100, 1, "m325")
+AVR_MCU ("atmega325a", ARCH_AVR5, "__AVR_ATmega325A__", 0, 0, 0x0100, 1, "m325a")
+AVR_MCU ("atmega325p", ARCH_AVR5, "__AVR_ATmega325P__", 0, 0, 0x0100, 1, "m325p")
+AVR_MCU ("atmega3250", ARCH_AVR5, "__AVR_ATmega3250__", 0, 0, 0x0100, 1, "m3250")
+AVR_MCU ("atmega3250a", ARCH_AVR5, "__AVR_ATmega3250A__", 0, 0, 0x0100, 1, "m3250a")
+AVR_MCU ("atmega3250p", ARCH_AVR5, "__AVR_ATmega3250P__", 0, 0, 0x0100, 1, "m3250p")
+AVR_MCU ("atmega3250pa", ARCH_AVR5, "__AVR_ATmega3250PA__", 0, 0, 0x0100, 1, "m3250pa")
+AVR_MCU ("atmega328", ARCH_AVR5, "__AVR_ATmega328__", 0, 0, 0x0100, 1, "m328")
+AVR_MCU ("atmega328p", ARCH_AVR5, "__AVR_ATmega328P__", 0, 0, 0x0100, 1, "m328p")
+AVR_MCU ("atmega329", ARCH_AVR5, "__AVR_ATmega329__", 0, 0, 0x0100, 1, "m329")
+AVR_MCU ("atmega329a", ARCH_AVR5, "__AVR_ATmega329A__", 0, 0, 0x0100, 1, "m329a")
+AVR_MCU ("atmega329p", ARCH_AVR5, "__AVR_ATmega329P__", 0, 0, 0x0100, 1, "m329p")
+AVR_MCU ("atmega329pa", ARCH_AVR5, "__AVR_ATmega329PA__", 0, 0, 0x0100, 1, "m329pa")
+AVR_MCU ("atmega3290", ARCH_AVR5, "__AVR_ATmega3290__", 0, 0, 0x0100, 1, "m3290")
+AVR_MCU ("atmega3290a", ARCH_AVR5, "__AVR_ATmega3290A__", 0, 0, 0x0100, 1, "m3290a")
+AVR_MCU ("atmega3290p", ARCH_AVR5, "__AVR_ATmega3290P__", 0, 0, 0x0100, 1, "m3290p")
+AVR_MCU ("atmega3290pa", ARCH_AVR5, "__AVR_ATmega3290PA__", 0, 0, 0x0100, 1, "m3290pa")
+AVR_MCU ("atmega32c1", ARCH_AVR5, "__AVR_ATmega32C1__", 0, 0, 0x0100, 1, "m32c1")
+AVR_MCU ("atmega32m1", ARCH_AVR5, "__AVR_ATmega32M1__", 0, 0, 0x0100, 1, "m32m1")
+AVR_MCU ("atmega32u4", ARCH_AVR5, "__AVR_ATmega32U4__", 0, 0, 0x0100, 1, "m32u4")
+AVR_MCU ("atmega32u6", ARCH_AVR5, "__AVR_ATmega32U6__", 0, 0, 0x0100, 1, "m32u6")
+AVR_MCU ("atmega406", ARCH_AVR5, "__AVR_ATmega406__", 0, 0, 0x0100, 1, "m406")
+AVR_MCU ("atmega64", ARCH_AVR5, "__AVR_ATmega64__", 0, 0, 0x0100, 1, "m64")
+AVR_MCU ("atmega64a", ARCH_AVR5, "__AVR_ATmega64A__", 0, 0, 0x0100, 1, "m64a")
+AVR_MCU ("atmega640", ARCH_AVR5, "__AVR_ATmega640__", 0, 0, 0x0200, 1, "m640")
+AVR_MCU ("atmega644", ARCH_AVR5, "__AVR_ATmega644__", 0, 0, 0x0100, 1, "m644")
+AVR_MCU ("atmega644a", ARCH_AVR5, "__AVR_ATmega644A__", 0, 0, 0x0100, 1, "m644a")
+AVR_MCU ("atmega644p", ARCH_AVR5, "__AVR_ATmega644P__", 0, 0, 0x0100, 1, "m644p")
+AVR_MCU ("atmega644pa", ARCH_AVR5, "__AVR_ATmega644PA__", 0, 0, 0x0100, 1, "m644pa")
+AVR_MCU ("atmega645", ARCH_AVR5, "__AVR_ATmega645__", 0, 0, 0x0100, 1, "m645")
+AVR_MCU ("atmega645a", ARCH_AVR5, "__AVR_ATmega645A__", 0, 0, 0x0100, 1, "m645a")
+AVR_MCU ("atmega645p", ARCH_AVR5, "__AVR_ATmega645P__", 0, 0, 0x0100, 1, "m645p")
+AVR_MCU ("atmega6450", ARCH_AVR5, "__AVR_ATmega6450__", 0, 0, 0x0100, 1, "m6450")
+AVR_MCU ("atmega6450a", ARCH_AVR5, "__AVR_ATmega6450A__", 0, 0, 0x0100, 1, "m6450a")
+AVR_MCU ("atmega6450p", ARCH_AVR5, "__AVR_ATmega6450P__", 0, 0, 0x0100, 1, "m6450p")
+AVR_MCU ("atmega649", ARCH_AVR5, "__AVR_ATmega649__", 0, 0, 0x0100, 1, "m649")
+AVR_MCU ("atmega649a", ARCH_AVR5, "__AVR_ATmega649A__", 0, 0, 0x0100, 1, "m649a")
+AVR_MCU ("atmega649p", ARCH_AVR5, "__AVR_ATmega649P__", 0, 0, 0x0100, 1, "m649p")
+AVR_MCU ("atmega6490", ARCH_AVR5, "__AVR_ATmega6490__", 0, 0, 0x0100, 1, "m6490")
+AVR_MCU ("atmega16hva", ARCH_AVR5, "__AVR_ATmega16HVA__", 0, 0, 0x0100, 1, "m16hva")
+AVR_MCU ("atmega16hva2", ARCH_AVR5, "__AVR_ATmega16HVA2__", 0, 0, 0x0100, 1, "m16hva2")
+AVR_MCU ("atmega32hvb", ARCH_AVR5, "__AVR_ATmega32HVB__", 0, 0, 0x0100, 1, "m32hvb")
+AVR_MCU ("atmega6490a", ARCH_AVR5, "__AVR_ATmega6490A__", 0, 0, 0x0100, 1, "m6490a")
+AVR_MCU ("atmega6490p", ARCH_AVR5, "__AVR_ATmega6490P__", 0, 0, 0x0100, 1, "m6490p")
+AVR_MCU ("atmega64c1", ARCH_AVR5, "__AVR_ATmega64C1__", 0, 0, 0x0100, 1, "m64c1")
+AVR_MCU ("atmega64m1", ARCH_AVR5, "__AVR_ATmega64M1__", 0, 0, 0x0100, 1, "m64m1")
+AVR_MCU ("atmega64hve", ARCH_AVR5, "__AVR_ATmega64HVE__", 0, 0, 0x0100, 1, "m64hve")
+AVR_MCU ("atmega64rfa2", ARCH_AVR5, "__AVR_ATmega64RFA2__", 0, 0, 0x0200, 1, "m64rfa2")
+AVR_MCU ("atmega64rfr2", ARCH_AVR5, "__AVR_ATmega64RFR2__", 0, 0, 0x0200, 1, "m64rfr2")
+AVR_MCU ("atmega32hvbrevb", ARCH_AVR5, "__AVR_ATmega32HVBREVB__", 0, 0, 0x0100, 1, "m32hvbrevb")
+AVR_MCU ("atmega48hvf", ARCH_AVR5, "__AVR_ATmega48HVF__", 0, 0, 0x0100, 1, "m48hvf")
+AVR_MCU ("at90can32", ARCH_AVR5, "__AVR_AT90CAN32__", 0, 0, 0x0100, 1, "can32")
+AVR_MCU ("at90can64", ARCH_AVR5, "__AVR_AT90CAN64__", 0, 0, 0x0100, 1, "can64")
+AVR_MCU ("at90pwm161", ARCH_AVR5, "__AVR_AT90PWM161__", 0, 0, 0x0100, 1, "90pwm161")
+AVR_MCU ("at90pwm216", ARCH_AVR5, "__AVR_AT90PWM216__", 0, 0, 0x0100, 1, "90pwm216")
+AVR_MCU ("at90pwm316", ARCH_AVR5, "__AVR_AT90PWM316__", 0, 0, 0x0100, 1, "90pwm316")
+AVR_MCU ("at90scr100", ARCH_AVR5, "__AVR_AT90SCR100__", 0, 0, 0x0100, 1, "90scr100")
+AVR_MCU ("at90usb646", ARCH_AVR5, "__AVR_AT90USB646__", 0, 0, 0x0100, 1, "usb646")
+AVR_MCU ("at90usb647", ARCH_AVR5, "__AVR_AT90USB647__", 0, 0, 0x0100, 1, "usb647")
+AVR_MCU ("at94k", ARCH_AVR5, "__AVR_AT94K__", 0, 0, 0x0060, 1, "at94k")
+AVR_MCU ("m3000", ARCH_AVR5, "__AVR_M3000__", 0, 0, 0x1000, 1, "m3000")
+/* Enhanced, == 128K. */
+AVR_MCU ("avr51", ARCH_AVR51, NULL, 0, 0, 0x0100, 2, "m128")
+AVR_MCU ("atmega128", ARCH_AVR51, "__AVR_ATmega128__", 0, 0, 0x0100, 2, "m128")
+AVR_MCU ("atmega128a", ARCH_AVR51, "__AVR_ATmega128A__", 0, 0, 0x0100, 2, "m128a")
+AVR_MCU ("atmega1280", ARCH_AVR51, "__AVR_ATmega1280__", 0, 0, 0x0200, 2, "m1280")
+AVR_MCU ("atmega1281", ARCH_AVR51, "__AVR_ATmega1281__", 0, 0, 0x0200, 2, "m1281")
+AVR_MCU ("atmega1284", ARCH_AVR51, "__AVR_ATmega1284__", 0, 0, 0x0100, 2, "m1284")
+AVR_MCU ("atmega1284p", ARCH_AVR51, "__AVR_ATmega1284P__", 0, 0, 0x0100, 2, "m1284p")
+AVR_MCU ("atmega128rfa1", ARCH_AVR51, "__AVR_ATmega128RFA1__", 0, 0, 0x0200, 2, "m128rfa1")
+AVR_MCU ("at90can128", ARCH_AVR51, "__AVR_AT90CAN128__", 0, 0, 0x0100, 2, "can128")
+AVR_MCU ("at90usb1286", ARCH_AVR51, "__AVR_AT90USB1286__", 0, 0, 0x0100, 2, "usb1286")
+AVR_MCU ("at90usb1287", ARCH_AVR51, "__AVR_AT90USB1287__", 0, 0, 0x0100, 2, "usb1287")
+/* 3-Byte PC. */
+AVR_MCU ("avr6", ARCH_AVR6, NULL, 0, 0, 0x0200, 4, "m2561")
+AVR_MCU ("atmega2560", ARCH_AVR6, "__AVR_ATmega2560__", 0, 0, 0x0200, 4, "m2560")
+AVR_MCU ("atmega2561", ARCH_AVR6, "__AVR_ATmega2561__", 0, 0, 0x0200, 4, "m2561")
+/* Xmega, 16K <= Flash < 64K, RAM <= 64K */
+AVR_MCU ("avrxmega2", ARCH_AVRXMEGA2, NULL, 0, 0, 0x2000, 1, "x32a4")
+AVR_MCU ("atxmega16a4", ARCH_AVRXMEGA2, "__AVR_ATxmega16A4__", 0, 0, 0x2000, 1, "x16a4")
+AVR_MCU ("atxmega16d4", ARCH_AVRXMEGA2, "__AVR_ATxmega16D4__", 0, 0, 0x2000, 1, "x16d4")
+AVR_MCU ("atxmega32a4", ARCH_AVRXMEGA2, "__AVR_ATxmega32A4__", 0, 0, 0x2000, 1, "x32a4")
+AVR_MCU ("atxmega32d4", ARCH_AVRXMEGA2, "__AVR_ATxmega32D4__", 0, 0, 0x2000, 1, "x32d4")
+AVR_MCU ("atxmega32x1", ARCH_AVRXMEGA2, "__AVR_ATxmega32X1__", 0, 0, 0x2000, 1, "x32x1")
+AVR_MCU ("atmxt112sl", ARCH_AVRXMEGA2, "__AVR_ATMXT112SL__", 0, 0, 0x2000, 1, "mxt112sl")
+AVR_MCU ("atmxt224", ARCH_AVRXMEGA2, "__AVR_ATMXT224__", 0, 0, 0x2000, 1, "mxt224")
+AVR_MCU ("atmxt224e", ARCH_AVRXMEGA2, "__AVR_ATMXT224E__", 0, 0, 0x2000, 1, "mxt224e")
+AVR_MCU ("atmxt336s", ARCH_AVRXMEGA2, "__AVR_ATMXT336S__", 0, 0, 0x2000, 1, "mxt336s")
+AVR_MCU ("atxmega16a4u", ARCH_AVRXMEGA2, "__AVR_ATxmega16A4U__", 0, 0, 0x2000, 1, "x16a4u")
+AVR_MCU ("atxmega16c4", ARCH_AVRXMEGA2, "__AVR_ATxmega16C4__", 0, 0, 0x2000, 1, "x16c4")
+AVR_MCU ("atxmega32a4u", ARCH_AVRXMEGA2, "__AVR_ATxmega32A4U__", 0, 0, 0x2000, 1, "x32a4u")
+AVR_MCU ("atxmega32c4", ARCH_AVRXMEGA2, "__AVR_ATxmega32C4__", 0, 0, 0x2000, 1, "x32c4")
+AVR_MCU ("atxmega32e5", ARCH_AVRXMEGA2, "__AVR_ATxmega32E5__", 0, 0, 0x2000, 1, "x32e5")
+/* Xmega, 64K < Flash <= 128K, RAM <= 64K */
+AVR_MCU ("avrxmega4", ARCH_AVRXMEGA4, NULL, 0, 0, 0x2000, 2, "x64a4")
+AVR_MCU ("atxmega64a3", ARCH_AVRXMEGA4, "__AVR_ATxmega64A3__", 0, 0, 0x2000, 2, "x64a3")
+AVR_MCU ("atxmega64d3", ARCH_AVRXMEGA4, "__AVR_ATxmega64D3__", 0, 0, 0x2000, 2, "x64d3")
+AVR_MCU ("atxmega64a3u", ARCH_AVRXMEGA4, "__AVR_ATxmega64A3U__", 0, 0, 0x2000, 2, "x64a3u")
+AVR_MCU ("atxmega64a4u", ARCH_AVRXMEGA4, "__AVR_ATxmega64A4U__", 0, 0, 0x2000, 2, "x64a4u")
+AVR_MCU ("atxmega64b1", ARCH_AVRXMEGA4, "__AVR_ATxmega64B1__", 0, 0, 0x2000, 2, "x64b1")
+AVR_MCU ("atxmega64b3", ARCH_AVRXMEGA4, "__AVR_ATxmega64B3__", 0, 0, 0x2000, 2, "x64b3")
+AVR_MCU ("atxmega64c3", ARCH_AVRXMEGA4, "__AVR_ATxmega64C3__", 0, 0, 0x2000, 2, "x64c3")
+AVR_MCU ("atxmega64d4", ARCH_AVRXMEGA4, "__AVR_ATxmega64D4__", 0, 0, 0x2000, 2, "x64d4")
+/* Xmega, 64K < Flash <= 128K, RAM > 64K */
+AVR_MCU ("avrxmega5", ARCH_AVRXMEGA5, NULL, 0, 0, 0x2000, 2, "x64a1")
+AVR_MCU ("atxmega64a1", ARCH_AVRXMEGA5, "__AVR_ATxmega64A1__", 0, 0, 0x2000, 2, "x64a1")
+AVR_MCU ("atxmega64a1u", ARCH_AVRXMEGA5, "__AVR_ATxmega64A1U__", 0, 0, 0x2000, 2, "x64a1u")
+/* Xmega, 128K < Flash, RAM <= 64K */
+AVR_MCU ("avrxmega6", ARCH_AVRXMEGA6, NULL, 0, 0, 0x2000, 6, "x128a3")
+AVR_MCU ("atxmega128a3", ARCH_AVRXMEGA6, "__AVR_ATxmega128A3__", 0, 0, 0x2000, 3, "x128a3")
+AVR_MCU ("atxmega128d3", ARCH_AVRXMEGA6, "__AVR_ATxmega128D3__", 0, 0, 0x2000, 3, "x128d3")
+AVR_MCU ("atxmega192a3", ARCH_AVRXMEGA6, "__AVR_ATxmega192A3__", 0, 0, 0x2000, 4, "x192a3")
+AVR_MCU ("atxmega192d3", ARCH_AVRXMEGA6, "__AVR_ATxmega192D3__", 0, 0, 0x2000, 4, "x192d3")
+AVR_MCU ("atxmega256a3", ARCH_AVRXMEGA6, "__AVR_ATxmega256A3__", 0, 0, 0x2000, 5, "x256a3")
+AVR_MCU ("atxmega256a3b", ARCH_AVRXMEGA6, "__AVR_ATxmega256A3B__", 0, 0, 0x2000, 5, "x256a3b")
+AVR_MCU ("atxmega256a3bu", ARCH_AVRXMEGA6, "__AVR_ATxmega256A3BU__", 0, 0, 0x2000, 5, "x256a3bu")
+AVR_MCU ("atxmega256d3", ARCH_AVRXMEGA6, "__AVR_ATxmega256D3__", 0, 0, 0x2000, 5, "x256d3")
+AVR_MCU ("atxmega128a3u", ARCH_AVRXMEGA6, "__AVR_ATxmega128A3U__", 0, 0, 0x2000, 3, "x128a3u")
+AVR_MCU ("atxmega128b1", ARCH_AVRXMEGA6, "__AVR_ATxmega128B1__", 0, 0, 0x2000, 3, "x128b1")
+AVR_MCU ("atxmega128b3", ARCH_AVRXMEGA6, "__AVR_ATxmega128B3__", 0, 0, 0x2000, 3, "x128b3")
+AVR_MCU ("atxmega128c3", ARCH_AVRXMEGA6, "__AVR_ATxmega128C3__", 0, 0, 0x2000, 3, "x128c3")
+AVR_MCU ("atxmega128d4", ARCH_AVRXMEGA6, "__AVR_ATxmega128D4__", 0, 0, 0x2000, 3, "x128d4")
+AVR_MCU ("atmxt540s", ARCH_AVRXMEGA6, "__AVR_ATMXT540S__", 0, 0, 0x2000, 2, "mxt540s")
+AVR_MCU ("atmxt540sreva", ARCH_AVRXMEGA6, "__AVR_ATMXT540SREVA__", 0, 0, 0x2000, 2, "mxt540sreva")
+AVR_MCU ("atxmega192a3u", ARCH_AVRXMEGA6, "__AVR_ATxmega192A3U__", 0, 0, 0x2000, 4, "x192a3u")
+AVR_MCU ("atxmega192c3", ARCH_AVRXMEGA6, "__AVR_ATxmega192C3__", 0, 0, 0x2000, 4, "x192c3")
+AVR_MCU ("atxmega256a3u", ARCH_AVRXMEGA6, "__AVR_ATxmega256A3U__", 0, 0, 0x2000, 5, "x256a3u")
+AVR_MCU ("atxmega256c3", ARCH_AVRXMEGA6, "__AVR_ATxmega256C3__", 0, 0, 0x2000, 5, "x256c3")
+AVR_MCU ("atxmega384c3", ARCH_AVRXMEGA6, "__AVR_ATxmega384C3__", 0, 0, 0x2000, 6, "x384c3")
+AVR_MCU ("atxmega384d3", ARCH_AVRXMEGA6, "__AVR_ATxmega384D3__", 0, 0, 0x2000, 6, "x384d3")
+/* Xmega, 128K < Flash, RAM > 64K. */
+AVR_MCU ("avrxmega7", ARCH_AVRXMEGA7, NULL, 0, 0, 0x2000, 3, "x128a1")
+AVR_MCU ("atxmega128a1", ARCH_AVRXMEGA7, "__AVR_ATxmega128A1__", 0, 0, 0x2000, 3, "x128a1")
+AVR_MCU ("atxmega128a1u", ARCH_AVRXMEGA7, "__AVR_ATxmega128A1U__", 0, 0, 0x2000, 3, "x128a1u")
+AVR_MCU ("atxmega128a4u", ARCH_AVRXMEGA7, "__AVR_ATxmega128A4U__", 0, 0, 0x2000, 3, "x128a4u")
+/* Assembler only. */
+AVR_MCU ("avr1", ARCH_AVR1, NULL, 0, 0, 0x0060, 1, "s1200")
+AVR_MCU ("at90s1200", ARCH_AVR1, "__AVR_AT90S1200__", 0, 0, 0x0060, 1, "s1200")
+AVR_MCU ("attiny11", ARCH_AVR1, "__AVR_ATtiny11__", 0, 0, 0x0060, 1, "tn11")
+AVR_MCU ("attiny12", ARCH_AVR1, "__AVR_ATtiny12__", 0, 0, 0x0060, 1, "tn12")
+AVR_MCU ("attiny15", ARCH_AVR1, "__AVR_ATtiny15__", 0, 0, 0x0060, 1, "tn15")
+AVR_MCU ("attiny28", ARCH_AVR1, "__AVR_ATtiny28__", 0, 0, 0x0060, 1, "tn28")
diff --git a/gcc-4.9/gcc/config/avr/avr-modes.def b/gcc-4.9/gcc/config/avr/avr-modes.def
new file mode 100644
index 000000000..7d380b068
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-modes.def
@@ -0,0 +1,33 @@
+/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+FRACTIONAL_INT_MODE (PSI, 24, 3);
+
+/* Make TA and UTA 64 bits wide.
+   128-bit wide modes would be insane on an 8-bit machine.
+ This needs special treatment in avr.c and avr-lib.h. */
+
+ADJUST_BYTESIZE (TA, 8);
+ADJUST_ALIGNMENT (TA, 1);
+ADJUST_IBIT (TA, 16);
+ADJUST_FBIT (TA, 47);
+
+ADJUST_BYTESIZE (UTA, 8);
+ADJUST_ALIGNMENT (UTA, 1);
+ADJUST_IBIT (UTA, 16);
+ADJUST_FBIT (UTA, 48);
diff --git a/gcc-4.9/gcc/config/avr/avr-protos.h b/gcc-4.9/gcc/config/avr/avr-protos.h
new file mode 100644
index 000000000..c5ce78429
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-protos.h
@@ -0,0 +1,164 @@
+/* Prototypes for exported functions defined in avr.c
+
+ Copyright (C) 2000-2014 Free Software Foundation, Inc.
+ Contributed by Denis Chertykov (chertykov@gmail.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+extern int avr_function_arg_regno_p (int r);
+extern void avr_cpu_cpp_builtins (struct cpp_reader * pfile);
+extern enum reg_class avr_regno_reg_class (int r);
+extern void asm_globalize_label (FILE *file, const char *name);
+extern void avr_adjust_reg_alloc_order (void);
+extern int avr_initial_elimination_offset (int from, int to);
+extern int avr_simple_epilogue (void);
+extern int avr_hard_regno_rename_ok (unsigned int, unsigned int);
+extern rtx avr_return_addr_rtx (int count, rtx tem);
+extern void avr_register_target_pragmas (void);
+extern void avr_init_expanders (void);
+
+#ifdef TREE_CODE
+extern void avr_asm_output_aligned_decl_common (FILE*, const_tree, const char*, unsigned HOST_WIDE_INT, unsigned int, bool);
+extern void asm_output_external (FILE *file, tree decl, char *name);
+extern int avr_progmem_p (tree decl, tree attributes);
+
+#ifdef RTX_CODE /* inside TREE_CODE */
+extern void avr_init_cumulative_args (CUMULATIVE_ARGS*, tree, rtx, tree);
+#endif /* RTX_CODE inside TREE_CODE */
+
+#endif /* TREE_CODE */
+
+#ifdef RTX_CODE
+extern int avr_hard_regno_call_part_clobbered (unsigned, enum machine_mode);
+extern const char *output_movqi (rtx insn, rtx operands[], int *l);
+extern const char *output_movhi (rtx insn, rtx operands[], int *l);
+extern const char *output_movsisf (rtx insn, rtx operands[], int *l);
+extern const char *avr_out_tstsi (rtx, rtx*, int*);
+extern const char *avr_out_tsthi (rtx, rtx*, int*);
+extern const char *avr_out_tstpsi (rtx, rtx*, int*);
+extern const char *avr_out_compare (rtx, rtx*, int*);
+extern const char *avr_out_compare64 (rtx, rtx*, int*);
+extern const char *ret_cond_branch (rtx x, int len, int reverse);
+extern const char *avr_out_movpsi (rtx, rtx*, int*);
+
+extern const char *ashlqi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashlhi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashlsi3_out (rtx insn, rtx operands[], int *len);
+
+extern const char *ashrqi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashrhi3_out (rtx insn, rtx operands[], int *len);
+extern const char *ashrsi3_out (rtx insn, rtx operands[], int *len);
+
+extern const char *lshrqi3_out (rtx insn, rtx operands[], int *len);
+extern const char *lshrhi3_out (rtx insn, rtx operands[], int *len);
+extern const char *lshrsi3_out (rtx insn, rtx operands[], int *len);
+
+extern const char *avr_out_ashlpsi3 (rtx, rtx*, int*);
+extern const char *avr_out_ashrpsi3 (rtx, rtx*, int*);
+extern const char *avr_out_lshrpsi3 (rtx, rtx*, int*);
+
+extern bool avr_rotate_bytes (rtx operands[]);
+
+extern const char* avr_out_fract (rtx, rtx[], bool, int*);
+extern rtx avr_to_int_mode (rtx);
+
+extern void avr_expand_prologue (void);
+extern void avr_expand_epilogue (bool);
+extern bool avr_emit_movmemhi (rtx*);
+extern int avr_epilogue_uses (int regno);
+extern int avr_starting_frame_offset (void);
+
+extern void avr_output_addr_vec_elt (FILE *stream, int value);
+extern const char *avr_out_sbxx_branch (rtx insn, rtx operands[]);
+extern const char* avr_out_bitop (rtx, rtx*, int*);
+extern const char* avr_out_plus (rtx, rtx*, int* =NULL, int* =NULL, bool =true);
+extern const char* avr_out_round (rtx, rtx*, int* =NULL);
+extern const char* avr_out_addto_sp (rtx*, int*);
+extern const char* avr_out_xload (rtx, rtx*, int*);
+extern const char* avr_out_movmem (rtx, rtx*, int*);
+extern const char* avr_out_insert_bits (rtx*, int*);
+extern bool avr_popcount_each_byte (rtx, int, int);
+extern bool avr_has_nibble_0xf (rtx);
+
+extern int extra_constraint_Q (rtx x);
+extern int avr_adjust_insn_length (rtx insn, int len);
+extern const char* output_reload_inhi (rtx*, rtx, int*);
+extern const char* output_reload_insisf (rtx*, rtx, int*);
+extern const char* avr_out_reload_inpsi (rtx*, rtx, int*);
+extern const char* avr_out_lpm (rtx, rtx*, int*);
+extern void avr_notice_update_cc (rtx body, rtx insn);
+extern int reg_unused_after (rtx insn, rtx reg);
+extern int _reg_unused_after (rtx insn, rtx reg);
+extern int avr_jump_mode (rtx x, rtx insn);
+extern int test_hard_reg_class (enum reg_class rclass, rtx x);
+extern int jump_over_one_insn_p (rtx insn, rtx dest);
+
+extern int avr_hard_regno_mode_ok (int regno, enum machine_mode mode);
+extern void avr_final_prescan_insn (rtx insn, rtx *operand, int num_operands);
+extern int avr_simplify_comparison_p (enum machine_mode mode,
+ RTX_CODE op, rtx x);
+extern RTX_CODE avr_normalize_condition (RTX_CODE condition);
+extern void out_shift_with_cnt (const char *templ, rtx insn,
+ rtx operands[], int *len, int t_len);
+extern enum reg_class avr_mode_code_base_reg_class (enum machine_mode, addr_space_t, RTX_CODE, RTX_CODE);
+extern bool avr_regno_mode_code_ok_for_base_p (int, enum machine_mode, addr_space_t, RTX_CODE, RTX_CODE);
+extern rtx avr_incoming_return_addr_rtx (void);
+extern rtx avr_legitimize_reload_address (rtx*, enum machine_mode, int, int, int, int, rtx (*)(rtx,int));
+extern bool avr_mem_flash_p (rtx);
+extern bool avr_mem_memx_p (rtx);
+extern bool avr_load_libgcc_p (rtx);
+extern bool avr_xload_libgcc_p (enum machine_mode);
+
+extern rtx lpm_reg_rtx;
+extern rtx lpm_addr_reg_rtx;
+extern rtx tmp_reg_rtx;
+extern rtx zero_reg_rtx;
+extern rtx all_regs_rtx[32];
+extern rtx rampz_rtx;
+
+#endif /* RTX_CODE */
+
+#ifdef REAL_VALUE_TYPE
+extern void asm_output_float (FILE *file, REAL_VALUE_TYPE n);
+#endif
+
+extern bool avr_have_dimode;
+
+/* From avr-log.c */
+
+#define avr_edump (avr_log_set_caller_e (__FUNCTION__))
+#define avr_fdump (avr_log_set_caller_f (__FUNCTION__))
+
+extern int (*avr_log_set_caller_e (const char*))(const char*, ...);
+extern int (*avr_log_set_caller_f (const char*))(FILE*, const char*, ...);
+
+extern void avr_log_set_avr_log (void);
+
+typedef struct
+{
+ unsigned address_cost :1;
+ unsigned builtin :1;
+ unsigned constraints :1;
+ unsigned legitimate_address_p :1;
+ unsigned legitimize_address :1;
+ unsigned legitimize_reload_address :1;
+ unsigned progmem :1;
+ unsigned rtx_costs :1;
+} avr_log_t;
+
+extern avr_log_t avr_log;
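+
+/* Illustration (hypothetical): dump code is typically guarded by one of
+   the bits above, e.g.
+
+       if (avr_log.legitimate_address_p)
+         avr_edump ("%?: ret=%d: %r\n", ok, x);
+*/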
diff --git a/gcc-4.9/gcc/config/avr/avr-stdint.h b/gcc-4.9/gcc/config/avr/avr-stdint.h
new file mode 100644
index 000000000..3ecc26895
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-stdint.h
@@ -0,0 +1,66 @@
+/* Definitions for <stdint.h> types on systems using newlib.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ The intention of this file is to supply definitions that work with
+ avr-gcc's -mint8 that sets int to an 8-bit type.
+
+ This file is intended to yield the same results as newlib-stdint.h,
+ but there are some differences to newlib-stdint.h:
+
+   - AVR is an 8-bit architecture that cannot access 16-bit values
+     atomically; hence SIG_ATOMIC_TYPE is "char".
+
+   - For the same reason, [u]int_fast8_t is defined as an 8-bit type.
+
+*/
+
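+/* Illustration: with -mint8 (INT_TYPE_SIZE == 8) the conditionals below
+   resolve to
+
+       INT16_TYPE -> "long int"        (long is 16 bits under -mint8)
+       INT32_TYPE -> "long long int"   (long long is 32 bits)
+       INT64_TYPE -> 0                 (no 64-bit type is available)
+
+   while the default INT_TYPE_SIZE == 16 picks "int", "long int" and
+   "long long int", respectively.  */
+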
+#define SIG_ATOMIC_TYPE "char"
+
+#define INT8_TYPE "signed char"
+#define INT16_TYPE (INT_TYPE_SIZE == 16 ? "int" : "long int")
+#define INT32_TYPE (INT_TYPE_SIZE == 16 ? "long int" : "long long int")
+#define INT64_TYPE (INT_TYPE_SIZE == 16 ? "long long int" : 0)
+#define UINT8_TYPE "unsigned char"
+#define UINT16_TYPE (INT_TYPE_SIZE == 16 ? "unsigned int" : "long unsigned int")
+#define UINT32_TYPE (INT_TYPE_SIZE == 16 ? "long unsigned int" : "long long unsigned int")
+#define UINT64_TYPE (INT_TYPE_SIZE == 16 ? "long long unsigned int" : 0)
+
+#define INT_LEAST8_TYPE INT8_TYPE
+#define INT_LEAST16_TYPE INT16_TYPE
+#define INT_LEAST32_TYPE INT32_TYPE
+#define INT_LEAST64_TYPE INT64_TYPE
+#define UINT_LEAST8_TYPE UINT8_TYPE
+#define UINT_LEAST16_TYPE UINT16_TYPE
+#define UINT_LEAST32_TYPE UINT32_TYPE
+#define UINT_LEAST64_TYPE UINT64_TYPE
+
+#define INT_FAST8_TYPE INT8_TYPE
+#define INT_FAST16_TYPE (INT_TYPE_SIZE == 16 ? "int" : INT16_TYPE)
+#define INT_FAST32_TYPE INT32_TYPE
+#define INT_FAST64_TYPE INT64_TYPE
+#define UINT_FAST8_TYPE UINT8_TYPE
+#define UINT_FAST16_TYPE (INT_TYPE_SIZE == 16 ? "unsigned int" : UINT16_TYPE)
+#define UINT_FAST32_TYPE UINT32_TYPE
+#define UINT_FAST64_TYPE UINT64_TYPE
+
+#define INTPTR_TYPE PTRDIFF_TYPE
+#ifndef UINTPTR_TYPE
+#define UINTPTR_TYPE SIZE_TYPE
+#endif
diff --git a/gcc-4.9/gcc/config/avr/avr-tables.opt b/gcc-4.9/gcc/config/avr/avr-tables.opt
new file mode 100644
index 000000000..b5c6d8290
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr-tables.opt
@@ -0,0 +1,766 @@
+; -*- buffer-read-only: t -*-
+; Generated automatically by genopt.sh from avr-mcus.def.
+
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+Enum
+Name(avr_mcu) Type(int)
+Known MCU names:
+
+EnumValue
+Enum(avr_mcu) String(avr2) Value(0)
+
+EnumValue
+Enum(avr_mcu) String(at90s2313) Value(1)
+
+EnumValue
+Enum(avr_mcu) String(at90s2323) Value(2)
+
+EnumValue
+Enum(avr_mcu) String(at90s2333) Value(3)
+
+EnumValue
+Enum(avr_mcu) String(at90s2343) Value(4)
+
+EnumValue
+Enum(avr_mcu) String(attiny22) Value(5)
+
+EnumValue
+Enum(avr_mcu) String(attiny26) Value(6)
+
+EnumValue
+Enum(avr_mcu) String(at90s4414) Value(7)
+
+EnumValue
+Enum(avr_mcu) String(at90s4433) Value(8)
+
+EnumValue
+Enum(avr_mcu) String(at90s4434) Value(9)
+
+EnumValue
+Enum(avr_mcu) String(at90s8515) Value(10)
+
+EnumValue
+Enum(avr_mcu) String(at90c8534) Value(11)
+
+EnumValue
+Enum(avr_mcu) String(at90s8535) Value(12)
+
+EnumValue
+Enum(avr_mcu) String(avr25) Value(13)
+
+EnumValue
+Enum(avr_mcu) String(ata6289) Value(14)
+
+EnumValue
+Enum(avr_mcu) String(ata5272) Value(15)
+
+EnumValue
+Enum(avr_mcu) String(attiny13) Value(16)
+
+EnumValue
+Enum(avr_mcu) String(attiny13a) Value(17)
+
+EnumValue
+Enum(avr_mcu) String(attiny2313) Value(18)
+
+EnumValue
+Enum(avr_mcu) String(attiny2313a) Value(19)
+
+EnumValue
+Enum(avr_mcu) String(attiny24) Value(20)
+
+EnumValue
+Enum(avr_mcu) String(attiny24a) Value(21)
+
+EnumValue
+Enum(avr_mcu) String(attiny4313) Value(22)
+
+EnumValue
+Enum(avr_mcu) String(attiny44) Value(23)
+
+EnumValue
+Enum(avr_mcu) String(attiny44a) Value(24)
+
+EnumValue
+Enum(avr_mcu) String(attiny84) Value(25)
+
+EnumValue
+Enum(avr_mcu) String(attiny84a) Value(26)
+
+EnumValue
+Enum(avr_mcu) String(attiny25) Value(27)
+
+EnumValue
+Enum(avr_mcu) String(attiny45) Value(28)
+
+EnumValue
+Enum(avr_mcu) String(attiny85) Value(29)
+
+EnumValue
+Enum(avr_mcu) String(attiny261) Value(30)
+
+EnumValue
+Enum(avr_mcu) String(attiny261a) Value(31)
+
+EnumValue
+Enum(avr_mcu) String(attiny461) Value(32)
+
+EnumValue
+Enum(avr_mcu) String(attiny461a) Value(33)
+
+EnumValue
+Enum(avr_mcu) String(attiny861) Value(34)
+
+EnumValue
+Enum(avr_mcu) String(attiny861a) Value(35)
+
+EnumValue
+Enum(avr_mcu) String(attiny43u) Value(36)
+
+EnumValue
+Enum(avr_mcu) String(attiny87) Value(37)
+
+EnumValue
+Enum(avr_mcu) String(attiny48) Value(38)
+
+EnumValue
+Enum(avr_mcu) String(attiny88) Value(39)
+
+EnumValue
+Enum(avr_mcu) String(at86rf401) Value(40)
+
+EnumValue
+Enum(avr_mcu) String(avr3) Value(41)
+
+EnumValue
+Enum(avr_mcu) String(at43usb355) Value(42)
+
+EnumValue
+Enum(avr_mcu) String(at76c711) Value(43)
+
+EnumValue
+Enum(avr_mcu) String(avr31) Value(44)
+
+EnumValue
+Enum(avr_mcu) String(atmega103) Value(45)
+
+EnumValue
+Enum(avr_mcu) String(at43usb320) Value(46)
+
+EnumValue
+Enum(avr_mcu) String(avr35) Value(47)
+
+EnumValue
+Enum(avr_mcu) String(ata5505) Value(48)
+
+EnumValue
+Enum(avr_mcu) String(at90usb82) Value(49)
+
+EnumValue
+Enum(avr_mcu) String(at90usb162) Value(50)
+
+EnumValue
+Enum(avr_mcu) String(atmega8u2) Value(51)
+
+EnumValue
+Enum(avr_mcu) String(atmega16u2) Value(52)
+
+EnumValue
+Enum(avr_mcu) String(atmega32u2) Value(53)
+
+EnumValue
+Enum(avr_mcu) String(attiny167) Value(54)
+
+EnumValue
+Enum(avr_mcu) String(attiny1634) Value(55)
+
+EnumValue
+Enum(avr_mcu) String(avr4) Value(56)
+
+EnumValue
+Enum(avr_mcu) String(ata6285) Value(57)
+
+EnumValue
+Enum(avr_mcu) String(ata6286) Value(58)
+
+EnumValue
+Enum(avr_mcu) String(atmega8) Value(59)
+
+EnumValue
+Enum(avr_mcu) String(atmega8a) Value(60)
+
+EnumValue
+Enum(avr_mcu) String(atmega48) Value(61)
+
+EnumValue
+Enum(avr_mcu) String(atmega48a) Value(62)
+
+EnumValue
+Enum(avr_mcu) String(atmega48p) Value(63)
+
+EnumValue
+Enum(avr_mcu) String(atmega48pa) Value(64)
+
+EnumValue
+Enum(avr_mcu) String(atmega88) Value(65)
+
+EnumValue
+Enum(avr_mcu) String(atmega88a) Value(66)
+
+EnumValue
+Enum(avr_mcu) String(atmega88p) Value(67)
+
+EnumValue
+Enum(avr_mcu) String(atmega88pa) Value(68)
+
+EnumValue
+Enum(avr_mcu) String(atmega8515) Value(69)
+
+EnumValue
+Enum(avr_mcu) String(atmega8535) Value(70)
+
+EnumValue
+Enum(avr_mcu) String(atmega8hva) Value(71)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm1) Value(72)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm2) Value(73)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm2b) Value(74)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm3) Value(75)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm3b) Value(76)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm81) Value(77)
+
+EnumValue
+Enum(avr_mcu) String(avr5) Value(78)
+
+EnumValue
+Enum(avr_mcu) String(ata5790) Value(79)
+
+EnumValue
+Enum(avr_mcu) String(ata5790n) Value(80)
+
+EnumValue
+Enum(avr_mcu) String(ata5795) Value(81)
+
+EnumValue
+Enum(avr_mcu) String(atmega16) Value(82)
+
+EnumValue
+Enum(avr_mcu) String(atmega16a) Value(83)
+
+EnumValue
+Enum(avr_mcu) String(atmega161) Value(84)
+
+EnumValue
+Enum(avr_mcu) String(atmega162) Value(85)
+
+EnumValue
+Enum(avr_mcu) String(atmega163) Value(86)
+
+EnumValue
+Enum(avr_mcu) String(atmega164a) Value(87)
+
+EnumValue
+Enum(avr_mcu) String(atmega164p) Value(88)
+
+EnumValue
+Enum(avr_mcu) String(atmega164pa) Value(89)
+
+EnumValue
+Enum(avr_mcu) String(atmega165) Value(90)
+
+EnumValue
+Enum(avr_mcu) String(atmega165a) Value(91)
+
+EnumValue
+Enum(avr_mcu) String(atmega165p) Value(92)
+
+EnumValue
+Enum(avr_mcu) String(atmega165pa) Value(93)
+
+EnumValue
+Enum(avr_mcu) String(atmega168) Value(94)
+
+EnumValue
+Enum(avr_mcu) String(atmega168a) Value(95)
+
+EnumValue
+Enum(avr_mcu) String(atmega168p) Value(96)
+
+EnumValue
+Enum(avr_mcu) String(atmega168pa) Value(97)
+
+EnumValue
+Enum(avr_mcu) String(atmega169) Value(98)
+
+EnumValue
+Enum(avr_mcu) String(atmega169a) Value(99)
+
+EnumValue
+Enum(avr_mcu) String(atmega169p) Value(100)
+
+EnumValue
+Enum(avr_mcu) String(atmega169pa) Value(101)
+
+EnumValue
+Enum(avr_mcu) String(atmega16hvb) Value(102)
+
+EnumValue
+Enum(avr_mcu) String(atmega16hvbrevb) Value(103)
+
+EnumValue
+Enum(avr_mcu) String(atmega16m1) Value(104)
+
+EnumValue
+Enum(avr_mcu) String(atmega16u4) Value(105)
+
+EnumValue
+Enum(avr_mcu) String(atmega26hvg) Value(106)
+
+EnumValue
+Enum(avr_mcu) String(atmega32a) Value(107)
+
+EnumValue
+Enum(avr_mcu) String(atmega32) Value(108)
+
+EnumValue
+Enum(avr_mcu) String(atmega323) Value(109)
+
+EnumValue
+Enum(avr_mcu) String(atmega324a) Value(110)
+
+EnumValue
+Enum(avr_mcu) String(atmega324p) Value(111)
+
+EnumValue
+Enum(avr_mcu) String(atmega324pa) Value(112)
+
+EnumValue
+Enum(avr_mcu) String(atmega325) Value(113)
+
+EnumValue
+Enum(avr_mcu) String(atmega325a) Value(114)
+
+EnumValue
+Enum(avr_mcu) String(atmega325p) Value(115)
+
+EnumValue
+Enum(avr_mcu) String(atmega3250) Value(116)
+
+EnumValue
+Enum(avr_mcu) String(atmega3250a) Value(117)
+
+EnumValue
+Enum(avr_mcu) String(atmega3250p) Value(118)
+
+EnumValue
+Enum(avr_mcu) String(atmega3250pa) Value(119)
+
+EnumValue
+Enum(avr_mcu) String(atmega328) Value(120)
+
+EnumValue
+Enum(avr_mcu) String(atmega328p) Value(121)
+
+EnumValue
+Enum(avr_mcu) String(atmega329) Value(122)
+
+EnumValue
+Enum(avr_mcu) String(atmega329a) Value(123)
+
+EnumValue
+Enum(avr_mcu) String(atmega329p) Value(124)
+
+EnumValue
+Enum(avr_mcu) String(atmega329pa) Value(125)
+
+EnumValue
+Enum(avr_mcu) String(atmega3290) Value(126)
+
+EnumValue
+Enum(avr_mcu) String(atmega3290a) Value(127)
+
+EnumValue
+Enum(avr_mcu) String(atmega3290p) Value(128)
+
+EnumValue
+Enum(avr_mcu) String(atmega3290pa) Value(129)
+
+EnumValue
+Enum(avr_mcu) String(atmega32c1) Value(130)
+
+EnumValue
+Enum(avr_mcu) String(atmega32m1) Value(131)
+
+EnumValue
+Enum(avr_mcu) String(atmega32u4) Value(132)
+
+EnumValue
+Enum(avr_mcu) String(atmega32u6) Value(133)
+
+EnumValue
+Enum(avr_mcu) String(atmega406) Value(134)
+
+EnumValue
+Enum(avr_mcu) String(atmega64) Value(135)
+
+EnumValue
+Enum(avr_mcu) String(atmega64a) Value(136)
+
+EnumValue
+Enum(avr_mcu) String(atmega640) Value(137)
+
+EnumValue
+Enum(avr_mcu) String(atmega644) Value(138)
+
+EnumValue
+Enum(avr_mcu) String(atmega644a) Value(139)
+
+EnumValue
+Enum(avr_mcu) String(atmega644p) Value(140)
+
+EnumValue
+Enum(avr_mcu) String(atmega644pa) Value(141)
+
+EnumValue
+Enum(avr_mcu) String(atmega645) Value(142)
+
+EnumValue
+Enum(avr_mcu) String(atmega645a) Value(143)
+
+EnumValue
+Enum(avr_mcu) String(atmega645p) Value(144)
+
+EnumValue
+Enum(avr_mcu) String(atmega6450) Value(145)
+
+EnumValue
+Enum(avr_mcu) String(atmega6450a) Value(146)
+
+EnumValue
+Enum(avr_mcu) String(atmega6450p) Value(147)
+
+EnumValue
+Enum(avr_mcu) String(atmega649) Value(148)
+
+EnumValue
+Enum(avr_mcu) String(atmega649a) Value(149)
+
+EnumValue
+Enum(avr_mcu) String(atmega649p) Value(150)
+
+EnumValue
+Enum(avr_mcu) String(atmega6490) Value(151)
+
+EnumValue
+Enum(avr_mcu) String(atmega16hva) Value(152)
+
+EnumValue
+Enum(avr_mcu) String(atmega16hva2) Value(153)
+
+EnumValue
+Enum(avr_mcu) String(atmega32hvb) Value(154)
+
+EnumValue
+Enum(avr_mcu) String(atmega6490a) Value(155)
+
+EnumValue
+Enum(avr_mcu) String(atmega6490p) Value(156)
+
+EnumValue
+Enum(avr_mcu) String(atmega64c1) Value(157)
+
+EnumValue
+Enum(avr_mcu) String(atmega64m1) Value(158)
+
+EnumValue
+Enum(avr_mcu) String(atmega64hve) Value(159)
+
+EnumValue
+Enum(avr_mcu) String(atmega64rfa2) Value(160)
+
+EnumValue
+Enum(avr_mcu) String(atmega64rfr2) Value(161)
+
+EnumValue
+Enum(avr_mcu) String(atmega32hvbrevb) Value(162)
+
+EnumValue
+Enum(avr_mcu) String(atmega48hvf) Value(163)
+
+EnumValue
+Enum(avr_mcu) String(at90can32) Value(164)
+
+EnumValue
+Enum(avr_mcu) String(at90can64) Value(165)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm161) Value(166)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm216) Value(167)
+
+EnumValue
+Enum(avr_mcu) String(at90pwm316) Value(168)
+
+EnumValue
+Enum(avr_mcu) String(at90scr100) Value(169)
+
+EnumValue
+Enum(avr_mcu) String(at90usb646) Value(170)
+
+EnumValue
+Enum(avr_mcu) String(at90usb647) Value(171)
+
+EnumValue
+Enum(avr_mcu) String(at94k) Value(172)
+
+EnumValue
+Enum(avr_mcu) String(m3000) Value(173)
+
+EnumValue
+Enum(avr_mcu) String(avr51) Value(174)
+
+EnumValue
+Enum(avr_mcu) String(atmega128) Value(175)
+
+EnumValue
+Enum(avr_mcu) String(atmega128a) Value(176)
+
+EnumValue
+Enum(avr_mcu) String(atmega1280) Value(177)
+
+EnumValue
+Enum(avr_mcu) String(atmega1281) Value(178)
+
+EnumValue
+Enum(avr_mcu) String(atmega1284) Value(179)
+
+EnumValue
+Enum(avr_mcu) String(atmega1284p) Value(180)
+
+EnumValue
+Enum(avr_mcu) String(atmega128rfa1) Value(181)
+
+EnumValue
+Enum(avr_mcu) String(at90can128) Value(182)
+
+EnumValue
+Enum(avr_mcu) String(at90usb1286) Value(183)
+
+EnumValue
+Enum(avr_mcu) String(at90usb1287) Value(184)
+
+EnumValue
+Enum(avr_mcu) String(avr6) Value(185)
+
+EnumValue
+Enum(avr_mcu) String(atmega2560) Value(186)
+
+EnumValue
+Enum(avr_mcu) String(atmega2561) Value(187)
+
+EnumValue
+Enum(avr_mcu) String(avrxmega2) Value(188)
+
+EnumValue
+Enum(avr_mcu) String(atxmega16a4) Value(189)
+
+EnumValue
+Enum(avr_mcu) String(atxmega16d4) Value(190)
+
+EnumValue
+Enum(avr_mcu) String(atxmega32a4) Value(191)
+
+EnumValue
+Enum(avr_mcu) String(atxmega32d4) Value(192)
+
+EnumValue
+Enum(avr_mcu) String(atxmega32x1) Value(193)
+
+EnumValue
+Enum(avr_mcu) String(atmxt112sl) Value(194)
+
+EnumValue
+Enum(avr_mcu) String(atmxt224) Value(195)
+
+EnumValue
+Enum(avr_mcu) String(atmxt224e) Value(196)
+
+EnumValue
+Enum(avr_mcu) String(atmxt336s) Value(197)
+
+EnumValue
+Enum(avr_mcu) String(atxmega16a4u) Value(198)
+
+EnumValue
+Enum(avr_mcu) String(atxmega16c4) Value(199)
+
+EnumValue
+Enum(avr_mcu) String(atxmega32a4u) Value(200)
+
+EnumValue
+Enum(avr_mcu) String(atxmega32c4) Value(201)
+
+EnumValue
+Enum(avr_mcu) String(atxmega32e5) Value(202)
+
+EnumValue
+Enum(avr_mcu) String(avrxmega4) Value(203)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64a3) Value(204)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64d3) Value(205)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64a3u) Value(206)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64a4u) Value(207)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64b1) Value(208)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64b3) Value(209)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64c3) Value(210)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64d4) Value(211)
+
+EnumValue
+Enum(avr_mcu) String(avrxmega5) Value(212)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64a1) Value(213)
+
+EnumValue
+Enum(avr_mcu) String(atxmega64a1u) Value(214)
+
+EnumValue
+Enum(avr_mcu) String(avrxmega6) Value(215)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128a3) Value(216)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128d3) Value(217)
+
+EnumValue
+Enum(avr_mcu) String(atxmega192a3) Value(218)
+
+EnumValue
+Enum(avr_mcu) String(atxmega192d3) Value(219)
+
+EnumValue
+Enum(avr_mcu) String(atxmega256a3) Value(220)
+
+EnumValue
+Enum(avr_mcu) String(atxmega256a3b) Value(221)
+
+EnumValue
+Enum(avr_mcu) String(atxmega256a3bu) Value(222)
+
+EnumValue
+Enum(avr_mcu) String(atxmega256d3) Value(223)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128a3u) Value(224)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128b1) Value(225)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128b3) Value(226)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128c3) Value(227)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128d4) Value(228)
+
+EnumValue
+Enum(avr_mcu) String(atmxt540s) Value(229)
+
+EnumValue
+Enum(avr_mcu) String(atmxt540sreva) Value(230)
+
+EnumValue
+Enum(avr_mcu) String(atxmega192a3u) Value(231)
+
+EnumValue
+Enum(avr_mcu) String(atxmega192c3) Value(232)
+
+EnumValue
+Enum(avr_mcu) String(atxmega256a3u) Value(233)
+
+EnumValue
+Enum(avr_mcu) String(atxmega256c3) Value(234)
+
+EnumValue
+Enum(avr_mcu) String(atxmega384c3) Value(235)
+
+EnumValue
+Enum(avr_mcu) String(atxmega384d3) Value(236)
+
+EnumValue
+Enum(avr_mcu) String(avrxmega7) Value(237)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128a1) Value(238)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128a1u) Value(239)
+
+EnumValue
+Enum(avr_mcu) String(atxmega128a4u) Value(240)
+
+EnumValue
+Enum(avr_mcu) String(avr1) Value(241)
+
+EnumValue
+Enum(avr_mcu) String(at90s1200) Value(242)
+
+EnumValue
+Enum(avr_mcu) String(attiny11) Value(243)
+
+EnumValue
+Enum(avr_mcu) String(attiny12) Value(244)
+
+EnumValue
+Enum(avr_mcu) String(attiny15) Value(245)
+
+EnumValue
+Enum(avr_mcu) String(attiny28) Value(246)
+
diff --git a/gcc-4.9/gcc/config/avr/avr.c b/gcc-4.9/gcc/config/avr/avr.c
new file mode 100644
index 000000000..8ca7de0b3
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr.c
@@ -0,0 +1,12522 @@
+/* Subroutines for insn-output.c for ATMEL AVR microcontrollers
+ Copyright (C) 1998-2014 Free Software Foundation, Inc.
+ Contributed by Denis Chertykov (chertykov@gmail.com)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "flags.h"
+#include "reload.h"
+#include "tree.h"
+#include "print-tree.h"
+#include "calls.h"
+#include "stor-layout.h"
+#include "stringpool.h"
+#include "output.h"
+#include "expr.h"
+#include "c-family/c-common.h"
+#include "diagnostic-core.h"
+#include "obstack.h"
+#include "function.h"
+#include "recog.h"
+#include "optabs.h"
+#include "ggc.h"
+#include "langhooks.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "params.h"
+#include "df.h"
+
+/* Maximal allowed offset for an address in the LD command */
+#define MAX_LD_OFFSET(MODE) (64 - (signed)GET_MODE_SIZE (MODE))
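+
+/* For example: MAX_LD_OFFSET (HImode) == 62, i.e. the last byte of a
+   2-byte access at offset 62 still lies within the 0..63 displacement
+   range of LDD.  */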
+
+/* Return true if STR starts with PREFIX and false otherwise. */
+#define STR_PREFIX_P(STR,PREFIX) (0 == strncmp (STR, PREFIX, strlen (PREFIX)))
+
+/* The 4 bits starting at SECTION_MACH_DEP are reserved to store the
+ address space where data is to be located.
+ As the only non-generic address spaces are all located in flash,
+ this can be used to test if data shall go into some .progmem* section.
+ This must be the rightmost field of machine dependent section flags. */
+#define AVR_SECTION_PROGMEM (0xf * SECTION_MACH_DEP)
+
+/* Similar 4-bit region for SYMBOL_REF_FLAGS. */
+#define AVR_SYMBOL_FLAG_PROGMEM (0xf * SYMBOL_FLAG_MACH_DEP)
+
+/* Similar 4-bit region in SYMBOL_REF_FLAGS:
+ Set address-space AS in SYMBOL_REF_FLAGS of SYM */
+#define AVR_SYMBOL_SET_ADDR_SPACE(SYM,AS)                       \
+  do {                                                          \
+    SYMBOL_REF_FLAGS (SYM) &= ~AVR_SYMBOL_FLAG_PROGMEM;         \
+    SYMBOL_REF_FLAGS (SYM) |= (AS) * SYMBOL_FLAG_MACH_DEP;      \
+  } while (0)
+
+/* Read address-space from SYMBOL_REF_FLAGS of SYM */
+#define AVR_SYMBOL_GET_ADDR_SPACE(SYM)                          \
+  ((SYMBOL_REF_FLAGS (SYM) & AVR_SYMBOL_FLAG_PROGMEM)           \
+   / SYMBOL_FLAG_MACH_DEP)
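+
+/* Illustrative usage (hypothetical): tag a symbol as living in the
+   __flash1 address space and read the tag back:
+
+       AVR_SYMBOL_SET_ADDR_SPACE (sym, ADDR_SPACE_FLASH1);
+       gcc_assert (ADDR_SPACE_FLASH1 == AVR_SYMBOL_GET_ADDR_SPACE (sym));
+*/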
+
+/* Known address spaces. The order must be the same as in the respective
+   enum from avr.h (or designated initializers must be used). */
+const avr_addrspace_t avr_addrspace[ADDR_SPACE_COUNT] =
+{
+ { ADDR_SPACE_RAM, 0, 2, "", 0, NULL },
+ { ADDR_SPACE_FLASH, 1, 2, "__flash", 0, ".progmem.data" },
+ { ADDR_SPACE_FLASH1, 1, 2, "__flash1", 1, ".progmem1.data" },
+ { ADDR_SPACE_FLASH2, 1, 2, "__flash2", 2, ".progmem2.data" },
+ { ADDR_SPACE_FLASH3, 1, 2, "__flash3", 3, ".progmem3.data" },
+ { ADDR_SPACE_FLASH4, 1, 2, "__flash4", 4, ".progmem4.data" },
+ { ADDR_SPACE_FLASH5, 1, 2, "__flash5", 5, ".progmem5.data" },
+ { ADDR_SPACE_MEMX, 1, 3, "__memx", 0, ".progmemx.data" },
+};
+
+
+/* RAM addresses of some SFRs that are used by the compiler and that are
+   the same for all devices of an architecture like 'avr4'. */
+
+typedef struct
+{
+ /* SREG: The processor status */
+ int sreg;
+
+ /* RAMPX, RAMPY, RAMPD and CCP of XMEGA */
+ int ccp;
+ int rampd;
+ int rampx;
+ int rampy;
+
+ /* RAMPZ: The high byte of 24-bit address used with ELPM */
+ int rampz;
+
+ /* SP: The stack pointer and its low and high byte */
+ int sp_l;
+ int sp_h;
+} avr_addr_t;
+
+static avr_addr_t avr_addr;
+
+
+/* Prototypes for local helper functions. */
+
+static const char* out_movqi_r_mr (rtx, rtx[], int*);
+static const char* out_movhi_r_mr (rtx, rtx[], int*);
+static const char* out_movsi_r_mr (rtx, rtx[], int*);
+static const char* out_movqi_mr_r (rtx, rtx[], int*);
+static const char* out_movhi_mr_r (rtx, rtx[], int*);
+static const char* out_movsi_mr_r (rtx, rtx[], int*);
+
+static int get_sequence_length (rtx insns);
+static int sequent_regs_live (void);
+static const char *ptrreg_to_str (int);
+static const char *cond_string (enum rtx_code);
+static int avr_num_arg_regs (enum machine_mode, const_tree);
+static int avr_operand_rtx_cost (rtx, enum machine_mode, enum rtx_code,
+ int, bool);
+static void output_reload_in_const (rtx*, rtx, int*, bool);
+static struct machine_function * avr_init_machine_status (void);
+
+
+/* Prototypes for target hooks that are needed before their implementation. */
+
+static bool avr_rtx_costs (rtx, int, int, int, int*, bool);
+
+
+/* Registers r25 down to r8 are used for passing parameters in function
+   calls; the register counter starts one above r25.  */
+#define FIRST_CUM_REG 26
+
+/* Implicit target register of LPM instruction (R0) */
+extern GTY(()) rtx lpm_reg_rtx;
+rtx lpm_reg_rtx;
+
+/* (Implicit) address register of LPM instruction (R31:R30 = Z) */
+extern GTY(()) rtx lpm_addr_reg_rtx;
+rtx lpm_addr_reg_rtx;
+
+/* Temporary register RTX (reg:QI TMP_REGNO) */
+extern GTY(()) rtx tmp_reg_rtx;
+rtx tmp_reg_rtx;
+
+/* Zeroed register RTX (reg:QI ZERO_REGNO) */
+extern GTY(()) rtx zero_reg_rtx;
+rtx zero_reg_rtx;
+
+/* RTXs for all general purpose registers as QImode */
+extern GTY(()) rtx all_regs_rtx[32];
+rtx all_regs_rtx[32];
+
+/* SREG, the processor status */
+extern GTY(()) rtx sreg_rtx;
+rtx sreg_rtx;
+
+/* RAMP* special function registers */
+extern GTY(()) rtx rampd_rtx;
+extern GTY(()) rtx rampx_rtx;
+extern GTY(()) rtx rampy_rtx;
+extern GTY(()) rtx rampz_rtx;
+rtx rampd_rtx;
+rtx rampx_rtx;
+rtx rampy_rtx;
+rtx rampz_rtx;
+
+/* RTX containing the strings "" and "e", respectively */
+static GTY(()) rtx xstring_empty;
+static GTY(()) rtx xstring_e;
+
+/* Current architecture. */
+const avr_arch_t *avr_current_arch;
+
+/* Current device. */
+const avr_mcu_t *avr_current_device;
+
+/* Section to put switch tables in. */
+static GTY(()) section *progmem_swtable_section;
+
+/* Unnamed sections associated to __attribute__((progmem)) aka. PROGMEM
+ or to address space __flash* or __memx. Only used as singletons inside
+ avr_asm_select_section, but it must not be local there because of GTY. */
+static GTY(()) section *progmem_section[ADDR_SPACE_COUNT];
+
+/* Condition for insns/expanders from avr-dimode.md. */
+bool avr_have_dimode = true;
+
+/* To track if code will use .bss and/or .data. */
+bool avr_need_clear_bss_p = false;
+bool avr_need_copy_data_p = false;
+
+
+/* Transform UP into lowercase and write the result to LO.
+ You must provide enough space for LO. Return LO. */
+
+static char*
+avr_tolower (char *lo, const char *up)
+{
+ char *lo0 = lo;
+
+ for (; *up; up++, lo++)
+ *lo = TOLOWER (*up);
+
+ *lo = '\0';
+
+ return lo0;
+}
+
+
+/* Count the number of bits set in VAL.  */
+
+static inline int
+avr_popcount (unsigned int val)
+{
+ int pop = 0;
+
+ while (val)
+ {
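+      /* Clear the least significant set bit; one iteration per set bit. */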
+ val &= val-1;
+ pop++;
+ }
+
+ return pop;
+}
+
+
+/* Constraint helper function. XVAL is a CONST_INT or a CONST_DOUBLE.
+ Return true if the least significant N_BYTES bytes of XVAL all have a
+   popcount in POP_MASK, and false otherwise. POP_MASK represents a subset
+ of integers which contains an integer N iff bit N of POP_MASK is set. */
+
+bool
+avr_popcount_each_byte (rtx xval, int n_bytes, int pop_mask)
+{
+ int i;
+
+ enum machine_mode mode = GET_MODE (xval);
+
+ if (VOIDmode == mode)
+ mode = SImode;
+
+ for (i = 0; i < n_bytes; i++)
+ {
+ rtx xval8 = simplify_gen_subreg (QImode, xval, mode, i);
+ unsigned int val8 = UINTVAL (xval8) & GET_MODE_MASK (QImode);
+
+ if (0 == (pop_mask & (1 << avr_popcount (val8))))
+ return false;
+ }
+
+ return true;
+}
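+
+/* Example: avr_popcount_each_byte (xval, 4, (1 << 0) | (1 << 8)) is true
+   iff each of the 4 low bytes of XVAL is 0x00 or 0xff.  */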
+
+
+/* Access some RTX as INT_MODE. If X is a CONST_FIXED we can get
+ the bit representation of X by "casting" it to CONST_INT. */
+
+rtx
+avr_to_int_mode (rtx x)
+{
+ enum machine_mode mode = GET_MODE (x);
+
+ return VOIDmode == mode
+ ? x
+ : simplify_gen_subreg (int_mode_for_mode (mode), x, mode, 0);
+}
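+
+/* Example: a CONST_FIXED in 2-byte HQmode is returned as the same bit
+   pattern in HImode.  */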
+
+
+/* Implement `TARGET_OPTION_OVERRIDE'. */
+
+static void
+avr_option_override (void)
+{
+ flag_delete_null_pointer_checks = 0;
+
+  /* caller-save.c looks for call-clobbered hard registers that are assigned
+     to pseudos that cross calls and tries to save and restore them around
+     calls in order to reduce the number of stack slots needed.
+
+     This might lead to situations where reload is no longer able to cope
+     with the challenge of AVR's very few address registers and fails to
+     perform the requested spills.  */
+
+ if (avr_strict_X)
+ flag_caller_saves = 0;
+
+ /* Unwind tables currently require a frame pointer for correctness,
+ see toplev.c:process_options(). */
+
+ if ((flag_unwind_tables
+ || flag_non_call_exceptions
+ || flag_asynchronous_unwind_tables)
+ && !ACCUMULATE_OUTGOING_ARGS)
+ {
+ flag_omit_frame_pointer = 0;
+ }
+
+ if (flag_pic == 1)
+ warning (OPT_fpic, "-fpic is not supported");
+ if (flag_pic == 2)
+ warning (OPT_fPIC, "-fPIC is not supported");
+ if (flag_pie == 1)
+ warning (OPT_fpie, "-fpie is not supported");
+ if (flag_pie == 2)
+ warning (OPT_fPIE, "-fPIE is not supported");
+
+ avr_current_device = &avr_mcu_types[avr_mcu_index];
+ avr_current_arch = &avr_arch_types[avr_current_device->arch];
+
+ /* RAM addresses of some SFRs common to all devices in respective arch. */
+
+ /* SREG: Status Register containing flags like I (global IRQ) */
+ avr_addr.sreg = 0x3F + avr_current_arch->sfr_offset;
+
+  /* RAMPZ: High part of the address when loading via ELPM */
+ avr_addr.rampz = 0x3B + avr_current_arch->sfr_offset;
+
+ avr_addr.rampy = 0x3A + avr_current_arch->sfr_offset;
+ avr_addr.rampx = 0x39 + avr_current_arch->sfr_offset;
+ avr_addr.rampd = 0x38 + avr_current_arch->sfr_offset;
+ avr_addr.ccp = 0x34 + avr_current_arch->sfr_offset;
+
+ /* SP: Stack Pointer (SP_H:SP_L) */
+ avr_addr.sp_l = 0x3D + avr_current_arch->sfr_offset;
+ avr_addr.sp_h = avr_addr.sp_l + 1;
+
+ init_machine_status = avr_init_machine_status;
+
+ avr_log_set_avr_log();
+}
+
+/* Function to set up the backend function structure. */
+
+static struct machine_function *
+avr_init_machine_status (void)
+{
+ return ggc_alloc_cleared_machine_function ();
+}
+
+
+/* Implement `INIT_EXPANDERS'. */
+/* The function works like a singleton. */
+
+void
+avr_init_expanders (void)
+{
+ int regno;
+
+ for (regno = 0; regno < 32; regno ++)
+ all_regs_rtx[regno] = gen_rtx_REG (QImode, regno);
+
+ lpm_reg_rtx = all_regs_rtx[LPM_REGNO];
+ tmp_reg_rtx = all_regs_rtx[TMP_REGNO];
+ zero_reg_rtx = all_regs_rtx[ZERO_REGNO];
+
+ lpm_addr_reg_rtx = gen_rtx_REG (HImode, REG_Z);
+
+ sreg_rtx = gen_rtx_MEM (QImode, GEN_INT (avr_addr.sreg));
+ rampd_rtx = gen_rtx_MEM (QImode, GEN_INT (avr_addr.rampd));
+ rampx_rtx = gen_rtx_MEM (QImode, GEN_INT (avr_addr.rampx));
+ rampy_rtx = gen_rtx_MEM (QImode, GEN_INT (avr_addr.rampy));
+ rampz_rtx = gen_rtx_MEM (QImode, GEN_INT (avr_addr.rampz));
+
+ xstring_empty = gen_rtx_CONST_STRING (VOIDmode, "");
+ xstring_e = gen_rtx_CONST_STRING (VOIDmode, "e");
+}
+
+
+/* Implement `REGNO_REG_CLASS'. */
+/* Return register class for register R. */
+
+enum reg_class
+avr_regno_reg_class (int r)
+{
+ static const enum reg_class reg_class_tab[] =
+ {
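+    /* r0 */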
+ R0_REG,
+ /* r1 - r15 */
+ NO_LD_REGS, NO_LD_REGS, NO_LD_REGS,
+ NO_LD_REGS, NO_LD_REGS, NO_LD_REGS, NO_LD_REGS,
+ NO_LD_REGS, NO_LD_REGS, NO_LD_REGS, NO_LD_REGS,
+ NO_LD_REGS, NO_LD_REGS, NO_LD_REGS, NO_LD_REGS,
+ /* r16 - r23 */
+ SIMPLE_LD_REGS, SIMPLE_LD_REGS, SIMPLE_LD_REGS, SIMPLE_LD_REGS,
+ SIMPLE_LD_REGS, SIMPLE_LD_REGS, SIMPLE_LD_REGS, SIMPLE_LD_REGS,
+ /* r24, r25 */
+ ADDW_REGS, ADDW_REGS,
+ /* X: r26, 27 */
+ POINTER_X_REGS, POINTER_X_REGS,
+ /* Y: r28, r29 */
+ POINTER_Y_REGS, POINTER_Y_REGS,
+ /* Z: r30, r31 */
+ POINTER_Z_REGS, POINTER_Z_REGS,
+ /* SP: SPL, SPH */
+ STACK_REG, STACK_REG
+ };
+
+ if (r <= 33)
+ return reg_class_tab[r];
+
+ return ALL_REGS;
+}
+
+
+/* Implement `TARGET_SCALAR_MODE_SUPPORTED_P'. */
+
+static bool
+avr_scalar_mode_supported_p (enum machine_mode mode)
+{
+ if (ALL_FIXED_POINT_MODE_P (mode))
+ return true;
+
+ if (PSImode == mode)
+ return true;
+
+ return default_scalar_mode_supported_p (mode);
+}
+
+
+/* Return TRUE if DECL is a VAR_DECL located in flash, and FALSE otherwise. */
+
+static bool
+avr_decl_flash_p (tree decl)
+{
+ if (TREE_CODE (decl) != VAR_DECL
+ || TREE_TYPE (decl) == error_mark_node)
+ {
+ return false;
+ }
+
+ return !ADDR_SPACE_GENERIC_P (TYPE_ADDR_SPACE (TREE_TYPE (decl)));
+}
+
+
+/* Return TRUE if DECL is a VAR_DECL located in the 24-bit flash
+   address space, and FALSE otherwise.  */
+
+static bool
+avr_decl_memx_p (tree decl)
+{
+ if (TREE_CODE (decl) != VAR_DECL
+ || TREE_TYPE (decl) == error_mark_node)
+ {
+ return false;
+ }
+
+ return (ADDR_SPACE_MEMX == TYPE_ADDR_SPACE (TREE_TYPE (decl)));
+}
+
+
+/* Return TRUE if X is a MEM rtx located in flash, and FALSE otherwise. */
+
+bool
+avr_mem_flash_p (rtx x)
+{
+ return (MEM_P (x)
+ && !ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x)));
+}
+
+
+/* Return TRUE if X is a MEM rtx located in the 24-bit flash
+   address space, and FALSE otherwise.  */
+
+bool
+avr_mem_memx_p (rtx x)
+{
+ return (MEM_P (x)
+ && ADDR_SPACE_MEMX == MEM_ADDR_SPACE (x));
+}
+
+
+/* A helper for the function attribute tests below: dig for
+   attribute NAME in a FUNCTION_DECL or FUNCTION_TYPE.  */
+
+static inline int
+avr_lookup_function_attribute1 (const_tree func, const char *name)
+{
+ if (FUNCTION_DECL == TREE_CODE (func))
+ {
+ if (NULL_TREE != lookup_attribute (name, DECL_ATTRIBUTES (func)))
+ {
+ return true;
+ }
+
+ func = TREE_TYPE (func);
+ }
+
+ gcc_assert (TREE_CODE (func) == FUNCTION_TYPE
+ || TREE_CODE (func) == METHOD_TYPE);
+
+ return NULL_TREE != lookup_attribute (name, TYPE_ATTRIBUTES (func));
+}
+
+/* Return nonzero if FUNC is a naked function. */
+
+static int
+avr_naked_function_p (tree func)
+{
+ return avr_lookup_function_attribute1 (func, "naked");
+}
+
+/* Return nonzero if FUNC is an interrupt function as specified
+ by the "interrupt" attribute. */
+
+static int
+avr_interrupt_function_p (tree func)
+{
+ return avr_lookup_function_attribute1 (func, "interrupt");
+}
+
+/* Return nonzero if FUNC is a signal function as specified
+ by the "signal" attribute. */
+
+static int
+avr_signal_function_p (tree func)
+{
+ return avr_lookup_function_attribute1 (func, "signal");
+}
+
+/* Return nonzero if FUNC is an OS_task function. */
+
+static int
+avr_OS_task_function_p (tree func)
+{
+ return avr_lookup_function_attribute1 (func, "OS_task");
+}
+
+/* Return nonzero if FUNC is an OS_main function. */
+
+static int
+avr_OS_main_function_p (tree func)
+{
+ return avr_lookup_function_attribute1 (func, "OS_main");
+}
+
+
+/* Implement `TARGET_SET_CURRENT_FUNCTION'. */
+/* Sanity checking of the above function attributes.  */
+
+static void
+avr_set_current_function (tree decl)
+{
+ location_t loc;
+ const char *isr;
+
+ if (decl == NULL_TREE
+ || current_function_decl == NULL_TREE
+ || current_function_decl == error_mark_node
+ || ! cfun->machine
+ || cfun->machine->attributes_checked_p)
+ return;
+
+ loc = DECL_SOURCE_LOCATION (decl);
+
+ cfun->machine->is_naked = avr_naked_function_p (decl);
+ cfun->machine->is_signal = avr_signal_function_p (decl);
+ cfun->machine->is_interrupt = avr_interrupt_function_p (decl);
+ cfun->machine->is_OS_task = avr_OS_task_function_p (decl);
+ cfun->machine->is_OS_main = avr_OS_main_function_p (decl);
+
+ isr = cfun->machine->is_interrupt ? "interrupt" : "signal";
+
+  /* Too many attributes make no sense as they request conflicting features. */
+
+ if (cfun->machine->is_OS_task + cfun->machine->is_OS_main
+ + (cfun->machine->is_signal || cfun->machine->is_interrupt) > 1)
+ error_at (loc, "function attributes %qs, %qs and %qs are mutually"
+ " exclusive", "OS_task", "OS_main", isr);
+
+ /* 'naked' will hide effects of 'OS_task' and 'OS_main'. */
+
+ if (cfun->machine->is_naked
+ && (cfun->machine->is_OS_task || cfun->machine->is_OS_main))
+ warning_at (loc, OPT_Wattributes, "function attributes %qs and %qs have"
+ " no effect on %qs function", "OS_task", "OS_main", "naked");
+
+ if (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ {
+ tree args = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ tree ret = TREE_TYPE (TREE_TYPE (decl));
+ const char *name;
+
+ name = DECL_ASSEMBLER_NAME_SET_P (decl)
+ ? IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))
+ : IDENTIFIER_POINTER (DECL_NAME (decl));
+
+ /* Skip a leading '*' that might still prefix the assembler name,
+ e.g. in non-LTO runs. */
+
+ name = default_strip_name_encoding (name);
+
+      /* Silently ignore 'signal' if 'interrupt' is present. AVR-LibC started
+         using this when it switched from SIGNAL and INTERRUPT to ISR.  */
+
+ if (cfun->machine->is_interrupt)
+ cfun->machine->is_signal = 0;
+
+ /* Interrupt handlers must be void __vector (void) functions. */
+
+ if (args && TREE_CODE (TREE_VALUE (args)) != VOID_TYPE)
+ error_at (loc, "%qs function cannot have arguments", isr);
+
+ if (TREE_CODE (ret) != VOID_TYPE)
+ error_at (loc, "%qs function cannot return a value", isr);
+
+ /* If the function has the 'signal' or 'interrupt' attribute, ensure
+ that the name of the function is "__vector_NN" so as to catch
+ when the user misspells the vector name. */
+
+ if (!STR_PREFIX_P (name, "__vector"))
+ warning_at (loc, 0, "%qs appears to be a misspelled %s handler",
+ name, isr);
+ }
+
+ /* Don't print the above diagnostics more than once. */
+
+ cfun->machine->attributes_checked_p = 1;
+}
+
+
+/* Implement `ACCUMULATE_OUTGOING_ARGS'. */
+
+int
+avr_accumulate_outgoing_args (void)
+{
+ if (!cfun)
+ return TARGET_ACCUMULATE_OUTGOING_ARGS;
+
+ /* FIXME: For setjmp and in avr_builtin_setjmp_frame_value we don't know
+ what offset is correct. In some cases it is relative to
+ virtual_outgoing_args_rtx and in others it is relative to
+ virtual_stack_vars_rtx. For example code see
+ gcc.c-torture/execute/built-in-setjmp.c
+ gcc.c-torture/execute/builtins/sprintf-chk.c */
+
+ return (TARGET_ACCUMULATE_OUTGOING_ARGS
+ && !(cfun->calls_setjmp
+ || cfun->has_nonlocal_label));
+}
+
+
+/* Report contribution of accumulated outgoing arguments to stack size. */
+
+static inline int
+avr_outgoing_args_size (void)
+{
+ return ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0;
+}
+
+
+/* Implement `STARTING_FRAME_OFFSET'. */
+/* This is the offset from the frame pointer register to the first stack slot
+ that contains a variable living in the frame. */
+
+int
+avr_starting_frame_offset (void)
+{
+ return 1 + avr_outgoing_args_size ();
+}
+
+
+/* Return the number of hard registers to push/pop in the prologue/epilogue
+ of the current function, and optionally store these registers in SET. */
+
+static int
+avr_regs_to_save (HARD_REG_SET *set)
+{
+ int reg, count;
+ int int_or_sig_p = cfun->machine->is_interrupt || cfun->machine->is_signal;
+
+ if (set)
+ CLEAR_HARD_REG_SET (*set);
+ count = 0;
+
+ /* No need to save any registers if the function never returns or
+ has the "OS_task" or "OS_main" attribute. */
+
+ if (TREE_THIS_VOLATILE (current_function_decl)
+ || cfun->machine->is_OS_task
+ || cfun->machine->is_OS_main)
+ return 0;
+
+ for (reg = 0; reg < 32; reg++)
+ {
+ /* Do not push/pop __tmp_reg__, __zero_reg__, as well as
+ any global register variables. */
+
+ if (fixed_regs[reg])
+ continue;
+
+ if ((int_or_sig_p && !crtl->is_leaf && call_used_regs[reg])
+ || (df_regs_ever_live_p (reg)
+ && (int_or_sig_p || !call_used_regs[reg])
+              /* Don't record frame pointer registers here. They are treated
+                 individually in the prologue.  */
+ && !(frame_pointer_needed
+ && (reg == REG_Y || reg == (REG_Y+1)))))
+ {
+ if (set)
+ SET_HARD_REG_BIT (*set, reg);
+ count++;
+ }
+ }
+ return count;
+}
+
+
+/* Implement `TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS' */
+
+static bool
+avr_allocate_stack_slots_for_args (void)
+{
+ return !cfun->machine->is_naked;
+}
+
+
+/* Return true if register FROM can be eliminated via register TO. */
+
+static bool
+avr_can_eliminate (const int from, const int to)
+{
+ return ((frame_pointer_needed && to == FRAME_POINTER_REGNUM)
+ || !frame_pointer_needed);
+}
+
+
+/* Implement `TARGET_WARN_FUNC_RETURN'. */
+
+static bool
+avr_warn_func_return (tree decl)
+{
+ /* Naked functions are implemented entirely in assembly, including the
+ return sequence, so suppress warnings about this. */
+
+ return !avr_naked_function_p (decl);
+}
+
+/* Compute offset between arg_pointer and frame_pointer. */
+
+int
+avr_initial_elimination_offset (int from, int to)
+{
+ if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+ return 0;
+ else
+ {
+ int offset = frame_pointer_needed ? 2 : 0;
+ int avr_pc_size = AVR_HAVE_EIJMP_EICALL ? 3 : 2;
+
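+      /* The offset comprises the saved registers, the frame itself and
+         the outgoing args, the pushed frame pointer (2 bytes, if any),
+         the return address (2 or 3 bytes) and 1 byte of bias.  */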
+ offset += avr_regs_to_save (NULL);
+ return (get_frame_size () + avr_outgoing_args_size()
+ + avr_pc_size + 1 + offset);
+ }
+}
+
+
+/* Helper for the function below. */
+
+static void
+avr_adjust_type_node (tree *node, enum machine_mode mode, int sat_p)
+{
+ *node = make_node (FIXED_POINT_TYPE);
+ TYPE_SATURATING (*node) = sat_p;
+ TYPE_UNSIGNED (*node) = UNSIGNED_FIXED_POINT_MODE_P (mode);
+ TYPE_IBIT (*node) = GET_MODE_IBIT (mode);
+ TYPE_FBIT (*node) = GET_MODE_FBIT (mode);
+ TYPE_PRECISION (*node) = GET_MODE_BITSIZE (mode);
+ TYPE_ALIGN (*node) = 8;
+ SET_TYPE_MODE (*node, mode);
+
+ layout_type (*node);
+}
+
+
+/* Implement `TARGET_BUILD_BUILTIN_VA_LIST'. */
+
+static tree
+avr_build_builtin_va_list (void)
+{
+ /* avr-modes.def adjusts [U]TA to be 64-bit modes with 48 fractional bits.
+ This is more appropriate for the 8-bit machine AVR than 128-bit modes.
+ The ADJUST_IBIT/FBIT are handled in toplev:init_adjust_machine_modes()
+ which is auto-generated by genmodes, but the compiler assigns [U]DAmode
+ to the long long accum modes instead of the desired [U]TAmode.
+
+ Fix this now, right after node setup in tree.c:build_common_tree_nodes().
+ This must run before c-cppbuiltin.c:builtin_define_fixed_point_constants()
+ which built-in defines macros like __ULLACCUM_FBIT__ that are used by
+ libgcc to detect IBIT and FBIT. */
+
+ avr_adjust_type_node (&ta_type_node, TAmode, 0);
+ avr_adjust_type_node (&uta_type_node, UTAmode, 0);
+ avr_adjust_type_node (&sat_ta_type_node, TAmode, 1);
+ avr_adjust_type_node (&sat_uta_type_node, UTAmode, 1);
+
+ unsigned_long_long_accum_type_node = uta_type_node;
+ long_long_accum_type_node = ta_type_node;
+ sat_unsigned_long_long_accum_type_node = sat_uta_type_node;
+ sat_long_long_accum_type_node = sat_ta_type_node;
+
+ /* Dispatch to the default handler. */
+
+ return std_build_builtin_va_list ();
+}
+
+
+/* Implement `TARGET_BUILTIN_SETJMP_FRAME_VALUE'. */
+/* Actual start of frame is virtual_stack_vars_rtx, which is offset from
+   the frame pointer by +STARTING_FRAME_OFFSET.
+   Using saved frame = virtual_stack_vars_rtx - STARTING_FRAME_OFFSET
+   avoids creating an add/sub of the offset in nonlocal goto and setjmp.  */
+
+static rtx
+avr_builtin_setjmp_frame_value (void)
+{
+ rtx xval = gen_reg_rtx (Pmode);
+ emit_insn (gen_subhi3 (xval, virtual_stack_vars_rtx,
+ gen_int_mode (STARTING_FRAME_OFFSET, Pmode)));
+ return xval;
+}
+
+
+/* Return contents of MEM at frame pointer + stack size + 1 (+2 if 3-byte PC).
+   This is the return address of the function.  */
+
+rtx
+avr_return_addr_rtx (int count, rtx tem)
+{
+ rtx r;
+
+ /* Can only return this function's return address. Others not supported. */
+ if (count)
+ return NULL;
+
+ if (AVR_3_BYTE_PC)
+ {
+ r = gen_rtx_SYMBOL_REF (Pmode, ".L__stack_usage+2");
+ warning (0, "%<builtin_return_address%> contains only 2 bytes"
+ " of address");
+ }
+ else
+ r = gen_rtx_SYMBOL_REF (Pmode, ".L__stack_usage+1");
+
+ r = gen_rtx_PLUS (Pmode, tem, r);
+ r = gen_frame_mem (Pmode, memory_address (Pmode, r));
+ r = gen_rtx_ROTATE (HImode, r, GEN_INT (8));
+ return r;
+}
+
+/* Return 1 if the function epilogue is just a single "ret". */
+
+int
+avr_simple_epilogue (void)
+{
+ return (! frame_pointer_needed
+ && get_frame_size () == 0
+ && avr_outgoing_args_size() == 0
+ && avr_regs_to_save (NULL) == 0
+ && ! cfun->machine->is_interrupt
+ && ! cfun->machine->is_signal
+ && ! cfun->machine->is_naked
+ && ! TREE_THIS_VOLATILE (current_function_decl));
+}
+
+/* Check whether the live call-saved registers form one contiguous
+   sequence as required by __prologue_saves__/__epilogue_restores__.
+   Return the length of that sequence, else 0.  */
+
+static int
+sequent_regs_live (void)
+{
+ int reg;
+ int live_seq = 0;
+ int cur_seq = 0;
+
+ for (reg = 0; reg < 18; ++reg)
+ {
+ if (fixed_regs[reg])
+ {
+ /* Don't recognize sequences that contain global register
+ variables. */
+
+ if (live_seq != 0)
+ return 0;
+ else
+ continue;
+ }
+
+ if (!call_used_regs[reg])
+ {
+ if (df_regs_ever_live_p (reg))
+ {
+ ++live_seq;
+ ++cur_seq;
+ }
+ else
+ cur_seq = 0;
+ }
+ }
+
+ if (!frame_pointer_needed)
+ {
+ if (df_regs_ever_live_p (REG_Y))
+ {
+ ++live_seq;
+ ++cur_seq;
+ }
+ else
+ cur_seq = 0;
+
+ if (df_regs_ever_live_p (REG_Y+1))
+ {
+ ++live_seq;
+ ++cur_seq;
+ }
+ else
+ cur_seq = 0;
+ }
+ else
+ {
+ cur_seq += 2;
+ live_seq += 2;
+ }
+ return (cur_seq == live_seq) ? live_seq : 0;
+}
+
+/* Return the accumulated length of the insns in the sequence INSNS.  */
+
+int
+get_sequence_length (rtx insns)
+{
+ rtx insn;
+ int length;
+
+ for (insn = insns, length = 0; insn; insn = NEXT_INSN (insn))
+ length += get_attr_length (insn);
+
+ return length;
+}
+
+
+/* Implement `INCOMING_RETURN_ADDR_RTX'. */
+
+rtx
+avr_incoming_return_addr_rtx (void)
+{
+ /* The return address is at the top of the stack. Note that the push
+ was via post-decrement, which means the actual address is off by one. */
+ return gen_frame_mem (HImode, plus_constant (Pmode, stack_pointer_rtx, 1));
+}
+
+/* Helper for expand_prologue. Emit a push of a byte register. */
+
+static void
+emit_push_byte (unsigned regno, bool frame_related_p)
+{
+ rtx mem, reg, insn;
+
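+  /* AVR PUSH stores to the address in SP and then decrements SP, which
+     matches a post-decrement store through the stack pointer.  */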
+ mem = gen_rtx_POST_DEC (HImode, stack_pointer_rtx);
+ mem = gen_frame_mem (QImode, mem);
+ reg = gen_rtx_REG (QImode, regno);
+
+ insn = emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
+ if (frame_related_p)
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ cfun->machine->stack_usage++;
+}
+
+
+/* Helper for expand_prologue. Emit a push of an SFR via tmp_reg.
+ SFR is a MEM representing the memory location of the SFR.
+ If CLR_P then clear the SFR after the push using zero_reg. */
+
+static void
+emit_push_sfr (rtx sfr, bool frame_related_p, bool clr_p)
+{
+ rtx insn;
+
+ gcc_assert (MEM_P (sfr));
+
+ /* IN __tmp_reg__, IO(SFR) */
+ insn = emit_move_insn (tmp_reg_rtx, sfr);
+ if (frame_related_p)
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* PUSH __tmp_reg__ */
+ emit_push_byte (TMP_REGNO, frame_related_p);
+
+ if (clr_p)
+ {
+ /* OUT IO(SFR), __zero_reg__ */
+ insn = emit_move_insn (sfr, const0_rtx);
+ if (frame_related_p)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+}
+
+static void
+avr_prologue_setup_frame (HOST_WIDE_INT size, HARD_REG_SET set)
+{
+ rtx insn;
+ bool isr_p = cfun->machine->is_interrupt || cfun->machine->is_signal;
+ int live_seq = sequent_regs_live ();
+
+ HOST_WIDE_INT size_max
+ = (HOST_WIDE_INT) GET_MODE_MASK (AVR_HAVE_8BIT_SP ? QImode : Pmode);
+
+ bool minimize = (TARGET_CALL_PROLOGUES
+ && size < size_max
+ && live_seq
+ && !isr_p
+ && !cfun->machine->is_OS_task
+ && !cfun->machine->is_OS_main);
+
+ if (minimize
+ && (frame_pointer_needed
+ || avr_outgoing_args_size() > 8
+ || (AVR_2_BYTE_PC && live_seq > 6)
+ || live_seq > 7))
+ {
+ rtx pattern;
+ int first_reg, reg, offset;
+
+ emit_move_insn (gen_rtx_REG (HImode, REG_X),
+ gen_int_mode (size, HImode));
+
+ pattern = gen_call_prologue_saves (gen_int_mode (live_seq, HImode),
+ gen_int_mode (live_seq+size, HImode));
+ insn = emit_insn (pattern);
+ RTX_FRAME_RELATED_P (insn) = 1;
+
+ /* Describe the effect of the unspec_volatile call to prologue_saves.
+ Note that this formulation assumes that add_reg_note pushes the
+ notes to the front. Thus we build them in the reverse order of
+ how we want dwarf2out to process them. */
+
+      /* The function always sets frame_pointer_rtx; whether that setting
+         remains permanent in the function is indicated by
+         frame_pointer_needed.  */
+
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, (frame_pointer_needed
+ ? frame_pointer_rtx
+ : stack_pointer_rtx),
+ plus_constant (Pmode, stack_pointer_rtx,
+ -(size + live_seq))));
+
+ /* Note that live_seq always contains r28+r29, but the other
+ registers to be saved are all below 18. */
+
+ first_reg = 18 - (live_seq - 2);
+
+ for (reg = 29, offset = -live_seq + 1;
+ reg >= first_reg;
+ reg = (reg == 28 ? 17 : reg - 1), ++offset)
+ {
+ rtx m, r;
+
+ m = gen_rtx_MEM (QImode, plus_constant (Pmode, stack_pointer_rtx,
+ offset));
+ r = gen_rtx_REG (QImode, reg);
+ add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (VOIDmode, m, r));
+ }
+
+ cfun->machine->stack_usage += size + live_seq;
+ }
+ else /* !minimize */
+ {
+ int reg;
+
+ for (reg = 0; reg < 32; ++reg)
+ if (TEST_HARD_REG_BIT (set, reg))
+ emit_push_byte (reg, true);
+
+ if (frame_pointer_needed
+ && (!(cfun->machine->is_OS_task || cfun->machine->is_OS_main)))
+ {
+ /* Push frame pointer. Always be consistent about the
+ ordering of pushes -- epilogue_restores expects the
+ register pair to be pushed low byte first. */
+
+ emit_push_byte (REG_Y, true);
+ emit_push_byte (REG_Y + 1, true);
+ }
+
+ if (frame_pointer_needed
+ && size == 0)
+ {
+ insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ if (size != 0)
+ {
+ /* Creating a frame can be done by direct manipulation of the
+ stack or via the frame pointer. These two methods are:
+ fp = sp
+ fp -= size
+ sp = fp
+ or
+ sp -= size
+ fp = sp (*)
+             the optimum method depends on function type, stack and
+             frame size. To avoid complex logic, both methods are
+             tested and the shortest is selected.
+
+             There is also the case where SIZE != 0 and no frame pointer is
+             needed; this can occur if ACCUMULATE_OUTGOING_ARGS is on.
+             In that case, insn (*) is not needed.
+             We use the X register as scratch. This is safe because X
+             is call-clobbered.
+ In an interrupt routine, the case of SIZE != 0 together with
+ !frame_pointer_needed can only occur if the function is not a
+ leaf function and thus X has already been saved. */
+
+ int irq_state = -1;
+ HOST_WIDE_INT size_cfa = size, neg_size;
+ rtx fp_plus_insns, fp, my_fp;
+
+ gcc_assert (frame_pointer_needed
+ || !isr_p
+ || !crtl->is_leaf);
+
+ fp = my_fp = (frame_pointer_needed
+ ? frame_pointer_rtx
+ : gen_rtx_REG (Pmode, REG_X));
+
+ if (AVR_HAVE_8BIT_SP)
+ {
+ /* The high byte (r29) does not change:
+ Prefer SUBI (1 cycle) over SBIW (2 cycles, same size). */
+
+ my_fp = all_regs_rtx[FRAME_POINTER_REGNUM];
+ }
+
+ /* Cut down size and avoid size = 0 so that we don't run
+ into ICE like PR52488 in the remainder. */
+
+ if (size > size_max)
+ {
+              /* Don't error, so that insane code from newlib still compiles
+                 and building newlib is not broken. Now that PR51345 is
+                 implemented, there are multilib variants with -msp8.
+
+                 If the user wants sanity checks, -Wstack-usage= or
+                 similar options can be used.
+
+ For CFA we emit the original, non-saturated size so that
+ the generic machinery is aware of the real stack usage and
+ will print the above diagnostic as expected. */
+
+ size = size_max;
+ }
+
+ size = trunc_int_for_mode (size, GET_MODE (my_fp));
+ neg_size = trunc_int_for_mode (-size, GET_MODE (my_fp));
+
+ /************ Method 1: Adjust frame pointer ************/
+
+ start_sequence ();
+
+ /* Normally, the dwarf2out frame-related-expr interpreter does
+ not expect to have the CFA change once the frame pointer is
+ set up. Thus, we avoid marking the move insn below and
+ instead indicate that the entire operation is complete after
+ the frame pointer subtraction is done. */
+
+ insn = emit_move_insn (fp, stack_pointer_rtx);
+ if (frame_pointer_needed)
+ {
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, fp, stack_pointer_rtx));
+ }
+
+ insn = emit_move_insn (my_fp, plus_constant (GET_MODE (my_fp),
+ my_fp, neg_size));
+
+ if (frame_pointer_needed)
+ {
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, fp,
+ plus_constant (Pmode, fp,
+ -size_cfa)));
+ }
+
+ /* Copy to stack pointer. Note that since we've already
+ changed the CFA to the frame pointer this operation
+ need not be annotated if frame pointer is needed.
+ Always move through unspec, see PR50063.
+ For meaning of irq_state see movhi_sp_r insn. */
+
+ if (cfun->machine->is_interrupt)
+ irq_state = 1;
+
+ if (TARGET_NO_INTERRUPTS
+ || cfun->machine->is_signal
+ || cfun->machine->is_OS_main)
+ irq_state = 0;
+
+ if (AVR_HAVE_8BIT_SP)
+ irq_state = 2;
+
+ insn = emit_insn (gen_movhi_sp_r (stack_pointer_rtx,
+ fp, GEN_INT (irq_state)));
+ if (!frame_pointer_needed)
+ {
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ -size_cfa)));
+ }
+
+ fp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /************ Method 2: Adjust Stack pointer ************/
+
+ /* Stack adjustment by means of RCALL . and/or PUSH __TMP_REG__
+ can only handle specific offsets. */
+
+ if (avr_sp_immediate_operand (gen_int_mode (-size, HImode), HImode))
+ {
+ rtx sp_plus_insns;
+
+ start_sequence ();
+
+ insn = emit_move_insn (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx,
+ -size));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ add_reg_note (insn, REG_CFA_ADJUST_CFA,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ plus_constant (Pmode,
+ stack_pointer_rtx,
+ -size_cfa)));
+ if (frame_pointer_needed)
+ {
+ insn = emit_move_insn (fp, stack_pointer_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ sp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /************ Use shortest method ************/
+
+ emit_insn (get_sequence_length (sp_plus_insns)
+ < get_sequence_length (fp_plus_insns)
+ ? sp_plus_insns
+ : fp_plus_insns);
+ }
+ else
+ {
+ emit_insn (fp_plus_insns);
+ }
+
+ cfun->machine->stack_usage += size_cfa;
+ } /* !minimize && size != 0 */
+ } /* !minimize */
+}
+
+
+/* Output function prologue. */
+
+void
+avr_expand_prologue (void)
+{
+ HARD_REG_SET set;
+ HOST_WIDE_INT size;
+
+ size = get_frame_size() + avr_outgoing_args_size();
+
+ cfun->machine->stack_usage = 0;
+
+ /* Prologue: naked. */
+ if (cfun->machine->is_naked)
+ {
+ return;
+ }
+
+ avr_regs_to_save (&set);
+
+ if (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ {
+ /* Enable interrupts. */
+ if (cfun->machine->is_interrupt)
+ emit_insn (gen_enable_interrupt ());
+
+ /* Push zero reg. */
+ emit_push_byte (ZERO_REGNO, true);
+
+ /* Push tmp reg. */
+ emit_push_byte (TMP_REGNO, true);
+
+ /* Push SREG. */
+ /* ??? There's no dwarf2 column reserved for SREG. */
+ emit_push_sfr (sreg_rtx, false, false /* clr */);
+
+ /* Clear zero reg. */
+ emit_move_insn (zero_reg_rtx, const0_rtx);
+
+ /* Prevent any attempt to delete the setting of ZERO_REG! */
+ emit_use (zero_reg_rtx);
+
+ /* Push and clear RAMPD/X/Y/Z if present and low-part register is used.
+ ??? There are no dwarf2 columns reserved for RAMPD/X/Y/Z. */
+
+ if (AVR_HAVE_RAMPD)
+ emit_push_sfr (rampd_rtx, false /* frame-related */, true /* clr */);
+
+ if (AVR_HAVE_RAMPX
+ && TEST_HARD_REG_BIT (set, REG_X)
+ && TEST_HARD_REG_BIT (set, REG_X + 1))
+ {
+ emit_push_sfr (rampx_rtx, false /* frame-related */, true /* clr */);
+ }
+
+ if (AVR_HAVE_RAMPY
+ && (frame_pointer_needed
+ || (TEST_HARD_REG_BIT (set, REG_Y)
+ && TEST_HARD_REG_BIT (set, REG_Y + 1))))
+ {
+ emit_push_sfr (rampy_rtx, false /* frame-related */, true /* clr */);
+ }
+
+ if (AVR_HAVE_RAMPZ
+ && TEST_HARD_REG_BIT (set, REG_Z)
+ && TEST_HARD_REG_BIT (set, REG_Z + 1))
+ {
+ emit_push_sfr (rampz_rtx, false /* frame-related */, AVR_HAVE_RAMPD);
+ }
+ } /* is_interrupt is_signal */
+
+ avr_prologue_setup_frame (size, set);
+
+ if (flag_stack_usage_info)
+ current_function_static_stack_size = cfun->machine->stack_usage;
+}
+
+
+/* Implement `TARGET_ASM_FUNCTION_END_PROLOGUE'. */
+/* Output summary at end of function prologue. */
+
+static void
+avr_asm_function_end_prologue (FILE *file)
+{
+ if (cfun->machine->is_naked)
+ {
+ fputs ("/* prologue: naked */\n", file);
+ }
+ else
+ {
+ if (cfun->machine->is_interrupt)
+ {
+ fputs ("/* prologue: Interrupt */\n", file);
+ }
+ else if (cfun->machine->is_signal)
+ {
+ fputs ("/* prologue: Signal */\n", file);
+ }
+ else
+ fputs ("/* prologue: function */\n", file);
+ }
+
+ if (ACCUMULATE_OUTGOING_ARGS)
+ fprintf (file, "/* outgoing args size = %d */\n",
+ avr_outgoing_args_size());
+
+ fprintf (file, "/* frame size = " HOST_WIDE_INT_PRINT_DEC " */\n",
+ get_frame_size());
+ fprintf (file, "/* stack size = %d */\n",
+ cfun->machine->stack_usage);
+  /* Emit the per-function symbol .L__stack_usage here so that all functions
+     have it. avr_return_addr_rtx uses .L__stack_usage + 1 (+2 for a
+     3-byte PC) to locate the return address.  */
+ fprintf (file, ".L__stack_usage = %d\n", cfun->machine->stack_usage);
+}
+
+
+/* Implement `EPILOGUE_USES'. */
+
+int
+avr_epilogue_uses (int regno ATTRIBUTE_UNUSED)
+{
+ if (reload_completed
+ && cfun->machine
+ && (cfun->machine->is_interrupt || cfun->machine->is_signal))
+ return 1;
+ return 0;
+}
+
+/* Helper for avr_expand_epilogue. Emit a pop of a byte register. */
+
+static void
+emit_pop_byte (unsigned regno)
+{
+ rtx mem, reg;
+
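+  /* AVR POP first increments SP and then loads from it, hence the
+     pre-increment address.  */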
+ mem = gen_rtx_PRE_INC (HImode, stack_pointer_rtx);
+ mem = gen_frame_mem (QImode, mem);
+ reg = gen_rtx_REG (QImode, regno);
+
+ emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
+}
+
+/* Output RTL epilogue. */
+
+void
+avr_expand_epilogue (bool sibcall_p)
+{
+ int reg;
+ int live_seq;
+ HARD_REG_SET set;
+ int minimize;
+ HOST_WIDE_INT size;
+ bool isr_p = cfun->machine->is_interrupt || cfun->machine->is_signal;
+
+ size = get_frame_size() + avr_outgoing_args_size();
+
+ /* epilogue: naked */
+ if (cfun->machine->is_naked)
+ {
+ gcc_assert (!sibcall_p);
+
+ emit_jump_insn (gen_return ());
+ return;
+ }
+
+ avr_regs_to_save (&set);
+ live_seq = sequent_regs_live ();
+
+ minimize = (TARGET_CALL_PROLOGUES
+ && live_seq
+ && !isr_p
+ && !cfun->machine->is_OS_task
+ && !cfun->machine->is_OS_main);
+
+ if (minimize
+ && (live_seq > 4
+ || frame_pointer_needed
+ || size))
+ {
+ /* Get rid of frame. */
+
+ if (!frame_pointer_needed)
+ {
+ emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
+ }
+
+ if (size)
+ {
+ emit_move_insn (frame_pointer_rtx,
+ plus_constant (Pmode, frame_pointer_rtx, size));
+ }
+
+ emit_insn (gen_epilogue_restores (gen_int_mode (live_seq, HImode)));
+ return;
+ }
+
+ if (size)
+ {
+ /* Try two methods to adjust stack and select shortest. */
+
+ int irq_state = -1;
+ rtx fp, my_fp;
+ rtx fp_plus_insns;
+ HOST_WIDE_INT size_max;
+
+ gcc_assert (frame_pointer_needed
+ || !isr_p
+ || !crtl->is_leaf);
+
+ fp = my_fp = (frame_pointer_needed
+ ? frame_pointer_rtx
+ : gen_rtx_REG (Pmode, REG_X));
+
+ if (AVR_HAVE_8BIT_SP)
+ {
+ /* The high byte (r29) does not change:
+ Prefer SUBI (1 cycle) over SBIW (2 cycles). */
+
+ my_fp = all_regs_rtx[FRAME_POINTER_REGNUM];
+ }
+
+ /* For rationale see comment in prologue generation. */
+
+ size_max = (HOST_WIDE_INT) GET_MODE_MASK (GET_MODE (my_fp));
+ if (size > size_max)
+ size = size_max;
+ size = trunc_int_for_mode (size, GET_MODE (my_fp));
+
+ /********** Method 1: Adjust fp register **********/
+
+ start_sequence ();
+
+ if (!frame_pointer_needed)
+ emit_move_insn (fp, stack_pointer_rtx);
+
+ emit_move_insn (my_fp, plus_constant (GET_MODE (my_fp), my_fp, size));
+
+ /* Copy to stack pointer. */
+
+ if (TARGET_NO_INTERRUPTS)
+ irq_state = 0;
+
+ if (AVR_HAVE_8BIT_SP)
+ irq_state = 2;
+
+ emit_insn (gen_movhi_sp_r (stack_pointer_rtx, fp,
+ GEN_INT (irq_state)));
+
+ fp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /********** Method 2: Adjust Stack pointer **********/
+
+ if (avr_sp_immediate_operand (gen_int_mode (size, HImode), HImode))
+ {
+ rtx sp_plus_insns;
+
+ start_sequence ();
+
+ emit_move_insn (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, size));
+
+ sp_plus_insns = get_insns ();
+ end_sequence ();
+
+ /************ Use shortest method ************/
+
+ emit_insn (get_sequence_length (sp_plus_insns)
+ < get_sequence_length (fp_plus_insns)
+ ? sp_plus_insns
+ : fp_plus_insns);
+ }
+ else
+ emit_insn (fp_plus_insns);
+ } /* size != 0 */
+
+ if (frame_pointer_needed
+ && !(cfun->machine->is_OS_task || cfun->machine->is_OS_main))
+ {
+      /* Restore the previous frame pointer. See avr_expand_prologue for
+         the rationale for not using pophi.  */
+
+ emit_pop_byte (REG_Y + 1);
+ emit_pop_byte (REG_Y);
+ }
+
+ /* Restore used registers. */
+
+ for (reg = 31; reg >= 0; --reg)
+ if (TEST_HARD_REG_BIT (set, reg))
+ emit_pop_byte (reg);
+
+ if (isr_p)
+ {
+      /* Restore RAMPZ/Y/X/D using tmp_reg as scratch.
+         The conditions to restore them must be the same as in the
+         prologue.  */
+
+ if (AVR_HAVE_RAMPZ
+ && TEST_HARD_REG_BIT (set, REG_Z)
+ && TEST_HARD_REG_BIT (set, REG_Z + 1))
+ {
+ emit_pop_byte (TMP_REGNO);
+ emit_move_insn (rampz_rtx, tmp_reg_rtx);
+ }
+
+ if (AVR_HAVE_RAMPY
+ && (frame_pointer_needed
+ || (TEST_HARD_REG_BIT (set, REG_Y)
+ && TEST_HARD_REG_BIT (set, REG_Y + 1))))
+ {
+ emit_pop_byte (TMP_REGNO);
+ emit_move_insn (rampy_rtx, tmp_reg_rtx);
+ }
+
+ if (AVR_HAVE_RAMPX
+ && TEST_HARD_REG_BIT (set, REG_X)
+ && TEST_HARD_REG_BIT (set, REG_X + 1))
+ {
+ emit_pop_byte (TMP_REGNO);
+ emit_move_insn (rampx_rtx, tmp_reg_rtx);
+ }
+
+ if (AVR_HAVE_RAMPD)
+ {
+ emit_pop_byte (TMP_REGNO);
+ emit_move_insn (rampd_rtx, tmp_reg_rtx);
+ }
+
+ /* Restore SREG using tmp_reg as scratch. */
+
+ emit_pop_byte (TMP_REGNO);
+ emit_move_insn (sreg_rtx, tmp_reg_rtx);
+
+ /* Restore tmp REG. */
+ emit_pop_byte (TMP_REGNO);
+
+ /* Restore zero REG. */
+ emit_pop_byte (ZERO_REGNO);
+ }
+
+ if (!sibcall_p)
+ emit_jump_insn (gen_return ());
+}
+
+
+/* Implement `TARGET_ASM_FUNCTION_BEGIN_EPILOGUE'. */
+
+static void
+avr_asm_function_begin_epilogue (FILE *file)
+{
+ fprintf (file, "/* epilogue start */\n");
+}
+
+
+/* Implement `TARGET_CANNOT_MODIFY_JUMPS_P'. */
+
+static bool
+avr_cannot_modify_jumps_p (void)
+{
+  /* Naked functions must not have any instructions after
+     their epilogue; see PR42240.  */
+
+ if (reload_completed
+ && cfun->machine
+ && cfun->machine->is_naked)
+ {
+ return true;
+ }
+
+ return false;
+}
+
+
+/* Implement `TARGET_MODE_DEPENDENT_ADDRESS_P'. */
+
+static bool
+avr_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED, addr_space_t as)
+{
+ /* FIXME: Non-generic addresses are not mode-dependent in themselves.
+ This hook just serves to hack around PR rtl-optimization/52543 by
+ claiming that non-generic addresses were mode-dependent so that
+ lower-subreg.c will skip these addresses. lower-subreg.c sets up fake
+ RTXes to probe SET and MEM costs and assumes that MEM is always in the
+ generic address space which is not true. */
+
+ return !ADDR_SPACE_GENERIC_P (as);
+}
+
+
+/* Helper function for `avr_legitimate_address_p'. */
+
+static inline bool
+avr_reg_ok_for_addr_p (rtx reg, addr_space_t as,
+ RTX_CODE outer_code, bool strict)
+{
+ return (REG_P (reg)
+ && (avr_regno_mode_code_ok_for_base_p (REGNO (reg), QImode,
+ as, outer_code, UNKNOWN)
+ || (!strict
+ && REGNO (reg) >= FIRST_PSEUDO_REGISTER)));
+}
+
+
+/* Return nonzero if X (an RTX) is a legitimate memory address on the target
+ machine for a memory operand of mode MODE. */
+
+static bool
+avr_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+ bool ok = CONSTANT_ADDRESS_P (x);
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ ok = avr_reg_ok_for_addr_p (x, ADDR_SPACE_GENERIC,
+ MEM, strict);
+
+ if (strict
+ && GET_MODE_SIZE (mode) > 4
+ && REG_X == REGNO (x))
+ {
+ ok = false;
+ }
+ break;
+
+ case POST_INC:
+ case PRE_DEC:
+ ok = avr_reg_ok_for_addr_p (XEXP (x, 0), ADDR_SPACE_GENERIC,
+ GET_CODE (x), strict);
+ break;
+
+ case PLUS:
+ {
+ rtx reg = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+
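+        /* Reg+displacement addressing is only available as LDD/STD through
+           Y or Z with displacements 0..63; MAX_LD_OFFSET accounts for the
+           access size so that the last byte still fits.  */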
+ if (REG_P (reg)
+ && CONST_INT_P (op1)
+ && INTVAL (op1) >= 0)
+ {
+ bool fit = IN_RANGE (INTVAL (op1), 0, MAX_LD_OFFSET (mode));
+
+ if (fit)
+ {
+ ok = (! strict
+ || avr_reg_ok_for_addr_p (reg, ADDR_SPACE_GENERIC,
+ PLUS, strict));
+
+ if (reg == frame_pointer_rtx
+ || reg == arg_pointer_rtx)
+ {
+ ok = true;
+ }
+ }
+ else if (frame_pointer_needed
+ && reg == frame_pointer_rtx)
+ {
+ ok = true;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (avr_log.legitimate_address_p)
+ {
+ avr_edump ("\n%?: ret=%d, mode=%m strict=%d "
+ "reload_completed=%d reload_in_progress=%d %s:",
+ ok, mode, strict, reload_completed, reload_in_progress,
+ reg_renumber ? "(reg_renumber)" : "");
+
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
+ && IN_RANGE (INTVAL (XEXP (x, 1)), 0, MAX_LD_OFFSET (mode))
+ && reg_renumber)
+ {
+ avr_edump ("(r%d ---> r%d)", REGNO (XEXP (x, 0)),
+ true_regnum (XEXP (x, 0)));
+ }
+
+ avr_edump ("\n%r\n", x);
+ }
+
+ return ok;
+}
+
+
+/* Former implementation of TARGET_LEGITIMIZE_ADDRESS,
+ now only a helper for avr_addr_space_legitimize_address. */
+/* Attempts to replace X with a valid
+ memory address for an operand of mode MODE */
+
+static rtx
+avr_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
+{
+ bool big_offset_p = false;
+
+ x = oldx;
+
+ if (GET_CODE (oldx) == PLUS
+ && REG_P (XEXP (oldx, 0)))
+ {
+ if (REG_P (XEXP (oldx, 1)))
+ x = force_reg (GET_MODE (oldx), oldx);
+ else if (CONST_INT_P (XEXP (oldx, 1)))
+ {
+ int offs = INTVAL (XEXP (oldx, 1));
+ if (frame_pointer_rtx != XEXP (oldx, 0)
+ && offs > MAX_LD_OFFSET (mode))
+ {
+ big_offset_p = true;
+ x = force_reg (GET_MODE (oldx), oldx);
+ }
+ }
+ }
+
+ if (avr_log.legitimize_address)
+ {
+ avr_edump ("\n%?: mode=%m\n %r\n", mode, oldx);
+
+ if (x != oldx)
+ avr_edump (" %s --> %r\n", big_offset_p ? "(big offset)" : "", x);
+ }
+
+ return x;
+}
+
+
+/* Implement `LEGITIMIZE_RELOAD_ADDRESS'. */
+/* This allows register R26/27 to be used where it is no worse than the
+   normal base pointers R28/29 or R30/31, for example when the base offset
+   is greater than 63 bytes or for R++ or --R addressing.  */
+
+rtx
+avr_legitimize_reload_address (rtx *px, enum machine_mode mode,
+ int opnum, int type, int addr_type,
+ int ind_levels ATTRIBUTE_UNUSED,
+ rtx (*mk_memloc)(rtx,int))
+{
+ rtx x = *px;
+
+ if (avr_log.legitimize_reload_address)
+ avr_edump ("\n%?:%m %r\n", mode, x);
+
+  if (GET_CODE (x) == POST_INC
+      || GET_CODE (x) == PRE_DEC)
+ {
+ push_reload (XEXP (x, 0), XEXP (x, 0), &XEXP (x, 0), &XEXP (x, 0),
+ POINTER_REGS, GET_MODE (x), GET_MODE (x), 0, 0,
+ opnum, RELOAD_OTHER);
+
+ if (avr_log.legitimize_reload_address)
+ avr_edump (" RCLASS.1 = %R\n IN = %r\n OUT = %r\n",
+ POINTER_REGS, XEXP (x, 0), XEXP (x, 0));
+
+ return x;
+ }
+
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && 0 == reg_equiv_constant (REGNO (XEXP (x, 0)))
+ && CONST_INT_P (XEXP (x, 1))
+ && INTVAL (XEXP (x, 1)) >= 1)
+ {
+ bool fit = INTVAL (XEXP (x, 1)) <= MAX_LD_OFFSET (mode);
+
+ if (fit)
+ {
+ if (reg_equiv_address (REGNO (XEXP (x, 0))) != 0)
+ {
+ int regno = REGNO (XEXP (x, 0));
+ rtx mem = mk_memloc (x, regno);
+
+ push_reload (XEXP (mem, 0), NULL_RTX, &XEXP (mem, 0), NULL,
+ POINTER_REGS, Pmode, VOIDmode, 0, 0,
+ 1, (enum reload_type) addr_type);
+
+ if (avr_log.legitimize_reload_address)
+ avr_edump (" RCLASS.2 = %R\n IN = %r\n OUT = %r\n",
+ POINTER_REGS, XEXP (mem, 0), NULL_RTX);
+
+ push_reload (mem, NULL_RTX, &XEXP (x, 0), NULL,
+ BASE_POINTER_REGS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+
+ if (avr_log.legitimize_reload_address)
+ avr_edump (" RCLASS.2 = %R\n IN = %r\n OUT = %r\n",
+ BASE_POINTER_REGS, mem, NULL_RTX);
+
+ return x;
+ }
+ }
+ else if (! (frame_pointer_needed
+ && XEXP (x, 0) == frame_pointer_rtx))
+ {
+ push_reload (x, NULL_RTX, px, NULL,
+ POINTER_REGS, GET_MODE (x), VOIDmode, 0, 0,
+ opnum, (enum reload_type) type);
+
+ if (avr_log.legitimize_reload_address)
+ avr_edump (" RCLASS.3 = %R\n IN = %r\n OUT = %r\n",
+ POINTER_REGS, x, NULL_RTX);
+
+ return x;
+ }
+ }
+
+ return NULL_RTX;
+}
+
+
+/* Implement `TARGET_SECONDARY_RELOAD' */
+
+static reg_class_t
+avr_secondary_reload (bool in_p, rtx x,
+ reg_class_t reload_class ATTRIBUTE_UNUSED,
+ enum machine_mode mode, secondary_reload_info *sri)
+{
+ if (in_p
+ && MEM_P (x)
+ && !ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x))
+ && ADDR_SPACE_MEMX != MEM_ADDR_SPACE (x))
+ {
+ /* For the non-generic 16-bit spaces we need a d-class scratch. */
+
+ switch (mode)
+ {
+ default:
+ gcc_unreachable();
+
+ case QImode: sri->icode = CODE_FOR_reload_inqi; break;
+ case QQmode: sri->icode = CODE_FOR_reload_inqq; break;
+ case UQQmode: sri->icode = CODE_FOR_reload_inuqq; break;
+
+ case HImode: sri->icode = CODE_FOR_reload_inhi; break;
+ case HQmode: sri->icode = CODE_FOR_reload_inhq; break;
+ case HAmode: sri->icode = CODE_FOR_reload_inha; break;
+ case UHQmode: sri->icode = CODE_FOR_reload_inuhq; break;
+ case UHAmode: sri->icode = CODE_FOR_reload_inuha; break;
+
+ case PSImode: sri->icode = CODE_FOR_reload_inpsi; break;
+
+ case SImode: sri->icode = CODE_FOR_reload_insi; break;
+ case SFmode: sri->icode = CODE_FOR_reload_insf; break;
+ case SQmode: sri->icode = CODE_FOR_reload_insq; break;
+ case SAmode: sri->icode = CODE_FOR_reload_insa; break;
+ case USQmode: sri->icode = CODE_FOR_reload_inusq; break;
+ case USAmode: sri->icode = CODE_FOR_reload_inusa; break;
+ }
+ }
+
+ return NO_REGS;
+}
+
+
+/* Helper function to print assembler code or to track instruction
+   sequence lengths. Always return "".
+
+ If PLEN == NULL:
+ Output assembler code from template TPL with operands supplied
+ by OPERANDS. This is just forwarding to output_asm_insn.
+
+ If PLEN != NULL:
+ If N_WORDS >= 0 Add N_WORDS to *PLEN.
+ If N_WORDS < 0 Set *PLEN to -N_WORDS.
+ Don't output anything.
+*/
+
+static const char*
+avr_asm_len (const char* tpl, rtx* operands, int* plen, int n_words)
+{
+ if (NULL == plen)
+ {
+ output_asm_insn (tpl, operands);
+ }
+ else
+ {
+ if (n_words < 0)
+ *plen = -n_words;
+ else
+ *plen += n_words;
+ }
+
+ return "";
+}
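+
+/* Example: avr_asm_len ("mov %A0,%A1" CR_TAB "mov %B0,%B1", op, plen, 2)
+   prints both instructions if PLEN is NULL, and otherwise just adds 2
+   to *PLEN.  */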
+
+
+/* Return a pointer register name as a string. */
+
+static const char*
+ptrreg_to_str (int regno)
+{
+ switch (regno)
+ {
+ case REG_X: return "X";
+ case REG_Y: return "Y";
+ case REG_Z: return "Z";
+ default:
+ output_operand_lossage ("address operand requires constraint for"
+ " X, Y, or Z register");
+ }
+ return NULL;
+}
+
+/* Return the condition name as a string.
+   Used when constructing conditional jumps.  */
+
+static const char*
+cond_string (enum rtx_code code)
+{
+ switch (code)
+ {
+ case NE:
+ return "ne";
+ case EQ:
+ return "eq";
+ case GE:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return "pl";
+ else
+ return "ge";
+ case LT:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return "mi";
+ else
+ return "lt";
+ case GEU:
+ return "sh";
+ case LTU:
+ return "lo";
+ default:
+ gcc_unreachable ();
+ }
+
+ return "";
+}
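+
+/* Note: "sh" and "lo" select the unsigned branches BRSH (same or higher)
+   and BRLO (lower).  */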
+
+
+/* Implement `TARGET_PRINT_OPERAND_ADDRESS'. */
+/* Output ADDR to FILE as address. */
+
+static void
+avr_print_operand_address (FILE *file, rtx addr)
+{
+ switch (GET_CODE (addr))
+ {
+ case REG:
+      fputs (ptrreg_to_str (REGNO (addr)), file);
+ break;
+
+ case PRE_DEC:
+ fprintf (file, "-%s", ptrreg_to_str (REGNO (XEXP (addr, 0))));
+ break;
+
+ case POST_INC:
+ fprintf (file, "%s+", ptrreg_to_str (REGNO (XEXP (addr, 0))));
+ break;
+
+ default:
+ if (CONSTANT_ADDRESS_P (addr)
+ && text_segment_operand (addr, VOIDmode))
+ {
+ rtx x = addr;
+ if (GET_CODE (x) == CONST)
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x,1)) == CONST_INT)
+ {
+              /* Assembler gs() will implant a word address. Make the offset
+                 a byte offset inside gs() for the assembler. This is
+                 needed because the more logical (constant+gs(sym)) is not
+                 accepted by gas. For devices with 128K flash or less this
+                 is ok. For larger devices it will create a trampoline to
+                 offset from the symbol, which may not be what the user
+                 really wanted.  */
+
+ fprintf (file, "gs(");
+ output_addr_const (file, XEXP (x,0));
+ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC ")",
+ 2 * INTVAL (XEXP (x, 1)));
+ if (AVR_3_BYTE_PC)
+            if (warning (0, "pointer offset from symbol may be incorrect"))
+ {
+ output_addr_const (stderr, addr);
+ fprintf(stderr,"\n");
+ }
+ }
+ else
+ {
+ fprintf (file, "gs(");
+ output_addr_const (file, addr);
+ fprintf (file, ")");
+ }
+ }
+ else
+ output_addr_const (file, addr);
+ }
+}
+
+
+/* Implement `TARGET_PRINT_OPERAND_PUNCT_VALID_P'. */
+
+static bool
+avr_print_operand_punct_valid_p (unsigned char code)
+{
+ return code == '~' || code == '!';
+}
+
+
+/* Implement `TARGET_PRINT_OPERAND'. */
+/* Output X as assembler operand to file FILE.
+ For a description of supported %-codes, see top of avr.md. */
+
+static void
+avr_print_operand (FILE *file, rtx x, int code)
+{
+ int abcd = 0;
+
+ if (code >= 'A' && code <= 'D')
+ abcd = code - 'A';
+
+ if (code == '~')
+ {
+ if (!AVR_HAVE_JMP_CALL)
+ fputc ('r', file);
+ }
+ else if (code == '!')
+ {
+ if (AVR_HAVE_EIJMP_EICALL)
+ fputc ('e', file);
+ }
+ else if (code == 't'
+ || code == 'T')
+ {
+ static int t_regno = -1;
+ static int t_nbits = -1;
+
+ if (REG_P (x) && t_regno < 0 && code == 'T')
+ {
+ t_regno = REGNO (x);
+ t_nbits = GET_MODE_BITSIZE (GET_MODE (x));
+ }
+ else if (CONST_INT_P (x) && t_regno >= 0
+ && IN_RANGE (INTVAL (x), 0, t_nbits - 1))
+ {
+ int bpos = INTVAL (x);
+
+ fprintf (file, "%s", reg_names[t_regno + bpos / 8]);
+ if (code == 'T')
+ fprintf (file, ",%d", bpos % 8);
+
+ t_regno = -1;
+ }
+ else
+ fatal_insn ("operands to %T/%t must be reg + const_int:", x);
+ }
+ else if (REG_P (x))
+ {
+ if (x == zero_reg_rtx)
+ fprintf (file, "__zero_reg__");
+ else if (code == 'r' && REGNO (x) < 32)
+ fprintf (file, "%d", (int) REGNO (x));
+ else
+      fputs (reg_names[REGNO (x) + abcd], file);
+ }
+ else if (CONST_INT_P (x))
+ {
+ HOST_WIDE_INT ival = INTVAL (x);
+
+ if ('i' != code)
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival + abcd);
+ else if (low_io_address_operand (x, VOIDmode)
+ || high_io_address_operand (x, VOIDmode))
+ {
+ if (AVR_HAVE_RAMPZ && ival == avr_addr.rampz)
+ fprintf (file, "__RAMPZ__");
+ else if (AVR_HAVE_RAMPY && ival == avr_addr.rampy)
+ fprintf (file, "__RAMPY__");
+ else if (AVR_HAVE_RAMPX && ival == avr_addr.rampx)
+ fprintf (file, "__RAMPX__");
+ else if (AVR_HAVE_RAMPD && ival == avr_addr.rampd)
+ fprintf (file, "__RAMPD__");
+ else if (AVR_XMEGA && ival == avr_addr.ccp)
+ fprintf (file, "__CCP__");
+ else if (ival == avr_addr.sreg) fprintf (file, "__SREG__");
+ else if (ival == avr_addr.sp_l) fprintf (file, "__SP_L__");
+ else if (ival == avr_addr.sp_h) fprintf (file, "__SP_H__");
+ else
+ {
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ ival - avr_current_arch->sfr_offset);
+ }
+ }
+ else
+ fatal_insn ("bad address, not an I/O address:", x);
+ }
+ else if (MEM_P (x))
+ {
+ rtx addr = XEXP (x, 0);
+
+ if (code == 'm')
+ {
+ if (!CONSTANT_P (addr))
+ fatal_insn ("bad address, not a constant:", addr);
+          /* An assembler template with 'm' code expects a data address,
+             not a progmem section address.  */
+ if (text_segment_operand (addr, VOIDmode))
+ if (warning (0, "accessing data memory with"
+ " program memory address"))
+ {
+ output_addr_const (stderr, addr);
+ fprintf(stderr,"\n");
+ }
+ output_addr_const (file, addr);
+ }
+ else if (code == 'i')
+ {
+ avr_print_operand (file, addr, 'i');
+ }
+ else if (code == 'o')
+ {
+ if (GET_CODE (addr) != PLUS)
+ fatal_insn ("bad address, not (reg+disp):", addr);
+
+ avr_print_operand (file, XEXP (addr, 1), 0);
+ }
+ else if (code == 'p' || code == 'r')
+ {
+ if (GET_CODE (addr) != POST_INC && GET_CODE (addr) != PRE_DEC)
+ fatal_insn ("bad address, not post_inc or pre_dec:", addr);
+
+ if (code == 'p')
+ avr_print_operand_address (file, XEXP (addr, 0)); /* X, Y, Z */
+ else
+ avr_print_operand (file, XEXP (addr, 0), 0); /* r26, r28, r30 */
+ }
+ else if (GET_CODE (addr) == PLUS)
+ {
+ avr_print_operand_address (file, XEXP (addr,0));
+ if (REGNO (XEXP (addr, 0)) == REG_X)
+ fatal_insn ("internal compiler error. Bad address:"
+ ,addr);
+ fputc ('+', file);
+ avr_print_operand (file, XEXP (addr,1), code);
+ }
+ else
+ avr_print_operand_address (file, addr);
+ }
+ else if (code == 'i')
+ {
+ fatal_insn ("bad address, not an I/O address:", x);
+ }
+ else if (code == 'x')
+ {
+      /* Constant progmem address - as used in jmp or call.  */
+ if (0 == text_segment_operand (x, VOIDmode))
+ if (warning (0, "accessing program memory"
+ " with data memory address"))
+ {
+ output_addr_const (stderr, x);
+ fprintf(stderr,"\n");
+ }
+      /* Use a normal symbol for the direct address; no linker trampoline
+         is needed.  */
+ output_addr_const (file, x);
+ }
+ else if (CONST_FIXED_P (x))
+ {
+ HOST_WIDE_INT ival = INTVAL (avr_to_int_mode (x));
+ if (code != 0)
+ output_operand_lossage ("Unsupported code '%c' for fixed-point:",
+ code);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ival);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ {
+ long val;
+ REAL_VALUE_TYPE rv;
+ if (GET_MODE (x) != SFmode)
+ fatal_insn ("internal compiler error. Unknown mode:", x);
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, val);
+ fprintf (file, "0x%lx", val);
+ }
+ else if (GET_CODE (x) == CONST_STRING)
+ fputs (XSTR (x, 0), file);
+ else if (code == 'j')
+ fputs (cond_string (GET_CODE (x)), file);
+ else if (code == 'k')
+ fputs (cond_string (reverse_condition (GET_CODE (x))), file);
+ else
+ avr_print_operand_address (file, x);
+}
+
+
+/* Worker function for `NOTICE_UPDATE_CC'. */
+/* Update the condition-code status according to INSN.  */
+
+void
+avr_notice_update_cc (rtx body ATTRIBUTE_UNUSED, rtx insn)
+{
+ rtx set;
+ enum attr_cc cc = get_attr_cc (insn);
+
+ switch (cc)
+ {
+ default:
+ break;
+
+ case CC_PLUS:
+ case CC_LDI:
+ {
+ rtx *op = recog_data.operand;
+ int len_dummy, icc;
+
+ /* Extract insn's operands. */
+ extract_constrain_insn_cached (insn);
+
+ switch (cc)
+ {
+ default:
+ gcc_unreachable();
+
+ case CC_PLUS:
+ avr_out_plus (insn, op, &len_dummy, &icc);
+ cc = (enum attr_cc) icc;
+ break;
+
+ case CC_LDI:
+
+ cc = (op[1] == CONST0_RTX (GET_MODE (op[0]))
+ && reg_overlap_mentioned_p (op[0], zero_reg_rtx))
+ /* Loading zero-reg with 0 uses CLR and thus clobbers cc0. */
+ ? CC_CLOBBER
+ /* Any other "r,rL" combination does not alter cc0. */
+ : CC_NONE;
+
+ break;
+ } /* inner switch */
+
+ break;
+ }
+    } /* outer switch */
+
+ switch (cc)
+ {
+ default:
+ /* Special values like CC_OUT_PLUS from above have been
+ mapped to "standard" CC_* values so we never come here. */
+
+ gcc_unreachable();
+ break;
+
+ case CC_NONE:
+ /* Insn does not affect CC at all. */
+ break;
+
+ case CC_SET_N:
+ CC_STATUS_INIT;
+ break;
+
+ case CC_SET_ZN:
+ set = single_set (insn);
+ CC_STATUS_INIT;
+ if (set)
+ {
+ cc_status.flags |= CC_NO_OVERFLOW;
+ cc_status.value1 = SET_DEST (set);
+ }
+ break;
+
+ case CC_SET_CZN:
+ /* Insn sets the Z,N,C flags of CC to recog_operand[0].
+ The V flag may or may not be known but that's ok because
+ alter_cond will change tests to use EQ/NE. */
+ set = single_set (insn);
+ CC_STATUS_INIT;
+ if (set)
+ {
+ cc_status.value1 = SET_DEST (set);
+ cc_status.flags |= CC_OVERFLOW_UNUSABLE;
+ }
+ break;
+
+ case CC_COMPARE:
+ set = single_set (insn);
+ CC_STATUS_INIT;
+ if (set)
+ cc_status.value1 = SET_SRC (set);
+ break;
+
+ case CC_CLOBBER:
+ /* Insn doesn't leave CC in a usable state. */
+ CC_STATUS_INIT;
+ break;
+ }
+}
+
+/* Choose mode for jump insn:
+   1 - relative jump in range -63 <= x <= 62;
+   2 - relative jump in range -2046 <= x <= 2045;
+   3 - absolute jump (only for ATmega[16]03).  */
+
+int
+avr_jump_mode (rtx x, rtx insn)
+{
+ int dest_addr = INSN_ADDRESSES (INSN_UID (GET_CODE (x) == LABEL_REF
+ ? XEXP (x, 0) : x));
+ int cur_addr = INSN_ADDRESSES (INSN_UID (insn));
+ int jump_distance = cur_addr - dest_addr;
+
+ if (-63 <= jump_distance && jump_distance <= 62)
+ return 1;
+ else if (-2046 <= jump_distance && jump_distance <= 2045)
+ return 2;
+ else if (AVR_HAVE_JMP_CALL)
+ return 3;
+
+ return 2;
+}
+
+/* Return an AVR conditional branch command.
+   X is a comparison RTX.
+   LEN is a number returned by the avr_jump_mode function.
+   If REVERSE is nonzero, the condition code in X must be reversed.  */
+
+const char*
+ret_cond_branch (rtx x, int len, int reverse)
+{
+ RTX_CODE cond = reverse ? reverse_condition (GET_CODE (x)) : GET_CODE (x);
+
+ switch (cond)
+ {
+ case GT:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return (len == 1 ? ("breq .+2" CR_TAB
+ "brpl %0") :
+ len == 2 ? ("breq .+4" CR_TAB
+ "brmi .+2" CR_TAB
+ "rjmp %0") :
+ ("breq .+6" CR_TAB
+ "brmi .+4" CR_TAB
+ "jmp %0"));
+
+ else
+ return (len == 1 ? ("breq .+2" CR_TAB
+ "brge %0") :
+ len == 2 ? ("breq .+4" CR_TAB
+ "brlt .+2" CR_TAB
+ "rjmp %0") :
+ ("breq .+6" CR_TAB
+ "brlt .+4" CR_TAB
+ "jmp %0"));
+ case GTU:
+ return (len == 1 ? ("breq .+2" CR_TAB
+ "brsh %0") :
+ len == 2 ? ("breq .+4" CR_TAB
+ "brlo .+2" CR_TAB
+ "rjmp %0") :
+ ("breq .+6" CR_TAB
+ "brlo .+4" CR_TAB
+ "jmp %0"));
+ case LE:
+ if (cc_prev_status.flags & CC_OVERFLOW_UNUSABLE)
+ return (len == 1 ? ("breq %0" CR_TAB
+ "brmi %0") :
+ len == 2 ? ("breq .+2" CR_TAB
+ "brpl .+2" CR_TAB
+ "rjmp %0") :
+ ("breq .+2" CR_TAB
+ "brpl .+4" CR_TAB
+ "jmp %0"));
+ else
+ return (len == 1 ? ("breq %0" CR_TAB
+ "brlt %0") :
+ len == 2 ? ("breq .+2" CR_TAB
+ "brge .+2" CR_TAB
+ "rjmp %0") :
+ ("breq .+2" CR_TAB
+ "brge .+4" CR_TAB
+ "jmp %0"));
+ case LEU:
+ return (len == 1 ? ("breq %0" CR_TAB
+ "brlo %0") :
+ len == 2 ? ("breq .+2" CR_TAB
+ "brsh .+2" CR_TAB
+ "rjmp %0") :
+ ("breq .+2" CR_TAB
+ "brsh .+4" CR_TAB
+ "jmp %0"));
+ default:
+ if (reverse)
+ {
+ switch (len)
+ {
+ case 1:
+ return "br%k1 %0";
+ case 2:
+ return ("br%j1 .+2" CR_TAB
+ "rjmp %0");
+ default:
+ return ("br%j1 .+4" CR_TAB
+ "jmp %0");
+ }
+ }
+ else
+ {
+ switch (len)
+ {
+ case 1:
+ return "br%j1 %0";
+ case 2:
+ return ("br%k1 .+2" CR_TAB
+ "rjmp %0");
+ default:
+ return ("br%k1 .+4" CR_TAB
+ "jmp %0");
+ }
+ }
+ }
+ return "";
+}
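+
+/* A signed GT, for example, has no single BRxx instruction, so the
+ len == 1 template above composes it (sketch, flags set by a compare):
+
+ breq .+2 ; operands equal -> GT is false, skip the branch
+ brge %0 ; otherwise branch on signed greater-or-equal */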
+
+
+/* Worker function for `FINAL_PRESCAN_INSN'. */
+/* Output insn cost for next insn. */
+
+void
+avr_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
+ int num_operands ATTRIBUTE_UNUSED)
+{
+ if (avr_log.rtx_costs)
+ {
+ rtx set = single_set (insn);
+
+ if (set)
+ fprintf (asm_out_file, "/* DEBUG: cost = %d. */\n",
+ set_src_cost (SET_SRC (set), optimize_insn_for_speed_p ()));
+ else
+ fprintf (asm_out_file, "/* DEBUG: pattern-cost = %d. */\n",
+ rtx_cost (PATTERN (insn), INSN, 0,
+ optimize_insn_for_speed_p()));
+ }
+}
+
+/* Return 0 if undefined, 1 if always true or always false. */
+
+int
+avr_simplify_comparison_p (enum machine_mode mode, RTX_CODE op, rtx x)
+{
+ unsigned int max = (mode == QImode ? 0xff :
+ mode == HImode ? 0xffff :
+ mode == PSImode ? 0xffffff :
+ mode == SImode ? 0xffffffff : 0);
+ if (max && op && CONST_INT_P (x))
+ {
+ if (unsigned_condition (op) != op)
+ max >>= 1;
+
+ if (max != (INTVAL (x) & max)
+ && INTVAL (x) != 0xff)
+ return 1;
+ }
+ return 0;
+}
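+
+/* Worked example (hypothetical operands): QImode, unsigned comparison,
+ X = 0x180: max = 0xff, and 0x180 & 0xff = 0x80 != 0x180, so the
+ function returns 1 -- the comparison is always true or always false. */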
+
+
+/* Worker function for `FUNCTION_ARG_REGNO_P'. */
+/* Returns nonzero if REGNO is the number of a hard
+ register in which function arguments are sometimes passed. */
+
+int
+avr_function_arg_regno_p(int r)
+{
+ return (r >= 8 && r <= 25);
+}
+
+
+/* Worker function for `INIT_CUMULATIVE_ARGS'. */
+/* Initialize the variable CUM to the state at the beginning
+ of the argument list. */
+
+void
+avr_init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype, rtx libname,
+ tree fndecl ATTRIBUTE_UNUSED)
+{
+ cum->nregs = 18;
+ cum->regno = FIRST_CUM_REG;
+ if (!libname && stdarg_p (fntype))
+ cum->nregs = 0;
+
+ /* Assume the callee may be tail-called. */
+
+ cfun->machine->sibcall_fails = 0;
+}
+
+/* Returns the number of registers to allocate for a function argument. */
+
+static int
+avr_num_arg_regs (enum machine_mode mode, const_tree type)
+{
+ int size;
+
+ if (mode == BLKmode)
+ size = int_size_in_bytes (type);
+ else
+ size = GET_MODE_SIZE (mode);
+
+ /* Align all function arguments to start in even-numbered registers.
+ Odd-sized arguments leave holes above them. */
+
+ return (size + 1) & ~1;
+}
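+
+/* For instance (illustrative; assuming FIRST_CUM_REG == 26, its value
+ in avr.h): a 1-byte char argument occupies (1 + 1) & ~1 == 2
+ registers and a 3-byte PSImode value occupies 4, so the first char
+ argument arrives in R24 (26 - 2), leaving a hole in R25 above it. */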
+
+
+/* Implement `TARGET_FUNCTION_ARG'. */
+/* Decide whether a function argument is passed
+ in a register, and if so, in which register. */
+
+static rtx
+avr_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ int bytes = avr_num_arg_regs (mode, type);
+
+ if (cum->nregs && bytes <= cum->nregs)
+ return gen_rtx_REG (mode, cum->regno - bytes);
+
+ return NULL_RTX;
+}
+
+
+/* Implement `TARGET_FUNCTION_ARG_ADVANCE'. */
+/* Update the summarizer variable CUM to advance past an argument
+ in the argument list. */
+
+static void
+avr_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ int bytes = avr_num_arg_regs (mode, type);
+
+ cum->nregs -= bytes;
+ cum->regno -= bytes;
+
+ /* A parameter is being passed in a call-saved register. As the original
+ contents of these regs have to be restored before leaving the function,
+ a function must not pass arguments in call-saved regs in order to be
+ tail-called. */
+
+ if (cum->regno >= 8
+ && cum->nregs >= 0
+ && !call_used_regs[cum->regno])
+ {
+ /* FIXME: We ship info on failing tail-call in struct machine_function.
+ This uses internals of calls.c:expand_call() and the way args_so_far
+ is used. targetm.function_ok_for_sibcall() needs to be extended to
+ pass &args_so_far, too. At present, CUMULATIVE_ARGS is target
+ dependent so that such an extension is not wanted. */
+
+ cfun->machine->sibcall_fails = 1;
+ }
+
+ /* Test if all registers needed by the ABI are actually available. If the
+ user has fixed a GPR needed to pass an argument, an (implicit) function
+ call will clobber that fixed register. See PR45099 for an example. */
+
+ if (cum->regno >= 8
+ && cum->nregs >= 0)
+ {
+ int regno;
+
+ for (regno = cum->regno; regno < cum->regno + bytes; regno++)
+ if (fixed_regs[regno])
+ warning (0, "fixed register %s used to pass parameter to function",
+ reg_names[regno]);
+ }
+
+ if (cum->nregs <= 0)
+ {
+ cum->nregs = 0;
+ cum->regno = FIRST_CUM_REG;
+ }
+}
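+
+/* Example (assumed command line): building with -ffixed-r24 and calling
+ f (int), whose argument lives in R25:R24, triggers the warning above
+ because the argument would be passed in the user-fixed R24 -- the
+ situation described in PR45099. */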
+
+/* Implement `TARGET_FUNCTION_OK_FOR_SIBCALL'. */
+/* Decide whether we can make a sibling call to a function. DECL is the
+ declaration of the function being targeted by the call and EXP is the
+ CALL_EXPR representing the call. */
+
+static bool
+avr_function_ok_for_sibcall (tree decl_callee, tree exp_callee)
+{
+ tree fntype_callee;
+
+ /* Tail-calling must fail if callee-saved regs are used to pass
+ function args. We must not tail-call when `epilogue_restores'
+ is used. Unfortunately, we cannot tell at this point if that
+ actually will happen or not, and we cannot step back from
+ tail-calling. Thus, we inhibit tail-calling with -mcall-prologues. */
+
+ if (cfun->machine->sibcall_fails
+ || TARGET_CALL_PROLOGUES)
+ {
+ return false;
+ }
+
+ fntype_callee = TREE_TYPE (CALL_EXPR_FN (exp_callee));
+
+ if (decl_callee)
+ {
+ decl_callee = TREE_TYPE (decl_callee);
+ }
+ else
+ {
+ decl_callee = fntype_callee;
+
+ while (FUNCTION_TYPE != TREE_CODE (decl_callee)
+ && METHOD_TYPE != TREE_CODE (decl_callee))
+ {
+ decl_callee = TREE_TYPE (decl_callee);
+ }
+ }
+
+ /* Ensure that caller and callee have compatible epilogues. */
+
+ if (cfun->machine->is_interrupt
+ || cfun->machine->is_signal
+ || cfun->machine->is_naked
+ || avr_naked_function_p (decl_callee)
+ /* FIXME: For OS_task and OS_main, this might be over-conservative. */
+ || (avr_OS_task_function_p (decl_callee)
+ != cfun->machine->is_OS_task)
+ || (avr_OS_main_function_p (decl_callee)
+ != cfun->machine->is_OS_main))
+ {
+ return false;
+ }
+
+ return true;
+}
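+
+/* E.g. (sketch): an interrupt handler must not tail-call an ordinary
+ function, because their epilogues are incompatible (RETI vs. RET);
+ this is one of the cases rejected above. */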
+
+/***********************************************************************
+ Functions for outputting various mov's for various modes
+************************************************************************/
+
+/* Return true if the value of memory reference OP is read from flash
+ by a __load_* function from libgcc. */
+
+bool
+avr_load_libgcc_p (rtx op)
+{
+ enum machine_mode mode = GET_MODE (op);
+ int n_bytes = GET_MODE_SIZE (mode);
+
+ return (n_bytes > 2
+ && !AVR_HAVE_LPMX
+ && avr_mem_flash_p (op));
+}
+
+/* Return true if a value of mode MODE is read by a __xload_* function. */
+
+bool
+avr_xload_libgcc_p (enum machine_mode mode)
+{
+ int n_bytes = GET_MODE_SIZE (mode);
+
+ return (n_bytes > 1
+ || avr_current_device->n_flash > 1);
+}
+
+
+/* FIXME: This is a hack because secondary reloads don't work as expected.
+
+ Find an unused d-register to be used as scratch in INSN.
+ EXCLUDE is either NULL_RTX or some register. In the case where EXCLUDE
+ is a register, skip all possible return values that overlap EXCLUDE.
+ The policy for the returned register is similar to that of
+ `reg_unused_after', i.e. the returned register may overlap the SET_DEST
+ of INSN.
+
+ Return a QImode d-register or NULL_RTX if nothing found. */
+
+static rtx
+avr_find_unused_d_reg (rtx insn, rtx exclude)
+{
+ int regno;
+ bool isr_p = (avr_interrupt_function_p (current_function_decl)
+ || avr_signal_function_p (current_function_decl));
+
+ for (regno = 16; regno < 32; regno++)
+ {
+ rtx reg = all_regs_rtx[regno];
+
+ if ((exclude
+ && reg_overlap_mentioned_p (exclude, reg))
+ || fixed_regs[regno])
+ {
+ continue;
+ }
+
+ /* Try non-live register */
+
+ if (!df_regs_ever_live_p (regno)
+ && (TREE_THIS_VOLATILE (current_function_decl)
+ || cfun->machine->is_OS_task
+ || cfun->machine->is_OS_main
+ || (!isr_p && call_used_regs[regno])))
+ {
+ return reg;
+ }
+
+ /* Any live register can be used if it is unused after.
+ Prologue/epilogue will care for it as needed. */
+
+ if (df_regs_ever_live_p (regno)
+ && reg_unused_after (insn, reg))
+ {
+ return reg;
+ }
+ }
+
+ return NULL_RTX;
+}
+
+
+/* Helper function for the next function in the case where only restricted
+ version of LPM instruction is available. */
+
+static const char*
+avr_out_lpm_no_lpmx (rtx insn, rtx *xop, int *plen)
+{
+ rtx dest = xop[0];
+ rtx addr = xop[1];
+ int n_bytes = GET_MODE_SIZE (GET_MODE (dest));
+ int regno_dest;
+
+ regno_dest = REGNO (dest);
+
+ /* The implicit target register of LPM. */
+ xop[3] = lpm_reg_rtx;
+
+ switch (GET_CODE (addr))
+ {
+ default:
+ gcc_unreachable();
+
+ case REG:
+
+ gcc_assert (REG_Z == REGNO (addr));
+
+ switch (n_bytes)
+ {
+ default:
+ gcc_unreachable();
+
+ case 1:
+ avr_asm_len ("%4lpm", xop, plen, 1);
+
+ if (regno_dest != LPM_REGNO)
+ avr_asm_len ("mov %0,%3", xop, plen, 1);
+
+ return "";
+
+ case 2:
+ if (REGNO (dest) == REG_Z)
+ return avr_asm_len ("%4lpm" CR_TAB
+ "push %3" CR_TAB
+ "adiw %2,1" CR_TAB
+ "%4lpm" CR_TAB
+ "mov %B0,%3" CR_TAB
+ "pop %A0", xop, plen, 6);
+
+ avr_asm_len ("%4lpm" CR_TAB
+ "mov %A0,%3" CR_TAB
+ "adiw %2,1" CR_TAB
+ "%4lpm" CR_TAB
+ "mov %B0,%3", xop, plen, 5);
+
+ if (!reg_unused_after (insn, addr))
+ avr_asm_len ("sbiw %2,1", xop, plen, 1);
+
+ break; /* 2 */
+ }
+
+ break; /* REG */
+
+ case POST_INC:
+
+ gcc_assert (REG_Z == REGNO (XEXP (addr, 0))
+ && n_bytes <= 4);
+
+ if (regno_dest == LPM_REGNO)
+ avr_asm_len ("%4lpm" CR_TAB
+ "adiw %2,1", xop, plen, 2);
+ else
+ avr_asm_len ("%4lpm" CR_TAB
+ "mov %A0,%3" CR_TAB
+ "adiw %2,1", xop, plen, 3);
+
+ if (n_bytes >= 2)
+ avr_asm_len ("%4lpm" CR_TAB
+ "mov %B0,%3" CR_TAB
+ "adiw %2,1", xop, plen, 3);
+
+ if (n_bytes >= 3)
+ avr_asm_len ("%4lpm" CR_TAB
+ "mov %C0,%3" CR_TAB
+ "adiw %2,1", xop, plen, 3);
+
+ if (n_bytes >= 4)
+ avr_asm_len ("%4lpm" CR_TAB
+ "mov %D0,%3" CR_TAB
+ "adiw %2,1", xop, plen, 3);
+
+ break; /* POST_INC */
+
+ } /* switch CODE (addr) */
+
+ return "";
+}
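+
+/* Sketch of the n_bytes == 2 sequence above for a non-Z destination
+ (R0 is the implicit LPM target register):
+
+ lpm ; R0 = flash[Z]
+ mov %A0,r0
+ adiw r30,1 ; Z++
+ lpm ; R0 = flash[Z]
+ mov %B0,r0
+
+ plus a trailing "sbiw r30,1" when Z is still live afterwards. */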
+
+
+/* If PLEN == NULL: Output instructions to load a value from a memory location
+ OP[1] in AS1 to register OP[0].
+ If PLEN != NULL, set *PLEN to the length in words of the instruction sequence
+ and don't output anything.
+ Return "". */
+
+const char*
+avr_out_lpm (rtx insn, rtx *op, int *plen)
+{
+ rtx xop[7];
+ rtx dest = op[0];
+ rtx src = SET_SRC (single_set (insn));
+ rtx addr;
+ int n_bytes = GET_MODE_SIZE (GET_MODE (dest));
+ int segment;
+ RTX_CODE code;
+ addr_space_t as = MEM_ADDR_SPACE (src);
+
+ if (plen)
+ *plen = 0;
+
+ if (MEM_P (dest))
+ {
+ warning (0, "writing to address space %qs not supported",
+ avr_addrspace[MEM_ADDR_SPACE (dest)].name);
+
+ return "";
+ }
+
+ addr = XEXP (src, 0);
+ code = GET_CODE (addr);
+
+ gcc_assert (REG_P (dest));
+ gcc_assert (REG == code || POST_INC == code);
+
+ xop[0] = dest;
+ xop[1] = addr;
+ xop[2] = lpm_addr_reg_rtx;
+ xop[4] = xstring_empty;
+ xop[5] = tmp_reg_rtx;
+ xop[6] = XEXP (rampz_rtx, 0);
+
+ segment = avr_addrspace[as].segment;
+
+ /* Set RAMPZ as needed. */
+
+ if (segment)
+ {
+ xop[4] = GEN_INT (segment);
+ xop[3] = avr_find_unused_d_reg (insn, lpm_addr_reg_rtx);
+
+ if (xop[3] != NULL_RTX)
+ {
+ avr_asm_len ("ldi %3,%4" CR_TAB
+ "out %i6,%3", xop, plen, 2);
+ }
+ else if (segment == 1)
+ {
+ avr_asm_len ("clr %5" CR_TAB
+ "inc %5" CR_TAB
+ "out %i6,%5", xop, plen, 3);
+ }
+ else
+ {
+ avr_asm_len ("mov %5,%2" CR_TAB
+ "ldi %2,%4" CR_TAB
+ "out %i6,%2" CR_TAB
+ "mov %2,%5", xop, plen, 4);
+ }
+
+ xop[4] = xstring_e;
+
+ if (!AVR_HAVE_ELPMX)
+ return avr_out_lpm_no_lpmx (insn, xop, plen);
+ }
+ else if (!AVR_HAVE_LPMX)
+ {
+ return avr_out_lpm_no_lpmx (insn, xop, plen);
+ }
+
+ /* We have [E]LPMX: Output reading from Flash the comfortable way. */
+
+ switch (GET_CODE (addr))
+ {
+ default:
+ gcc_unreachable();
+
+ case REG:
+
+ gcc_assert (REG_Z == REGNO (addr));
+
+ switch (n_bytes)
+ {
+ default:
+ gcc_unreachable();
+
+ case 1:
+ return avr_asm_len ("%4lpm %0,%a2", xop, plen, 1);
+
+ case 2:
+ if (REGNO (dest) == REG_Z)
+ return avr_asm_len ("%4lpm %5,%a2+" CR_TAB
+ "%4lpm %B0,%a2" CR_TAB
+ "mov %A0,%5", xop, plen, 3);
+ else
+ {
+ avr_asm_len ("%4lpm %A0,%a2+" CR_TAB
+ "%4lpm %B0,%a2", xop, plen, 2);
+
+ if (!reg_unused_after (insn, addr))
+ avr_asm_len ("sbiw %2,1", xop, plen, 1);
+ }
+
+ break; /* 2 */
+
+ case 3:
+
+ avr_asm_len ("%4lpm %A0,%a2+" CR_TAB
+ "%4lpm %B0,%a2+" CR_TAB
+ "%4lpm %C0,%a2", xop, plen, 3);
+
+ if (!reg_unused_after (insn, addr))
+ avr_asm_len ("sbiw %2,2", xop, plen, 1);
+
+ break; /* 3 */
+
+ case 4:
+
+ avr_asm_len ("%4lpm %A0,%a2+" CR_TAB
+ "%4lpm %B0,%a2+", xop, plen, 2);
+
+ if (REGNO (dest) == REG_Z - 2)
+ return avr_asm_len ("%4lpm %5,%a2+" CR_TAB
+ "%4lpm %C0,%a2" CR_TAB
+ "mov %D0,%5", xop, plen, 3);
+ else
+ {
+ avr_asm_len ("%4lpm %C0,%a2+" CR_TAB
+ "%4lpm %D0,%a2", xop, plen, 2);
+
+ if (!reg_unused_after (insn, addr))
+ avr_asm_len ("sbiw %2,3", xop, plen, 1);
+ }
+
+ break; /* 4 */
+ } /* n_bytes */
+
+ break; /* REG */
+
+ case POST_INC:
+
+ gcc_assert (REG_Z == REGNO (XEXP (addr, 0))
+ && n_bytes <= 4);
+
+ avr_asm_len ("%4lpm %A0,%a2+", xop, plen, 1);
+ if (n_bytes >= 2) avr_asm_len ("%4lpm %B0,%a2+", xop, plen, 1);
+ if (n_bytes >= 3) avr_asm_len ("%4lpm %C0,%a2+", xop, plen, 1);
+ if (n_bytes >= 4) avr_asm_len ("%4lpm %D0,%a2+", xop, plen, 1);
+
+ break; /* POST_INC */
+
+ } /* switch CODE (addr) */
+
+ if (xop[4] == xstring_e && AVR_HAVE_RAMPD)
+ {
+ /* Reset RAMPZ to 0 so that EBI devices don't read garbage from RAM. */
+
+ xop[0] = zero_reg_rtx;
+ avr_asm_len ("out %i6,%0", xop, plen, 1);
+ }
+
+ return "";
+}
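+
+/* E.g. (sketch, hypothetical register allocation): a one-byte read from
+ segment 1 of a large flash, with R18 found as an unused d-register,
+ becomes
+
+ ldi r18,1 ; segment number
+ out RAMPZ,r18 ; the "out %i6,%3" above, RAMPZ symbolic
+ elpm r24,Z ; "%4lpm %0,%a2" with %4 == "e" */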
+
+
+/* Worker function for xload_8 insn. */
+
+const char*
+avr_out_xload (rtx insn ATTRIBUTE_UNUSED, rtx *op, int *plen)
+{
+ rtx xop[4];
+
+ xop[0] = op[0];
+ xop[1] = op[1];
+ xop[2] = lpm_addr_reg_rtx;
+ xop[3] = AVR_HAVE_LPMX ? op[0] : lpm_reg_rtx;
+
+ avr_asm_len (AVR_HAVE_LPMX ? "lpm %3,%a2" : "lpm", xop, plen, -1);
+
+ avr_asm_len ("sbrc %1,7" CR_TAB
+ "ld %3,%a2", xop, plen, 2);
+
+ if (REGNO (xop[0]) != REGNO (xop[3]))
+ avr_asm_len ("mov %0,%3", xop, plen, 1);
+
+ return "";
+}
+
+
+const char*
+output_movqi (rtx insn, rtx operands[], int *plen)
+{
+ rtx dest = operands[0];
+ rtx src = operands[1];
+
+ if (avr_mem_flash_p (src)
+ || avr_mem_flash_p (dest))
+ {
+ return avr_out_lpm (insn, operands, plen);
+ }
+
+ gcc_assert (1 == GET_MODE_SIZE (GET_MODE (dest)));
+
+ if (REG_P (dest))
+ {
+ if (REG_P (src)) /* mov r,r */
+ {
+ if (test_hard_reg_class (STACK_REG, dest))
+ return avr_asm_len ("out %0,%1", operands, plen, -1);
+ else if (test_hard_reg_class (STACK_REG, src))
+ return avr_asm_len ("in %0,%1", operands, plen, -1);
+
+ return avr_asm_len ("mov %0,%1", operands, plen, -1);
+ }
+ else if (CONSTANT_P (src))
+ {
+ output_reload_in_const (operands, NULL_RTX, plen, false);
+ return "";
+ }
+ else if (MEM_P (src))
+ return out_movqi_r_mr (insn, operands, plen); /* mov r,m */
+ }
+ else if (MEM_P (dest))
+ {
+ rtx xop[2];
+
+ xop[0] = dest;
+ xop[1] = src == CONST0_RTX (GET_MODE (dest)) ? zero_reg_rtx : src;
+
+ return out_movqi_mr_r (insn, xop, plen);
+ }
+
+ return "";
+}
+
+
+const char *
+output_movhi (rtx insn, rtx xop[], int *plen)
+{
+ rtx dest = xop[0];
+ rtx src = xop[1];
+
+ gcc_assert (GET_MODE_SIZE (GET_MODE (dest)) == 2);
+
+ if (avr_mem_flash_p (src)
+ || avr_mem_flash_p (dest))
+ {
+ return avr_out_lpm (insn, xop, plen);
+ }
+
+ if (REG_P (dest))
+ {
+ if (REG_P (src)) /* mov r,r */
+ {
+ if (test_hard_reg_class (STACK_REG, dest))
+ {
+ if (AVR_HAVE_8BIT_SP)
+ return avr_asm_len ("out __SP_L__,%A1", xop, plen, -1);
+
+ if (AVR_XMEGA)
+ return avr_asm_len ("out __SP_L__,%A1" CR_TAB
+ "out __SP_H__,%B1", xop, plen, -2);
+
+ /* Use simple load of SP if no interrupts are used. */
+
+ return TARGET_NO_INTERRUPTS
+ ? avr_asm_len ("out __SP_H__,%B1" CR_TAB
+ "out __SP_L__,%A1", xop, plen, -2)
+ : avr_asm_len ("in __tmp_reg__,__SREG__" CR_TAB
+ "cli" CR_TAB
+ "out __SP_H__,%B1" CR_TAB
+ "out __SREG__,__tmp_reg__" CR_TAB
+ "out __SP_L__,%A1", xop, plen, -5);
+ }
+ else if (test_hard_reg_class (STACK_REG, src))
+ {
+ return !AVR_HAVE_SPH
+ ? avr_asm_len ("in %A0,__SP_L__" CR_TAB
+ "clr %B0", xop, plen, -2)
+
+ : avr_asm_len ("in %A0,__SP_L__" CR_TAB
+ "in %B0,__SP_H__", xop, plen, -2);
+ }
+
+ return AVR_HAVE_MOVW
+ ? avr_asm_len ("movw %0,%1", xop, plen, -1)
+
+ : avr_asm_len ("mov %A0,%A1" CR_TAB
+ "mov %B0,%B1", xop, plen, -2);
+ } /* REG_P (src) */
+ else if (CONSTANT_P (src))
+ {
+ return output_reload_inhi (xop, NULL, plen);
+ }
+ else if (MEM_P (src))
+ {
+ return out_movhi_r_mr (insn, xop, plen); /* mov r,m */
+ }
+ }
+ else if (MEM_P (dest))
+ {
+ rtx xop[2];
+
+ xop[0] = dest;
+ xop[1] = src == CONST0_RTX (GET_MODE (dest)) ? zero_reg_rtx : src;
+
+ return out_movhi_mr_r (insn, xop, plen);
+ }
+
+ fatal_insn ("invalid insn:", insn);
+
+ return "";
+}
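+
+/* Note on the non-XMEGA SP store above: SREG is restored (possibly
+ re-enabling interrupts) one instruction before __SP_L__ is written.
+ This relies on the AVR behaviour that one more instruction executes
+ after the I flag is set before any interrupt is served, so no handler
+ can ever observe a half-written stack pointer. */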
+
+static const char*
+out_movqi_r_mr (rtx insn, rtx op[], int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx x = XEXP (src, 0);
+
+ if (CONSTANT_ADDRESS_P (x))
+ {
+ return optimize > 0 && io_address_operand (x, QImode)
+ ? avr_asm_len ("in %0,%i1", op, plen, -1)
+ : avr_asm_len ("lds %0,%m1", op, plen, -2);
+ }
+ else if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ /* memory access by reg+disp */
+
+ int disp = INTVAL (XEXP (x, 1));
+
+ if (disp - GET_MODE_SIZE (GET_MODE (src)) >= 63)
+ {
+ if (REGNO (XEXP (x, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+ return avr_asm_len ("adiw r28,%o1-63" CR_TAB
+ "ldd %0,Y+63" CR_TAB
+ "sbiw r28,%o1-63", op, plen, -3);
+
+ return avr_asm_len ("subi r28,lo8(-%o1)" CR_TAB
+ "sbci r29,hi8(-%o1)" CR_TAB
+ "ld %0,Y" CR_TAB
+ "subi r28,lo8(%o1)" CR_TAB
+ "sbci r29,hi8(%o1)", op, plen, -5);
+ }
+ else if (REGNO (XEXP (x, 0)) == REG_X)
+ {
+ /* This is a paranoid case. LEGITIMIZE_RELOAD_ADDRESS must exclude
+ it, but this situation can occur with extreme optimization options. */
+
+ avr_asm_len ("adiw r26,%o1" CR_TAB
+ "ld %0,X", op, plen, -2);
+
+ if (!reg_overlap_mentioned_p (dest, XEXP (x,0))
+ && !reg_unused_after (insn, XEXP (x,0)))
+ {
+ avr_asm_len ("sbiw r26,%o1", op, plen, 1);
+ }
+
+ return "";
+ }
+
+ return avr_asm_len ("ldd %0,%1", op, plen, -1);
+ }
+
+ return avr_asm_len ("ld %0,%1", op, plen, -1);
+}
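+
+/* E.g. (illustrative addresses): with optimization enabled, a QImode
+ read that io_address_operand accepts becomes a one-word IN, whereas a
+ plain SRAM location needs a two-word LDS:
+
+ in r24,0x16 ; I/O address
+ lds r24,0x0100 ; data address */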
+
+static const char*
+out_movhi_r_mr (rtx insn, rtx op[], int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (src, 0);
+ int reg_dest = true_regnum (dest);
+ int reg_base = true_regnum (base);
+ /* "volatile" forces reading low byte first, even if less efficient,
+ for correct operation with 16-bit I/O registers. */
+ int mem_volatile_p = MEM_VOLATILE_P (src);
+
+ if (reg_base > 0)
+ {
+ if (reg_dest == reg_base) /* R = (R) */
+ return avr_asm_len ("ld __tmp_reg__,%1+" CR_TAB
+ "ld %B0,%1" CR_TAB
+ "mov %A0,__tmp_reg__", op, plen, -3);
+
+ if (reg_base != REG_X)
+ return avr_asm_len ("ld %A0,%1" CR_TAB
+ "ldd %B0,%1+1", op, plen, -2);
+
+ avr_asm_len ("ld %A0,X+" CR_TAB
+ "ld %B0,X", op, plen, -2);
+
+ if (!reg_unused_after (insn, base))
+ avr_asm_len ("sbiw r26,1", op, plen, 1);
+
+ return "";
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ int reg_base = true_regnum (XEXP (base, 0));
+
+ if (disp > MAX_LD_OFFSET (GET_MODE (src)))
+ {
+ if (REGNO (XEXP (base, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ return disp <= 63 + MAX_LD_OFFSET (GET_MODE (src))
+ ? avr_asm_len ("adiw r28,%o1-62" CR_TAB
+ "ldd %A0,Y+62" CR_TAB
+ "ldd %B0,Y+63" CR_TAB
+ "sbiw r28,%o1-62", op, plen, -4)
+
+ : avr_asm_len ("subi r28,lo8(-%o1)" CR_TAB
+ "sbci r29,hi8(-%o1)" CR_TAB
+ "ld %A0,Y" CR_TAB
+ "ldd %B0,Y+1" CR_TAB
+ "subi r28,lo8(%o1)" CR_TAB
+ "sbci r29,hi8(%o1)", op, plen, -6);
+ }
+
+ /* This is a paranoid case. LEGITIMIZE_RELOAD_ADDRESS must exclude
+ it, but this situation can occur with extreme optimization options. */
+
+ if (reg_base == REG_X)
+ return reg_base == reg_dest
+ ? avr_asm_len ("adiw r26,%o1" CR_TAB
+ "ld __tmp_reg__,X+" CR_TAB
+ "ld %B0,X" CR_TAB
+ "mov %A0,__tmp_reg__", op, plen, -4)
+
+ : avr_asm_len ("adiw r26,%o1" CR_TAB
+ "ld %A0,X+" CR_TAB
+ "ld %B0,X" CR_TAB
+ "sbiw r26,%o1+1", op, plen, -4);
+
+ return reg_base == reg_dest
+ ? avr_asm_len ("ldd __tmp_reg__,%A1" CR_TAB
+ "ldd %B0,%B1" CR_TAB
+ "mov %A0,__tmp_reg__", op, plen, -3)
+
+ : avr_asm_len ("ldd %A0,%A1" CR_TAB
+ "ldd %B0,%B1", op, plen, -2);
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ {
+ if (reg_overlap_mentioned_p (dest, XEXP (base, 0)))
+ fatal_insn ("incorrect insn:", insn);
+
+ if (!mem_volatile_p)
+ return avr_asm_len ("ld %B0,%1" CR_TAB
+ "ld %A0,%1", op, plen, -2);
+
+ return REGNO (XEXP (base, 0)) == REG_X
+ ? avr_asm_len ("sbiw r26,2" CR_TAB
+ "ld %A0,X+" CR_TAB
+ "ld %B0,X" CR_TAB
+ "sbiw r26,1", op, plen, -4)
+
+ : avr_asm_len ("sbiw %r1,2" CR_TAB
+ "ld %A0,%p1" CR_TAB
+ "ldd %B0,%p1+1", op, plen, -3);
+ }
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ {
+ if (reg_overlap_mentioned_p (dest, XEXP (base, 0)))
+ fatal_insn ("incorrect insn:", insn);
+
+ return avr_asm_len ("ld %A0,%1" CR_TAB
+ "ld %B0,%1", op, plen, -2);
+ }
+ else if (CONSTANT_ADDRESS_P (base))
+ {
+ return optimize > 0 && io_address_operand (base, HImode)
+ ? avr_asm_len ("in %A0,%i1" CR_TAB
+ "in %B0,%i1+1", op, plen, -2)
+
+ : avr_asm_len ("lds %A0,%m1" CR_TAB
+ "lds %B0,%m1+1", op, plen, -4);
+ }
+
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
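+
+/* The low-byte-first rule matters for peripherals such as the 16-bit
+ timer registers, where reading the low byte latches the high byte
+ into the shared TEMP register; hence the volatile PRE_DEC case above
+ rewinds with SBIW and then reads %A0 before %B0. */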
+
+static const char*
+out_movsi_r_mr (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (src, 0);
+ int reg_dest = true_regnum (dest);
+ int reg_base = true_regnum (base);
+ int tmp;
+
+ if (!l)
+ l = &tmp;
+
+ if (reg_base > 0)
+ {
+ if (reg_base == REG_X) /* (R26) */
+ {
+ if (reg_dest == REG_X)
+ /* "ld r26,-X" is undefined */
+ return *l=7, ("adiw r26,3" CR_TAB
+ "ld r29,X" CR_TAB
+ "ld r28,-X" CR_TAB
+ "ld __tmp_reg__,-X" CR_TAB
+ "sbiw r26,1" CR_TAB
+ "ld r26,X" CR_TAB
+ "mov r27,__tmp_reg__");
+ else if (reg_dest == REG_X - 2)
+ return *l=5, ("ld %A0,X+" CR_TAB
+ "ld %B0,X+" CR_TAB
+ "ld __tmp_reg__,X+" CR_TAB
+ "ld %D0,X" CR_TAB
+ "mov %C0,__tmp_reg__");
+ else if (reg_unused_after (insn, base))
+ return *l=4, ("ld %A0,X+" CR_TAB
+ "ld %B0,X+" CR_TAB
+ "ld %C0,X+" CR_TAB
+ "ld %D0,X");
+ else
+ return *l=5, ("ld %A0,X+" CR_TAB
+ "ld %B0,X+" CR_TAB
+ "ld %C0,X+" CR_TAB
+ "ld %D0,X" CR_TAB
+ "sbiw r26,3");
+ }
+ else
+ {
+ if (reg_dest == reg_base)
+ return *l=5, ("ldd %D0,%1+3" CR_TAB
+ "ldd %C0,%1+2" CR_TAB
+ "ldd __tmp_reg__,%1+1" CR_TAB
+ "ld %A0,%1" CR_TAB
+ "mov %B0,__tmp_reg__");
+ else if (reg_base == reg_dest + 2)
+ return *l=5, ("ld %A0,%1" CR_TAB
+ "ldd %B0,%1+1" CR_TAB
+ "ldd __tmp_reg__,%1+2" CR_TAB
+ "ldd %D0,%1+3" CR_TAB
+ "mov %C0,__tmp_reg__");
+ else
+ return *l=4, ("ld %A0,%1" CR_TAB
+ "ldd %B0,%1+1" CR_TAB
+ "ldd %C0,%1+2" CR_TAB
+ "ldd %D0,%1+3");
+ }
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+
+ if (disp > MAX_LD_OFFSET (GET_MODE (src)))
+ {
+ if (REGNO (XEXP (base, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+ return *l = 6, ("adiw r28,%o1-60" CR_TAB
+ "ldd %A0,Y+60" CR_TAB
+ "ldd %B0,Y+61" CR_TAB
+ "ldd %C0,Y+62" CR_TAB
+ "ldd %D0,Y+63" CR_TAB
+ "sbiw r28,%o1-60");
+
+ return *l = 8, ("subi r28,lo8(-%o1)" CR_TAB
+ "sbci r29,hi8(-%o1)" CR_TAB
+ "ld %A0,Y" CR_TAB
+ "ldd %B0,Y+1" CR_TAB
+ "ldd %C0,Y+2" CR_TAB
+ "ldd %D0,Y+3" CR_TAB
+ "subi r28,lo8(%o1)" CR_TAB
+ "sbci r29,hi8(%o1)");
+ }
+
+ reg_base = true_regnum (XEXP (base, 0));
+ if (reg_base == REG_X)
+ {
+ /* R = (X + d) */
+ if (reg_dest == REG_X)
+ {
+ *l = 7;
+ /* "ld r26,-X" is undefined */
+ return ("adiw r26,%o1+3" CR_TAB
+ "ld r29,X" CR_TAB
+ "ld r28,-X" CR_TAB
+ "ld __tmp_reg__,-X" CR_TAB
+ "sbiw r26,1" CR_TAB
+ "ld r26,X" CR_TAB
+ "mov r27,__tmp_reg__");
+ }
+ *l = 6;
+ if (reg_dest == REG_X - 2)
+ return ("adiw r26,%o1" CR_TAB
+ "ld r24,X+" CR_TAB
+ "ld r25,X+" CR_TAB
+ "ld __tmp_reg__,X+" CR_TAB
+ "ld r27,X" CR_TAB
+ "mov r26,__tmp_reg__");
+
+ return ("adiw r26,%o1" CR_TAB
+ "ld %A0,X+" CR_TAB
+ "ld %B0,X+" CR_TAB
+ "ld %C0,X+" CR_TAB
+ "ld %D0,X" CR_TAB
+ "sbiw r26,%o1+3");
+ }
+ if (reg_dest == reg_base)
+ return *l=5, ("ldd %D0,%D1" CR_TAB
+ "ldd %C0,%C1" CR_TAB
+ "ldd __tmp_reg__,%B1" CR_TAB
+ "ldd %A0,%A1" CR_TAB
+ "mov %B0,__tmp_reg__");
+ else if (reg_dest == reg_base - 2)
+ return *l=5, ("ldd %A0,%A1" CR_TAB
+ "ldd %B0,%B1" CR_TAB
+ "ldd __tmp_reg__,%C1" CR_TAB
+ "ldd %D0,%D1" CR_TAB
+ "mov %C0,__tmp_reg__");
+ return *l=4, ("ldd %A0,%A1" CR_TAB
+ "ldd %B0,%B1" CR_TAB
+ "ldd %C0,%C1" CR_TAB
+ "ldd %D0,%D1");
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=4, ("ld %D0,%1" CR_TAB
+ "ld %C0,%1" CR_TAB
+ "ld %B0,%1" CR_TAB
+ "ld %A0,%1");
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ return *l=4, ("ld %A0,%1" CR_TAB
+ "ld %B0,%1" CR_TAB
+ "ld %C0,%1" CR_TAB
+ "ld %D0,%1");
+ else if (CONSTANT_ADDRESS_P (base))
+ return *l=8, ("lds %A0,%m1" CR_TAB
+ "lds %B0,%m1+1" CR_TAB
+ "lds %C0,%m1+2" CR_TAB
+ "lds %D0,%m1+3");
+
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+static const char*
+out_movsi_mr_r (rtx insn, rtx op[], int *l)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (dest, 0);
+ int reg_base = true_regnum (base);
+ int reg_src = true_regnum (src);
+ int tmp;
+
+ if (!l)
+ l = &tmp;
+
+ if (CONSTANT_ADDRESS_P (base))
+ return *l=8,("sts %m0,%A1" CR_TAB
+ "sts %m0+1,%B1" CR_TAB
+ "sts %m0+2,%C1" CR_TAB
+ "sts %m0+3,%D1");
+ if (reg_base > 0) /* (r) */
+ {
+ if (reg_base == REG_X) /* (R26) */
+ {
+ if (reg_src == REG_X)
+ {
+ /* "st X+,r26" is undefined */
+ if (reg_unused_after (insn, base))
+ return *l=6, ("mov __tmp_reg__,r27" CR_TAB
+ "st X,r26" CR_TAB
+ "adiw r26,1" CR_TAB
+ "st X+,__tmp_reg__" CR_TAB
+ "st X+,r28" CR_TAB
+ "st X,r29");
+ else
+ return *l=7, ("mov __tmp_reg__,r27" CR_TAB
+ "st X,r26" CR_TAB
+ "adiw r26,1" CR_TAB
+ "st X+,__tmp_reg__" CR_TAB
+ "st X+,r28" CR_TAB
+ "st X,r29" CR_TAB
+ "sbiw r26,3");
+ }
+ else if (reg_base == reg_src + 2)
+ {
+ if (reg_unused_after (insn, base))
+ return *l=7, ("mov __zero_reg__,%C1" CR_TAB
+ "mov __tmp_reg__,%D1" CR_TAB
+ "st %0+,%A1" CR_TAB
+ "st %0+,%B1" CR_TAB
+ "st %0+,__zero_reg__" CR_TAB
+ "st %0,__tmp_reg__" CR_TAB
+ "clr __zero_reg__");
+ else
+ return *l=8, ("mov __zero_reg__,%C1" CR_TAB
+ "mov __tmp_reg__,%D1" CR_TAB
+ "st %0+,%A1" CR_TAB
+ "st %0+,%B1" CR_TAB
+ "st %0+,__zero_reg__" CR_TAB
+ "st %0,__tmp_reg__" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ "sbiw r26,3");
+ }
+ return *l=5, ("st %0+,%A1" CR_TAB
+ "st %0+,%B1" CR_TAB
+ "st %0+,%C1" CR_TAB
+ "st %0,%D1" CR_TAB
+ "sbiw r26,3");
+ }
+ else
+ return *l=4, ("st %0,%A1" CR_TAB
+ "std %0+1,%B1" CR_TAB
+ "std %0+2,%C1" CR_TAB
+ "std %0+3,%D1");
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ reg_base = REGNO (XEXP (base, 0));
+ if (disp > MAX_LD_OFFSET (GET_MODE (dest)))
+ {
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return *l = 6, ("adiw r28,%o0-60" CR_TAB
+ "std Y+60,%A1" CR_TAB
+ "std Y+61,%B1" CR_TAB
+ "std Y+62,%C1" CR_TAB
+ "std Y+63,%D1" CR_TAB
+ "sbiw r28,%o0-60");
+
+ return *l = 8, ("subi r28,lo8(-%o0)" CR_TAB
+ "sbci r29,hi8(-%o0)" CR_TAB
+ "st Y,%A1" CR_TAB
+ "std Y+1,%B1" CR_TAB
+ "std Y+2,%C1" CR_TAB
+ "std Y+3,%D1" CR_TAB
+ "subi r28,lo8(%o0)" CR_TAB
+ "sbci r29,hi8(%o0)");
+ }
+ if (reg_base == REG_X)
+ {
+ /* (X + d) = R */
+ if (reg_src == REG_X)
+ {
+ *l = 9;
+ return ("mov __tmp_reg__,r26" CR_TAB
+ "mov __zero_reg__,r27" CR_TAB
+ "adiw r26,%o0" CR_TAB
+ "st X+,__tmp_reg__" CR_TAB
+ "st X+,__zero_reg__" CR_TAB
+ "st X+,r28" CR_TAB
+ "st X,r29" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ "sbiw r26,%o0+3");
+ }
+ else if (reg_src == REG_X - 2)
+ {
+ *l = 9;
+ return ("mov __tmp_reg__,r26" CR_TAB
+ "mov __zero_reg__,r27" CR_TAB
+ "adiw r26,%o0" CR_TAB
+ "st X+,r24" CR_TAB
+ "st X+,r25" CR_TAB
+ "st X+,__tmp_reg__" CR_TAB
+ "st X,__zero_reg__" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ "sbiw r26,%o0+3");
+ }
+ *l = 6;
+ return ("adiw r26,%o0" CR_TAB
+ "st X+,%A1" CR_TAB
+ "st X+,%B1" CR_TAB
+ "st X+,%C1" CR_TAB
+ "st X,%D1" CR_TAB
+ "sbiw r26,%o0+3");
+ }
+ return *l=4, ("std %A0,%A1" CR_TAB
+ "std %B0,%B1" CR_TAB
+ "std %C0,%C1" CR_TAB
+ "std %D0,%D1");
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return *l=4, ("st %0,%D1" CR_TAB
+ "st %0,%C1" CR_TAB
+ "st %0,%B1" CR_TAB
+ "st %0,%A1");
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ return *l=4, ("st %0,%A1" CR_TAB
+ "st %0,%B1" CR_TAB
+ "st %0,%C1" CR_TAB
+ "st %0,%D1");
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+const char *
+output_movsisf (rtx insn, rtx operands[], int *l)
+{
+ int dummy;
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ int *real_l = l;
+
+ if (avr_mem_flash_p (src)
+ || avr_mem_flash_p (dest))
+ {
+ return avr_out_lpm (insn, operands, real_l);
+ }
+
+ if (!l)
+ l = &dummy;
+
+ gcc_assert (4 == GET_MODE_SIZE (GET_MODE (dest)));
+ if (REG_P (dest))
+ {
+ if (REG_P (src)) /* mov r,r */
+ {
+ if (true_regnum (dest) > true_regnum (src))
+ {
+ if (AVR_HAVE_MOVW)
+ {
+ *l = 2;
+ return ("movw %C0,%C1" CR_TAB
+ "movw %A0,%A1");
+ }
+ *l = 4;
+ return ("mov %D0,%D1" CR_TAB
+ "mov %C0,%C1" CR_TAB
+ "mov %B0,%B1" CR_TAB
+ "mov %A0,%A1");
+ }
+ else
+ {
+ if (AVR_HAVE_MOVW)
+ {
+ *l = 2;
+ return ("movw %A0,%A1" CR_TAB
+ "movw %C0,%C1");
+ }
+ *l = 4;
+ return ("mov %A0,%A1" CR_TAB
+ "mov %B0,%B1" CR_TAB
+ "mov %C0,%C1" CR_TAB
+ "mov %D0,%D1");
+ }
+ }
+ else if (CONSTANT_P (src))
+ {
+ return output_reload_insisf (operands, NULL_RTX, real_l);
+ }
+ else if (MEM_P (src))
+ return out_movsi_r_mr (insn, operands, real_l); /* mov r,m */
+ }
+ else if (MEM_P (dest))
+ {
+ const char *templ;
+
+ if (src == CONST0_RTX (GET_MODE (dest)))
+ operands[1] = zero_reg_rtx;
+
+ templ = out_movsi_mr_r (insn, operands, real_l);
+
+ if (!real_l)
+ output_asm_insn (templ, operands);
+
+ operands[1] = src;
+ return "";
+ }
+ fatal_insn ("invalid insn:", insn);
+ return "";
+}
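+
+/* The register-number test above handles overlapping moves, e.g.
+ (sketch) copying R20..R23 to R22..R25 must copy the high word first:
+
+ movw r24,r22
+ movw r22,r20 */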
+
+
+/* Handle loads of 24-bit types from memory to register. */
+
+static const char*
+avr_out_load_psi (rtx insn, rtx *op, int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (src, 0);
+ int reg_dest = true_regnum (dest);
+ int reg_base = true_regnum (base);
+
+ if (reg_base > 0)
+ {
+ if (reg_base == REG_X) /* (R26) */
+ {
+ if (reg_dest == REG_X)
+ /* "ld r26,-X" is undefined */
+ return avr_asm_len ("adiw r26,2" CR_TAB
+ "ld r28,X" CR_TAB
+ "ld __tmp_reg__,-X" CR_TAB
+ "sbiw r26,1" CR_TAB
+ "ld r26,X" CR_TAB
+ "mov r27,__tmp_reg__", op, plen, -6);
+ else
+ {
+ avr_asm_len ("ld %A0,X+" CR_TAB
+ "ld %B0,X+" CR_TAB
+ "ld %C0,X", op, plen, -3);
+
+ if (reg_dest != REG_X - 2
+ && !reg_unused_after (insn, base))
+ {
+ avr_asm_len ("sbiw r26,2", op, plen, 1);
+ }
+
+ return "";
+ }
+ }
+ else /* reg_base != REG_X */
+ {
+ if (reg_dest == reg_base)
+ return avr_asm_len ("ldd %C0,%1+2" CR_TAB
+ "ldd __tmp_reg__,%1+1" CR_TAB
+ "ld %A0,%1" CR_TAB
+ "mov %B0,__tmp_reg__", op, plen, -4);
+ else
+ return avr_asm_len ("ld %A0,%1" CR_TAB
+ "ldd %B0,%1+1" CR_TAB
+ "ldd %C0,%1+2", op, plen, -3);
+ }
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+
+ if (disp > MAX_LD_OFFSET (GET_MODE (src)))
+ {
+ if (REGNO (XEXP (base, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (src)))
+ return avr_asm_len ("adiw r28,%o1-61" CR_TAB
+ "ldd %A0,Y+61" CR_TAB
+ "ldd %B0,Y+62" CR_TAB
+ "ldd %C0,Y+63" CR_TAB
+ "sbiw r28,%o1-61", op, plen, -5);
+
+ return avr_asm_len ("subi r28,lo8(-%o1)" CR_TAB
+ "sbci r29,hi8(-%o1)" CR_TAB
+ "ld %A0,Y" CR_TAB
+ "ldd %B0,Y+1" CR_TAB
+ "ldd %C0,Y+2" CR_TAB
+ "subi r28,lo8(%o1)" CR_TAB
+ "sbci r29,hi8(%o1)", op, plen, -7);
+ }
+
+ reg_base = true_regnum (XEXP (base, 0));
+ if (reg_base == REG_X)
+ {
+ /* R = (X + d) */
+ if (reg_dest == REG_X)
+ {
+ /* "ld r26,-X" is undefined */
+ return avr_asm_len ("adiw r26,%o1+2" CR_TAB
+ "ld r28,X" CR_TAB
+ "ld __tmp_reg__,-X" CR_TAB
+ "sbiw r26,1" CR_TAB
+ "ld r26,X" CR_TAB
+ "mov r27,__tmp_reg__", op, plen, -6);
+ }
+
+ avr_asm_len ("adiw r26,%o1" CR_TAB
+ "ld %A0,X+" CR_TAB
+ "ld %B0,X+" CR_TAB
+ "ld %C0,X", op, plen, -4);
+
+ if (reg_dest != REG_W
+ && !reg_unused_after (insn, XEXP (base, 0)))
+ avr_asm_len ("sbiw r26,%o1+2", op, plen, 1);
+
+ return "";
+ }
+
+ if (reg_dest == reg_base)
+ return avr_asm_len ("ldd %C0,%C1" CR_TAB
+ "ldd __tmp_reg__,%B1" CR_TAB
+ "ldd %A0,%A1" CR_TAB
+ "mov %B0,__tmp_reg__", op, plen, -4);
+
+ return avr_asm_len ("ldd %A0,%A1" CR_TAB
+ "ldd %B0,%B1" CR_TAB
+ "ldd %C0,%C1", op, plen, -3);
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return avr_asm_len ("ld %C0,%1" CR_TAB
+ "ld %B0,%1" CR_TAB
+ "ld %A0,%1", op, plen, -3);
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ return avr_asm_len ("ld %A0,%1" CR_TAB
+ "ld %B0,%1" CR_TAB
+ "ld %C0,%1", op, plen, -3);
+
+ else if (CONSTANT_ADDRESS_P (base))
+ return avr_asm_len ("lds %A0,%m1" CR_TAB
+ "lds %B0,%m1+1" CR_TAB
+ "lds %C0,%m1+2", op, plen , -6);
+
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+/* Handle store of 24-bit type from register or zero to memory. */
+
+static const char*
+avr_out_store_psi (rtx insn, rtx *op, int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (dest, 0);
+ int reg_base = true_regnum (base);
+
+ if (CONSTANT_ADDRESS_P (base))
+ return avr_asm_len ("sts %m0,%A1" CR_TAB
+ "sts %m0+1,%B1" CR_TAB
+ "sts %m0+2,%C1", op, plen, -6);
+
+ if (reg_base > 0) /* (r) */
+ {
+ if (reg_base == REG_X) /* (R26) */
+ {
+ gcc_assert (!reg_overlap_mentioned_p (base, src));
+
+ avr_asm_len ("st %0+,%A1" CR_TAB
+ "st %0+,%B1" CR_TAB
+ "st %0,%C1", op, plen, -3);
+
+ if (!reg_unused_after (insn, base))
+ avr_asm_len ("sbiw r26,2", op, plen, 1);
+
+ return "";
+ }
+ else
+ return avr_asm_len ("st %0,%A1" CR_TAB
+ "std %0+1,%B1" CR_TAB
+ "std %0+2,%C1", op, plen, -3);
+ }
+ else if (GET_CODE (base) == PLUS) /* (R + i) */
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ reg_base = REGNO (XEXP (base, 0));
+
+ if (disp > MAX_LD_OFFSET (GET_MODE (dest)))
+ {
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return avr_asm_len ("adiw r28,%o0-61" CR_TAB
+ "std Y+61,%A1" CR_TAB
+ "std Y+62,%B1" CR_TAB
+ "std Y+63,%C1" CR_TAB
+ "sbiw r28,%o0-60", op, plen, -5);
+
+ return avr_asm_len ("subi r28,lo8(-%o0)" CR_TAB
+ "sbci r29,hi8(-%o0)" CR_TAB
+ "st Y,%A1" CR_TAB
+ "std Y+1,%B1" CR_TAB
+ "std Y+2,%C1" CR_TAB
+ "subi r28,lo8(%o0)" CR_TAB
+ "sbci r29,hi8(%o0)", op, plen, -7);
+ }
+ if (reg_base == REG_X)
+ {
+ /* (X + d) = R */
+ gcc_assert (!reg_overlap_mentioned_p (XEXP (base, 0), src));
+
+ avr_asm_len ("adiw r26,%o0" CR_TAB
+ "st X+,%A1" CR_TAB
+ "st X+,%B1" CR_TAB
+ "st X,%C1", op, plen, -4);
+
+ if (!reg_unused_after (insn, XEXP (base, 0)))
+ avr_asm_len ("sbiw r26,%o0+2", op, plen, 1);
+
+ return "";
+ }
+
+ return avr_asm_len ("std %A0,%A1" CR_TAB
+ "std %B0,%B1" CR_TAB
+ "std %C0,%C1", op, plen, -3);
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ return avr_asm_len ("st %0,%C1" CR_TAB
+ "st %0,%B1" CR_TAB
+ "st %0,%A1", op, plen, -3);
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ return avr_asm_len ("st %0,%A1" CR_TAB
+ "st %0,%B1" CR_TAB
+ "st %0,%C1", op, plen, -3);
+
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+
+/* Move 24-bit values around. */
+
+const char *
+avr_out_movpsi (rtx insn, rtx *op, int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+
+ if (avr_mem_flash_p (src)
+ || avr_mem_flash_p (dest))
+ {
+ return avr_out_lpm (insn, op, plen);
+ }
+
+ if (register_operand (dest, VOIDmode))
+ {
+ if (register_operand (src, VOIDmode)) /* mov r,r */
+ {
+ if (true_regnum (dest) > true_regnum (src))
+ {
+ avr_asm_len ("mov %C0,%C1", op, plen, -1);
+
+ if (AVR_HAVE_MOVW)
+ return avr_asm_len ("movw %A0,%A1", op, plen, 1);
+ else
+ return avr_asm_len ("mov %B0,%B1" CR_TAB
+ "mov %A0,%A1", op, plen, 2);
+ }
+ else
+ {
+ if (AVR_HAVE_MOVW)
+ avr_asm_len ("movw %A0,%A1", op, plen, -1);
+ else
+ avr_asm_len ("mov %A0,%A1" CR_TAB
+ "mov %B0,%B1", op, plen, -2);
+
+ return avr_asm_len ("mov %C0,%C1", op, plen, 1);
+ }
+ }
+ else if (CONSTANT_P (src))
+ {
+ return avr_out_reload_inpsi (op, NULL_RTX, plen);
+ }
+ else if (MEM_P (src))
+ return avr_out_load_psi (insn, op, plen); /* mov r,m */
+ }
+ else if (MEM_P (dest))
+ {
+ rtx xop[2];
+
+ xop[0] = dest;
+ xop[1] = src == CONST0_RTX (GET_MODE (dest)) ? zero_reg_rtx : src;
+
+ return avr_out_store_psi (insn, xop, plen);
+ }
+
+ fatal_insn ("invalid insn:", insn);
+ return "";
+}
+
+
+static const char*
+out_movqi_mr_r (rtx insn, rtx op[], int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx x = XEXP (dest, 0);
+
+ if (CONSTANT_ADDRESS_P (x))
+ {
+ return optimize > 0 && io_address_operand (x, QImode)
+ ? avr_asm_len ("out %i0,%1", op, plen, -1)
+ : avr_asm_len ("sts %m0,%1", op, plen, -2);
+ }
+ else if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1)))
+ {
+ /* memory access by reg+disp */
+
+ int disp = INTVAL (XEXP (x, 1));
+
+ if (disp - GET_MODE_SIZE (GET_MODE (dest)) >= 63)
+ {
+ if (REGNO (XEXP (x, 0)) != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ if (disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest)))
+ return avr_asm_len ("adiw r28,%o0-63" CR_TAB
+ "std Y+63,%1" CR_TAB
+ "sbiw r28,%o0-63", op, plen, -3);
+
+ return avr_asm_len ("subi r28,lo8(-%o0)" CR_TAB
+ "sbci r29,hi8(-%o0)" CR_TAB
+ "st Y,%1" CR_TAB
+ "subi r28,lo8(%o0)" CR_TAB
+ "sbci r29,hi8(%o0)", op, plen, -5);
+ }
+ else if (REGNO (XEXP (x,0)) == REG_X)
+ {
+ if (reg_overlap_mentioned_p (src, XEXP (x, 0)))
+ {
+ avr_asm_len ("mov __tmp_reg__,%1" CR_TAB
+ "adiw r26,%o0" CR_TAB
+ "st X,__tmp_reg__", op, plen, -3);
+ }
+ else
+ {
+ avr_asm_len ("adiw r26,%o0" CR_TAB
+ "st X,%1", op, plen, -2);
+ }
+
+ if (!reg_unused_after (insn, XEXP (x,0)))
+ avr_asm_len ("sbiw r26,%o0", op, plen, 1);
+
+ return "";
+ }
+
+ return avr_asm_len ("std %0,%1", op, plen, -1);
+ }
+
+ return avr_asm_len ("st %0,%1", op, plen, -1);
+}
+
+
+/* Helper for the next function for XMEGA. It does the same
+ but with low byte first. */
+
+static const char*
+avr_out_movhi_mr_r_xmega (rtx insn, rtx op[], int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (dest, 0);
+ int reg_base = true_regnum (base);
+ int reg_src = true_regnum (src);
+
+ /* "volatile" forces writing low byte first, even if less efficient,
+ for correct operation with 16-bit I/O registers like SP. */
+ int mem_volatile_p = MEM_VOLATILE_P (dest);
+
+ if (CONSTANT_ADDRESS_P (base))
+ return optimize > 0 && io_address_operand (base, HImode)
+ ? avr_asm_len ("out %i0,%A1" CR_TAB
+ "out %i0+1,%B1", op, plen, -2)
+
+ : avr_asm_len ("sts %m0,%A1" CR_TAB
+ "sts %m0+1,%B1", op, plen, -4);
+
+ if (reg_base > 0)
+ {
+ if (reg_base != REG_X)
+ return avr_asm_len ("st %0,%A1" CR_TAB
+ "std %0+1,%B1", op, plen, -2);
+
+ if (reg_src == REG_X)
+ /* "st X+,r26" and "st -X,r26" are undefined. */
+ avr_asm_len ("mov __tmp_reg__,r27" CR_TAB
+ "st X,r26" CR_TAB
+ "adiw r26,1" CR_TAB
+ "st X,__tmp_reg__", op, plen, -4);
+ else
+ avr_asm_len ("st X+,%A1" CR_TAB
+ "st X,%B1", op, plen, -2);
+
+ return reg_unused_after (insn, base)
+ ? ""
+ : avr_asm_len ("sbiw r26,1", op, plen, 1);
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ reg_base = REGNO (XEXP (base, 0));
+ if (disp > MAX_LD_OFFSET (GET_MODE (dest)))
+ {
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ return disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest))
+ ? avr_asm_len ("adiw r28,%o0-62" CR_TAB
+ "std Y+62,%A1" CR_TAB
+ "std Y+63,%B1" CR_TAB
+ "sbiw r28,%o0-62", op, plen, -4)
+
+ : avr_asm_len ("subi r28,lo8(-%o0)" CR_TAB
+ "sbci r29,hi8(-%o0)" CR_TAB
+ "st Y,%A1" CR_TAB
+ "std Y+1,%B1" CR_TAB
+ "subi r28,lo8(%o0)" CR_TAB
+ "sbci r29,hi8(%o0)", op, plen, -6);
+ }
+
+ if (reg_base != REG_X)
+ return avr_asm_len ("std %A0,%A1" CR_TAB
+ "std %B0,%B1", op, plen, -2);
+ /* (X + d) = R */
+ return reg_src == REG_X
+ ? avr_asm_len ("mov __tmp_reg__,r26" CR_TAB
+ "mov __zero_reg__,r27" CR_TAB
+ "adiw r26,%o0" CR_TAB
+ "st X+,__tmp_reg__" CR_TAB
+ "st X,__zero_reg__" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ "sbiw r26,%o0+1", op, plen, -7)
+
+ : avr_asm_len ("adiw r26,%o0" CR_TAB
+ "st X+,%A1" CR_TAB
+ "st X,%B1" CR_TAB
+ "sbiw r26,%o0+1", op, plen, -4);
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ {
+ if (!mem_volatile_p)
+ return avr_asm_len ("st %0,%B1" CR_TAB
+ "st %0,%A1", op, plen, -2);
+
+ return REGNO (XEXP (base, 0)) == REG_X
+ ? avr_asm_len ("sbiw r26,2" CR_TAB
+ "st X+,%A1" CR_TAB
+ "st X,%B1" CR_TAB
+ "sbiw r26,1", op, plen, -4)
+
+ : avr_asm_len ("sbiw %r0,2" CR_TAB
+ "st %p0,%A1" CR_TAB
+ "std %p0+1,%B1", op, plen, -3);
+ }
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ {
+ return avr_asm_len ("st %0,%A1" CR_TAB
+ "st %0,%B1", op, plen, -2);
+
+ }
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+
+static const char*
+out_movhi_mr_r (rtx insn, rtx op[], int *plen)
+{
+ rtx dest = op[0];
+ rtx src = op[1];
+ rtx base = XEXP (dest, 0);
+ int reg_base = true_regnum (base);
+ int reg_src = true_regnum (src);
+ int mem_volatile_p;
+
+ /* "volatile" forces writing high-byte first (no-xmega) resp.
+ low-byte first (xmega) even if less efficient, for correct
+ operation with 16-bit I/O registers like. */
+
+ if (AVR_XMEGA)
+ return avr_out_movhi_mr_r_xmega (insn, op, plen);
+
+ mem_volatile_p = MEM_VOLATILE_P (dest);
+
+ if (CONSTANT_ADDRESS_P (base))
+ return optimize > 0 && io_address_operand (base, HImode)
+ ? avr_asm_len ("out %i0+1,%B1" CR_TAB
+ "out %i0,%A1", op, plen, -2)
+
+ : avr_asm_len ("sts %m0+1,%B1" CR_TAB
+ "sts %m0,%A1", op, plen, -4);
+
+ if (reg_base > 0)
+ {
+ if (reg_base != REG_X)
+ return avr_asm_len ("std %0+1,%B1" CR_TAB
+ "st %0,%A1", op, plen, -2);
+
+ if (reg_src == REG_X)
+ /* "st X+,r26" and "st -X,r26" are undefined. */
+ return !mem_volatile_p && reg_unused_after (insn, src)
+ ? avr_asm_len ("mov __tmp_reg__,r27" CR_TAB
+ "st X,r26" CR_TAB
+ "adiw r26,1" CR_TAB
+ "st X,__tmp_reg__", op, plen, -4)
+
+ : avr_asm_len ("mov __tmp_reg__,r27" CR_TAB
+ "adiw r26,1" CR_TAB
+ "st X,__tmp_reg__" CR_TAB
+ "sbiw r26,1" CR_TAB
+ "st X,r26", op, plen, -5);
+
+ return !mem_volatile_p && reg_unused_after (insn, base)
+ ? avr_asm_len ("st X+,%A1" CR_TAB
+ "st X,%B1", op, plen, -2)
+ : avr_asm_len ("adiw r26,1" CR_TAB
+ "st X,%B1" CR_TAB
+ "st -X,%A1", op, plen, -3);
+ }
+ else if (GET_CODE (base) == PLUS)
+ {
+ int disp = INTVAL (XEXP (base, 1));
+ reg_base = REGNO (XEXP (base, 0));
+ if (disp > MAX_LD_OFFSET (GET_MODE (dest)))
+ {
+ if (reg_base != REG_Y)
+ fatal_insn ("incorrect insn:",insn);
+
+ return disp <= 63 + MAX_LD_OFFSET (GET_MODE (dest))
+ ? avr_asm_len ("adiw r28,%o0-62" CR_TAB
+ "std Y+63,%B1" CR_TAB
+ "std Y+62,%A1" CR_TAB
+ "sbiw r28,%o0-62", op, plen, -4)
+
+ : avr_asm_len ("subi r28,lo8(-%o0)" CR_TAB
+ "sbci r29,hi8(-%o0)" CR_TAB
+ "std Y+1,%B1" CR_TAB
+ "st Y,%A1" CR_TAB
+ "subi r28,lo8(%o0)" CR_TAB
+ "sbci r29,hi8(%o0)", op, plen, -6);
+ }
+
+ if (reg_base != REG_X)
+ return avr_asm_len ("std %B0,%B1" CR_TAB
+ "std %A0,%A1", op, plen, -2);
+ /* (X + d) = R */
+ return reg_src == REG_X
+ ? avr_asm_len ("mov __tmp_reg__,r26" CR_TAB
+ "mov __zero_reg__,r27" CR_TAB
+ "adiw r26,%o0+1" CR_TAB
+ "st X,__zero_reg__" CR_TAB
+ "st -X,__tmp_reg__" CR_TAB
+ "clr __zero_reg__" CR_TAB
+ "sbiw r26,%o0", op, plen, -7)
+
+ : avr_asm_len ("adiw r26,%o0+1" CR_TAB
+ "st X,%B1" CR_TAB
+ "st -X,%A1" CR_TAB
+ "sbiw r26,%o0", op, plen, -4);
+ }
+ else if (GET_CODE (base) == PRE_DEC) /* (--R) */
+ {
+ return avr_asm_len ("st %0,%B1" CR_TAB
+ "st %0,%A1", op, plen, -2);
+ }
+ else if (GET_CODE (base) == POST_INC) /* (R++) */
+ {
+ if (!mem_volatile_p)
+ return avr_asm_len ("st %0,%A1" CR_TAB
+ "st %0,%B1", op, plen, -2);
+
+ return REGNO (XEXP (base, 0)) == REG_X
+ ? avr_asm_len ("adiw r26,1" CR_TAB
+ "st X,%B1" CR_TAB
+ "st -X,%A1" CR_TAB
+ "adiw r26,2", op, plen, -4)
+
+ : avr_asm_len ("std %p0+1,%B1" CR_TAB
+ "st %p0,%A1" CR_TAB
+ "adiw %r0,2", op, plen, -3);
+ }
+ fatal_insn ("unknown move insn:",insn);
+ return "";
+}
+
+/* Return true if the current function requires a frame pointer. */
+
+static bool
+avr_frame_pointer_required_p (void)
+{
+ return (cfun->calls_alloca
+ || cfun->calls_setjmp
+ || cfun->has_nonlocal_label
+ || crtl->args.info.nregs == 0
+ || get_frame_size () > 0);
+}
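+
+/* E.g. any function that calls alloca gets a frame pointer, because the
+ stack pointer then has no compile-time-known offset to the locals. */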
+
+/* Returns the condition of compare insn INSN, or UNKNOWN. */
+
+static RTX_CODE
+compare_condition (rtx insn)
+{
+ rtx next = next_real_insn (insn);
+
+ if (next && JUMP_P (next))
+ {
+ rtx pat = PATTERN (next);
+ rtx src = SET_SRC (pat);
+
+ if (IF_THEN_ELSE == GET_CODE (src))
+ return GET_CODE (XEXP (src, 0));
+ }
+
+ return UNKNOWN;
+}
+
+
+/* Returns true iff INSN is a tst insn that only tests the sign. */
+
+static bool
+compare_sign_p (rtx insn)
+{
+ RTX_CODE cond = compare_condition (insn);
+ return (cond == GE || cond == LT);
+}
+
+
+/* Returns true iff the next insn is a JUMP_INSN with a condition
+ that needs to be swapped (GT, GTU, LE, LEU). */
+
+static bool
+compare_diff_p (rtx insn)
+{
+ RTX_CODE cond = compare_condition (insn);
+ return cond == GT || cond == GTU || cond == LE || cond == LEU;
+}
+
+/* Returns true iff INSN is a compare insn with the EQ or NE condition. */
+
+static bool
+compare_eq_p (rtx insn)
+{
+ RTX_CODE cond = compare_condition (insn);
+ return (cond == EQ || cond == NE);
+}
+
+
+/* Output compare instruction
+
+ compare (XOP[0], XOP[1])
+
+ for a register XOP[0] and a compile-time constant XOP[1]. Return "".
+ XOP[2] is an 8-bit scratch register as needed.
+
+ PLEN == NULL: Output instructions.
+ PLEN != NULL: Set *PLEN to the length (in words) of the sequence.
+ Don't output anything. */
+
+const char*
+avr_out_compare (rtx insn, rtx *xop, int *plen)
+{
+ /* Register to compare and value to compare against. */
+ rtx xreg = xop[0];
+ rtx xval = xop[1];
+
+ /* MODE of the comparison. */
+ enum machine_mode mode;
+
+ /* Number of bytes to operate on. */
+ int i, n_bytes = GET_MODE_SIZE (GET_MODE (xreg));
+
+ /* Value (0..0xff) held in clobber register xop[2] or -1 if unknown. */
+ int clobber_val = -1;
+
+ /* Map fixed mode operands to integer operands with the same binary
+ representation. They are easier to handle in the remainder. */
+
+ if (CONST_FIXED_P (xval))
+ {
+ xreg = avr_to_int_mode (xop[0]);
+ xval = avr_to_int_mode (xop[1]);
+ }
+
+ mode = GET_MODE (xreg);
+
+ gcc_assert (REG_P (xreg));
+ gcc_assert ((CONST_INT_P (xval) && n_bytes <= 4)
+ || (const_double_operand (xval, VOIDmode) && n_bytes == 8));
+
+ if (plen)
+ *plen = 0;
+
+ /* Comparisons == +/-1 and != +/-1 can be done similarly to comparing
+ against 0 by ORing the bytes. This is one instruction shorter.
+ Notice that 64-bit comparisons are always against reg:ALL8 18 (ACC_A)
+ and therefore don't use this. */
+
+ if (!test_hard_reg_class (LD_REGS, xreg)
+ && compare_eq_p (insn)
+ && reg_unused_after (insn, xreg))
+ {
+ if (xval == const1_rtx)
+ {
+ avr_asm_len ("dec %A0" CR_TAB
+ "or %A0,%B0", xop, plen, 2);
+
+ if (n_bytes >= 3)
+ avr_asm_len ("or %A0,%C0", xop, plen, 1);
+
+ if (n_bytes >= 4)
+ avr_asm_len ("or %A0,%D0", xop, plen, 1);
+
+ return "";
+ }
+ else if (xval == constm1_rtx)
+ {
+ if (n_bytes >= 4)
+ avr_asm_len ("and %A0,%D0", xop, plen, 1);
+
+ if (n_bytes >= 3)
+ avr_asm_len ("and %A0,%C0", xop, plen, 1);
+
+ return avr_asm_len ("and %A0,%B0" CR_TAB
+ "com %A0", xop, plen, 2);
+ }
+ }
+
+ for (i = 0; i < n_bytes; i++)
+ {
+ /* We compare byte-wise. */
+ rtx reg8 = simplify_gen_subreg (QImode, xreg, mode, i);
+ rtx xval8 = simplify_gen_subreg (QImode, xval, mode, i);
+
+ /* 8-bit value to compare with this byte. */
+ unsigned int val8 = UINTVAL (xval8) & GET_MODE_MASK (QImode);
+
+ /* Registers R16..R31 can operate with immediate. */
+ bool ld_reg_p = test_hard_reg_class (LD_REGS, reg8);
+
+ xop[0] = reg8;
+ xop[1] = gen_int_mode (val8, QImode);
+
+ /* Word registers >= R24 can use SBIW/ADIW with 0..63. */
+
+ if (i == 0
+ && test_hard_reg_class (ADDW_REGS, reg8))
+ {
+ int val16 = trunc_int_for_mode (INTVAL (xval), HImode);
+
+ if (IN_RANGE (val16, 0, 63)
+ && (val8 == 0
+ || reg_unused_after (insn, xreg)))
+ {
+ avr_asm_len ("sbiw %0,%1", xop, plen, 1);
+ i++;
+ continue;
+ }
+
+ if (n_bytes == 2
+ && IN_RANGE (val16, -63, -1)
+ && compare_eq_p (insn)
+ && reg_unused_after (insn, xreg))
+ {
+ return avr_asm_len ("adiw %0,%n1", xop, plen, 1);
+ }
+ }
+
+ /* Comparing against 0 is easy. */
+
+ if (val8 == 0)
+ {
+ avr_asm_len (i == 0
+ ? "cp %0,__zero_reg__"
+ : "cpc %0,__zero_reg__", xop, plen, 1);
+ continue;
+ }
+
+ /* Upper registers can compare and subtract-with-carry immediates.
+ Notice that compare instructions do the same as the respective subtract
+ instructions; the only difference is that comparisons don't write
+ the result back to the target register. */
+
+ if (ld_reg_p)
+ {
+ if (i == 0)
+ {
+ avr_asm_len ("cpi %0,%1", xop, plen, 1);
+ continue;
+ }
+ else if (reg_unused_after (insn, xreg))
+ {
+ avr_asm_len ("sbci %0,%1", xop, plen, 1);
+ continue;
+ }
+ }
+
+ /* Must load the value into the scratch register. */
+
+ gcc_assert (REG_P (xop[2]));
+
+ if (clobber_val != (int) val8)
+ avr_asm_len ("ldi %2,%1", xop, plen, 1);
+ clobber_val = (int) val8;
+
+ avr_asm_len (i == 0
+ ? "cp %0,%2"
+ : "cpc %0,%2", xop, plen, 1);
+ }
+
+ return "";
+}
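+
+/* Examples (hypothetical register allocation): an HImode compare of
+ R25:R24 against 4 with the register dead afterwards takes the
+ SBIW shortcut:
+
+ sbiw r24,4
+
+ while comparing R15:R14 against 0x1234 goes byte-wise through the
+ scratch register xop[2], here assumed to be R16:
+
+ ldi r16,0x34
+ cp r14,r16
+ ldi r16,0x12
+ cpc r15,r16 */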
+
+
+/* Prepare operands of compare_const_di2 to be used with avr_out_compare. */
+
+const char*
+avr_out_compare64 (rtx insn, rtx *op, int *plen)
+{
+ rtx xop[3];
+
+ xop[0] = gen_rtx_REG (DImode, 18);
+ xop[1] = op[0];
+ xop[2] = op[1];
+
+ return avr_out_compare (insn, xop, plen);
+}
+
+/* Output test instruction for HImode. */
+
+const char*
+avr_out_tsthi (rtx insn, rtx *op, int *plen)
+{
+ if (compare_sign_p (insn))
+ {
+ avr_asm_len ("tst %B0", op, plen, -1);
+ }
+ else if (reg_unused_after (insn, op[0])
+ && compare_eq_p (insn))
+ {
+ /* Faster than sbiw if we can clobber the operand. */
+ avr_asm_len ("or %A0,%B0", op, plen, -1);
+ }
+ else
+ {
+ avr_out_compare (insn, op, plen);
+ }
+
+ return "";
+}
+
+
+/* Output test instruction for PSImode. */
+
+const char*
+avr_out_tstpsi (rtx insn, rtx *op, int *plen)
+{
+ if (compare_sign_p (insn))
+ {
+ avr_asm_len ("tst %C0", op, plen, -1);
+ }
+ else if (reg_unused_after (insn, op[0])
+ && compare_eq_p (insn))
+ {
+ /* Faster than sbiw if we can clobber the operand. */
+ avr_asm_len ("or %A0,%B0" CR_TAB
+ "or %A0,%C0", op, plen, -2);
+ }
+ else
+ {
+ avr_out_compare (insn, op, plen);
+ }
+
+ return "";
+}
+
+
+/* Output test instruction for SImode. */
+
+const char*
+avr_out_tstsi (rtx insn, rtx *op, int *plen)
+{
+ if (compare_sign_p (insn))
+ {
+ avr_asm_len ("tst %D0", op, plen, -1);
+ }
+ else if (reg_unused_after (insn, op[0])
+ && compare_eq_p (insn))
+ {
+ /* Faster than sbiw if we can clobber the operand. */
+ avr_asm_len ("or %A0,%B0" CR_TAB
+ "or %A0,%C0" CR_TAB
+ "or %A0,%D0", op, plen, -3);
+ }
+ else
+ {
+ avr_out_compare (insn, op, plen);
+ }
+
+ return "";
+}
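+
+/* E.g. a pure sign test of an SImode value only needs "tst %D0", since
+ bit 31 lives in the top byte; an EQ/NE test on a dead register ORs
+ the bytes together, which is shorter than a full compare against 0. */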
+
+
+/* Generate asm equivalent for various shifts. This only handles cases
+ that are not already carefully hand-optimized in ?sh??i3_out.
+
+ OPERANDS[0] resp. %0 in TEMPL is the operand to be shifted.
+ OPERANDS[2] is the shift count as CONST_INT, MEM or REG.
+ OPERANDS[3] is a QImode scratch register from LD_REGS if one is
+ available, and SCRATCH otherwise (no scratch available).
+
+ TEMPL is an assembler template that shifts by one position.
+ T_LEN is the length of this template. */
+
+void
+out_shift_with_cnt (const char *templ, rtx insn, rtx operands[],
+ int *plen, int t_len)
+{
+ bool second_label = true;
+ bool saved_in_tmp = false;
+ bool use_zero_reg = false;
+ rtx op[5];
+
+ op[0] = operands[0];
+ op[1] = operands[1];
+ op[2] = operands[2];
+ op[3] = operands[3];
+
+ if (plen)
+ *plen = 0;
+
+ if (CONST_INT_P (operands[2]))
+ {
+ bool scratch = (GET_CODE (PATTERN (insn)) == PARALLEL
+ && REG_P (operands[3]));
+ int count = INTVAL (operands[2]);
+ int max_len = 10; /* If larger than this, always use a loop. */
+
+ if (count <= 0)
+ return;
+
+ if (count < 8 && !scratch)
+ use_zero_reg = true;
+
+ if (optimize_size)
+ max_len = t_len + (scratch ? 3 : (use_zero_reg ? 4 : 5));
+
+ if (t_len * count <= max_len)
+ {
+ /* Output shifts inline with no loop - faster. */
+
+ while (count-- > 0)
+ avr_asm_len (templ, op, plen, t_len);
+
+ return;
+ }
+
+ if (scratch)
+ {
+ avr_asm_len ("ldi %3,%2", op, plen, 1);
+ }
+ else if (use_zero_reg)
+ {
+ /* Hack to save one word: use __zero_reg__ as loop counter.
+ Set one bit, then shift in a loop until it is 0 again. */
+
+ op[3] = zero_reg_rtx;
+
+ avr_asm_len ("set" CR_TAB
+ "bld %3,%2-1", op, plen, 2);
+ }
+ else
+ {
+ /* No scratch register available, use one from LD_REGS (saved in
+ __tmp_reg__) that doesn't overlap with registers to shift. */
+
+ op[3] = all_regs_rtx[((REGNO (op[0]) - 1) & 15) + 16];
+ op[4] = tmp_reg_rtx;
+ saved_in_tmp = true;
+
+ avr_asm_len ("mov %4,%3" CR_TAB
+ "ldi %3,%2", op, plen, 2);
+ }
+
+ second_label = false;
+ }
+ else if (MEM_P (op[2]))
+ {
+ rtx op_mov[2];
+
+ op_mov[0] = op[3] = tmp_reg_rtx;
+ op_mov[1] = op[2];
+
+ out_movqi_r_mr (insn, op_mov, plen);
+ }
+ else if (register_operand (op[2], QImode))
+ {
+ op[3] = op[2];
+
+ if (!reg_unused_after (insn, op[2])
+ || reg_overlap_mentioned_p (op[0], op[2]))
+ {
+ op[3] = tmp_reg_rtx;
+ avr_asm_len ("mov %3,%2", op, plen, 1);
+ }
+ }
+ else
+ fatal_insn ("bad shift insn:", insn);
+
+ if (second_label)
+ avr_asm_len ("rjmp 2f", op, plen, 1);
+
+ avr_asm_len ("1:", op, plen, 0);
+ avr_asm_len (templ, op, plen, t_len);
+
+ if (second_label)
+ avr_asm_len ("2:", op, plen, 0);
+
+ avr_asm_len (use_zero_reg ? "lsr %3" : "dec %3", op, plen, 1);
+ avr_asm_len (second_label ? "brpl 1b" : "brne 1b", op, plen, 1);
+
+ if (saved_in_tmp)
+ avr_asm_len ("mov %3,%4", op, plen, 1);
+}
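+
+/* For a shift count in a register the emitted loop looks like (sketch
+ for the "lsl %A0" template, count register %3 assumed to be R18):
+
+ rjmp 2f ; test first, so a count of 0 shifts nothing
+ 1: lsl r24
+ 2: dec r18
+ brpl 1b */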
+
+
+/* 8-bit shift left ((char)x << i). */
+
+const char *
+ashlqi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 8)
+ break;
+
+ *len = 1;
+ return "clr %0";
+
+ case 1:
+ *len = 1;
+ return "lsl %0";
+
+ case 2:
+ *len = 2;
+ return ("lsl %0" CR_TAB
+ "lsl %0");
+
+ case 3:
+ *len = 3;
+ return ("lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0");
+
+ case 4:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 2;
+ return ("swap %0" CR_TAB
+ "andi %0,0xf0");
+ }
+ *len = 4;
+ return ("lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0");
+
+ case 5:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 3;
+ return ("swap %0" CR_TAB
+ "lsl %0" CR_TAB
+ "andi %0,0xe0");
+ }
+ *len = 5;
+ return ("lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0");
+
+ case 6:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 4;
+ return ("swap %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "andi %0,0xc0");
+ }
+ *len = 6;
+ return ("lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0" CR_TAB
+ "lsl %0");
+
+ case 7:
+ *len = 3;
+ return ("ror %0" CR_TAB
+ "clr %0" CR_TAB
+ "ror %0");
+ }
+ }
+ else if (CONSTANT_P (operands[2]))
+ fatal_insn ("internal compiler error. Incorrect shift:", insn);
+
+ out_shift_with_cnt ("lsl %0",
+ insn, operands, len, 1);
+ return "";
+}
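+
+/* The case-4 trick above, e.g. for (char)x << 4 in an upper register:
+
+ swap r24 ; exchange nibbles
+ andi r24,0xf0 ; clear what was shifted in
+
+ two words instead of four LSLs; ANDI requires an LD_REGS register. */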
+
+
+/* 16-bit shift left ((short)x << i). */
+
+const char *
+ashlhi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int ldi_ok = test_hard_reg_class (LD_REGS, operands[0]);
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 16)
+ break;
+
+ *len = 2;
+ return ("clr %B0" CR_TAB
+ "clr %A0");
+
+ case 4:
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (ldi_ok)
+ {
+ *len = 6;
+ return ("swap %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "andi %B0,0xf0" CR_TAB
+ "eor %B0,%A0" CR_TAB
+ "andi %A0,0xf0" CR_TAB
+ "eor %B0,%A0");
+ }
+ if (scratch)
+ {
+ *len = 7;
+ return ("swap %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "ldi %3,0xf0" CR_TAB
+ "and %B0,%3" CR_TAB
+ "eor %B0,%A0" CR_TAB
+ "and %A0,%3" CR_TAB
+ "eor %B0,%A0");
+ }
+ break; /* optimize_size ? 6 : 8 */
+
+ case 5:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ if (ldi_ok)
+ {
+ *len = 8;
+ return ("lsl %A0" CR_TAB
+ "rol %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "andi %B0,0xf0" CR_TAB
+ "eor %B0,%A0" CR_TAB
+ "andi %A0,0xf0" CR_TAB
+ "eor %B0,%A0");
+ }
+ if (scratch)
+ {
+ *len = 9;
+ return ("lsl %A0" CR_TAB
+ "rol %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "ldi %3,0xf0" CR_TAB
+ "and %B0,%3" CR_TAB
+ "eor %B0,%A0" CR_TAB
+ "and %A0,%3" CR_TAB
+ "eor %B0,%A0");
+ }
+ break; /* 10 */
+
+ case 6:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ *len = 9;
+ return ("clr __tmp_reg__" CR_TAB
+ "lsr %B0" CR_TAB
+ "ror %A0" CR_TAB
+ "ror __tmp_reg__" CR_TAB
+ "lsr %B0" CR_TAB
+ "ror %A0" CR_TAB
+ "ror __tmp_reg__" CR_TAB
+ "mov %B0,%A0" CR_TAB
+ "mov %A0,__tmp_reg__");
+
+ case 7:
+ *len = 5;
+ return ("lsr %B0" CR_TAB
+ "mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "ror %B0" CR_TAB
+ "ror %A0");
+
+ case 8:
+ return *len = 2, ("mov %B0,%A1" CR_TAB
+ "clr %A0");
+
+ case 9:
+ *len = 3;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "lsl %B0");
+
+ case 10:
+ *len = 4;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0");
+
+ case 11:
+ *len = 5;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0");
+
+ case 12:
+ if (ldi_ok)
+ {
+ *len = 4;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "andi %B0,0xf0");
+ }
+ if (scratch)
+ {
+ *len = 5;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "ldi %3,0xf0" CR_TAB
+ "and %B0,%3");
+ }
+ *len = 6;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0");
+
+ case 13:
+ if (ldi_ok)
+ {
+ *len = 5;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "andi %B0,0xe0");
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return ("ldi %3,0x20" CR_TAB
+ "mul %A0,%3" CR_TAB
+ "mov %B0,r0" CR_TAB
+ "clr %A0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (scratch)
+ {
+ *len = 6;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "ldi %3,0xe0" CR_TAB
+ "and %B0,%3");
+ }
+ if (AVR_HAVE_MUL)
+ {
+ *len = 6;
+ return ("set" CR_TAB
+ "bld r1,5" CR_TAB
+ "mul %A0,r1" CR_TAB
+ "mov %B0,r0" CR_TAB
+ "clr %A0" CR_TAB
+ "clr __zero_reg__");
+ }
+ *len = 7;
+ return ("mov %B0,%A0" CR_TAB
+ "clr %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "lsl %B0");
+
+ case 14:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return ("ldi %B0,0x40" CR_TAB
+ "mul %A0,%B0" CR_TAB
+ "mov %B0,r0" CR_TAB
+ "clr %A0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return ("ldi %3,0x40" CR_TAB
+ "mul %A0,%3" CR_TAB
+ "mov %B0,r0" CR_TAB
+ "clr %A0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size && ldi_ok)
+ {
+ *len = 5;
+ return ("mov %B0,%A0" CR_TAB
+ "ldi %A0,6" "\n1:\t"
+ "lsl %B0" CR_TAB
+ "dec %A0" CR_TAB
+ "brne 1b");
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 6;
+ return ("clr %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "ror %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "ror %B0" CR_TAB
+ "clr %A0");
+
+ case 15:
+ *len = 4;
+ return ("clr %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "ror %B0" CR_TAB
+ "clr %A0");
+ }
+ len = t;
+ }
+ out_shift_with_cnt ("lsl %A0" CR_TAB
+ "rol %B0", insn, operands, len, 2);
+ return "";
+}
+
+
+/* 24-bit shift left */
+
+const char*
+avr_out_ashlpsi3 (rtx insn, rtx *op, int *plen)
+{
+ if (plen)
+ *plen = 0;
+
+ if (CONST_INT_P (op[2]))
+ {
+ switch (INTVAL (op[2]))
+ {
+ default:
+ if (INTVAL (op[2]) < 24)
+ break;
+
+ return avr_asm_len ("clr %A0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %C0", op, plen, 3);
+
+ case 8:
+ {
+ int reg0 = REGNO (op[0]);
+ int reg1 = REGNO (op[1]);
+
+ if (reg0 >= reg1)
+ return avr_asm_len ("mov %C0,%B1" CR_TAB
+ "mov %B0,%A1" CR_TAB
+ "clr %A0", op, plen, 3);
+ else
+ return avr_asm_len ("clr %A0" CR_TAB
+ "mov %B0,%A1" CR_TAB
+ "mov %C0,%B1", op, plen, 3);
+ }
+
+ case 16:
+ {
+ int reg0 = REGNO (op[0]);
+ int reg1 = REGNO (op[1]);
+
+ if (reg0 + 2 != reg1)
+ avr_asm_len ("mov %C0,%A0", op, plen, 1);
+
+ return avr_asm_len ("clr %B0" CR_TAB
+ "clr %A0", op, plen, 2);
+ }
+
+ case 23:
+ return avr_asm_len ("clr %C0" CR_TAB
+ "lsr %A0" CR_TAB
+ "ror %C0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0", op, plen, 5);
+ }
+ }
+
+ out_shift_with_cnt ("lsl %A0" CR_TAB
+ "rol %B0" CR_TAB
+ "rol %C0", insn, op, plen, 3);
+ return "";
+}
+
+
+/* 32-bit shift left ((long)x << i) */
+
+const char *
+ashlsi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 32)
+ break;
+
+ if (AVR_HAVE_MOVW)
+ return *len = 3, ("clr %D0" CR_TAB
+ "clr %C0" CR_TAB
+ "movw %A0,%C0");
+ *len = 4;
+ return ("clr %D0" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0");
+
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+ *len = 4;
+ if (reg0 >= reg1)
+ return ("mov %D0,%C1" CR_TAB
+ "mov %C0,%B1" CR_TAB
+ "mov %B0,%A1" CR_TAB
+ "clr %A0");
+ else
+ return ("clr %A0" CR_TAB
+ "mov %B0,%A1" CR_TAB
+ "mov %C0,%B1" CR_TAB
+ "mov %D0,%C1");
+ }
+
+ case 16:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+ if (reg0 + 2 == reg1)
+ return *len = 2, ("clr %B0" CR_TAB
+ "clr %A0");
+ if (AVR_HAVE_MOVW)
+ return *len = 3, ("movw %C0,%A1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0");
+ else
+ return *len = 4, ("mov %C0,%A1" CR_TAB
+ "mov %D0,%B1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0");
+ }
+
+ case 24:
+ *len = 4;
+ return ("mov %D0,%A1" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0");
+
+ case 31:
+ *len = 6;
+ return ("clr %D0" CR_TAB
+ "lsr %A0" CR_TAB
+ "ror %D0" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0");
+ }
+ len = t;
+ }
+ out_shift_with_cnt ("lsl %A0" CR_TAB
+ "rol %B0" CR_TAB
+ "rol %C0" CR_TAB
+ "rol %D0", insn, operands, len, 4);
+ return "";
+}
+
+/* 8-bit arithmetic shift right ((signed char)x >> i) */
+
+const char *
+ashrqi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ case 1:
+ *len = 1;
+ return "asr %0";
+
+ case 2:
+ *len = 2;
+ return ("asr %0" CR_TAB
+ "asr %0");
+
+ case 3:
+ *len = 3;
+ return ("asr %0" CR_TAB
+ "asr %0" CR_TAB
+ "asr %0");
+
+ case 4:
+ *len = 4;
+ return ("asr %0" CR_TAB
+ "asr %0" CR_TAB
+ "asr %0" CR_TAB
+ "asr %0");
+
+ case 5:
+ *len = 5;
+ return ("asr %0" CR_TAB
+ "asr %0" CR_TAB
+ "asr %0" CR_TAB
+ "asr %0" CR_TAB
+ "asr %0");
+
+ case 6:
+ *len = 4;
+ return ("bst %0,6" CR_TAB
+ "lsl %0" CR_TAB
+ "sbc %0,%0" CR_TAB
+ "bld %0,0");
+
+ default:
+ if (INTVAL (operands[2]) < 8)
+ break;
+
+ /* fall through */
+
+ case 7:
+ *len = 2;
+ return ("lsl %0" CR_TAB
+ "sbc %0,%0");
+ }
+ }
+ else if (CONSTANT_P (operands[2]))
+ fatal_insn ("internal compiler error. Incorrect shift:", insn);
+
+ out_shift_with_cnt ("asr %0",
+ insn, operands, len, 1);
+ return "";
+}
+
+
+/* 16-bit arithmetic shift right ((signed short)x >> i) */
+
+const char *
+ashrhi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int ldi_ok = test_hard_reg_class (LD_REGS, operands[0]);
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ case 4:
+ case 5:
+ /* XXX try to optimize this too? */
+ break;
+
+ case 6:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ *len = 8;
+ return ("mov __tmp_reg__,%A0" CR_TAB
+ "mov %A0,%B0" CR_TAB
+ "lsl __tmp_reg__" CR_TAB
+ "rol %A0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "lsl __tmp_reg__" CR_TAB
+ "rol %A0" CR_TAB
+ "rol %B0");
+
+ case 7:
+ *len = 4;
+ return ("lsl %A0" CR_TAB
+ "mov %A0,%B0" CR_TAB
+ "rol %A0" CR_TAB
+ "sbc %B0,%B0");
+
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+
+ if (reg0 == reg1)
+ return *len = 3, ("mov %A0,%B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "sbc %B0,%B0");
+ else
+ return *len = 4, ("mov %A0,%B1" CR_TAB
+ "clr %B0" CR_TAB
+ "sbrc %A0,7" CR_TAB
+ "dec %B0");
+ }
+
+ case 9:
+ *len = 4;
+ return ("mov %A0,%B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "asr %A0");
+
+ case 10:
+ *len = 5;
+ return ("mov %A0,%B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0");
+
+ case 11:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return ("ldi %A0,0x20" CR_TAB
+ "muls %B0,%A0" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 6;
+ return ("mov %A0,%B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0");
+
+ case 12:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return ("ldi %A0,0x10" CR_TAB
+ "muls %B0,%A0" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 7;
+ return ("mov %A0,%B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0");
+
+ case 13:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return ("ldi %A0,0x08" CR_TAB
+ "muls %B0,%A0" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size)
+ break; /* scratch ? 5 : 7 */
+ *len = 8;
+ return ("mov %A0,%B0" CR_TAB
+ "lsl %B0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0" CR_TAB
+ "asr %A0");
+
+ case 14:
+ *len = 5;
+ return ("lsl %B0" CR_TAB
+ "sbc %A0,%A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "mov %B0,%A0" CR_TAB
+ "rol %A0");
+
+ default:
+ if (INTVAL (operands[2]) < 16)
+ break;
+
+ /* fall through */
+
+ case 15:
+ return *len = 3, ("lsl %B0" CR_TAB
+ "sbc %A0,%A0" CR_TAB
+ "mov %B0,%A0");
+ }
+ len = t;
+ }
+ out_shift_with_cnt ("asr %B0" CR_TAB
+ "ror %A0", insn, operands, len, 2);
+ return "";
+}
+
+
+/* 24-bit arithmetic shift right */
+
+const char*
+avr_out_ashrpsi3 (rtx insn, rtx *op, int *plen)
+{
+ int dest = REGNO (op[0]);
+ int src = REGNO (op[1]);
+
+ if (CONST_INT_P (op[2]))
+ {
+ if (plen)
+ *plen = 0;
+
+ switch (INTVAL (op[2]))
+ {
+ case 8:
+ if (dest <= src)
+ return avr_asm_len ("mov %A0,%B1" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "clr %C0" CR_TAB
+ "sbrc %B0,7" CR_TAB
+ "dec %C0", op, plen, 5);
+ else
+ return avr_asm_len ("clr %C0" CR_TAB
+ "sbrc %C1,7" CR_TAB
+ "dec %C0" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "mov %A0,%B1", op, plen, 5);
+
+ case 16:
+ if (dest != src + 2)
+ avr_asm_len ("mov %A0,%C1", op, plen, 1);
+
+ return avr_asm_len ("clr %B0" CR_TAB
+ "sbrc %A0,7" CR_TAB
+ "com %B0" CR_TAB
+ "mov %C0,%B0", op, plen, 4);
+
+ default:
+ if (INTVAL (op[2]) < 24)
+ break;
+
+ /* fall through */
+
+ case 23:
+ return avr_asm_len ("lsl %C0" CR_TAB
+ "sbc %A0,%A0" CR_TAB
+ "mov %B0,%A0" CR_TAB
+ "mov %C0,%A0", op, plen, 4);
+ } /* switch */
+ }
+
+ out_shift_with_cnt ("asr %C0" CR_TAB
+ "ror %B0" CR_TAB
+ "ror %A0", insn, op, plen, 3);
+ return "";
+}
+
+
+/* 32-bit arithmetic shift right ((signed long)x >> i) */
+
+const char *
+ashrsi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+	*len = 6;
+ if (reg0 <= reg1)
+ return ("mov %A0,%B1" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "mov %C0,%D1" CR_TAB
+ "clr %D0" CR_TAB
+ "sbrc %C0,7" CR_TAB
+ "dec %D0");
+ else
+ return ("clr %D0" CR_TAB
+ "sbrc %D1,7" CR_TAB
+ "dec %D0" CR_TAB
+ "mov %C0,%D1" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "mov %A0,%B1");
+ }
+
+ case 16:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+
+ if (reg0 == reg1 + 2)
+ return *len = 4, ("clr %D0" CR_TAB
+ "sbrc %B0,7" CR_TAB
+ "com %D0" CR_TAB
+ "mov %C0,%D0");
+ if (AVR_HAVE_MOVW)
+ return *len = 5, ("movw %A0,%C1" CR_TAB
+ "clr %D0" CR_TAB
+ "sbrc %B0,7" CR_TAB
+ "com %D0" CR_TAB
+ "mov %C0,%D0");
+ else
+ return *len = 6, ("mov %B0,%D1" CR_TAB
+ "mov %A0,%C1" CR_TAB
+ "clr %D0" CR_TAB
+ "sbrc %B0,7" CR_TAB
+ "com %D0" CR_TAB
+ "mov %C0,%D0");
+ }
+
+ case 24:
+ return *len = 6, ("mov %A0,%D1" CR_TAB
+ "clr %D0" CR_TAB
+ "sbrc %A0,7" CR_TAB
+ "com %D0" CR_TAB
+ "mov %B0,%D0" CR_TAB
+ "mov %C0,%D0");
+
+ default:
+ if (INTVAL (operands[2]) < 32)
+ break;
+
+ /* fall through */
+
+ case 31:
+ if (AVR_HAVE_MOVW)
+ return *len = 4, ("lsl %D0" CR_TAB
+ "sbc %A0,%A0" CR_TAB
+ "mov %B0,%A0" CR_TAB
+ "movw %C0,%A0");
+ else
+ return *len = 5, ("lsl %D0" CR_TAB
+ "sbc %A0,%A0" CR_TAB
+ "mov %B0,%A0" CR_TAB
+ "mov %C0,%A0" CR_TAB
+ "mov %D0,%A0");
+ }
+ len = t;
+ }
+ out_shift_with_cnt ("asr %D0" CR_TAB
+ "ror %C0" CR_TAB
+ "ror %B0" CR_TAB
+ "ror %A0", insn, operands, len, 4);
+ return "";
+}
+
+/* 8-bit logic shift right ((unsigned char)x >> i) */
+
+const char *
+lshrqi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 8)
+ break;
+
+ *len = 1;
+ return "clr %0";
+
+ case 1:
+ *len = 1;
+ return "lsr %0";
+
+ case 2:
+ *len = 2;
+ return ("lsr %0" CR_TAB
+ "lsr %0");
+ case 3:
+ *len = 3;
+ return ("lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0");
+
+ case 4:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+	  *len = 2;
+ return ("swap %0" CR_TAB
+ "andi %0,0x0f");
+ }
+ *len = 4;
+ return ("lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0");
+
+ case 5:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 3;
+ return ("swap %0" CR_TAB
+ "lsr %0" CR_TAB
+ "andi %0,0x7");
+ }
+ *len = 5;
+ return ("lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0");
+
+ case 6:
+ if (test_hard_reg_class (LD_REGS, operands[0]))
+ {
+ *len = 4;
+ return ("swap %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "andi %0,0x3");
+ }
+ *len = 6;
+ return ("lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0" CR_TAB
+ "lsr %0");
+
+ case 7:
+ *len = 3;
+ return ("rol %0" CR_TAB
+ "clr %0" CR_TAB
+ "rol %0");
+ }
+ }
+ else if (CONSTANT_P (operands[2]))
+ fatal_insn ("internal compiler error. Incorrect shift:", insn);
+
+ out_shift_with_cnt ("lsr %0",
+ insn, operands, len, 1);
+ return "";
+}
+
+/* 16-bit logic shift right ((unsigned short)x >> i) */
+
+const char *
+lshrhi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int scratch = (GET_CODE (PATTERN (insn)) == PARALLEL);
+ int ldi_ok = test_hard_reg_class (LD_REGS, operands[0]);
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 16)
+ break;
+
+ *len = 2;
+ return ("clr %B0" CR_TAB
+ "clr %A0");
+
+ case 4:
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (ldi_ok)
+ {
+ *len = 6;
+ return ("swap %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "andi %A0,0x0f" CR_TAB
+ "eor %A0,%B0" CR_TAB
+ "andi %B0,0x0f" CR_TAB
+ "eor %A0,%B0");
+ }
+ if (scratch)
+ {
+ *len = 7;
+ return ("swap %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "ldi %3,0x0f" CR_TAB
+ "and %A0,%3" CR_TAB
+ "eor %A0,%B0" CR_TAB
+ "and %B0,%3" CR_TAB
+ "eor %A0,%B0");
+ }
+ break; /* optimize_size ? 6 : 8 */
+
+ case 5:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ if (ldi_ok)
+ {
+ *len = 8;
+ return ("lsr %B0" CR_TAB
+ "ror %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "andi %A0,0x0f" CR_TAB
+ "eor %A0,%B0" CR_TAB
+ "andi %B0,0x0f" CR_TAB
+ "eor %A0,%B0");
+ }
+ if (scratch)
+ {
+ *len = 9;
+ return ("lsr %B0" CR_TAB
+ "ror %A0" CR_TAB
+ "swap %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "ldi %3,0x0f" CR_TAB
+ "and %A0,%3" CR_TAB
+ "eor %A0,%B0" CR_TAB
+ "and %B0,%3" CR_TAB
+ "eor %A0,%B0");
+ }
+ break; /* 10 */
+
+ case 6:
+ if (optimize_size)
+ break; /* scratch ? 5 : 6 */
+ *len = 9;
+ return ("clr __tmp_reg__" CR_TAB
+ "lsl %A0" CR_TAB
+ "rol %B0" CR_TAB
+ "rol __tmp_reg__" CR_TAB
+ "lsl %A0" CR_TAB
+ "rol %B0" CR_TAB
+ "rol __tmp_reg__" CR_TAB
+ "mov %A0,%B0" CR_TAB
+ "mov %B0,__tmp_reg__");
+
+ case 7:
+ *len = 5;
+ return ("lsl %A0" CR_TAB
+ "mov %A0,%B0" CR_TAB
+ "rol %A0" CR_TAB
+ "sbc %B0,%B0" CR_TAB
+ "neg %B0");
+
+ case 8:
+ return *len = 2, ("mov %A0,%B1" CR_TAB
+ "clr %B0");
+
+ case 9:
+ *len = 3;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "lsr %A0");
+
+ case 10:
+ *len = 4;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0");
+
+ case 11:
+ *len = 5;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0");
+
+ case 12:
+ if (ldi_ok)
+ {
+ *len = 4;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "andi %A0,0x0f");
+ }
+ if (scratch)
+ {
+ *len = 5;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "ldi %3,0x0f" CR_TAB
+ "and %A0,%3");
+ }
+ *len = 6;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0");
+
+ case 13:
+ if (ldi_ok)
+ {
+ *len = 5;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "andi %A0,0x07");
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return ("ldi %3,0x08" CR_TAB
+ "mul %B0,%3" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ if (scratch)
+ {
+ *len = 6;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "swap %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "ldi %3,0x07" CR_TAB
+ "and %A0,%3");
+ }
+ if (AVR_HAVE_MUL)
+ {
+ *len = 6;
+ return ("set" CR_TAB
+ "bld r1,3" CR_TAB
+ "mul %B0,r1" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ *len = 7;
+ return ("mov %A0,%B0" CR_TAB
+ "clr %B0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0" CR_TAB
+ "lsr %A0");
+
+ case 14:
+ if (AVR_HAVE_MUL && ldi_ok)
+ {
+ *len = 5;
+ return ("ldi %A0,0x04" CR_TAB
+ "mul %B0,%A0" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (AVR_HAVE_MUL && scratch)
+ {
+ *len = 5;
+ return ("ldi %3,0x04" CR_TAB
+ "mul %B0,%3" CR_TAB
+ "mov %A0,r1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr __zero_reg__");
+ }
+ if (optimize_size && ldi_ok)
+ {
+ *len = 5;
+ return ("mov %A0,%B0" CR_TAB
+ "ldi %B0,6" "\n1:\t"
+ "lsr %A0" CR_TAB
+ "dec %B0" CR_TAB
+ "brne 1b");
+ }
+ if (optimize_size && scratch)
+ break; /* 5 */
+ *len = 6;
+ return ("clr %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "rol %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "rol %A0" CR_TAB
+ "clr %B0");
+
+ case 15:
+ *len = 4;
+ return ("clr %A0" CR_TAB
+ "lsl %B0" CR_TAB
+ "rol %A0" CR_TAB
+ "clr %B0");
+ }
+ len = t;
+ }
+ out_shift_with_cnt ("lsr %B0" CR_TAB
+ "ror %A0", insn, operands, len, 2);
+ return "";
+}
+
+
+/* 24-bit logic shift right */
+
+const char*
+avr_out_lshrpsi3 (rtx insn, rtx *op, int *plen)
+{
+ int dest = REGNO (op[0]);
+ int src = REGNO (op[1]);
+
+ if (CONST_INT_P (op[2]))
+ {
+ if (plen)
+ *plen = 0;
+
+ switch (INTVAL (op[2]))
+ {
+ case 8:
+ if (dest <= src)
+ return avr_asm_len ("mov %A0,%B1" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "clr %C0", op, plen, 3);
+ else
+ return avr_asm_len ("clr %C0" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "mov %A0,%B1", op, plen, 3);
+
+ case 16:
+ if (dest != src + 2)
+ avr_asm_len ("mov %A0,%C1", op, plen, 1);
+
+ return avr_asm_len ("clr %B0" CR_TAB
+ "clr %C0", op, plen, 2);
+
+ default:
+ if (INTVAL (op[2]) < 24)
+ break;
+
+ /* fall through */
+
+ case 23:
+ return avr_asm_len ("clr %A0" CR_TAB
+ "sbrc %C0,7" CR_TAB
+ "inc %A0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %C0", op, plen, 5);
+ } /* switch */
+ }
+
+ out_shift_with_cnt ("lsr %C0" CR_TAB
+ "ror %B0" CR_TAB
+ "ror %A0", insn, op, plen, 3);
+ return "";
+}
+
+
+/* 32-bit logic shift right ((unsigned long)x >> i) */
+
+const char *
+lshrsi3_out (rtx insn, rtx operands[], int *len)
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ int k;
+ int *t = len;
+
+ if (!len)
+ len = &k;
+
+ switch (INTVAL (operands[2]))
+ {
+ default:
+ if (INTVAL (operands[2]) < 32)
+ break;
+
+ if (AVR_HAVE_MOVW)
+ return *len = 3, ("clr %D0" CR_TAB
+ "clr %C0" CR_TAB
+ "movw %A0,%C0");
+ *len = 4;
+ return ("clr %D0" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %A0");
+
+ case 8:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+ *len = 4;
+ if (reg0 <= reg1)
+ return ("mov %A0,%B1" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "mov %C0,%D1" CR_TAB
+ "clr %D0");
+ else
+ return ("clr %D0" CR_TAB
+ "mov %C0,%D1" CR_TAB
+ "mov %B0,%C1" CR_TAB
+ "mov %A0,%B1");
+ }
+
+ case 16:
+ {
+ int reg0 = true_regnum (operands[0]);
+ int reg1 = true_regnum (operands[1]);
+
+ if (reg0 == reg1 + 2)
+ return *len = 2, ("clr %C0" CR_TAB
+ "clr %D0");
+ if (AVR_HAVE_MOVW)
+ return *len = 3, ("movw %A0,%C1" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %D0");
+ else
+ return *len = 4, ("mov %B0,%D1" CR_TAB
+ "mov %A0,%C1" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %D0");
+ }
+
+ case 24:
+ return *len = 4, ("mov %A0,%D1" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %D0");
+
+ case 31:
+ *len = 6;
+ return ("clr %A0" CR_TAB
+ "sbrc %D0,7" CR_TAB
+ "inc %A0" CR_TAB
+ "clr %B0" CR_TAB
+ "clr %C0" CR_TAB
+ "clr %D0");
+ }
+ len = t;
+ }
+ out_shift_with_cnt ("lsr %D0" CR_TAB
+ "ror %C0" CR_TAB
+ "ror %B0" CR_TAB
+ "ror %A0", insn, operands, len, 4);
+ return "";
+}
+
+
+/* Output addition of register XOP[0] and compile time constant XOP[2].
+ CODE == PLUS: perform addition by using ADD instructions or
+ CODE == MINUS: perform addition by using SUB instructions:
+
+ XOP[0] = XOP[0] + XOP[2]
+
+ Or perform addition/subtraction with register XOP[2] depending on CODE:
+
+ XOP[0] = XOP[0] +/- XOP[2]
+
+ If PLEN == NULL, print assembler instructions to perform the operation;
+ otherwise, set *PLEN to the length of the instruction sequence (in words)
+ printed with PLEN == NULL. XOP[3] is an 8-bit scratch register or NULL_RTX.
+ Set *PCC to effect on cc0 according to respective CC_* insn attribute.
+
+ CODE_SAT == UNKNOWN: Perform ordinary, non-saturating operation.
+ CODE_SAT != UNKNOWN: Perform operation and saturate according to CODE_SAT.
+ If CODE_SAT != UNKNOWN then SIGN contains the sign of the summand resp.
+ the subtrahend in the original insn, provided it is a compile time constant.
+ In all other cases, SIGN is 0.
+
+ If OUT_LABEL is true, print the final 0: label which is needed for
+ saturated addition / subtraction. The only case where OUT_LABEL = false
+ is useful is for saturated addition / subtraction performed during
+ fixed-point rounding, cf. `avr_out_round'. */
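+/* A sketch of typical output, with illustrative register numbers: a
+   non-saturating HImode addition of the constant 2 to a register pair
+   in ADDW_REGS can be done word-wise as the single instruction
+
+       adiw r24,2
+
+   whereas a pair outside ADDW_REGS is handled byte-wise, e.g. by
+   SUBI/SBCI with the negated constant. */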
+
+static void
+avr_out_plus_1 (rtx *xop, int *plen, enum rtx_code code, int *pcc,
+ enum rtx_code code_sat, int sign, bool out_label)
+{
+ /* MODE of the operation. */
+ enum machine_mode mode = GET_MODE (xop[0]);
+
+ /* INT_MODE of the same size. */
+ enum machine_mode imode = int_mode_for_mode (mode);
+
+ /* Number of bytes to operate on. */
+ int i, n_bytes = GET_MODE_SIZE (mode);
+
+ /* Value (0..0xff) held in clobber register op[3] or -1 if unknown. */
+ int clobber_val = -1;
+
+ /* op[0]: 8-bit destination register
+ op[1]: 8-bit const int
+ op[2]: 8-bit scratch register */
+ rtx op[3];
+
+  /* Started the operation?  Before the operation starts we may skip
+     adding 0.  This is no longer true once the operation has started
+     because the carry must be taken into account. */
+ bool started = false;
+
+ /* Value to add. There are two ways to add VAL: R += VAL and R -= -VAL. */
+ rtx xval = xop[2];
+
+ /* Output a BRVC instruction. Only needed with saturation. */
+ bool out_brvc = true;
+
+ if (plen)
+ *plen = 0;
+
+ if (REG_P (xop[2]))
+ {
+ *pcc = MINUS == code ? (int) CC_SET_CZN : (int) CC_SET_N;
+
+ for (i = 0; i < n_bytes; i++)
+ {
+ /* We operate byte-wise on the destination. */
+ op[0] = simplify_gen_subreg (QImode, xop[0], mode, i);
+ op[1] = simplify_gen_subreg (QImode, xop[2], mode, i);
+
+ if (i == 0)
+ avr_asm_len (code == PLUS ? "add %0,%1" : "sub %0,%1",
+ op, plen, 1);
+ else
+ avr_asm_len (code == PLUS ? "adc %0,%1" : "sbc %0,%1",
+ op, plen, 1);
+ }
+
+ if (reg_overlap_mentioned_p (xop[0], xop[2]))
+ {
+ gcc_assert (REGNO (xop[0]) == REGNO (xop[2]));
+
+ if (MINUS == code)
+ return;
+ }
+
+ goto saturate;
+ }
+
+  /* Except in the case of ADIW with a 16-bit register (see below),
+     addition does not set cc0 in a usable way. */
+
+ *pcc = (MINUS == code) ? CC_SET_CZN : CC_CLOBBER;
+
+ if (CONST_FIXED_P (xval))
+ xval = avr_to_int_mode (xval);
+
+ /* Adding/Subtracting zero is a no-op. */
+
+ if (xval == const0_rtx)
+ {
+ *pcc = CC_NONE;
+ return;
+ }
+
+ if (MINUS == code)
+ xval = simplify_unary_operation (NEG, imode, xval, imode);
+
+ op[2] = xop[3];
+
+ if (SS_PLUS == code_sat && MINUS == code
+ && sign < 0
+ && 0x80 == (INTVAL (simplify_gen_subreg (QImode, xval, imode, n_bytes-1))
+ & GET_MODE_MASK (QImode)))
+ {
+ /* We compute x + 0x80 by means of SUB instructions. We negated the
+ constant subtrahend above and are left with x - (-128) so that we
+ need something like SUBI r,128 which does not exist because SUBI sets
+ V according to the sign of the subtrahend. Notice the only case
+ where this must be done is when NEG overflowed in case [2s] because
+ the V computation needs the right sign of the subtrahend. */
+
+ rtx msb = simplify_gen_subreg (QImode, xop[0], mode, n_bytes-1);
+
+ avr_asm_len ("subi %0,128" CR_TAB
+ "brmi 0f", &msb, plen, 2);
+ out_brvc = false;
+
+ goto saturate;
+ }
+
+ for (i = 0; i < n_bytes; i++)
+ {
+ /* We operate byte-wise on the destination. */
+ rtx reg8 = simplify_gen_subreg (QImode, xop[0], mode, i);
+ rtx xval8 = simplify_gen_subreg (QImode, xval, imode, i);
+
+ /* 8-bit value to operate with this byte. */
+ unsigned int val8 = UINTVAL (xval8) & GET_MODE_MASK (QImode);
+
+ /* Registers R16..R31 can operate with immediate. */
+ bool ld_reg_p = test_hard_reg_class (LD_REGS, reg8);
+
+ op[0] = reg8;
+ op[1] = gen_int_mode (val8, QImode);
+
+      /* For cc0 to be usable, no low bytes may have been skipped. */
+
+ if (i && !started)
+ *pcc = CC_CLOBBER;
+
+ if (!started
+ && i % 2 == 0
+ && i + 2 <= n_bytes
+ && test_hard_reg_class (ADDW_REGS, reg8))
+ {
+ rtx xval16 = simplify_gen_subreg (HImode, xval, imode, i);
+ unsigned int val16 = UINTVAL (xval16) & GET_MODE_MASK (HImode);
+
+ /* Registers R24, X, Y, Z can use ADIW/SBIW with constants < 64
+ i.e. operate word-wise. */
+
+ if (val16 < 64)
+ {
+ if (val16 != 0)
+ {
+ started = true;
+ avr_asm_len (code == PLUS ? "adiw %0,%1" : "sbiw %0,%1",
+ op, plen, 1);
+
+ if (n_bytes == 2 && PLUS == code)
+ *pcc = CC_SET_ZN;
+ }
+
+ i++;
+ continue;
+ }
+ }
+
+ if (val8 == 0)
+ {
+ if (started)
+ avr_asm_len (code == PLUS
+ ? "adc %0,__zero_reg__" : "sbc %0,__zero_reg__",
+ op, plen, 1);
+ continue;
+ }
+ else if ((val8 == 1 || val8 == 0xff)
+ && UNKNOWN == code_sat
+ && !started
+ && i == n_bytes - 1)
+ {
+ avr_asm_len ((code == PLUS) ^ (val8 == 1) ? "dec %0" : "inc %0",
+ op, plen, 1);
+ break;
+ }
+
+ switch (code)
+ {
+ case PLUS:
+
+ gcc_assert (plen != NULL || (op[2] && REG_P (op[2])));
+
+ if (plen != NULL && UNKNOWN != code_sat)
+ {
+ /* This belongs to the x + 0x80 corner case. The code with
+ ADD instruction is not smaller, thus make this case
+ expensive so that the caller won't pick it. */
+
+ *plen += 10;
+ break;
+ }
+
+ if (clobber_val != (int) val8)
+ avr_asm_len ("ldi %2,%1", op, plen, 1);
+ clobber_val = (int) val8;
+
+ avr_asm_len (started ? "adc %0,%2" : "add %0,%2", op, plen, 1);
+
+ break; /* PLUS */
+
+ case MINUS:
+
+ if (ld_reg_p)
+ avr_asm_len (started ? "sbci %0,%1" : "subi %0,%1", op, plen, 1);
+ else
+ {
+ gcc_assert (plen != NULL || REG_P (op[2]));
+
+ if (clobber_val != (int) val8)
+ avr_asm_len ("ldi %2,%1", op, plen, 1);
+ clobber_val = (int) val8;
+
+ avr_asm_len (started ? "sbc %0,%2" : "sub %0,%2", op, plen, 1);
+ }
+
+ break; /* MINUS */
+
+ default:
+ /* Unknown code */
+ gcc_unreachable();
+ }
+
+ started = true;
+
+ } /* for all sub-bytes */
+
+ saturate:
+
+ if (UNKNOWN == code_sat)
+ return;
+
+ *pcc = (int) CC_CLOBBER;
+
+ /* Vanilla addition/subtraction is done. We are left with saturation.
+
+ We have to compute A = A <op> B where A is a register and
+ B is a register or a non-zero compile time constant CONST.
+ A is register class "r" if unsigned && B is REG. Otherwise, A is in "d".
+ B stands for the original operand $2 in INSN. In the case of B = CONST,
+ SIGN in { -1, 1 } is the sign of B. Otherwise, SIGN is 0.
+
+ CODE is the instruction flavor we use in the asm sequence to perform <op>.
+
+
+ unsigned
+ operation | code | sat if | b is | sat value | case
+ -----------------+-------+----------+--------------+-----------+-------
+ + as a + b | add | C == 1 | const, reg | u+ = 0xff | [1u]
+ + as a - (-b) | sub | C == 0 | const | u+ = 0xff | [2u]
+ - as a - b | sub | C == 1 | const, reg | u- = 0 | [3u]
+ - as a + (-b) | add | C == 0 | const | u- = 0 | [4u]
+
+
+ signed
+ operation | code | sat if | b is | sat value | case
+ -----------------+-------+----------+--------------+-----------+-------
+ + as a + b | add | V == 1 | const, reg | s+ | [1s]
+ + as a - (-b) | sub | V == 1 | const | s+ | [2s]
+ - as a - b | sub | V == 1 | const, reg | s- | [3s]
+ - as a + (-b) | add | V == 1 | const | s- | [4s]
+
+ s+ = b < 0 ? -0x80 : 0x7f
+ s- = b < 0 ? 0x7f : -0x80
+
+ The cases a - b actually perform a - (-(-b)) if B is CONST.
+ */
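+
+  /* Worked example, case [1u] with QImode and CODE == PLUS: after
+     "add %0,%2" the Carry flag is set exactly on overflow, so the
+     US_PLUS branch below emits
+
+         brcc 0f
+         sbc %0,%0   ; reached only with C = 1, yields 0xff
+     0:
+
+     leaving the saturation value u+ = 0xff in the destination. */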
+
+ op[0] = simplify_gen_subreg (QImode, xop[0], mode, n_bytes-1);
+ op[1] = n_bytes > 1
+ ? simplify_gen_subreg (QImode, xop[0], mode, n_bytes-2)
+ : NULL_RTX;
+
+ bool need_copy = true;
+ int len_call = 1 + AVR_HAVE_JMP_CALL;
+
+ switch (code_sat)
+ {
+ default:
+ gcc_unreachable();
+
+ case SS_PLUS:
+ case SS_MINUS:
+
+ if (out_brvc)
+ avr_asm_len ("brvc 0f", op, plen, 1);
+
+ if (reg_overlap_mentioned_p (xop[0], xop[2]))
+ {
+ /* [1s,reg] */
+
+ if (n_bytes == 1)
+ avr_asm_len ("ldi %0,0x7f" CR_TAB
+ "adc %0,__zero_reg__", op, plen, 2);
+ else
+ avr_asm_len ("ldi %0,0x7f" CR_TAB
+ "ldi %1,0xff" CR_TAB
+ "adc %1,__zero_reg__" CR_TAB
+ "adc %0,__zero_reg__", op, plen, 4);
+ }
+ else if (sign == 0 && PLUS == code)
+ {
+ /* [1s,reg] */
+
+ op[2] = simplify_gen_subreg (QImode, xop[2], mode, n_bytes-1);
+
+ if (n_bytes == 1)
+ avr_asm_len ("ldi %0,0x80" CR_TAB
+ "sbrs %2,7" CR_TAB
+ "dec %0", op, plen, 3);
+ else
+ avr_asm_len ("ldi %0,0x80" CR_TAB
+ "cp %2,%0" CR_TAB
+ "sbc %1,%1" CR_TAB
+ "sbci %0,0", op, plen, 4);
+ }
+ else if (sign == 0 && MINUS == code)
+ {
+ /* [3s,reg] */
+
+ op[2] = simplify_gen_subreg (QImode, xop[2], mode, n_bytes-1);
+
+ if (n_bytes == 1)
+ avr_asm_len ("ldi %0,0x7f" CR_TAB
+ "sbrs %2,7" CR_TAB
+ "inc %0", op, plen, 3);
+ else
+ avr_asm_len ("ldi %0,0x7f" CR_TAB
+ "cp %0,%2" CR_TAB
+ "sbc %1,%1" CR_TAB
+ "sbci %0,-1", op, plen, 4);
+ }
+ else if ((sign < 0) ^ (SS_MINUS == code_sat))
+ {
+ /* [1s,const,B < 0] [2s,B < 0] */
+ /* [3s,const,B > 0] [4s,B > 0] */
+
+ if (n_bytes == 8)
+ {
+ avr_asm_len ("%~call __clr_8", op, plen, len_call);
+ need_copy = false;
+ }
+
+ avr_asm_len ("ldi %0,0x80", op, plen, 1);
+ if (n_bytes > 1 && need_copy)
+ avr_asm_len ("clr %1", op, plen, 1);
+ }
+ else if ((sign > 0) ^ (SS_MINUS == code_sat))
+ {
+ /* [1s,const,B > 0] [2s,B > 0] */
+ /* [3s,const,B < 0] [4s,B < 0] */
+
+ if (n_bytes == 8)
+ {
+ avr_asm_len ("sec" CR_TAB
+ "%~call __sbc_8", op, plen, 1 + len_call);
+ need_copy = false;
+ }
+
+ avr_asm_len ("ldi %0,0x7f", op, plen, 1);
+ if (n_bytes > 1 && need_copy)
+ avr_asm_len ("ldi %1,0xff", op, plen, 1);
+ }
+ else
+ gcc_unreachable();
+
+ break;
+
+ case US_PLUS:
+ /* [1u] : [2u] */
+
+ avr_asm_len (PLUS == code ? "brcc 0f" : "brcs 0f", op, plen, 1);
+
+ if (n_bytes == 8)
+ {
+ if (MINUS == code)
+ avr_asm_len ("sec", op, plen, 1);
+ avr_asm_len ("%~call __sbc_8", op, plen, len_call);
+
+ need_copy = false;
+ }
+ else
+ {
+ if (MINUS == code && !test_hard_reg_class (LD_REGS, op[0]))
+ avr_asm_len ("sec" CR_TAB "sbc %0,%0", op, plen, 2);
+ else
+ avr_asm_len (PLUS == code ? "sbc %0,%0" : "ldi %0,0xff",
+ op, plen, 1);
+ }
+ break; /* US_PLUS */
+
+ case US_MINUS:
+ /* [4u] : [3u] */
+
+ avr_asm_len (PLUS == code ? "brcs 0f" : "brcc 0f", op, plen, 1);
+
+ if (n_bytes == 8)
+ {
+ avr_asm_len ("%~call __clr_8", op, plen, len_call);
+ need_copy = false;
+ }
+ else
+ avr_asm_len ("clr %0", op, plen, 1);
+
+ break;
+ }
+
+ /* We set the MSB in the unsigned case and the 2 MSBs in the signed case.
+ Now copy the right value to the LSBs. */
+
+ if (need_copy && n_bytes > 1)
+ {
+ if (US_MINUS == code_sat || US_PLUS == code_sat)
+ {
+ avr_asm_len ("mov %1,%0", op, plen, 1);
+
+ if (n_bytes > 2)
+ {
+ op[0] = xop[0];
+ if (AVR_HAVE_MOVW)
+ avr_asm_len ("movw %0,%1", op, plen, 1);
+ else
+ avr_asm_len ("mov %A0,%1" CR_TAB
+ "mov %B0,%1", op, plen, 2);
+ }
+ }
+ else if (n_bytes > 2)
+ {
+ op[0] = xop[0];
+ avr_asm_len ("mov %A0,%1" CR_TAB
+ "mov %B0,%1", op, plen, 2);
+ }
+ }
+
+ if (need_copy && n_bytes == 8)
+ {
+ if (AVR_HAVE_MOVW)
+ avr_asm_len ("movw %r0+2,%0" CR_TAB
+ "movw %r0+4,%0", xop, plen, 2);
+ else
+ avr_asm_len ("mov %r0+2,%0" CR_TAB
+ "mov %r0+3,%0" CR_TAB
+ "mov %r0+4,%0" CR_TAB
+ "mov %r0+5,%0", xop, plen, 4);
+ }
+
+ if (out_label)
+ avr_asm_len ("0:", op, plen, 0);
+}
+
+
+/* Output addition/subtraction of register XOP[0] and a constant XOP[2] that
+   is not a compile-time constant:
+
+ XOP[0] = XOP[0] +/- XOP[2]
+
+ This is a helper for the function below. The only insns that need this
+ are additions/subtraction for pointer modes, i.e. HImode and PSImode. */
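+/* For example, adding a symbolic address "tbl" to a 16-bit pointer
+   register pair would come out as
+
+       subi %A0,lo8(-(tbl))
+       sbci %B0,hi8(-(tbl))
+
+   i.e. as subtraction of the negated address, because AVR has SUBI/SBCI
+   but no byte-wide ADD-with-immediate. */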
+
+static const char*
+avr_out_plus_symbol (rtx *xop, enum rtx_code code, int *plen, int *pcc)
+{
+ enum machine_mode mode = GET_MODE (xop[0]);
+
+ /* Only pointer modes want to add symbols. */
+
+ gcc_assert (mode == HImode || mode == PSImode);
+
+ *pcc = MINUS == code ? (int) CC_SET_CZN : (int) CC_SET_N;
+
+ avr_asm_len (PLUS == code
+ ? "subi %A0,lo8(-(%2))" CR_TAB "sbci %B0,hi8(-(%2))"
+ : "subi %A0,lo8(%2)" CR_TAB "sbci %B0,hi8(%2)",
+ xop, plen, -2);
+
+ if (PSImode == mode)
+ avr_asm_len (PLUS == code
+ ? "sbci %C0,hlo8(-(%2))"
+ : "sbci %C0,hlo8(%2)", xop, plen, 1);
+ return "";
+}
+
+
+/* Prepare operands of addition/subtraction to be used with avr_out_plus_1.
+
+ INSN is a single_set insn or an insn pattern with a binary operation as
+ SET_SRC that is one of: PLUS, SS_PLUS, US_PLUS, MINUS, SS_MINUS, US_MINUS.
+
+   XOP are the operands of INSN.  In the case of 64-bit operations with
+   a constant, XOP[] has just one element: the summand/subtrahend in XOP[0].
+ The non-saturating insns up to 32 bits may or may not supply a "d" class
+ scratch as XOP[3].
+
+ If PLEN == NULL output the instructions.
+ If PLEN != NULL set *PLEN to the length of the sequence in words.
+
+ PCC is a pointer to store the instructions' effect on cc0.
+ PCC may be NULL.
+
+ PLEN and PCC default to NULL.
+
+ OUT_LABEL defaults to TRUE. For a description, see AVR_OUT_PLUS_1.
+
+ Return "" */
+
+const char*
+avr_out_plus (rtx insn, rtx *xop, int *plen, int *pcc, bool out_label)
+{
+ int cc_plus, cc_minus, cc_dummy;
+ int len_plus, len_minus;
+ rtx op[4];
+ rtx xpattern = INSN_P (insn) ? single_set (insn) : insn;
+ rtx xdest = SET_DEST (xpattern);
+ enum machine_mode mode = GET_MODE (xdest);
+ enum machine_mode imode = int_mode_for_mode (mode);
+ int n_bytes = GET_MODE_SIZE (mode);
+ enum rtx_code code_sat = GET_CODE (SET_SRC (xpattern));
+ enum rtx_code code
+ = (PLUS == code_sat || SS_PLUS == code_sat || US_PLUS == code_sat
+ ? PLUS : MINUS);
+
+ if (!pcc)
+ pcc = &cc_dummy;
+
+ /* PLUS and MINUS don't saturate: Use modular wrap-around. */
+
+ if (PLUS == code_sat || MINUS == code_sat)
+ code_sat = UNKNOWN;
+
+ if (n_bytes <= 4 && REG_P (xop[2]))
+ {
+ avr_out_plus_1 (xop, plen, code, pcc, code_sat, 0, out_label);
+ return "";
+ }
+
+ if (8 == n_bytes)
+ {
+ op[0] = gen_rtx_REG (DImode, ACC_A);
+ op[1] = gen_rtx_REG (DImode, ACC_A);
+ op[2] = avr_to_int_mode (xop[0]);
+ }
+ else
+ {
+ if (!REG_P (xop[2])
+ && !CONST_INT_P (xop[2])
+ && !CONST_FIXED_P (xop[2]))
+ {
+ return avr_out_plus_symbol (xop, code, plen, pcc);
+ }
+
+ op[0] = avr_to_int_mode (xop[0]);
+ op[1] = avr_to_int_mode (xop[1]);
+ op[2] = avr_to_int_mode (xop[2]);
+ }
+
+ /* Saturations and 64-bit operations don't have a clobber operand.
+ For the other cases, the caller will provide a proper XOP[3]. */
+
+ xpattern = INSN_P (insn) ? PATTERN (insn) : insn;
+ op[3] = PARALLEL == GET_CODE (xpattern) ? xop[3] : NULL_RTX;
+
+ /* Saturation will need the sign of the original operand. */
+
+ rtx xmsb = simplify_gen_subreg (QImode, op[2], imode, n_bytes-1);
+ int sign = INTVAL (xmsb) < 0 ? -1 : 1;
+
+ /* If we subtract and the subtrahend is a constant, then negate it
+ so that avr_out_plus_1 can be used. */
+
+ if (MINUS == code)
+ op[2] = simplify_unary_operation (NEG, imode, op[2], imode);
+
+ /* Work out the shortest sequence. */
+
+ avr_out_plus_1 (op, &len_minus, MINUS, &cc_minus, code_sat, sign, out_label);
+ avr_out_plus_1 (op, &len_plus, PLUS, &cc_plus, code_sat, sign, out_label);
+
+ if (plen)
+ {
+ *plen = (len_minus <= len_plus) ? len_minus : len_plus;
+ *pcc = (len_minus <= len_plus) ? cc_minus : cc_plus;
+ }
+ else if (len_minus <= len_plus)
+ avr_out_plus_1 (op, NULL, MINUS, pcc, code_sat, sign, out_label);
+ else
+ avr_out_plus_1 (op, NULL, PLUS, pcc, code_sat, sign, out_label);
+
+ return "";
+}
+
+
+/* Output bit operation (IOR, AND, XOR) with register XOP[0] and compile
+ time constant XOP[2]:
+
+ XOP[0] = XOP[0] <op> XOP[2]
+
+ and return "". If PLEN == NULL, print assembler instructions to perform the
+ operation; otherwise, set *PLEN to the length of the instruction sequence
+ (in words) printed with PLEN == NULL. XOP[3] is either an 8-bit clobber
+ register or SCRATCH if no clobber register is needed for the operation.
+ INSN is an INSN_P or a pattern of an insn. */
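+/* Sketch of the output for an HImode IOR with the constant 0x0100: the
+   low byte (0x00) is skipped entirely, and the high byte (0x01, with
+   popcount 1) becomes "ori %B0,1" on an LD register, or the T-flag
+   sequence "set" / "bld %B0,0" otherwise. */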
+
+const char*
+avr_out_bitop (rtx insn, rtx *xop, int *plen)
+{
+ /* CODE and MODE of the operation. */
+ rtx xpattern = INSN_P (insn) ? single_set (insn) : insn;
+ enum rtx_code code = GET_CODE (SET_SRC (xpattern));
+ enum machine_mode mode = GET_MODE (xop[0]);
+
+ /* Number of bytes to operate on. */
+ int i, n_bytes = GET_MODE_SIZE (mode);
+
+  /* Value of T-flag (0 or 1) or -1 if unknown. */
+ int set_t = -1;
+
+ /* Value (0..0xff) held in clobber register op[3] or -1 if unknown. */
+ int clobber_val = -1;
+
+ /* op[0]: 8-bit destination register
+ op[1]: 8-bit const int
+ op[2]: 8-bit clobber register or SCRATCH
+ op[3]: 8-bit register containing 0xff or NULL_RTX */
+ rtx op[4];
+
+ op[2] = xop[3];
+ op[3] = NULL_RTX;
+
+ if (plen)
+ *plen = 0;
+
+ for (i = 0; i < n_bytes; i++)
+ {
+ /* We operate byte-wise on the destination. */
+ rtx reg8 = simplify_gen_subreg (QImode, xop[0], mode, i);
+ rtx xval8 = simplify_gen_subreg (QImode, xop[2], mode, i);
+
+ /* 8-bit value to operate with this byte. */
+ unsigned int val8 = UINTVAL (xval8) & GET_MODE_MASK (QImode);
+
+ /* Number of bits set in the current byte of the constant. */
+ int pop8 = avr_popcount (val8);
+
+ /* Registers R16..R31 can operate with immediate. */
+ bool ld_reg_p = test_hard_reg_class (LD_REGS, reg8);
+
+ op[0] = reg8;
+ op[1] = GEN_INT (val8);
+
+ switch (code)
+ {
+ case IOR:
+
+ if (0 == pop8)
+ continue;
+ else if (ld_reg_p)
+ avr_asm_len ("ori %0,%1", op, plen, 1);
+ else if (1 == pop8)
+ {
+ if (set_t != 1)
+ avr_asm_len ("set", op, plen, 1);
+ set_t = 1;
+
+ op[1] = GEN_INT (exact_log2 (val8));
+ avr_asm_len ("bld %0,%1", op, plen, 1);
+ }
+ else if (8 == pop8)
+ {
+ if (op[3] != NULL_RTX)
+ avr_asm_len ("mov %0,%3", op, plen, 1);
+ else
+ avr_asm_len ("clr %0" CR_TAB
+ "dec %0", op, plen, 2);
+
+ op[3] = op[0];
+ }
+ else
+ {
+ if (clobber_val != (int) val8)
+ avr_asm_len ("ldi %2,%1", op, plen, 1);
+ clobber_val = (int) val8;
+
+ avr_asm_len ("or %0,%2", op, plen, 1);
+ }
+
+ continue; /* IOR */
+
+ case AND:
+
+ if (8 == pop8)
+ continue;
+ else if (0 == pop8)
+ avr_asm_len ("clr %0", op, plen, 1);
+ else if (ld_reg_p)
+ avr_asm_len ("andi %0,%1", op, plen, 1);
+ else if (7 == pop8)
+ {
+ if (set_t != 0)
+ avr_asm_len ("clt", op, plen, 1);
+ set_t = 0;
+
+ op[1] = GEN_INT (exact_log2 (GET_MODE_MASK (QImode) & ~val8));
+ avr_asm_len ("bld %0,%1", op, plen, 1);
+ }
+ else
+ {
+ if (clobber_val != (int) val8)
+ avr_asm_len ("ldi %2,%1", op, plen, 1);
+ clobber_val = (int) val8;
+
+ avr_asm_len ("and %0,%2", op, plen, 1);
+ }
+
+ continue; /* AND */
+
+ case XOR:
+
+ if (0 == pop8)
+ continue;
+ else if (8 == pop8)
+ avr_asm_len ("com %0", op, plen, 1);
+ else if (ld_reg_p && val8 == (1 << 7))
+ avr_asm_len ("subi %0,%1", op, plen, 1);
+ else
+ {
+ if (clobber_val != (int) val8)
+ avr_asm_len ("ldi %2,%1", op, plen, 1);
+ clobber_val = (int) val8;
+
+ avr_asm_len ("eor %0,%2", op, plen, 1);
+ }
+
+ continue; /* XOR */
+
+ default:
+ /* Unknown rtx_code */
+ gcc_unreachable();
+ }
+ } /* for all sub-bytes */
+
+ return "";
+}
+
+
+/* PLEN == NULL: Output code to add CONST_INT OP[0] to SP.
+ PLEN != NULL: Set *PLEN to the length of that sequence.
+ Return "". */
+
+const char*
+avr_out_addto_sp (rtx *op, int *plen)
+{
+ int pc_len = AVR_2_BYTE_PC ? 2 : 3;
+ int addend = INTVAL (op[0]);
+
+ if (plen)
+ *plen = 0;
+
+ if (addend < 0)
+ {
+ if (flag_verbose_asm || flag_print_asm_name)
+ avr_asm_len (ASM_COMMENT_START "SP -= %n0", op, plen, 0);
+
+ while (addend <= -pc_len)
+ {
+ addend += pc_len;
+ avr_asm_len ("rcall .", op, plen, 1);
+ }
+
+ while (addend++ < 0)
+ avr_asm_len ("push __zero_reg__", op, plen, 1);
+ }
+ else if (addend > 0)
+ {
+ if (flag_verbose_asm || flag_print_asm_name)
+ avr_asm_len (ASM_COMMENT_START "SP += %0", op, plen, 0);
+
+ while (addend-- > 0)
+ avr_asm_len ("pop __tmp_reg__", op, plen, 1);
+ }
+
+ return "";
+}
+
+
+/* Outputs instructions needed for fixed point type conversion.
+ This includes converting between any fixed point type, as well
+ as converting to any integer type. Conversion between integer
+ types is not supported.
+
+ Converting signed fractional types requires a bit shift if converting
+ to or from any unsigned fractional type because the decimal place is
+ shifted by 1 bit. When the destination is a signed fractional, the sign
+ is stored in either the carry or T bit. */
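+/* Example of the extra shift: converting a signed fract with 7
+   fractional bits to an unsigned fract with 8 fractional bits moves
+   the decimal point by one position, so the payload is shifted left
+   by 1 after the bytes have been copied (Step 2 below); the opposite
+   conversion uses a right shift instead (Step 4). */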
+
+const char*
+avr_out_fract (rtx insn, rtx operands[], bool intsigned, int *plen)
+{
+ size_t i;
+ rtx xop[6];
+ RTX_CODE shift = UNKNOWN;
+ bool sign_in_carry = false;
+ bool msb_in_carry = false;
+ bool lsb_in_tmp_reg = false;
+ bool lsb_in_carry = false;
+ bool frac_rounded = false;
+ const char *code_ashift = "lsl %0";
+
+
+#define MAY_CLOBBER(RR) \
+ /* Shorthand used below. */ \
+ ((sign_bytes \
+ && IN_RANGE (RR, dest.regno_msb - sign_bytes + 1, dest.regno_msb)) \
+ || (offset && IN_RANGE (RR, dest.regno, dest.regno_msb)) \
+ || (reg_unused_after (insn, all_regs_rtx[RR]) \
+ && !IN_RANGE (RR, dest.regno, dest.regno_msb)))
+
+ struct
+ {
+ /* bytes : Length of operand in bytes.
+ ibyte : Length of integral part in bytes.
+ fbyte, fbit : Length of fractional part in bytes, bits. */
+
+ bool sbit;
+ unsigned fbit, bytes, ibyte, fbyte;
+ unsigned regno, regno_msb;
+ } dest, src, *val[2] = { &dest, &src };
+
+ if (plen)
+ *plen = 0;
+
+ /* Step 0: Determine information on source and destination operand we
+ ====== will need in the remainder. */
+
+ for (i = 0; i < sizeof (val) / sizeof (*val); i++)
+ {
+ enum machine_mode mode;
+
+ xop[i] = operands[i];
+
+ mode = GET_MODE (xop[i]);
+
+ val[i]->bytes = GET_MODE_SIZE (mode);
+ val[i]->regno = REGNO (xop[i]);
+ val[i]->regno_msb = REGNO (xop[i]) + val[i]->bytes - 1;
+
+ if (SCALAR_INT_MODE_P (mode))
+ {
+ val[i]->sbit = intsigned;
+ val[i]->fbit = 0;
+ }
+ else if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
+ {
+ val[i]->sbit = SIGNED_SCALAR_FIXED_POINT_MODE_P (mode);
+ val[i]->fbit = GET_MODE_FBIT (mode);
+ }
+ else
+ fatal_insn ("unsupported fixed-point conversion", insn);
+
+ val[i]->fbyte = (1 + val[i]->fbit) / BITS_PER_UNIT;
+ val[i]->ibyte = val[i]->bytes - val[i]->fbyte;
+ }
+
+ // Byte offset of the decimal point taking into account different place
+ // of the decimal point in input and output and different register numbers
+ // of input and output.
+ int offset = dest.regno - src.regno + dest.fbyte - src.fbyte;
+
+ // Number of destination bytes that will come from sign / zero extension.
+ int sign_bytes = (dest.ibyte - src.ibyte) * (dest.ibyte > src.ibyte);
+
+ // Number of bytes at the low end to be filled with zeros.
+ int zero_bytes = (dest.fbyte - src.fbyte) * (dest.fbyte > src.fbyte);
+
+  // Do we have a 16-bit register that is cleared?
+ rtx clrw = NULL_RTX;
+
+ bool sign_extend = src.sbit && sign_bytes;
+
+ if (0 == dest.fbit % 8 && 7 == src.fbit % 8)
+ shift = ASHIFT;
+ else if (7 == dest.fbit % 8 && 0 == src.fbit % 8)
+ shift = ASHIFTRT;
+ else if (dest.fbit % 8 == src.fbit % 8)
+ shift = UNKNOWN;
+ else
+ gcc_unreachable();
+
+ /* If we need to round the fraction part, we might need to save/round it
+     before clobbering any of it in Step 1.  Also, we might want to do
+ the rounding now to make use of LD_REGS. */
+ if (SCALAR_INT_MODE_P (GET_MODE (xop[0]))
+ && SCALAR_ACCUM_MODE_P (GET_MODE (xop[1]))
+ && !TARGET_FRACT_CONV_TRUNC)
+ {
+ bool overlap
+ = (src.regno <=
+ (offset ? dest.regno_msb - sign_bytes : dest.regno + zero_bytes - 1)
+	   && dest.regno - offset - 1 >= dest.regno);
+      unsigned s0 = dest.regno - offset - 1;
+ bool use_src = true;
+ unsigned sn;
+ unsigned copied_msb = src.regno_msb;
+ bool have_carry = false;
+
+ if (src.ibyte > dest.ibyte)
+ copied_msb -= src.ibyte - dest.ibyte;
+
+ for (sn = s0; sn <= copied_msb; sn++)
+ if (!IN_RANGE (sn, dest.regno, dest.regno_msb)
+ && !reg_unused_after (insn, all_regs_rtx[sn]))
+ use_src = false;
+ if (use_src && TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], s0))
+ {
+ avr_asm_len ("tst %0" CR_TAB "brpl 0f",
+ &all_regs_rtx[src.regno_msb], plen, 2);
+ sn = src.regno;
+ if (sn < s0)
+ {
+ if (TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], sn))
+ avr_asm_len ("cpi %0,1", &all_regs_rtx[sn], plen, 1);
+ else
+ avr_asm_len ("sec" CR_TAB "cpc %0,__zero_reg__",
+ &all_regs_rtx[sn], plen, 2);
+ have_carry = true;
+ }
+ while (++sn < s0)
+ avr_asm_len ("cpc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+ avr_asm_len (have_carry ? "sbci %0,128" : "subi %0,129",
+ &all_regs_rtx[s0], plen, 1);
+ for (sn = src.regno + src.fbyte; sn <= copied_msb; sn++)
+ avr_asm_len ("sbci %0,255", &all_regs_rtx[sn], plen, 1);
+ avr_asm_len ("\n0:", NULL, plen, 0);
+ frac_rounded = true;
+ }
+ else if (use_src && overlap)
+ {
+ avr_asm_len ("clr __tmp_reg__" CR_TAB
+		       "sbrc %1,0" CR_TAB "dec __tmp_reg__", xop, plen, 3);
+ sn = src.regno;
+ if (sn < s0)
+ {
+ avr_asm_len ("add %0,__tmp_reg__", &all_regs_rtx[sn], plen, 1);
+ have_carry = true;
+ }
+ while (++sn < s0)
+ avr_asm_len ("adc %0,__tmp_reg__", &all_regs_rtx[sn], plen, 1);
+ if (have_carry)
+ avr_asm_len ("clt" CR_TAB "bld __tmp_reg__,7" CR_TAB
+ "adc %0,__tmp_reg__",
+			 &all_regs_rtx[s0], plen, 3);
+ else
+	    avr_asm_len ("lsr __tmp_reg__" CR_TAB "add %0,__tmp_reg__",
+ &all_regs_rtx[s0], plen, 2);
+ for (sn = src.regno + src.fbyte; sn <= copied_msb; sn++)
+ avr_asm_len ("adc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+ frac_rounded = true;
+ }
+ else if (overlap)
+ {
+ bool use_src
+ = (TEST_HARD_REG_BIT (reg_class_contents[LD_REGS], s0)
+ && (IN_RANGE (s0, dest.regno, dest.regno_msb)
+ || reg_unused_after (insn, all_regs_rtx[s0])));
+ xop[2] = all_regs_rtx[s0];
+ unsigned sn = src.regno;
+ if (!use_src || sn == s0)
+ avr_asm_len ("mov __tmp_reg__,%2", xop, plen, 1);
+ /* We need to consider to-be-discarded bits
+ if the value is negative. */
+ if (sn < s0)
+ {
+ avr_asm_len ("tst %0" CR_TAB "brpl 0f",
+ &all_regs_rtx[src.regno_msb], plen, 2);
+	      /* Test the to-be-discarded bytes for any nonzero bits.
+ ??? Could use OR or SBIW to test two registers at once. */
+ if (sn < s0)
+ avr_asm_len ("cp %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+ while (++sn < s0)
+ avr_asm_len ("cpc %0,__zero_reg__", &all_regs_rtx[sn], plen, 1);
+ /* Set bit 0 in __tmp_reg__ if any of the lower bits was set. */
+ if (use_src)
+ avr_asm_len ("breq 0f" CR_TAB
+ "ori %2,1" "\n0:\t" "mov __tmp_reg__,%2",
+ xop, plen, 3);
+ else
+ avr_asm_len ("breq 0f" CR_TAB
+ "set" CR_TAB "bld __tmp_reg__,0\n0:",
+ xop, plen, 3);
+ }
+ lsb_in_tmp_reg = true;
+ }
+ }
+
+ /* Step 1: Clear bytes at the low end and copy payload bits from source
+ ====== to destination. */
+
+ int step = offset < 0 ? 1 : -1;
+ unsigned d0 = offset < 0 ? dest.regno : dest.regno_msb;
+
+ // We cleared at least that number of registers.
+ int clr_n = 0;
+
+ for (; d0 >= dest.regno && d0 <= dest.regno_msb; d0 += step)
+ {
+ // Next regno of destination is needed for MOVW
+ unsigned d1 = d0 + step;
+
+ // Current and next regno of source
+ signed s0 = d0 - offset;
+ signed s1 = s0 + step;
+
+ // Must current resp. next regno be CLRed? This applies to the low
+ // bytes of the destination that have no associated source bytes.
+ bool clr0 = s0 < (signed) src.regno;
+ bool clr1 = s1 < (signed) src.regno && d1 >= dest.regno;
+
+ // First gather what code to emit (if any) and additional step to
+ // apply if a MOVW is in use. xop[2] is destination rtx and xop[3]
+ // is the source rtx for the current loop iteration.
+ const char *code = NULL;
+ int stepw = 0;
+
+ if (clr0)
+ {
+ if (AVR_HAVE_MOVW && clr1 && clrw)
+ {
+ xop[2] = all_regs_rtx[d0 & ~1];
+ xop[3] = clrw;
+ code = "movw %2,%3";
+ stepw = step;
+ }
+ else
+ {
+ xop[2] = all_regs_rtx[d0];
+ code = "clr %2";
+
+ if (++clr_n >= 2
+ && !clrw
+ && d0 % 2 == (step > 0))
+ {
+ clrw = all_regs_rtx[d0 & ~1];
+ }
+ }
+ }
+ else if (offset && s0 <= (signed) src.regno_msb)
+ {
+ int movw = AVR_HAVE_MOVW && offset % 2 == 0
+ && d0 % 2 == (offset > 0)
+ && d1 <= dest.regno_msb && d1 >= dest.regno
+ && s1 <= (signed) src.regno_msb && s1 >= (signed) src.regno;
+
+ xop[2] = all_regs_rtx[d0 & ~movw];
+ xop[3] = all_regs_rtx[s0 & ~movw];
+ code = movw ? "movw %2,%3" : "mov %2,%3";
+ stepw = step * movw;
+ }
+
+ if (code)
+ {
+ if (sign_extend && shift != ASHIFT && !sign_in_carry
+ && (d0 == src.regno_msb || d0 + stepw == src.regno_msb))
+ {
+ /* We are going to override the sign bit. If we sign-extend,
+ store the sign in the Carry flag. This is not needed if
+		 the destination will be ASHIFTed in the remainder because
+		 the ASHIFT will set Carry without an extra instruction. */
+
+ avr_asm_len ("lsl %0", &all_regs_rtx[src.regno_msb], plen, 1);
+ sign_in_carry = true;
+ }
+
+ unsigned src_msb = dest.regno_msb - sign_bytes - offset + 1;
+
+ if (!sign_extend && shift == ASHIFTRT && !msb_in_carry
+ && src.ibyte > dest.ibyte
+ && (d0 == src_msb || d0 + stepw == src_msb))
+ {
+ /* We are going to override the MSB. If we shift right,
+ store the MSB in the Carry flag. This is only needed if
+		 we don't sign-extend because with sign-extension the MSB
+ (the sign) will be produced by the sign extension. */
+
+ avr_asm_len ("lsr %0", &all_regs_rtx[src_msb], plen, 1);
+ msb_in_carry = true;
+ }
+
+	  unsigned src_lsb = dest.regno - offset - 1;
+
+ if (shift == ASHIFT && src.fbyte > dest.fbyte && !lsb_in_carry
+ && !lsb_in_tmp_reg
+ && (d0 == src_lsb || d0 + stepw == src_lsb))
+ {
+ /* We are going to override the new LSB; store it into carry. */
+
+ avr_asm_len ("lsl %0", &all_regs_rtx[src_lsb], plen, 1);
+ code_ashift = "rol %0";
+ lsb_in_carry = true;
+ }
+
+ avr_asm_len (code, xop, plen, 1);
+ d0 += stepw;
+ }
+ }
+
+ /* Step 2: Shift destination left by 1 bit position. This might be needed
+ ====== for signed input and unsigned output. */
+
+ if (shift == ASHIFT && src.fbyte > dest.fbyte && !lsb_in_carry)
+ {
+      unsigned s0 = dest.regno - offset - 1;
+
+ /* n1169 4.1.4 says:
+ "Conversions from a fixed-point to an integer type round toward zero."
+ Hence, converting a fract type to integer only gives a non-zero result
+ for -1. */
+ if (SCALAR_INT_MODE_P (GET_MODE (xop[0]))
+ && SCALAR_FRACT_MODE_P (GET_MODE (xop[1]))
+ && !TARGET_FRACT_CONV_TRUNC)
+ {
+ gcc_assert (s0 == src.regno_msb);
+ /* Check if the input is -1. We do that by checking if negating
+ the input causes an integer overflow. */
+ unsigned sn = src.regno;
+ avr_asm_len ("cp __zero_reg__,%0", &all_regs_rtx[sn++], plen, 1);
+ while (sn <= s0)
+ avr_asm_len ("cpc __zero_reg__,%0", &all_regs_rtx[sn++], plen, 1);
+
+ /* Overflow goes with set carry. Clear carry otherwise. */
+ avr_asm_len ("brvs 0f" CR_TAB "clc\n0:", NULL, plen, 2);
+ }
+ /* Likewise, when converting from accumulator types to integer, we
+ need to round up negative values. */
+ else if (SCALAR_INT_MODE_P (GET_MODE (xop[0]))
+ && SCALAR_ACCUM_MODE_P (GET_MODE (xop[1]))
+ && !TARGET_FRACT_CONV_TRUNC
+ && !frac_rounded)
+ {
+ bool have_carry = false;
+
+ xop[2] = all_regs_rtx[s0];
+ if (!lsb_in_tmp_reg && !MAY_CLOBBER (s0))
+ avr_asm_len ("mov __tmp_reg__,%2", xop, plen, 1);
+ avr_asm_len ("tst %0" CR_TAB "brpl 0f",
+ &all_regs_rtx[src.regno_msb], plen, 2);
+ if (!lsb_in_tmp_reg)
+ {
+ unsigned sn = src.regno;
+ if (sn < s0)
+ {
+ avr_asm_len ("cp __zero_reg__,%0", &all_regs_rtx[sn],
+ plen, 1);
+ have_carry = true;
+ }
+ while (++sn < s0)
+ avr_asm_len ("cpc __zero_reg__,%0", &all_regs_rtx[sn], plen, 1);
+ lsb_in_tmp_reg = !MAY_CLOBBER (s0);
+ }
+ /* Add in C and the rounding value 127. */
+ /* If the destination msb is a sign byte, and in LD_REGS,
+ grab it as a temporary. */
+ if (sign_bytes
+ && TEST_HARD_REG_BIT (reg_class_contents[LD_REGS],
+ dest.regno_msb))
+ {
+ xop[3] = all_regs_rtx[dest.regno_msb];
+ avr_asm_len ("ldi %3,127", xop, plen, 1);
+ avr_asm_len ((have_carry && lsb_in_tmp_reg ? "adc __tmp_reg__,%3"
+ : have_carry ? "adc %2,%3"
+ : lsb_in_tmp_reg ? "add __tmp_reg__,%3"
+ : "add %2,%3"),
+ xop, plen, 1);
+ }
+ else
+ {
+ /* Fall back to use __zero_reg__ as a temporary. */
+ avr_asm_len ("dec __zero_reg__", NULL, plen, 1);
+ if (have_carry)
+ avr_asm_len ("clt" CR_TAB "bld __zero_reg__,7", NULL, plen, 2);
+ else
+ avr_asm_len ("lsr __zero_reg__", NULL, plen, 1);
+ avr_asm_len ((have_carry && lsb_in_tmp_reg
+ ? "adc __tmp_reg__,__zero_reg__"
+ : have_carry ? "adc %2,__zero_reg__"
+ : lsb_in_tmp_reg ? "add __tmp_reg__,__zero_reg__"
+ : "add %2,__zero_reg__"),
+ xop, plen, 1);
+ avr_asm_len ("eor __zero_reg__,__zero_reg__", NULL, plen, 1);
+ }
+ for (d0 = dest.regno + zero_bytes;
+ d0 <= dest.regno_msb - sign_bytes; d0++)
+ avr_asm_len ("adc %0,__zero_reg__", &all_regs_rtx[d0], plen, 1);
+ avr_asm_len (lsb_in_tmp_reg
+ ? "\n0:\t" "lsl __tmp_reg__" : "\n0:\t" "lsl %2",
+ xop, plen, 1);
+ }
+ else if (MAY_CLOBBER (s0))
+ avr_asm_len ("lsl %0", &all_regs_rtx[s0], plen, 1);
+ else
+ avr_asm_len ("mov __tmp_reg__,%0" CR_TAB
+ "lsl __tmp_reg__", &all_regs_rtx[s0], plen, 2);
+
+ code_ashift = "rol %0";
+ lsb_in_carry = true;
+ }
+
+ if (shift == ASHIFT)
+ {
+ for (d0 = dest.regno + zero_bytes;
+ d0 <= dest.regno_msb - sign_bytes; d0++)
+ {
+ avr_asm_len (code_ashift, &all_regs_rtx[d0], plen, 1);
+ code_ashift = "rol %0";
+ }
+
+ lsb_in_carry = false;
+ sign_in_carry = true;
+ }
+
+ /* Step 4a: Store MSB in carry if we don't already have it or will produce
+ ======= it in sign-extension below. */
+
+ if (!sign_extend && shift == ASHIFTRT && !msb_in_carry
+ && src.ibyte > dest.ibyte)
+ {
+ unsigned s0 = dest.regno_msb - sign_bytes - offset + 1;
+
+ if (MAY_CLOBBER (s0))
+ avr_asm_len ("lsr %0", &all_regs_rtx[s0], plen, 1);
+ else
+ avr_asm_len ("mov __tmp_reg__,%0" CR_TAB
+ "lsr __tmp_reg__", &all_regs_rtx[s0], plen, 2);
+
+ msb_in_carry = true;
+ }
+
+ /* Step 3: Sign-extend or zero-extend the destination as needed.
+ ====== */
+
+ if (sign_extend && !sign_in_carry)
+ {
+ unsigned s0 = src.regno_msb;
+
+ if (MAY_CLOBBER (s0))
+ avr_asm_len ("lsl %0", &all_regs_rtx[s0], plen, 1);
+ else
+ avr_asm_len ("mov __tmp_reg__,%0" CR_TAB
+ "lsl __tmp_reg__", &all_regs_rtx[s0], plen, 2);
+
+ sign_in_carry = true;
+ }
+
+ gcc_assert (sign_in_carry + msb_in_carry + lsb_in_carry <= 1);
+
+ unsigned copies = 0;
+ rtx movw = sign_extend ? NULL_RTX : clrw;
+
+ for (d0 = dest.regno_msb - sign_bytes + 1; d0 <= dest.regno_msb; d0++)
+ {
+ if (AVR_HAVE_MOVW && movw
+ && d0 % 2 == 0 && d0 + 1 <= dest.regno_msb)
+ {
+ xop[2] = all_regs_rtx[d0];
+ xop[3] = movw;
+ avr_asm_len ("movw %2,%3", xop, plen, 1);
+ d0++;
+ }
+ else
+ {
+ avr_asm_len (sign_extend ? "sbc %0,%0" : "clr %0",
+ &all_regs_rtx[d0], plen, 1);
+
+ if (++copies >= 2 && !movw && d0 % 2 == 1)
+ movw = all_regs_rtx[d0-1];
+ }
+ } /* for */
+
+
+ /* Step 4: Right shift the destination. This might be needed for
+ ====== conversions from unsigned to signed. */
+
+ if (shift == ASHIFTRT)
+ {
+ const char *code_ashiftrt = "lsr %0";
+
+ if (sign_extend || msb_in_carry)
+ code_ashiftrt = "ror %0";
+
+ if (src.sbit && src.ibyte == dest.ibyte)
+ code_ashiftrt = "asr %0";
+
+ for (d0 = dest.regno_msb - sign_bytes;
+ d0 >= dest.regno + zero_bytes - 1 && d0 >= dest.regno; d0--)
+ {
+ avr_asm_len (code_ashiftrt, &all_regs_rtx[d0], plen, 1);
+ code_ashiftrt = "ror %0";
+ }
+ }
+
+#undef MAY_CLOBBER
+
+ return "";
+}
+
+
+/* Output fixed-point rounding. XOP[0] = XOP[1] is the operand to round.
+ XOP[2] is the rounding point, a CONST_INT. The function prints the
+ instruction sequence if PLEN = NULL and computes the length in words
+ of the sequence if PLEN != NULL. Most of this function deals with
+ preparing operands for calls to `avr_out_plus' and `avr_out_bitop'. */
+
+const char*
+avr_out_round (rtx insn ATTRIBUTE_UNUSED, rtx *xop, int *plen)
+{
+ enum machine_mode mode = GET_MODE (xop[0]);
+ enum machine_mode imode = int_mode_for_mode (mode);
+ // The smallest fractional bit not cleared by the rounding is 2^(-RP).
+ int fbit = (int) GET_MODE_FBIT (mode);
+ double_int i_add = double_int_zero.set_bit (fbit-1 - INTVAL (xop[2]));
+ // Lengths of PLUS and AND parts.
+ int len_add = 0, *plen_add = plen ? &len_add : NULL;
+ int len_and = 0, *plen_and = plen ? &len_and : NULL;
+
+ // Add-Saturate 1/2 * 2^(-RP). Don't print the label "0:" when printing
+ // the saturated addition so that we can emit the "rjmp 1f" before the
+ // "0:" below.
+
+ rtx xadd = const_fixed_from_double_int (i_add, mode);
+ rtx xpattern, xsrc, op[4];
+
+ xsrc = SIGNED_FIXED_POINT_MODE_P (mode)
+ ? gen_rtx_SS_PLUS (mode, xop[1], xadd)
+ : gen_rtx_US_PLUS (mode, xop[1], xadd);
+ xpattern = gen_rtx_SET (VOIDmode, xop[0], xsrc);
+
+ op[0] = xop[0];
+ op[1] = xop[1];
+ op[2] = xadd;
+ avr_out_plus (xpattern, op, plen_add, NULL, false /* Don't print "0:" */);
+
+ avr_asm_len ("rjmp 1f" CR_TAB
+ "0:", NULL, plen_add, 1);
+
+ // Keep all bits from RP and higher: ... 2^(-RP)
+ // Clear all bits from RP+1 and lower: 2^(-RP-1) ...
+ // Rounding point ^^^^^^^
+ // Added above ^^^^^^^^^
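+ // A worked example (illustrative, not from the source): for a mode
+ // with FBIT = 7 and RP = 2, i_add = 2^(7-1-2) = 16, i.e. 1/2 * 2^(-2)
+ // in fixed-point, and the mask is -2 * 16 = -32, which keeps the bit
+ // for 2^(-2) and above while clearing the fraction bits 2^(-3)...2^(-7).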
+ rtx xreg = simplify_gen_subreg (imode, xop[0], mode, 0);
+ rtx xmask = immed_double_int_const (-i_add - i_add, imode);
+
+ xpattern = gen_rtx_SET (VOIDmode, xreg, gen_rtx_AND (imode, xreg, xmask));
+
+ op[0] = xreg;
+ op[1] = xreg;
+ op[2] = xmask;
+ op[3] = gen_rtx_SCRATCH (QImode);
+ avr_out_bitop (xpattern, op, plen_and);
+ avr_asm_len ("1:", NULL, plen, 0);
+
+ if (plen)
+ *plen = len_add + len_and;
+
+ return "";
+}
+
+
+/* Create RTL split patterns for byte-sized rotate expressions. This
+ produces a series of move instructions and considers overlap situations.
+ Overlapping non-HImode operands need a scratch register. */
+
+bool
+avr_rotate_bytes (rtx operands[])
+{
+ int i, j;
+ enum machine_mode mode = GET_MODE (operands[0]);
+ bool overlapped = reg_overlap_mentioned_p (operands[0], operands[1]);
+ bool same_reg = rtx_equal_p (operands[0], operands[1]);
+ int num = INTVAL (operands[2]);
+ rtx scratch = operands[3];
+ /* Work out whether a byte or a word move is needed. Rotates by an odd
+ number of bytes need QImode; use a word move if no scratch is needed,
+ otherwise use the size of the scratch register. */
+ enum machine_mode move_mode = QImode;
+ int move_size, offset, size;
+
+ if (num & 0xf)
+ move_mode = QImode;
+ else if ((mode == SImode && !same_reg) || !overlapped)
+ move_mode = HImode;
+ else
+ move_mode = GET_MODE (scratch);
+
+ /* Force DI rotates to use QI moves, since other DI moves are currently
+ split into QI moves, which makes forward propagation work better. */
+ if (mode == DImode)
+ move_mode = QImode;
+ /* Make scratch smaller if needed. */
+ if (SCRATCH != GET_CODE (scratch)
+ && HImode == GET_MODE (scratch)
+ && QImode == move_mode)
+ scratch = simplify_gen_subreg (move_mode, scratch, HImode, 0);
+
+ move_size = GET_MODE_SIZE (move_mode);
+ /* Number of bytes/words to rotate. */
+ offset = (num >> 3) / move_size;
+ /* Number of moves needed. */
+ size = GET_MODE_SIZE (mode) / move_size;
+ /* HImode byte swap is a special case that avoids the need for a scratch
+ register. */
+ if (mode == HImode && same_reg)
+ {
+ /* HImode byte swap, using xor. This is as quick as using scratch. */
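+ /* The three XORs below implement the classic in-place swap:
+ dst ^= src; src ^= dst; dst ^= src; exchanges the two bytes
+ without needing a temporary register. */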
+ rtx src, dst;
+ src = simplify_gen_subreg (move_mode, operands[1], mode, 0);
+ dst = simplify_gen_subreg (move_mode, operands[0], mode, 1);
+ if (!rtx_equal_p (dst, src))
+ {
+ emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
+ emit_move_insn (src, gen_rtx_XOR (QImode, src, dst));
+ emit_move_insn (dst, gen_rtx_XOR (QImode, dst, src));
+ }
+ }
+ else
+ {
+#define MAX_SIZE 8 /* GET_MODE_SIZE (DImode) / GET_MODE_SIZE (QImode) */
+ /* Create linked list of moves to determine move order. */
+ struct {
+ rtx src, dst;
+ int links;
+ } move[MAX_SIZE + 8];
+ int blocked, moves;
+
+ gcc_assert (size <= MAX_SIZE);
+ /* Generate list of subreg moves. */
+ for (i = 0; i < size; i++)
+ {
+ int from = i;
+ int to = (from + offset) % size;
+ move[i].src = simplify_gen_subreg (move_mode, operands[1],
+ mode, from * move_size);
+ move[i].dst = simplify_gen_subreg (move_mode, operands[0],
+ mode, to * move_size);
+ move[i].links = -1;
+ }
+ /* Mark dependence where a dst of one move is the src of another move.
+ The first move is a conflict as it must wait until the second is
+ performed. We ignore moves to self; we catch those later. */
+ if (overlapped)
+ for (i = 0; i < size; i++)
+ if (reg_overlap_mentioned_p (move[i].dst, operands[1]))
+ for (j = 0; j < size; j++)
+ if (j != i && rtx_equal_p (move[j].src, move[i].dst))
+ {
+ /* The dst of move i is the src of move j. */
+ move[i].links = j;
+ break;
+ }
+
+ blocked = -1;
+ moves = 0;
+ /* Go through move list and perform non-conflicting moves. As each
+ non-overlapping move is made, it may remove other conflicts
+ so the process is repeated until no conflicts remain. */
+ do
+ {
+ blocked = -1;
+ moves = 0;
+ /* Emit move where dst is not also a src or we have used that
+ src already. */
+ for (i = 0; i < size; i++)
+ if (move[i].src != NULL_RTX)
+ {
+ if (move[i].links == -1
+ || move[move[i].links].src == NULL_RTX)
+ {
+ moves++;
+ /* Ignore NOP moves to self. */
+ if (!rtx_equal_p (move[i].dst, move[i].src))
+ emit_move_insn (move[i].dst, move[i].src);
+
+ /* Remove conflict from list. */
+ move[i].src = NULL_RTX;
+ }
+ else
+ blocked = i;
+ }
+
+ /* Check for deadlock. This is when no moves occurred and we have
+ at least one blocked move. */
+ if (moves == 0 && blocked != -1)
+ {
+ /* Need to use the scratch register to break the deadlock. Add a
+ move that puts the dst of the blocked move into the scratch
+ register; when that move occurs, it breaks the chain. The
+ scratch register is then substituted into the real move. */
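+ /* Illustrative example (not from the source): rotating a 4-byte
+ value in place by 16 bits with 2-byte moves yields the 2-cycle
+ A -> B, B -> A; the scratch breaks it as
+ scratch <- B, A -> B, scratch -> A. */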
+
+ gcc_assert (SCRATCH != GET_CODE (scratch));
+
+ move[size].src = move[blocked].dst;
+ move[size].dst = scratch;
+ /* Scratch move is never blocked. */
+ move[size].links = -1;
+ /* Make sure we have a valid link. */
+ gcc_assert (move[blocked].links != -1);
+ /* Replace src of blocking move with scratch reg. */
+ move[move[blocked].links].src = scratch;
+ /* Make dependent on scratch move occurring. */
+ move[blocked].links = size;
+ size++;
+ }
+ }
+ while (blocked != -1);
+ }
+ return true;
+}
+
+
+/* Worker function for `ADJUST_INSN_LENGTH'. */
+/* Modify the length assigned to instruction INSN.
+ LEN is the initially computed length of the insn. */
+
+int
+avr_adjust_insn_length (rtx insn, int len)
+{
+ rtx *op = recog_data.operand;
+ enum attr_adjust_len adjust_len;
+
+ /* Some complex insns don't need length adjustment, and for some of
+ them the length must not be adjusted at all. It is easier to state
+ this in the insn attribute "adjust_len" than to clutter up the code
+ here... */
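+ /* For example, an insn in avr.md may request adjustment via an
+ attribute like (set_attr "adjust_len" "mov8"); the attribute values
+ correspond to the ADJUST_LEN_* cases dispatched below (illustrative;
+ see avr.md for the actual patterns). */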
+
+ if (-1 == recog_memoized (insn))
+ {
+ return len;
+ }
+
+ /* Read from insn attribute "adjust_len" if/how length is to be adjusted. */
+
+ adjust_len = get_attr_adjust_len (insn);
+
+ if (adjust_len == ADJUST_LEN_NO)
+ {
+ /* Nothing to adjust: The length from attribute "length" is fine.
+ This is the default. */
+
+ return len;
+ }
+
+ /* Extract insn's operands. */
+
+ extract_constrain_insn_cached (insn);
+
+ /* Dispatch to right function. */
+
+ switch (adjust_len)
+ {
+ case ADJUST_LEN_RELOAD_IN16: output_reload_inhi (op, op[2], &len); break;
+ case ADJUST_LEN_RELOAD_IN24: avr_out_reload_inpsi (op, op[2], &len); break;
+ case ADJUST_LEN_RELOAD_IN32: output_reload_insisf (op, op[2], &len); break;
+
+ case ADJUST_LEN_OUT_BITOP: avr_out_bitop (insn, op, &len); break;
+
+ case ADJUST_LEN_PLUS: avr_out_plus (insn, op, &len); break;
+ case ADJUST_LEN_ADDTO_SP: avr_out_addto_sp (op, &len); break;
+
+ case ADJUST_LEN_MOV8: output_movqi (insn, op, &len); break;
+ case ADJUST_LEN_MOV16: output_movhi (insn, op, &len); break;
+ case ADJUST_LEN_MOV24: avr_out_movpsi (insn, op, &len); break;
+ case ADJUST_LEN_MOV32: output_movsisf (insn, op, &len); break;
+ case ADJUST_LEN_MOVMEM: avr_out_movmem (insn, op, &len); break;
+ case ADJUST_LEN_XLOAD: avr_out_xload (insn, op, &len); break;
+ case ADJUST_LEN_LPM: avr_out_lpm (insn, op, &len); break;
+
+ case ADJUST_LEN_SFRACT: avr_out_fract (insn, op, true, &len); break;
+ case ADJUST_LEN_UFRACT: avr_out_fract (insn, op, false, &len); break;
+ case ADJUST_LEN_ROUND: avr_out_round (insn, op, &len); break;
+
+ case ADJUST_LEN_TSTHI: avr_out_tsthi (insn, op, &len); break;
+ case ADJUST_LEN_TSTPSI: avr_out_tstpsi (insn, op, &len); break;
+ case ADJUST_LEN_TSTSI: avr_out_tstsi (insn, op, &len); break;
+ case ADJUST_LEN_COMPARE: avr_out_compare (insn, op, &len); break;
+ case ADJUST_LEN_COMPARE64: avr_out_compare64 (insn, op, &len); break;
+
+ case ADJUST_LEN_LSHRQI: lshrqi3_out (insn, op, &len); break;
+ case ADJUST_LEN_LSHRHI: lshrhi3_out (insn, op, &len); break;
+ case ADJUST_LEN_LSHRSI: lshrsi3_out (insn, op, &len); break;
+
+ case ADJUST_LEN_ASHRQI: ashrqi3_out (insn, op, &len); break;
+ case ADJUST_LEN_ASHRHI: ashrhi3_out (insn, op, &len); break;
+ case ADJUST_LEN_ASHRSI: ashrsi3_out (insn, op, &len); break;
+
+ case ADJUST_LEN_ASHLQI: ashlqi3_out (insn, op, &len); break;
+ case ADJUST_LEN_ASHLHI: ashlhi3_out (insn, op, &len); break;
+ case ADJUST_LEN_ASHLSI: ashlsi3_out (insn, op, &len); break;
+
+ case ADJUST_LEN_ASHLPSI: avr_out_ashlpsi3 (insn, op, &len); break;
+ case ADJUST_LEN_ASHRPSI: avr_out_ashrpsi3 (insn, op, &len); break;
+ case ADJUST_LEN_LSHRPSI: avr_out_lshrpsi3 (insn, op, &len); break;
+
+ case ADJUST_LEN_CALL: len = AVR_HAVE_JMP_CALL ? 2 : 1; break;
+
+ case ADJUST_LEN_INSERT_BITS: avr_out_insert_bits (op, &len); break;
+
+ default:
+ gcc_unreachable();
+ }
+
+ return len;
+}
+
+/* Return nonzero if register REG is dead after INSN. */
+
+int
+reg_unused_after (rtx insn, rtx reg)
+{
+ return (dead_or_set_p (insn, reg)
+ || (REG_P(reg) && _reg_unused_after (insn, reg)));
+}
+
+/* Return nonzero if REG is not used after INSN.
+ We assume REG is a reload reg, and therefore does
+ not live past labels. It may live past calls or jumps though. */
+
+int
+_reg_unused_after (rtx insn, rtx reg)
+{
+ enum rtx_code code;
+ rtx set;
+
+ /* If the reg is set by this instruction, then it is safe for our
+ case. Disregard the case where this is a store to memory, since
+ we are checking a register used in the store address. */
+ set = single_set (insn);
+ if (set && GET_CODE (SET_DEST (set)) != MEM
+ && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return 1;
+
+ while ((insn = NEXT_INSN (insn)))
+ {
+ rtx set;
+ code = GET_CODE (insn);
+
+#if 0
+ /* If this is a label that existed before reload, then the register
+ is dead here. However, if this is a label added by reorg, then
+ the register may still be live here. We can't tell the difference,
+ so we just ignore labels completely. */
+ if (code == CODE_LABEL)
+ return 1;
+ /* else */
+#endif
+
+ if (!INSN_P (insn))
+ continue;
+
+ if (code == JUMP_INSN)
+ return 0;
+
+ /* If this is a sequence, we must handle all of its insns at once.
+ We could have for instance a call that sets the target register,
+ and an insn in a delay slot that uses the register. In this case,
+ we must return 0. */
+ else if (code == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
+ {
+ int i;
+ int retval = 0;
+
+ for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+ {
+ rtx this_insn = XVECEXP (PATTERN (insn), 0, i);
+ rtx set = single_set (this_insn);
+
+ if (CALL_P (this_insn))
+ code = CALL_INSN;
+ else if (JUMP_P (this_insn))
+ {
+ if (INSN_ANNULLED_BRANCH_P (this_insn))
+ return 0;
+ code = JUMP_INSN;
+ }
+
+ if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ {
+ if (GET_CODE (SET_DEST (set)) != MEM)
+ retval = 1;
+ else
+ return 0;
+ }
+ if (set == 0
+ && reg_overlap_mentioned_p (reg, PATTERN (this_insn)))
+ return 0;
+ }
+ if (retval == 1)
+ return 1;
+ else if (code == JUMP_INSN)
+ return 0;
+ }
+
+ if (code == CALL_INSN)
+ {
+ rtx tem;
+ for (tem = CALL_INSN_FUNCTION_USAGE (insn); tem; tem = XEXP (tem, 1))
+ if (GET_CODE (XEXP (tem, 0)) == USE
+ && REG_P (XEXP (XEXP (tem, 0), 0))
+ && reg_overlap_mentioned_p (reg, XEXP (XEXP (tem, 0), 0)))
+ return 0;
+ if (call_used_regs[REGNO (reg)])
+ return 1;
+ }
+
+ set = single_set (insn);
+
+ if (set && reg_overlap_mentioned_p (reg, SET_SRC (set)))
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return GET_CODE (SET_DEST (set)) != MEM;
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
+ }
+ return 1;
+}
+
+
+/* Implement `TARGET_ASM_INTEGER'. */
+/* Target hook for assembling integer objects. The AVR version needs
+ special handling for references to certain labels. */
+
+static bool
+avr_assemble_integer (rtx x, unsigned int size, int aligned_p)
+{
+ if (size == POINTER_SIZE / BITS_PER_UNIT && aligned_p
+ && text_segment_operand (x, VOIDmode))
+ {
+ fputs ("\t.word\tgs(", asm_out_file);
+ output_addr_const (asm_out_file, x);
+ fputs (")\n", asm_out_file);
+
+ return true;
+ }
+ else if (GET_MODE (x) == PSImode)
+ {
+ /* This needs binutils 2.23+, see PR binutils/13503 */
+
+ fputs ("\t.byte\tlo8(", asm_out_file);
+ output_addr_const (asm_out_file, x);
+ fputs (")" ASM_COMMENT_START "need binutils PR13503\n", asm_out_file);
+
+ fputs ("\t.byte\thi8(", asm_out_file);
+ output_addr_const (asm_out_file, x);
+ fputs (")" ASM_COMMENT_START "need binutils PR13503\n", asm_out_file);
+
+ fputs ("\t.byte\thh8(", asm_out_file);
+ output_addr_const (asm_out_file, x);
+ fputs (")" ASM_COMMENT_START "need binutils PR13503\n", asm_out_file);
+
+ return true;
+ }
+ else if (CONST_FIXED_P (x))
+ {
+ unsigned n;
+
+ /* varasm fails to handle big fixed-point modes that don't fit in a
+ HOST_WIDE_INT. */
+
+ for (n = 0; n < size; n++)
+ {
+ rtx xn = simplify_gen_subreg (QImode, x, GET_MODE (x), n);
+ default_assemble_integer (xn, 1, aligned_p);
+ }
+
+ return true;
+ }
+
+ return default_assemble_integer (x, size, aligned_p);
+}
+
+
+/* Implement `TARGET_CLASS_LIKELY_SPILLED_P'. */
+/* Return value is nonzero if pseudos that have been
+ assigned to registers of class CLASS would likely be spilled
+ because registers of CLASS are needed for spill registers. */
+
+static bool
+avr_class_likely_spilled_p (reg_class_t c)
+{
+ return (c != ALL_REGS && c != ADDW_REGS);
+}
+
+
+/* Valid attributes:
+ progmem - Put data into program memory.
+ signal - Make a function a hardware interrupt handler.
+ Interrupts remain disabled after the function prologue.
+ interrupt - Make a function a hardware interrupt handler. Interrupts
+ are enabled by means of SEI before the function prologue.
+ naked - Don't generate a function prologue/epilogue or a RET
+ instruction. */
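+/* Illustrative user-level usage of these attributes (example code,
+ not part of the compiler):
+
+ const char msg[] __attribute__((progmem)) = "hello";
+ void __vector_1 (void) __attribute__((signal)); interrupts stay off
+ void start (void) __attribute__((naked)); no prologue/epilogue/RET */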
+
+/* Handle a "progmem" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+avr_handle_progmem_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (DECL_P (*node))
+ {
+ if (TREE_CODE (*node) == TYPE_DECL)
+ {
+ /* This is really a decl attribute, not a type attribute,
+ but try to handle it for GCC 3.0 backwards compatibility. */
+
+ tree type = TREE_TYPE (*node);
+ tree attr = tree_cons (name, args, TYPE_ATTRIBUTES (type));
+ tree newtype = build_type_attribute_variant (type, attr);
+
+ TYPE_MAIN_VARIANT (newtype) = TYPE_MAIN_VARIANT (type);
+ TREE_TYPE (*node) = newtype;
+ *no_add_attrs = true;
+ }
+ else if (TREE_STATIC (*node) || DECL_EXTERNAL (*node))
+ {
+ *no_add_attrs = false;
+ }
+ else
+ {
+ warning (OPT_Wattributes, "%qE attribute ignored",
+ name);
+ *no_add_attrs = true;
+ }
+ }
+
+ return NULL_TREE;
+}
+
+/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+avr_handle_fndecl_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_DECL)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+static tree
+avr_handle_fntype_attribute (tree *node, tree name,
+ tree args ATTRIBUTE_UNUSED,
+ int flags ATTRIBUTE_UNUSED,
+ bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) != FUNCTION_TYPE)
+ {
+ warning (OPT_Wattributes, "%qE attribute only applies to functions",
+ name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
+
+/* AVR attributes. */
+static const struct attribute_spec
+avr_attribute_table[] =
+{
+ /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
+ affects_type_identity } */
+ { "progmem", 0, 0, false, false, false, avr_handle_progmem_attribute,
+ false },
+ { "signal", 0, 0, true, false, false, avr_handle_fndecl_attribute,
+ false },
+ { "interrupt", 0, 0, true, false, false, avr_handle_fndecl_attribute,
+ false },
+ { "naked", 0, 0, false, true, true, avr_handle_fntype_attribute,
+ false },
+ { "OS_task", 0, 0, false, true, true, avr_handle_fntype_attribute,
+ false },
+ { "OS_main", 0, 0, false, true, true, avr_handle_fntype_attribute,
+ false },
+ { NULL, 0, 0, false, false, false, NULL, false }
+};
+
+
+/* Check whether DECL is to be placed in program memory space by
+ means of attribute `progmem' or some address-space qualifier.
+ Return non-zero if DECL is data that must end up in flash and
+ zero if the data lives in RAM (.bss, .data, .rodata, ...).
+
+ Return 2 if DECL is located in 24-bit flash address-space.
+ Return 1 if DECL is located in 16-bit flash address-space.
+ Return -1 if attribute `progmem' occurs in DECL or ATTRIBUTES.
+ Return 0 otherwise. */
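+/* For example (illustrative):
+ const __memx char a[] = "x"; => 2
+ const __flash char b[] = "x"; => 1
+ const char c[] __attribute__((progmem)) = "x"; => -1
+ char d[] = "x"; => 0 (lives in RAM) */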
+
+int
+avr_progmem_p (tree decl, tree attributes)
+{
+ tree a;
+
+ if (TREE_CODE (decl) != VAR_DECL)
+ return 0;
+
+ if (avr_decl_memx_p (decl))
+ return 2;
+
+ if (avr_decl_flash_p (decl))
+ return 1;
+
+ if (NULL_TREE
+ != lookup_attribute ("progmem", attributes))
+ return -1;
+
+ a = decl;
+
+ do
+ a = TREE_TYPE(a);
+ while (TREE_CODE (a) == ARRAY_TYPE);
+
+ if (a == error_mark_node)
+ return 0;
+
+ if (NULL_TREE != lookup_attribute ("progmem", TYPE_ATTRIBUTES (a)))
+ return -1;
+
+ return 0;
+}
+
+
+/* Scan type TYP for pointer references to a non-generic address space.
+ Return ADDR_SPACE_GENERIC (i.e. 0) if all pointers targeting such an
+ address space are also declared to be CONST.
+ Otherwise, return the respective address space, i.e. a value != 0. */
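+/* For example (illustrative): `const __flash char *p' is fine because
+ the pointer target is const, whereas `__flash char *p' makes this
+ function return the flash address space, which is then diagnosed. */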
+
+static addr_space_t
+avr_nonconst_pointer_addrspace (tree typ)
+{
+ while (ARRAY_TYPE == TREE_CODE (typ))
+ typ = TREE_TYPE (typ);
+
+ if (POINTER_TYPE_P (typ))
+ {
+ addr_space_t as;
+ tree target = TREE_TYPE (typ);
+
+ /* Pointer to function: Test the function's return type. */
+
+ if (FUNCTION_TYPE == TREE_CODE (target))
+ return avr_nonconst_pointer_addrspace (TREE_TYPE (target));
+
+ /* "Ordinary" pointers... */
+
+ while (TREE_CODE (target) == ARRAY_TYPE)
+ target = TREE_TYPE (target);
+
+ /* Pointers to non-generic address space must be const.
+ Refuse address spaces outside the device's flash. */
+
+ as = TYPE_ADDR_SPACE (target);
+
+ if (!ADDR_SPACE_GENERIC_P (as)
+ && (!TYPE_READONLY (target)
+ || avr_addrspace[as].segment >= avr_current_device->n_flash))
+ {
+ return as;
+ }
+
+ /* Scan pointer's target type. */
+
+ return avr_nonconst_pointer_addrspace (target);
+ }
+
+ return ADDR_SPACE_GENERIC;
+}
+
+
+/* Sanity check NODE so that all pointers targeting non-generic address
+ spaces go along with the CONST qualifier. Writes to these address
+ spaces should be detected and diagnosed as early as possible. */
+
+static bool
+avr_pgm_check_var_decl (tree node)
+{
+ const char *reason = NULL;
+
+ addr_space_t as = ADDR_SPACE_GENERIC;
+
+ gcc_assert (as == 0);
+
+ if (avr_log.progmem)
+ avr_edump ("%?: %t\n", node);
+
+ switch (TREE_CODE (node))
+ {
+ default:
+ break;
+
+ case VAR_DECL:
+ if (as = avr_nonconst_pointer_addrspace (TREE_TYPE (node)), as)
+ reason = "variable";
+ break;
+
+ case PARM_DECL:
+ if (as = avr_nonconst_pointer_addrspace (TREE_TYPE (node)), as)
+ reason = "function parameter";
+ break;
+
+ case FIELD_DECL:
+ if (as = avr_nonconst_pointer_addrspace (TREE_TYPE (node)), as)
+ reason = "structure field";
+ break;
+
+ case FUNCTION_DECL:
+ if (as = avr_nonconst_pointer_addrspace (TREE_TYPE (TREE_TYPE (node))),
+ as)
+ reason = "return type of function";
+ break;
+
+ case POINTER_TYPE:
+ if (as = avr_nonconst_pointer_addrspace (node), as)
+ reason = "pointer";
+ break;
+ }
+
+ if (reason)
+ {
+ if (avr_addrspace[as].segment >= avr_current_device->n_flash)
+ {
+ if (TYPE_P (node))
+ error ("%qT uses address space %qs beyond flash of %qs",
+ node, avr_addrspace[as].name, avr_current_device->name);
+ else
+ error ("%s %q+D uses address space %qs beyond flash of %qs",
+ reason, node, avr_addrspace[as].name,
+ avr_current_device->name);
+ }
+ else
+ {
+ if (TYPE_P (node))
+ error ("pointer targeting address space %qs must be const in %qT",
+ avr_addrspace[as].name, node);
+ else
+ error ("pointer targeting address space %qs must be const"
+ " in %s %q+D",
+ avr_addrspace[as].name, reason, node);
+ }
+ }
+
+ return reason == NULL;
+}
+
+
+/* Add the section attribute if the variable is in progmem. */
+
+static void
+avr_insert_attributes (tree node, tree *attributes)
+{
+ avr_pgm_check_var_decl (node);
+
+ if (TREE_CODE (node) == VAR_DECL
+ && (TREE_STATIC (node) || DECL_EXTERNAL (node))
+ && avr_progmem_p (node, *attributes))
+ {
+ addr_space_t as;
+ tree node0 = node;
+
+ /* For C++, we have to peel arrays in order to determine correctly
+ whether the object is read-only. */
+
+ do
+ node0 = TREE_TYPE (node0);
+ while (TREE_CODE (node0) == ARRAY_TYPE);
+
+ if (error_mark_node == node0)
+ return;
+
+ as = TYPE_ADDR_SPACE (TREE_TYPE (node));
+
+ if (avr_addrspace[as].segment >= avr_current_device->n_flash)
+ {
+ error ("variable %q+D located in address space %qs"
+ " beyond flash of %qs",
+ node, avr_addrspace[as].name, avr_current_device->name);
+ }
+
+ if (!TYPE_READONLY (node0)
+ && !TREE_READONLY (node))
+ {
+ const char *reason = "__attribute__((progmem))";
+
+ if (!ADDR_SPACE_GENERIC_P (as))
+ reason = avr_addrspace[as].name;
+
+ if (avr_log.progmem)
+ avr_edump ("\n%?: %t\n%t\n", node, node0);
+
+ error ("variable %q+D must be const in order to be put into"
+ " read-only section by means of %qs", node, reason);
+ }
+ }
+}
+
+
+/* Implement `ASM_OUTPUT_ALIGNED_DECL_LOCAL'. */
+/* Implement `ASM_OUTPUT_ALIGNED_DECL_COMMON'. */
+/* Track need of __do_clear_bss. */
+
+void
+avr_asm_output_aligned_decl_common (FILE * stream,
+ const_tree decl ATTRIBUTE_UNUSED,
+ const char *name,
+ unsigned HOST_WIDE_INT size,
+ unsigned int align, bool local_p)
+{
+ /* __gnu_lto_v1 etc. are just linker markers injected by toplev.c.
+ There is no need to trigger __do_clear_bss code for them. */
+
+ if (!STR_PREFIX_P (name, "__gnu_lto"))
+ avr_need_clear_bss_p = true;
+
+ if (local_p)
+ ASM_OUTPUT_ALIGNED_LOCAL (stream, name, size, align);
+ else
+ ASM_OUTPUT_ALIGNED_COMMON (stream, name, size, align);
+}
+
+
+/* Unnamed section callback for data_section
+ to track need of __do_copy_data. */
+
+static void
+avr_output_data_section_asm_op (const void *data)
+{
+ avr_need_copy_data_p = true;
+
+ /* Dispatch to default. */
+ output_section_asm_op (data);
+}
+
+
+/* Unnamed section callback for bss_section
+ to track need of __do_clear_bss. */
+
+static void
+avr_output_bss_section_asm_op (const void *data)
+{
+ avr_need_clear_bss_p = true;
+
+ /* Dispatch to default. */
+ output_section_asm_op (data);
+}
+
+
+/* Unnamed section callback for progmem*.data sections. */
+
+static void
+avr_output_progmem_section_asm_op (const void *data)
+{
+ fprintf (asm_out_file, "\t.section\t%s,\"a\",@progbits\n",
+ (const char*) data);
+}
+
+
+/* Implement `TARGET_ASM_INIT_SECTIONS'. */
+
+static void
+avr_asm_init_sections (void)
+{
+ /* Set up a section for jump tables. Alignment is handled by
+ ASM_OUTPUT_BEFORE_CASE_LABEL. */
+
+ if (AVR_HAVE_JMP_CALL)
+ {
+ progmem_swtable_section
+ = get_unnamed_section (0, output_section_asm_op,
+ "\t.section\t.progmem.gcc_sw_table"
+ ",\"a\",@progbits");
+ }
+ else
+ {
+ progmem_swtable_section
+ = get_unnamed_section (SECTION_CODE, output_section_asm_op,
+ "\t.section\t.progmem.gcc_sw_table"
+ ",\"ax\",@progbits");
+ }
+
+ /* Override section callbacks to keep track of `avr_need_clear_bss_p'
+ and `avr_need_copy_data_p', respectively. */
+
+ readonly_data_section->unnamed.callback = avr_output_data_section_asm_op;
+ data_section->unnamed.callback = avr_output_data_section_asm_op;
+ bss_section->unnamed.callback = avr_output_bss_section_asm_op;
+}
+
+
+/* Implement `TARGET_ASM_FUNCTION_RODATA_SECTION'. */
+
+static section*
+avr_asm_function_rodata_section (tree decl)
+{
+ /* If a function is unused and optimized out by -ffunction-sections
+ and --gc-sections, ensure that the same will happen for its jump
+ tables by putting them into individual sections. */
+
+ unsigned int flags;
+ section * frodata;
+
+ /* Get the frodata section from the default function in varasm.c,
+ but treat function-associated data like jump tables as code
+ rather than as user-defined data. AVR has no constant pools. */
+ {
+ int fdata = flag_data_sections;
+
+ flag_data_sections = flag_function_sections;
+ frodata = default_function_rodata_section (decl);
+ flag_data_sections = fdata;
+ flags = frodata->common.flags;
+ }
+
+ if (frodata != readonly_data_section
+ && flags & SECTION_NAMED)
+ {
+ /* Adjust section flags and replace section name prefix. */
+
+ unsigned int i;
+
+ static const char* const prefix[] =
+ {
+ ".rodata", ".progmem.gcc_sw_table",
+ ".gnu.linkonce.r.", ".gnu.linkonce.t."
+ };
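+ /* Each even/odd pair maps an old section-name prefix to its
+ replacement; e.g. ".rodata.foo" would be renamed to
+ ".progmem.gcc_sw_table.foo" (illustrative name). */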
+
+ for (i = 0; i < sizeof (prefix) / sizeof (*prefix); i += 2)
+ {
+ const char * old_prefix = prefix[i];
+ const char * new_prefix = prefix[i+1];
+ const char * name = frodata->named.name;
+
+ if (STR_PREFIX_P (name, old_prefix))
+ {
+ const char *rname = ACONCAT ((new_prefix,
+ name + strlen (old_prefix), NULL));
+ flags &= ~SECTION_CODE;
+ flags |= AVR_HAVE_JMP_CALL ? 0 : SECTION_CODE;
+
+ return get_section (rname, flags, frodata->named.decl);
+ }
+ }
+ }
+
+ return progmem_swtable_section;
+}
+
+
+/* Implement `TARGET_ASM_NAMED_SECTION'. */
+/* Track need of __do_clear_bss, __do_copy_data for named sections. */
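+/* For a progmem datum, the generic ".rodata" prefix is replaced by the
+ address space's section prefix; e.g. ".rodata.foo" would become
+ ".progmem.data.foo" for __flash (illustrative, assuming the section
+ name for __flash is ".progmem.data"). */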
+
+static void
+avr_asm_named_section (const char *name, unsigned int flags, tree decl)
+{
+ if (flags & AVR_SECTION_PROGMEM)
+ {
+ addr_space_t as = (flags & AVR_SECTION_PROGMEM) / SECTION_MACH_DEP;
+ const char *old_prefix = ".rodata";
+ const char *new_prefix = avr_addrspace[as].section_name;
+
+ if (STR_PREFIX_P (name, old_prefix))
+ {
+ const char *sname = ACONCAT ((new_prefix,
+ name + strlen (old_prefix), NULL));
+ default_elf_asm_named_section (sname, flags, decl);
+ return;
+ }
+
+ default_elf_asm_named_section (new_prefix, flags, decl);
+ return;
+ }
+
+ if (!avr_need_copy_data_p)
+ avr_need_copy_data_p = (STR_PREFIX_P (name, ".data")
+ || STR_PREFIX_P (name, ".rodata")
+ || STR_PREFIX_P (name, ".gnu.linkonce.d"));
+
+ if (!avr_need_clear_bss_p)
+ avr_need_clear_bss_p = STR_PREFIX_P (name, ".bss");
+
+ default_elf_asm_named_section (name, flags, decl);
+}
+
+
+/* Implement `TARGET_SECTION_TYPE_FLAGS'. */
+
+static unsigned int
+avr_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+ if (STR_PREFIX_P (name, ".noinit"))
+ {
+ if (decl && TREE_CODE (decl) == VAR_DECL
+ && DECL_INITIAL (decl) == NULL_TREE)
+ flags |= SECTION_BSS; /* @nobits */
+ else
+ warning (0, "only uninitialized variables can be placed in the "
+ ".noinit section");
+ }
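+ /* Typical user-level use of .noinit (illustrative):
+ int boot_count __attribute__((section(".noinit"))); */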
+
+ if (decl && DECL_P (decl)
+ && avr_progmem_p (decl, DECL_ATTRIBUTES (decl)))
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (decl));
+
+ /* Attribute progmem puts data in the generic address space.
+ Set section flags as if it were in __flash to get the right
+ section prefix in the remainder. */
+
+ if (ADDR_SPACE_GENERIC_P (as))
+ as = ADDR_SPACE_FLASH;
+
+ flags |= as * SECTION_MACH_DEP;
+ flags &= ~SECTION_WRITE;
+ flags &= ~SECTION_BSS;
+ }
+
+ return flags;
+}
+
+
+/* Implement `TARGET_ENCODE_SECTION_INFO'. */
+
+static void
+avr_encode_section_info (tree decl, rtx rtl, int new_decl_p)
+{
+ /* In avr_handle_progmem_attribute, DECL_INITIAL is not yet
+ readily available, see PR34734. So we postpone the warning
+ about uninitialized data in the program memory section until here. */
+
+ if (new_decl_p
+ && decl && DECL_P (decl)
+ && NULL_TREE == DECL_INITIAL (decl)
+ && !DECL_EXTERNAL (decl)
+ && avr_progmem_p (decl, DECL_ATTRIBUTES (decl)))
+ {
+ warning (OPT_Wuninitialized,
+ "uninitialized variable %q+D put into "
+ "program memory area", decl);
+ }
+
+ default_encode_section_info (decl, rtl, new_decl_p);
+
+ if (decl && DECL_P (decl)
+ && TREE_CODE (decl) != FUNCTION_DECL
+ && MEM_P (rtl)
+ && SYMBOL_REF == GET_CODE (XEXP (rtl, 0)))
+ {
+ rtx sym = XEXP (rtl, 0);
+ tree type = TREE_TYPE (decl);
+ if (type == error_mark_node)
+ return;
+ addr_space_t as = TYPE_ADDR_SPACE (type);
+
+ /* PSTR strings are in generic space but located in flash:
+ patch address space. */
+
+ if (-1 == avr_progmem_p (decl, DECL_ATTRIBUTES (decl)))
+ as = ADDR_SPACE_FLASH;
+
+ AVR_SYMBOL_SET_ADDR_SPACE (sym, as);
+ }
+}
+
+
+/* Implement `TARGET_ASM_SELECT_SECTION' */
+
+static section *
+avr_asm_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align)
+{
+ section * sect = default_elf_select_section (decl, reloc, align);
+
+ if (decl && DECL_P (decl)
+ && avr_progmem_p (decl, DECL_ATTRIBUTES (decl)))
+ {
+ addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (decl));
+
+ /* __progmem__ goes in the generic address space but must be
+ allocated to .progmem.data. */
+
+ if (ADDR_SPACE_GENERIC_P (as))
+ as = ADDR_SPACE_FLASH;
+
+ if (sect->common.flags & SECTION_NAMED)
+ {
+ const char * name = sect->named.name;
+ const char * old_prefix = ".rodata";
+ const char * new_prefix = avr_addrspace[as].section_name;
+
+ if (STR_PREFIX_P (name, old_prefix))
+ {
+ const char *sname = ACONCAT ((new_prefix,
+ name + strlen (old_prefix), NULL));
+ return get_section (sname, sect->common.flags, sect->named.decl);
+ }
+ }
+
+ if (!progmem_section[as])
+ {
+ progmem_section[as]
+ = get_unnamed_section (0, avr_output_progmem_section_asm_op,
+ avr_addrspace[as].section_name);
+ }
+
+ return progmem_section[as];
+ }
+
+ return sect;
+}
+
+/* Implement `TARGET_ASM_FILE_START'. */
+/* Outputs some text at the start of each assembler file. */
+
+static void
+avr_file_start (void)
+{
+ int sfr_offset = avr_current_arch->sfr_offset;
+
+ if (avr_current_arch->asm_only)
+ error ("MCU %qs supported for assembler only", avr_current_device->name);
+
+ default_file_start ();
+
+ /* Print I/O addresses of some SFRs used with IN and OUT. */
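+ /* E.g. for a classic device such as ATmega8 this emits (illustrative):
+ __SP_H__ = 0x3e
+ __SP_L__ = 0x3d
+ __SREG__ = 0x3f */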
+
+ if (AVR_HAVE_SPH)
+ fprintf (asm_out_file, "__SP_H__ = 0x%02x\n", avr_addr.sp_h - sfr_offset);
+
+ fprintf (asm_out_file, "__SP_L__ = 0x%02x\n", avr_addr.sp_l - sfr_offset);
+ fprintf (asm_out_file, "__SREG__ = 0x%02x\n", avr_addr.sreg - sfr_offset);
+ if (AVR_HAVE_RAMPZ)
+ fprintf (asm_out_file, "__RAMPZ__ = 0x%02x\n", avr_addr.rampz - sfr_offset);
+ if (AVR_HAVE_RAMPY)
+ fprintf (asm_out_file, "__RAMPY__ = 0x%02x\n", avr_addr.rampy - sfr_offset);
+ if (AVR_HAVE_RAMPX)
+ fprintf (asm_out_file, "__RAMPX__ = 0x%02x\n", avr_addr.rampx - sfr_offset);
+ if (AVR_HAVE_RAMPD)
+ fprintf (asm_out_file, "__RAMPD__ = 0x%02x\n", avr_addr.rampd - sfr_offset);
+ if (AVR_XMEGA)
+ fprintf (asm_out_file, "__CCP__ = 0x%02x\n", avr_addr.ccp - sfr_offset);
+ fprintf (asm_out_file, "__tmp_reg__ = %d\n", TMP_REGNO);
+ fprintf (asm_out_file, "__zero_reg__ = %d\n", ZERO_REGNO);
+}
+
+
+/* Implement `TARGET_ASM_FILE_END'. */
+/* Outputs to the stdio stream FILE some
+ appropriate text to go at the end of an assembler file. */
+
+static void
+avr_file_end (void)
+{
+ /* Output these only if there is anything in the .data* / .rodata*
+ / .gnu.linkonce.* or .bss* / COMMON input section(s): some code
+ size can be saved by not linking in the initialization code from
+ libgcc if the respective sections are empty, see PR18145. */
+
+ if (avr_need_copy_data_p)
+ fputs (".global __do_copy_data\n", asm_out_file);
+
+ if (avr_need_clear_bss_p)
+ fputs (".global __do_clear_bss\n", asm_out_file);
+}
+
+
+/* Worker function for `ADJUST_REG_ALLOC_ORDER'. */
+/* Choose the order in which to allocate hard registers for
+ pseudo-registers local to a basic block.
+
+ Store the desired register order in the array `reg_alloc_order'.
+ Element 0 should be the register to allocate first; element 1, the
+ next register; and so on. */
+
+void
+avr_adjust_reg_alloc_order (void)
+{
+ unsigned int i;
+ static const int order_0[] =
+ {
+ 24, 25,
+ 18, 19, 20, 21, 22, 23,
+ 30, 31,
+ 26, 27, 28, 29,
+ 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
+ 0, 1,
+ 32, 33, 34, 35
+ };
+ static const int order_1[] =
+ {
+ 18, 19, 20, 21, 22, 23, 24, 25,
+ 30, 31,
+ 26, 27, 28, 29,
+ 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
+ 0, 1,
+ 32, 33, 34, 35
+ };
+ static const int order_2[] =
+ {
+ 25, 24, 23, 22, 21, 20, 19, 18,
+ 30, 31,
+ 26, 27, 28, 29,
+ 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2,
+ 1, 0,
+ 32, 33, 34, 35
+ };
+
+ const int *order = (TARGET_ORDER_1 ? order_1 :
+ TARGET_ORDER_2 ? order_2 :
+ order_0);
+ for (i = 0; i < ARRAY_SIZE (order_0); ++i)
+ reg_alloc_order[i] = order[i];
+}
+
+
+/* Implement `TARGET_REGISTER_MOVE_COST' */
+
+static int
+avr_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
+ reg_class_t from, reg_class_t to)
+{
+ return (from == STACK_REG ? 6
+ : to == STACK_REG ? 12
+ : 2);
+}
+
+
+/* Implement `TARGET_MEMORY_MOVE_COST' */
+
+static int
+avr_memory_move_cost (enum machine_mode mode,
+ reg_class_t rclass ATTRIBUTE_UNUSED,
+ bool in ATTRIBUTE_UNUSED)
+{
+ return (mode == QImode ? 2
+ : mode == HImode ? 4
+ : mode == SImode ? 8
+ : mode == SFmode ? 8
+ : 16);
+}
+
+
+/* Mutually recursive subroutine of avr_rtx_cost for calculating the
+ cost of an RTX operand given its context. X is the rtx of the
+ operand, MODE is its mode, and OUTER is the rtx_code of this
+ operand's parent operator. */
+
+static int
+avr_operand_rtx_cost (rtx x, enum machine_mode mode, enum rtx_code outer,
+ int opno, bool speed)
+{
+ enum rtx_code code = GET_CODE (x);
+ int total;
+
+ switch (code)
+ {
+ case REG:
+ case SUBREG:
+ return 0;
+
+ case CONST_INT:
+ case CONST_FIXED:
+ case CONST_DOUBLE:
+ return COSTS_N_INSNS (GET_MODE_SIZE (mode));
+
+ default:
+ break;
+ }
+
+ total = 0;
+ avr_rtx_costs (x, code, outer, opno, &total, speed);
+ return total;
+}
+
+/* Worker function for AVR backend's rtx_cost function.
+ X is the rtx expression whose cost is to be calculated.
+ Return true if the complete cost has been computed.
+ Return false if subexpressions should be scanned.
+ In either case, *TOTAL contains the cost result. */
+
+static bool
+avr_rtx_costs_1 (rtx x, int codearg, int outer_code ATTRIBUTE_UNUSED,
+ int opno ATTRIBUTE_UNUSED, int *total, bool speed)
+{
+ enum rtx_code code = (enum rtx_code) codearg;
+ enum machine_mode mode = GET_MODE (x);
+ HOST_WIDE_INT val;
+
+ switch (code)
+ {
+ case CONST_INT:
+ case CONST_FIXED:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ /* Immediate constants are as cheap as registers. */
+ *total = 0;
+ return true;
+
+ case MEM:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ return true;
+
+ case NEG:
+ switch (mode)
+ {
+ case QImode:
+ case SFmode:
+ *total = COSTS_N_INSNS (1);
+ break;
+
+ case HImode:
+ case PSImode:
+ case SImode:
+ *total = COSTS_N_INSNS (2 * GET_MODE_SIZE (mode) - 1);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case ABS:
+ switch (mode)
+ {
+ case QImode:
+ case SFmode:
+ *total = COSTS_N_INSNS (1);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case NOT:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case ZERO_EXTEND:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode)
+ - GET_MODE_SIZE (GET_MODE (XEXP (x, 0))));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case SIGN_EXTEND:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) + 2
+ - GET_MODE_SIZE (GET_MODE (XEXP (x, 0))));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case PLUS:
+ switch (mode)
+ {
+ case QImode:
+ if (AVR_HAVE_MUL
+ && MULT == GET_CODE (XEXP (x, 0))
+ && register_operand (XEXP (x, 1), QImode))
+ {
+ /* multiply-add */
+ *total = COSTS_N_INSNS (speed ? 4 : 3);
+ /* multiply-add with constant: will be split and the constant loaded. */
+ if (CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ *total = COSTS_N_INSNS (1) + *total;
+ return true;
+ }
+ *total = COSTS_N_INSNS (1);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ break;
+
+ case HImode:
+ if (AVR_HAVE_MUL
+ && (MULT == GET_CODE (XEXP (x, 0))
+ || ASHIFT == GET_CODE (XEXP (x, 0)))
+ && register_operand (XEXP (x, 1), HImode)
+ && (ZERO_EXTEND == GET_CODE (XEXP (XEXP (x, 0), 0))
+ || SIGN_EXTEND == GET_CODE (XEXP (XEXP (x, 0), 0))))
+ {
+ /* multiply-add */
+ *total = COSTS_N_INSNS (speed ? 5 : 4);
+ /* multiply-add with constant: will be split and the constant loaded. */
+ if (CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ *total = COSTS_N_INSNS (1) + *total;
+ return true;
+ }
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (2);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else if (INTVAL (XEXP (x, 1)) >= -63 && INTVAL (XEXP (x, 1)) <= 63)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (2);
+ break;
+
+ case PSImode:
+ if (!CONST_INT_P (XEXP (x, 1)))
+ {
+ *total = COSTS_N_INSNS (3);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else if (INTVAL (XEXP (x, 1)) >= -63 && INTVAL (XEXP (x, 1)) <= 63)
+ *total = COSTS_N_INSNS (2);
+ else
+ *total = COSTS_N_INSNS (3);
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (4);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else if (INTVAL (XEXP (x, 1)) >= -63 && INTVAL (XEXP (x, 1)) <= 63)
+ *total = COSTS_N_INSNS (1);
+ else
+ *total = COSTS_N_INSNS (4);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case MINUS:
+ if (AVR_HAVE_MUL
+ && QImode == mode
+ && register_operand (XEXP (x, 0), QImode)
+ && MULT == GET_CODE (XEXP (x, 1)))
+ {
+ /* multiply-sub */
+ *total = COSTS_N_INSNS (speed ? 4 : 3);
+ /* multiply-sub with constant: will be split and the constant loaded. */
+ if (CONST_INT_P (XEXP (XEXP (x, 1), 1)))
+ *total = COSTS_N_INSNS (1) + *total;
+ return true;
+ }
+ if (AVR_HAVE_MUL
+ && HImode == mode
+ && register_operand (XEXP (x, 0), HImode)
+ && (MULT == GET_CODE (XEXP (x, 1))
+ || ASHIFT == GET_CODE (XEXP (x, 1)))
+ && (ZERO_EXTEND == GET_CODE (XEXP (XEXP (x, 1), 0))
+ || SIGN_EXTEND == GET_CODE (XEXP (XEXP (x, 1), 0))))
+ {
+ /* multiply-sub */
+ *total = COSTS_N_INSNS (speed ? 5 : 4);
+ /* multiply-sub with constant: will be split and the constant loaded. */
+ if (CONST_INT_P (XEXP (XEXP (x, 1), 1)))
+ *total = COSTS_N_INSNS (1) + *total;
+ return true;
+ }
+ /* FALLTHRU */
+ case AND:
+ case IOR:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ return true;
+
+ case XOR:
+ *total = COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ return true;
+
+ case MULT:
+ switch (mode)
+ {
+ case QImode:
+ if (AVR_HAVE_MUL)
+ *total = COSTS_N_INSNS (!speed ? 3 : 4);
+ else if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ return false;
+ break;
+
+ case HImode:
+ if (AVR_HAVE_MUL)
+ {
+ rtx op0 = XEXP (x, 0);
+ rtx op1 = XEXP (x, 1);
+ enum rtx_code code0 = GET_CODE (op0);
+ enum rtx_code code1 = GET_CODE (op1);
+ bool ex0 = SIGN_EXTEND == code0 || ZERO_EXTEND == code0;
+ bool ex1 = SIGN_EXTEND == code1 || ZERO_EXTEND == code1;
+
+ if (ex0
+ && (u8_operand (op1, HImode)
+ || s8_operand (op1, HImode)))
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 6);
+ return true;
+ }
+ if (ex0
+ && register_operand (op1, HImode))
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 8);
+ return true;
+ }
+ else if (ex0 || ex1)
+ {
+ *total = COSTS_N_INSNS (!speed ? 3 : 5);
+ return true;
+ }
+ else if (register_operand (op0, HImode)
+ && (u8_operand (op1, HImode)
+ || s8_operand (op1, HImode)))
+ {
+ *total = COSTS_N_INSNS (!speed ? 6 : 9);
+ return true;
+ }
+ else
+ *total = COSTS_N_INSNS (!speed ? 7 : 10);
+ }
+ else if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ return false;
+ break;
+
+ case PSImode:
+ if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ *total = 10;
+ break;
+
+ case SImode:
+ if (AVR_HAVE_MUL)
+ {
+ if (!speed)
+ {
+ /* Add some additional costs beyond the CALL itself, e.g. for moves. */
+
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 5 : 4);
+ }
+ else
+ {
+ /* Just a rough estimate. Even with -O2 we don't want bulky
+ code expanded inline. */
+
+ *total = COSTS_N_INSNS (25);
+ }
+ }
+ else
+ {
+ if (speed)
+ *total = COSTS_N_INSNS (300);
+ else
+ /* Add some additional costs beyond the CALL itself, e.g. for moves. */
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 5 : 4);
+ }
+
+ return true;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ return true;
+
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ if (!speed)
+ *total = COSTS_N_INSNS (AVR_HAVE_JMP_CALL ? 2 : 1);
+ else
+ *total = COSTS_N_INSNS (15 * GET_MODE_SIZE (mode));
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ /* For div/mod with const-int divisor we have at least the cost of
+ loading the divisor. */
+ if (CONST_INT_P (XEXP (x, 1)))
+ *total += COSTS_N_INSNS (GET_MODE_SIZE (mode));
+ /* Add some overall penalty for clobbering and moving around registers. */
+ *total += COSTS_N_INSNS (2);
+ return true;
+
+ case ROTATE:
+ switch (mode)
+ {
+ case QImode:
+ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 4)
+ *total = COSTS_N_INSNS (1);
+
+ break;
+
+ case HImode:
+ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 8)
+ *total = COSTS_N_INSNS (3);
+
+ break;
+
+ case SImode:
+ if (CONST_INT_P (XEXP (x, 1)))
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 8:
+ case 24:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 16:
+ *total = COSTS_N_INSNS (AVR_HAVE_MOVW ? 4 : 6);
+ break;
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case ASHIFT:
+ switch (mode)
+ {
+ case QImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 17);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ {
+ val = INTVAL (XEXP (x, 1));
+ if (val == 7)
+ *total = COSTS_N_INSNS (3);
+ else if (val >= 0 && val <= 7)
+ *total = COSTS_N_INSNS (val);
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ break;
+
+ case HImode:
+ if (AVR_HAVE_MUL)
+ {
+ if (const_2_to_7_operand (XEXP (x, 1), HImode)
+ && (SIGN_EXTEND == GET_CODE (XEXP (x, 0))
+ || ZERO_EXTEND == GET_CODE (XEXP (x, 0))))
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 6);
+ return true;
+ }
+ }
+
+ if (const1_rtx == (XEXP (x, 1))
+ && SIGN_EXTEND == GET_CODE (XEXP (x, 0)))
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ case 8:
+ *total = COSTS_N_INSNS (2);
+ break;
+ case 9:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 2:
+ case 3:
+ case 10:
+ case 15:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 7:
+ case 11:
+ case 12:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 4:
+ *total = COSTS_N_INSNS (!speed ? 5 : 8);
+ break;
+ case 6:
+ *total = COSTS_N_INSNS (!speed ? 5 : 9);
+ break;
+ case 5:
+ *total = COSTS_N_INSNS (!speed ? 5 : 10);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ break;
+
+ case PSImode:
+ if (!CONST_INT_P (XEXP (x, 1)))
+ {
+ *total = COSTS_N_INSNS (!speed ? 6 : 73);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ case 8:
+ case 16:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 23:
+ *total = COSTS_N_INSNS (5);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 3 * INTVAL (XEXP (x, 1)));
+ break;
+ }
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 24:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 1:
+ case 8:
+ case 16:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 31:
+ *total = COSTS_N_INSNS (6);
+ break;
+ case 2:
+ *total = COSTS_N_INSNS (!speed ? 7 : 8);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case ASHIFTRT:
+ switch (mode)
+ {
+ case QImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 17);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ {
+ val = INTVAL (XEXP (x, 1));
+ if (val == 6)
+ *total = COSTS_N_INSNS (4);
+ else if (val == 7)
+ *total = COSTS_N_INSNS (2);
+ else if (val >= 0 && val <= 7)
+ *total = COSTS_N_INSNS (val);
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ break;
+
+ case HImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (2);
+ break;
+ case 15:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 2:
+ case 7:
+ case 8:
+ case 9:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 10:
+ case 14:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 11:
+ *total = COSTS_N_INSNS (!speed ? 5 : 6);
+ break;
+ case 12:
+ *total = COSTS_N_INSNS (!speed ? 5 : 7);
+ break;
+ case 6:
+ case 13:
+ *total = COSTS_N_INSNS (!speed ? 5 : 8);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ break;
+
+ case PSImode:
+ if (!CONST_INT_P (XEXP (x, 1)))
+ {
+ *total = COSTS_N_INSNS (!speed ? 6 : 73);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 16:
+ case 8:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 23:
+ *total = COSTS_N_INSNS (4);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 3 * INTVAL (XEXP (x, 1)));
+ break;
+ }
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 8:
+ case 16:
+ case 24:
+ *total = COSTS_N_INSNS (6);
+ break;
+ case 2:
+ *total = COSTS_N_INSNS (!speed ? 7 : 8);
+ break;
+ case 31:
+ *total = COSTS_N_INSNS (AVR_HAVE_MOVW ? 4 : 5);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case LSHIFTRT:
+ switch (mode)
+ {
+ case QImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 4 : 17);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ {
+ val = INTVAL (XEXP (x, 1));
+ if (val == 7)
+ *total = COSTS_N_INSNS (3);
+ else if (val >= 0 && val <= 7)
+ *total = COSTS_N_INSNS (val);
+ else
+ *total = COSTS_N_INSNS (1);
+ }
+ break;
+
+ case HImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ case 8:
+ *total = COSTS_N_INSNS (2);
+ break;
+ case 9:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 2:
+ case 10:
+ case 15:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 7:
+ case 11:
+ *total = COSTS_N_INSNS (5);
+ break;
+ case 3:
+ case 12:
+ case 13:
+ case 14:
+ *total = COSTS_N_INSNS (!speed ? 5 : 6);
+ break;
+ case 4:
+ *total = COSTS_N_INSNS (!speed ? 5 : 7);
+ break;
+ case 5:
+ case 6:
+ *total = COSTS_N_INSNS (!speed ? 5 : 9);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 41);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ break;
+
+ case PSImode:
+ if (!CONST_INT_P (XEXP (x, 1)))
+ {
+ *total = COSTS_N_INSNS (!speed ? 6 : 73);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ case 8:
+ case 16:
+ *total = COSTS_N_INSNS (3);
+ break;
+ case 23:
+ *total = COSTS_N_INSNS (5);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 5 : 3 * INTVAL (XEXP (x, 1)));
+ break;
+ }
+ break;
+
+ case SImode:
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ {
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ else
+ switch (INTVAL (XEXP (x, 1)))
+ {
+ case 0:
+ *total = 0;
+ break;
+ case 1:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 2:
+ *total = COSTS_N_INSNS (!speed ? 7 : 8);
+ break;
+ case 8:
+ case 16:
+ case 24:
+ *total = COSTS_N_INSNS (4);
+ break;
+ case 31:
+ *total = COSTS_N_INSNS (6);
+ break;
+ default:
+ *total = COSTS_N_INSNS (!speed ? 7 : 113);
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1,
+ speed);
+ }
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case COMPARE:
+ switch (GET_MODE (XEXP (x, 0)))
+ {
+ case QImode:
+ *total = COSTS_N_INSNS (1);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ break;
+
+ case HImode:
+ *total = COSTS_N_INSNS (2);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ else if (INTVAL (XEXP (x, 1)) != 0)
+ *total += COSTS_N_INSNS (1);
+ break;
+
+ case PSImode:
+ *total = COSTS_N_INSNS (3);
+ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) != 0)
+ *total += COSTS_N_INSNS (2);
+ break;
+
+ case SImode:
+ *total = COSTS_N_INSNS (4);
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ *total += avr_operand_rtx_cost (XEXP (x, 1), mode, code, 1, speed);
+ else if (INTVAL (XEXP (x, 1)) != 0)
+ *total += COSTS_N_INSNS (3);
+ break;
+
+ default:
+ return false;
+ }
+ *total += avr_operand_rtx_cost (XEXP (x, 0), mode, code, 0, speed);
+ return true;
+
+ case TRUNCATE:
+ if (AVR_HAVE_MUL
+ && LSHIFTRT == GET_CODE (XEXP (x, 0))
+ && MULT == GET_CODE (XEXP (XEXP (x, 0), 0))
+ && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
+ {
+ if (QImode == mode || HImode == mode)
+ {
+ *total = COSTS_N_INSNS (2);
+ return true;
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ return false;
+}
+
+
+/* Implement `TARGET_RTX_COSTS'. */
+
+static bool
+avr_rtx_costs (rtx x, int codearg, int outer_code,
+ int opno, int *total, bool speed)
+{
+ bool done = avr_rtx_costs_1 (x, codearg, outer_code,
+ opno, total, speed);
+
+ if (avr_log.rtx_costs)
+ {
+ avr_edump ("\n%?=%b (%s) total=%d, outer=%C:\n%r\n",
+ done, speed ? "speed" : "size", *total, outer_code, x);
+ }
+
+ return done;
+}
+
+
+/* Implement `TARGET_ADDRESS_COST'. */
+
+static int
+avr_address_cost (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ bool speed ATTRIBUTE_UNUSED)
+{
+ int cost = 4;
+
+ if (GET_CODE (x) == PLUS
+ && CONST_INT_P (XEXP (x, 1))
+ && (REG_P (XEXP (x, 0))
+ || GET_CODE (XEXP (x, 0)) == SUBREG))
+ {
+ if (INTVAL (XEXP (x, 1)) >= 61)
+ cost = 18;
+ }
+ else if (CONSTANT_ADDRESS_P (x))
+ {
+ if (optimize > 0
+ && io_address_operand (x, QImode))
+ cost = 2;
+ }
+
+ if (avr_log.address_cost)
+ avr_edump ("\n%?: %d = %r\n", cost, x);
+
+ return cost;
+}
+
+/* Test for the extra memory constraint 'Q': a memory address based
+ on the Y or Z pointer with a valid displacement. */
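+/* E.g. the constraint would accept an operand like (illustrative RTL):
+ (mem:HI (plus:HI (reg:HI 28) (const_int 10)))
+ where reg 28 is Y, provided the displacement fits MAX_LD_OFFSET. */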
+
+int
+extra_constraint_Q (rtx x)
+{
+ int ok = 0;
+
+ if (GET_CODE (XEXP (x,0)) == PLUS
+ && REG_P (XEXP (XEXP (x,0), 0))
+ && GET_CODE (XEXP (XEXP (x,0), 1)) == CONST_INT
+ && (INTVAL (XEXP (XEXP (x,0), 1))
+ <= MAX_LD_OFFSET (GET_MODE (x))))
+ {
+ rtx xx = XEXP (XEXP (x,0), 0);
+ int regno = REGNO (xx);
+
+ ok = (/* allocate pseudos */
+ regno >= FIRST_PSEUDO_REGISTER
+ /* strictly check */
+ || regno == REG_Z || regno == REG_Y
+ /* XXX frame & arg pointer checks */
+ || xx == frame_pointer_rtx
+ || xx == arg_pointer_rtx);
+
+ if (avr_log.constraints)
+ avr_edump ("\n%?=%d reload_completed=%d reload_in_progress=%d\n %r\n",
+ ok, reload_completed, reload_in_progress, x);
+ }
+
+ return ok;
+}
+
+/* Convert condition code CONDITION to the valid AVR condition code. */
+
+RTX_CODE
+avr_normalize_condition (RTX_CODE condition)
+{
+ switch (condition)
+ {
+ case GT:
+ return GE;
+ case GTU:
+ return GEU;
+ case LE:
+ return LT;
+ case LEU:
+ return LTU;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Helper function for `avr_reorg'. */
+
+static rtx
+avr_compare_pattern (rtx insn)
+{
+ rtx pattern = single_set (insn);
+
+ if (pattern
+ && NONJUMP_INSN_P (insn)
+ && SET_DEST (pattern) == cc0_rtx
+ && GET_CODE (SET_SRC (pattern)) == COMPARE)
+ {
+ enum machine_mode mode0 = GET_MODE (XEXP (SET_SRC (pattern), 0));
+ enum machine_mode mode1 = GET_MODE (XEXP (SET_SRC (pattern), 1));
+
+ /* The 64-bit comparisons have fixed operands ACC_A and ACC_B.
+ They must not be swapped, thus skip them. */
+
+ if ((mode0 == VOIDmode || GET_MODE_SIZE (mode0) <= 4)
+ && (mode1 == VOIDmode || GET_MODE_SIZE (mode1) <= 4))
+ return pattern;
+ }
+
+ return NULL_RTX;
+}
+
+/* Helper function for `avr_reorg'. */
+
+/* Expansion of switch/case decision trees leads to code like
+
+ cc0 = compare (Reg, Num)
+ if (cc0 == 0)
+ goto L1
+
+ cc0 = compare (Reg, Num)
+ if (cc0 > 0)
+ goto L2
+
+ The second comparison is superfluous and can be deleted.
+ The second jump condition can be transformed from a
+ "difficult" one to a "simple" one because "cc0 > 0" and
+ "cc0 >= 0" will have the same effect here.
+
+ This function relies on the way switch/case is being expanded as a
+ binary decision tree. For example code see PR 49903.
+
+ Return TRUE if the optimization was performed.
+ Return FALSE if nothing changed.
+
+ INSN1 is a comparison, i.e. avr_compare_pattern != 0.
+
+ We don't want to do this in a text peephole because it is tedious
+ to work out jump offsets there and the second comparison might have
+ been transformed by `avr_reorg'.
+
+ An RTL peephole won't do because peephole2 does not scan across
+ basic blocks. */
+
+static bool
+avr_reorg_remove_redundant_compare (rtx insn1)
+{
+ rtx comp1, ifelse1, xcond1, branch1;
+ rtx comp2, ifelse2, xcond2, branch2, insn2;
+ enum rtx_code code;
+ rtx jump, target, cond;
+
+ /* Look out for: compare1 - branch1 - compare2 - branch2 */
+
+ branch1 = next_nonnote_nondebug_insn (insn1);
+ if (!branch1 || !JUMP_P (branch1))
+ return false;
+
+ insn2 = next_nonnote_nondebug_insn (branch1);
+ if (!insn2 || !avr_compare_pattern (insn2))
+ return false;
+
+ branch2 = next_nonnote_nondebug_insn (insn2);
+ if (!branch2 || !JUMP_P (branch2))
+ return false;
+
+ comp1 = avr_compare_pattern (insn1);
+ comp2 = avr_compare_pattern (insn2);
+ xcond1 = single_set (branch1);
+ xcond2 = single_set (branch2);
+
+ if (!comp1 || !comp2
+ || !rtx_equal_p (comp1, comp2)
+ || !xcond1 || SET_DEST (xcond1) != pc_rtx
+ || !xcond2 || SET_DEST (xcond2) != pc_rtx
+ || IF_THEN_ELSE != GET_CODE (SET_SRC (xcond1))
+ || IF_THEN_ELSE != GET_CODE (SET_SRC (xcond2)))
+ {
+ return false;
+ }
+
+ comp1 = SET_SRC (comp1);
+ ifelse1 = SET_SRC (xcond1);
+ ifelse2 = SET_SRC (xcond2);
+
+ /* comp<n> is COMPARE now and ifelse<n> is IF_THEN_ELSE. */
+
+ if (EQ != GET_CODE (XEXP (ifelse1, 0))
+ || !REG_P (XEXP (comp1, 0))
+ || !CONST_INT_P (XEXP (comp1, 1))
+ || XEXP (ifelse1, 2) != pc_rtx
+ || XEXP (ifelse2, 2) != pc_rtx
+ || LABEL_REF != GET_CODE (XEXP (ifelse1, 1))
+ || LABEL_REF != GET_CODE (XEXP (ifelse2, 1))
+ || !COMPARISON_P (XEXP (ifelse2, 0))
+ || cc0_rtx != XEXP (XEXP (ifelse1, 0), 0)
+ || cc0_rtx != XEXP (XEXP (ifelse2, 0), 0)
+ || const0_rtx != XEXP (XEXP (ifelse1, 0), 1)
+ || const0_rtx != XEXP (XEXP (ifelse2, 0), 1))
+ {
+ return false;
+ }
+
+ /* We filtered the insn sequence to look like
+
+ (set (cc0)
+ (compare (reg:M N)
+ (const_int VAL)))
+ (set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref L1)
+ (pc)))
+
+ (set (cc0)
+ (compare (reg:M N)
+ (const_int VAL)))
+ (set (pc)
+ (if_then_else (CODE (cc0)
+ (const_int 0))
+ (label_ref L2)
+ (pc)))
+ */
+
+ code = GET_CODE (XEXP (ifelse2, 0));
+
+ /* Map GT/GTU to GE/GEU which is easier for AVR.
+ The first two instructions compare/branch on EQ
+ so we may replace the difficult
+
+ if (x == VAL) goto L1;
+ if (x > VAL) goto L2;
+
+ with easy
+
+ if (x == VAL) goto L1;
+ if (x >= VAL) goto L2;
+
+ Similarly, replace LE/LEU by LT/LTU. */
+
+ switch (code)
+ {
+ case EQ:
+ case LT: case LTU:
+ case GE: case GEU:
+ break;
+
+ case LE: case LEU:
+ case GT: case GTU:
+ code = avr_normalize_condition (code);
+ break;
+
+ default:
+ return false;
+ }
+
+ /* Wrap the branches into UNSPECs so they won't be changed or
+ optimized from here on. */
+
+ target = XEXP (XEXP (ifelse1, 1), 0);
+ cond = XEXP (ifelse1, 0);
+ jump = emit_jump_insn_after (gen_branch_unspec (target, cond), insn1);
+
+ JUMP_LABEL (jump) = JUMP_LABEL (branch1);
+
+ target = XEXP (XEXP (ifelse2, 1), 0);
+ cond = gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);
+ jump = emit_jump_insn_after (gen_branch_unspec (target, cond), insn2);
+
+ JUMP_LABEL (jump) = JUMP_LABEL (branch2);
+
+ /* The comparisons in insn1 and insn2 are exactly the same;
+ insn2 is superfluous so delete it. */
+
+ delete_insn (insn2);
+ delete_insn (branch1);
+ delete_insn (branch2);
+
+ return true;
+}
+
+
+/* Implement `TARGET_MACHINE_DEPENDENT_REORG'. */
+/* Optimize conditional jumps. */
+
+static void
+avr_reorg (void)
+{
+ rtx insn = get_insns();
+
+ for (insn = next_real_insn (insn); insn; insn = next_real_insn (insn))
+ {
+ rtx pattern = avr_compare_pattern (insn);
+
+ if (!pattern)
+ continue;
+
+ if (optimize
+ && avr_reorg_remove_redundant_compare (insn))
+ {
+ continue;
+ }
+
+ if (compare_diff_p (insn))
+ {
+ /* Now we work on a compare insn with a difficult branch. */
+
+ rtx next = next_real_insn (insn);
+ rtx pat = PATTERN (next);
+
+ pattern = SET_SRC (pattern);
+
+ if (true_regnum (XEXP (pattern, 0)) >= 0
+ && true_regnum (XEXP (pattern, 1)) >= 0)
+ {
+ rtx x = XEXP (pattern, 0);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+ PUT_CODE (t, swap_condition (GET_CODE (t)));
+ XEXP (pattern, 0) = XEXP (pattern, 1);
+ XEXP (pattern, 1) = x;
+ INSN_CODE (next) = -1;
+ }
+ else if (true_regnum (XEXP (pattern, 0)) >= 0
+ && XEXP (pattern, 1) == const0_rtx)
+ {
+ /* This is a tst insn, we can reverse it. */
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+
+ PUT_CODE (t, swap_condition (GET_CODE (t)));
+ XEXP (pattern, 1) = XEXP (pattern, 0);
+ XEXP (pattern, 0) = const0_rtx;
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+ else if (true_regnum (XEXP (pattern, 0)) >= 0
+ && CONST_INT_P (XEXP (pattern, 1)))
+ {
+ rtx x = XEXP (pattern, 1);
+ rtx src = SET_SRC (pat);
+ rtx t = XEXP (src,0);
+ enum machine_mode mode = GET_MODE (XEXP (pattern, 0));
+
+ if (avr_simplify_comparison_p (mode, GET_CODE (t), x))
+ {
+ XEXP (pattern, 1) = gen_int_mode (INTVAL (x) + 1, mode);
+ PUT_CODE (t, avr_normalize_condition (GET_CODE (t)));
+ INSN_CODE (next) = -1;
+ INSN_CODE (insn) = -1;
+ }
+ }
+ }
+ }
+}
+
+/* Return the register number used for the function return value. */
+
+static inline unsigned int
+avr_ret_register (void)
+{
+ return 24;
+}
+
+
+/* Implement `TARGET_FUNCTION_VALUE_REGNO_P'. */
+
+static bool
+avr_function_value_regno_p (const unsigned int regno)
+{
+ return (regno == avr_ret_register ());
+}
+
+
+/* Implement `TARGET_LIBCALL_VALUE'. */
+/* Create an RTX representing the place where a
+ library function returns a value of mode MODE. */
+
+static rtx
+avr_libcall_value (enum machine_mode mode,
+ const_rtx func ATTRIBUTE_UNUSED)
+{
+ int offs = GET_MODE_SIZE (mode);
+
+ if (offs <= 4)
+ offs = (offs + 1) & ~1;
+
+ return gen_rtx_REG (mode, avr_ret_register () + 2 - offs);
+}
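+
+/* Worked examples of the formula above (illustrative): QImode has size
+ 1, rounded up to 2, and returns in R24; HImode returns in R24/R25;
+ SFmode (size 4) in R22..R25; DImode (size 8, not rounded) in
+ R18..R25. */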
+
+
+/* Implement `TARGET_FUNCTION_VALUE'. */
+/* Create an RTX representing the place where a
+ function returns a value of data type VALTYPE. */
+
+static rtx
+avr_function_value (const_tree type,
+ const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ unsigned int offs;
+
+ if (TYPE_MODE (type) != BLKmode)
+ return avr_libcall_value (TYPE_MODE (type), NULL_RTX);
+
+ offs = int_size_in_bytes (type);
+ if (offs < 2)
+ offs = 2;
+ if (offs > 2 && offs < GET_MODE_SIZE (SImode))
+ offs = GET_MODE_SIZE (SImode);
+ else if (offs > GET_MODE_SIZE (SImode) && offs < GET_MODE_SIZE (DImode))
+ offs = GET_MODE_SIZE (DImode);
+
+ return gen_rtx_REG (BLKmode, avr_ret_register () + 2 - offs);
+}
+
+int
+test_hard_reg_class (enum reg_class rclass, rtx x)
+{
+ int regno = true_regnum (x);
+ if (regno < 0)
+ return 0;
+
+ if (TEST_HARD_REG_CLASS (rclass, regno))
+ return 1;
+
+ return 0;
+}
+
+
+/* Helper for jump_over_one_insn_p: Test if INSN is a 2-word instruction
+ and thus is suitable to be skipped by CPSE, SBRC, etc. */
+
+static bool
+avr_2word_insn_p (rtx insn)
+{
+ if (avr_current_device->errata_skip
+ || !insn
+ || 2 != get_attr_length (insn))
+ {
+ return false;
+ }
+
+ switch (INSN_CODE (insn))
+ {
+ default:
+ return false;
+
+ case CODE_FOR_movqi_insn:
+ case CODE_FOR_movuqq_insn:
+ case CODE_FOR_movqq_insn:
+ {
+ rtx set = single_set (insn);
+ rtx src = SET_SRC (set);
+ rtx dest = SET_DEST (set);
+
+ /* Factor out LDS and STS from movqi_insn. */
+
+ if (MEM_P (dest)
+ && (REG_P (src) || src == CONST0_RTX (GET_MODE (dest))))
+ {
+ return CONSTANT_ADDRESS_P (XEXP (dest, 0));
+ }
+ else if (REG_P (dest)
+ && MEM_P (src))
+ {
+ return CONSTANT_ADDRESS_P (XEXP (src, 0));
+ }
+
+ return false;
+ }
+
+ case CODE_FOR_call_insn:
+ case CODE_FOR_call_value_insn:
+ return true;
+ }
+}
+
+
+int
+jump_over_one_insn_p (rtx insn, rtx dest)
+{
+ int uid = INSN_UID (GET_CODE (dest) == LABEL_REF
+ ? XEXP (dest, 0)
+ : dest);
+ int jump_addr = INSN_ADDRESSES (INSN_UID (insn));
+ int dest_addr = INSN_ADDRESSES (uid);
+ int jump_offset = dest_addr - jump_addr - get_attr_length (insn);
+
+ return (jump_offset == 1
+ || (jump_offset == 2
+ && avr_2word_insn_p (next_active_insn (insn))));
+}
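+
+/* Note for illustration: lengths and insn addresses here are counted
+ in words (cf. the 2-word test in avr_2word_insn_p above), so
+ JUMP_OFFSET == 1 means the branch skips exactly one 1-word insn,
+ and JUMP_OFFSET == 2 is a single-insn skip only if the skipped
+ insn itself occupies 2 words. */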
+
+
+/* Worker function for `HARD_REGNO_MODE_OK'. */
+/* Return 1 if a value of mode MODE can be stored starting with hard
+ register number REGNO. On the enhanced core, anything larger than
+ 1 byte must start in an even-numbered register for "movw" to work
+ (this way we don't have to check for odd registers everywhere). */
+
+int
+avr_hard_regno_mode_ok (int regno, enum machine_mode mode)
+{
+ /* NOTE: 8-bit values must not be disallowed for R28 or R29.
+ Disallowing QI et al. in these regs might lead to code like
+ (set (subreg:QI (reg:HI 28) n) ...)
+ which will result in wrong code because reload does not
+ handle SUBREGs of hard registers like this.
+ This could be fixed in reload. However, it appears
+ that fixing reload is not wanted by reload people. */
+
+ /* Any GENERAL_REGS register can hold 8-bit values. */
+
+ if (GET_MODE_SIZE (mode) == 1)
+ return 1;
+
+ /* FIXME: Ideally, the following test is not needed.
+ However, it turned out that it can reduce the number
+ of spill failures. The AVR, with its poor endowment of
+ address registers, is an extreme stress test for reload. */
+
+ if (GET_MODE_SIZE (mode) >= 4
+ && regno >= REG_X)
+ return 0;
+
+ /* All modes larger than 8 bits should start in an even register. */
+
+ return !(regno & 1);
+}
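+
+/* For example, under these rules SImode may start in R22 (even and
+ below REG_X) but neither in R26 (X register and up) nor in R25
+ (odd start). */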
+
+
+/* Implement `HARD_REGNO_CALL_PART_CLOBBERED'. */
+
+int
+avr_hard_regno_call_part_clobbered (unsigned regno, enum machine_mode mode)
+{
+ /* FIXME: This hook gets called with MODE:REGNO combinations that don't
+ represent valid hard registers, e.g. HI:29. Returning TRUE
+ for such registers can lead to performance degradation as mentioned
+ in PR53595. Thus, report invalid hard registers as FALSE. */
+
+ if (!avr_hard_regno_mode_ok (regno, mode))
+ return 0;
+
+ /* Return true if any of the following boundaries is crossed:
+ 17/18, 27/28 and 29/30. */
+
+ return ((regno < 18 && regno + GET_MODE_SIZE (mode) > 18)
+ || (regno < REG_Y && regno + GET_MODE_SIZE (mode) > REG_Y)
+ || (regno < REG_Z && regno + GET_MODE_SIZE (mode) > REG_Z));
+}
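+
+/* Example: HImode starting in R17 occupies R17/R18 and thus crosses
+ the 17/18 boundary, so it is reported as part-clobbered by a call;
+ HImode starting in R24 crosses none of the boundaries. */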
+
+
+/* Implement `MODE_CODE_BASE_REG_CLASS'. */
+
+enum reg_class
+avr_mode_code_base_reg_class (enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as, RTX_CODE outer_code,
+ RTX_CODE index_code ATTRIBUTE_UNUSED)
+{
+ if (!ADDR_SPACE_GENERIC_P (as))
+ {
+ return POINTER_Z_REGS;
+ }
+
+ if (!avr_strict_X)
+ return reload_completed ? BASE_POINTER_REGS : POINTER_REGS;
+
+ return PLUS == outer_code ? BASE_POINTER_REGS : POINTER_REGS;
+}
+
+
+/* Implement `REGNO_MODE_CODE_OK_FOR_BASE_P'. */
+
+bool
+avr_regno_mode_code_ok_for_base_p (int regno,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ addr_space_t as ATTRIBUTE_UNUSED,
+ RTX_CODE outer_code,
+ RTX_CODE index_code ATTRIBUTE_UNUSED)
+{
+ bool ok = false;
+
+ if (!ADDR_SPACE_GENERIC_P (as))
+ {
+ if (regno < FIRST_PSEUDO_REGISTER
+ && regno == REG_Z)
+ {
+ return true;
+ }
+
+ if (reg_renumber)
+ {
+ regno = reg_renumber[regno];
+
+ if (regno == REG_Z)
+ {
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ if (regno < FIRST_PSEUDO_REGISTER
+ && (regno == REG_X
+ || regno == REG_Y
+ || regno == REG_Z
+ || regno == ARG_POINTER_REGNUM))
+ {
+ ok = true;
+ }
+ else if (reg_renumber)
+ {
+ regno = reg_renumber[regno];
+
+ if (regno == REG_X
+ || regno == REG_Y
+ || regno == REG_Z
+ || regno == ARG_POINTER_REGNUM)
+ {
+ ok = true;
+ }
+ }
+
+ if (avr_strict_X
+ && PLUS == outer_code
+ && regno == REG_X)
+ {
+ ok = false;
+ }
+
+ return ok;
+}
+
+
+/* A helper for `output_reload_insisf' and `output_reload_inhi'. */
+/* Set 32-bit register OP[0] to compile-time constant OP[1].
+ CLOBBER_REG is a QI clobber register or NULL_RTX.
+ LEN == NULL: output instructions.
+ LEN != NULL: set *LEN to the length of the instruction sequence
+ (in words) printed with LEN = NULL.
+ If CLEAR_P is true, OP[0] has already been cleared to zero.
+ If CLEAR_P is false, nothing is known about OP[0].
+
+ The effect on cc0 is as follows:
+
+ Load 0 to any register except ZERO_REG : NONE
+ Load ld register with any value : NONE
+ Anything else: : CLOBBER */
+
+static void
+output_reload_in_const (rtx *op, rtx clobber_reg, int *len, bool clear_p)
+{
+ rtx src = op[1];
+ rtx dest = op[0];
+ rtx xval, xdest[4];
+ int ival[4];
+ int clobber_val = 1234;
+ bool cooked_clobber_p = false;
+ bool set_p = false;
+ enum machine_mode mode = GET_MODE (dest);
+ int n, n_bytes = GET_MODE_SIZE (mode);
+
+ gcc_assert (REG_P (dest)
+ && CONSTANT_P (src));
+
+ if (len)
+ *len = 0;
+
+ /* (REG:SI 14) is special: It's neither in LD_REGS nor in NO_LD_REGS
+ but has some subregs that are in LD_REGS. Use the MSB (REG:QI 17). */
+
+ if (REGNO (dest) < 16
+ && REGNO (dest) + GET_MODE_SIZE (mode) > 16)
+ {
+ clobber_reg = all_regs_rtx[REGNO (dest) + n_bytes - 1];
+ }
+
+ /* We might need a clobber reg but don't have one. Look at the value to
+ be loaded more closely. A clobber is only needed if it is a symbol
+ or contains a byte that is neither 0, -1 nor a power of 2. */
+
+ if (NULL_RTX == clobber_reg
+ && !test_hard_reg_class (LD_REGS, dest)
+ && (! (CONST_INT_P (src) || CONST_FIXED_P (src) || CONST_DOUBLE_P (src))
+ || !avr_popcount_each_byte (src, n_bytes,
+ (1 << 0) | (1 << 1) | (1 << 8))))
+ {
+ /* We have no clobber register but need one. Cook one up.
+ That's cheaper than loading from the constant pool. */
+
+ cooked_clobber_p = true;
+ clobber_reg = all_regs_rtx[REG_Z + 1];
+ avr_asm_len ("mov __tmp_reg__,%0", &clobber_reg, len, 1);
+ }
+
+ /* Now start filling DEST from LSB to MSB. */
+
+ for (n = 0; n < n_bytes; n++)
+ {
+ int ldreg_p;
+ bool done_byte = false;
+ int j;
+ rtx xop[3];
+
+ /* Crop the n-th destination byte. */
+
+ xdest[n] = simplify_gen_subreg (QImode, dest, mode, n);
+ ldreg_p = test_hard_reg_class (LD_REGS, xdest[n]);
+
+ if (!CONST_INT_P (src)
+ && !CONST_FIXED_P (src)
+ && !CONST_DOUBLE_P (src))
+ {
+ static const char* const asm_code[][2] =
+ {
+ { "ldi %2,lo8(%1)" CR_TAB "mov %0,%2", "ldi %0,lo8(%1)" },
+ { "ldi %2,hi8(%1)" CR_TAB "mov %0,%2", "ldi %0,hi8(%1)" },
+ { "ldi %2,hlo8(%1)" CR_TAB "mov %0,%2", "ldi %0,hlo8(%1)" },
+ { "ldi %2,hhi8(%1)" CR_TAB "mov %0,%2", "ldi %0,hhi8(%1)" }
+ };
+
+ xop[0] = xdest[n];
+ xop[1] = src;
+ xop[2] = clobber_reg;
+
+ avr_asm_len (asm_code[n][ldreg_p], xop, len, ldreg_p ? 1 : 2);
+
+ continue;
+ }
+
+ /* Crop the n-th source byte. */
+
+ xval = simplify_gen_subreg (QImode, src, mode, n);
+ ival[n] = INTVAL (xval);
+
+ /* See if we can reuse the low word by means of MOVW. */
+
+ if (n == 2
+ && n_bytes >= 4
+ && AVR_HAVE_MOVW)
+ {
+ rtx lo16 = simplify_gen_subreg (HImode, src, mode, 0);
+ rtx hi16 = simplify_gen_subreg (HImode, src, mode, 2);
+
+ if (INTVAL (lo16) == INTVAL (hi16))
+ {
+ if (0 != INTVAL (lo16)
+ || !clear_p)
+ {
+ avr_asm_len ("movw %C0,%A0", &op[0], len, 1);
+ }
+
+ break;
+ }
+ }
+
+ /* Don't use CLR so that cc0 is set as expected. */
+
+ if (ival[n] == 0)
+ {
+ if (!clear_p)
+ avr_asm_len (ldreg_p ? "ldi %0,0"
+ : ZERO_REGNO == REGNO (xdest[n]) ? "clr %0"
+ : "mov %0,__zero_reg__",
+ &xdest[n], len, 1);
+ continue;
+ }
+
+ if (clobber_val == ival[n]
+ && REGNO (clobber_reg) == REGNO (xdest[n]))
+ {
+ continue;
+ }
+
+ /* LD_REGS can use LDI to load a constant value. */
+
+ if (ldreg_p)
+ {
+ xop[0] = xdest[n];
+ xop[1] = xval;
+ avr_asm_len ("ldi %0,lo8(%1)", xop, len, 1);
+ continue;
+ }
+
+ /* Try to reuse value already loaded in some lower byte. */
+
+ for (j = 0; j < n; j++)
+ if (ival[j] == ival[n])
+ {
+ xop[0] = xdest[n];
+ xop[1] = xdest[j];
+
+ avr_asm_len ("mov %0,%1", xop, len, 1);
+ done_byte = true;
+ break;
+ }
+
+ if (done_byte)
+ continue;
+
+ /* Need no clobber reg for -1: Use CLR/DEC */
+
+ if (-1 == ival[n])
+ {
+ if (!clear_p)
+ avr_asm_len ("clr %0", &xdest[n], len, 1);
+
+ avr_asm_len ("dec %0", &xdest[n], len, 1);
+ continue;
+ }
+ else if (1 == ival[n])
+ {
+ if (!clear_p)
+ avr_asm_len ("clr %0", &xdest[n], len, 1);
+
+ avr_asm_len ("inc %0", &xdest[n], len, 1);
+ continue;
+ }
+
+ /* Use T flag or INC to manage powers of 2 if we have
+ no clobber reg. */
+
+ if (NULL_RTX == clobber_reg
+ && single_one_operand (xval, QImode))
+ {
+ xop[0] = xdest[n];
+ xop[1] = GEN_INT (exact_log2 (ival[n] & GET_MODE_MASK (QImode)));
+
+ gcc_assert (constm1_rtx != xop[1]);
+
+ if (!set_p)
+ {
+ set_p = true;
+ avr_asm_len ("set", xop, len, 1);
+ }
+
+ if (!clear_p)
+ avr_asm_len ("clr %0", xop, len, 1);
+
+ avr_asm_len ("bld %0,%1", xop, len, 1);
+ continue;
+ }
+
+ /* We actually need the LD_REGS clobber reg. */
+
+ gcc_assert (NULL_RTX != clobber_reg);
+
+ xop[0] = xdest[n];
+ xop[1] = xval;
+ xop[2] = clobber_reg;
+ clobber_val = ival[n];
+
+ avr_asm_len ("ldi %2,lo8(%1)" CR_TAB
+ "mov %0,%2", xop, len, 2);
+ }
+
+ /* If we cooked up a clobber reg above, restore it. */
+
+ if (cooked_clobber_p)
+ {
+ avr_asm_len ("mov %0,__tmp_reg__", &clobber_reg, len, 1);
+ }
+}
+
+
+/* Reload the constant OP[1] into the HI register OP[0].
+ CLOBBER_REG is a QI clobber reg needed to move the vast majority of
+ constants into a NO_LD_REGS register. If CLOBBER_REG is NULL_RTX we either don't
+ need a clobber reg or have to cook one up.
+
+ PLEN == NULL: Output instructions.
+ PLEN != NULL: Output nothing. Set *PLEN to number of words occupied
+ by the insns printed.
+
+ Return "". */
+
+const char*
+output_reload_inhi (rtx *op, rtx clobber_reg, int *plen)
+{
+ output_reload_in_const (op, clobber_reg, plen, false);
+ return "";
+}
+
+
+/* Reload a SI or SF compile time constant OP[1] into the register OP[0].
+ CLOBBER_REG is a QI clobber reg needed to move the vast majority of
+ constants into a NO_LD_REGS register. If CLOBBER_REG is NULL_RTX we either don't
+ need a clobber reg or have to cook one up.
+
+ LEN == NULL: Output instructions.
+
+ LEN != NULL: Output nothing. Set *LEN to number of words occupied
+ by the insns printed.
+
+ Return "". */
+
+const char *
+output_reload_insisf (rtx *op, rtx clobber_reg, int *len)
+{
+ if (AVR_HAVE_MOVW
+ && !test_hard_reg_class (LD_REGS, op[0])
+ && (CONST_INT_P (op[1])
+ || CONST_FIXED_P (op[1])
+ || CONST_DOUBLE_P (op[1])))
+ {
+ int len_clr, len_noclr;
+
+ /* In some cases it is better to clear the destination beforehand, e.g.
+
+ CLR R2 CLR R3 MOVW R4,R2 INC R2
+
+ is shorter than
+
+ CLR R2 INC R2 CLR R3 CLR R4 CLR R5
+
+ We find it too tedious to work that out in the print function.
+ Instead, we call the print function twice to get the lengths of
+ both methods and use the shortest one. */
+
+ output_reload_in_const (op, clobber_reg, &len_clr, true);
+ output_reload_in_const (op, clobber_reg, &len_noclr, false);
+
+ if (len_noclr - len_clr == 4)
+ {
+ /* Default needs 4 CLR instructions: clear register beforehand. */
+
+ avr_asm_len ("mov %A0,__zero_reg__" CR_TAB
+ "mov %B0,__zero_reg__" CR_TAB
+ "movw %C0,%A0", &op[0], len, 3);
+
+ output_reload_in_const (op, clobber_reg, len, true);
+
+ if (len)
+ *len += 3;
+
+ return "";
+ }
+ }
+
+ /* Default: destination not pre-cleared. */
+
+ output_reload_in_const (op, clobber_reg, len, false);
+ return "";
+}
+
+const char*
+avr_out_reload_inpsi (rtx *op, rtx clobber_reg, int *len)
+{
+ output_reload_in_const (op, clobber_reg, len, false);
+ return "";
+}
+
+
+/* Worker function for `ASM_OUTPUT_ADDR_VEC_ELT'. */
+
+void
+avr_output_addr_vec_elt (FILE *stream, int value)
+{
+ if (AVR_HAVE_JMP_CALL)
+ fprintf (stream, "\t.word gs(.L%d)\n", value);
+ else
+ fprintf (stream, "\trjmp .L%d\n", value);
+}
+
+
+/* Implement `TARGET_HARD_REGNO_SCRATCH_OK'. */
+/* Return true if register REGNO is safe to be allocated as a scratch
+ register (for a define_peephole2) in the current function. */
+
+static bool
+avr_hard_regno_scratch_ok (unsigned int regno)
+{
+ /* Interrupt functions can only use registers that have already been saved
+ by the prologue, even if they would normally be call-clobbered. */
+
+ if ((cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !df_regs_ever_live_p (regno))
+ return false;
+
+ /* Don't allow hard registers that might be part of the frame pointer.
+ Some places in the compiler just test for [HARD_]FRAME_POINTER_REGNUM
+ and don't care for a frame pointer that spans more than one register. */
+
+ if ((!reload_completed || frame_pointer_needed)
+ && (regno == REG_Y || regno == REG_Y + 1))
+ {
+ return false;
+ }
+
+ return true;
+}
+
+
+/* Worker function for `HARD_REGNO_RENAME_OK'. */
+/* Return nonzero if register OLD_REG can be renamed to register NEW_REG. */
+
+int
+avr_hard_regno_rename_ok (unsigned int old_reg,
+ unsigned int new_reg)
+{
+ /* Interrupt functions can only use registers that have already been
+ saved by the prologue, even if they would normally be
+ call-clobbered. */
+
+ if ((cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !df_regs_ever_live_p (new_reg))
+ return 0;
+
+ /* Don't allow hard registers that might be part of the frame pointer.
+ Some places in the compiler just test for [HARD_]FRAME_POINTER_REGNUM
+ and don't care for a frame pointer that spans more than one register. */
+
+ if ((!reload_completed || frame_pointer_needed)
+ && (old_reg == REG_Y || old_reg == REG_Y + 1
+ || new_reg == REG_Y || new_reg == REG_Y + 1))
+ {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Output a branch that tests a single bit of a register (QI, HI, SI or DImode)
+ or memory location in the I/O space (QImode only).
+
+ Operand 0: comparison operator (must be EQ or NE, compare bit to zero).
+ Operand 1: register operand to test, or CONST_INT memory address.
+ Operand 2: bit number.
+ Operand 3: label to jump to if the test is true. */
+
+const char*
+avr_out_sbxx_branch (rtx insn, rtx operands[])
+{
+ enum rtx_code comp = GET_CODE (operands[0]);
+ bool long_jump = get_attr_length (insn) >= 4;
+ bool reverse = long_jump || jump_over_one_insn_p (insn, operands[3]);
+
+ if (comp == GE)
+ comp = EQ;
+ else if (comp == LT)
+ comp = NE;
+
+ if (reverse)
+ comp = reverse_condition (comp);
+
+ switch (GET_CODE (operands[1]))
+ {
+ default:
+ gcc_unreachable();
+
+ case CONST_INT:
+
+ if (low_io_address_operand (operands[1], QImode))
+ {
+ if (comp == EQ)
+ output_asm_insn ("sbis %i1,%2", operands);
+ else
+ output_asm_insn ("sbic %i1,%2", operands);
+ }
+ else
+ {
+ output_asm_insn ("in __tmp_reg__,%i1", operands);
+ if (comp == EQ)
+ output_asm_insn ("sbrs __tmp_reg__,%2", operands);
+ else
+ output_asm_insn ("sbrc __tmp_reg__,%2", operands);
+ }
+
+ break; /* CONST_INT */
+
+ case REG:
+
+ if (comp == EQ)
+ output_asm_insn ("sbrs %T1%T2", operands);
+ else
+ output_asm_insn ("sbrc %T1%T2", operands);
+
+ break; /* REG */
+ } /* switch */
+
+ if (long_jump)
+ return ("rjmp .+4" CR_TAB
+ "jmp %x3");
+
+ if (!reverse)
+ return "rjmp %x3";
+
+ return "";
+}
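+
+/* A sketch of typical output for a short EQ test (branch if the bit
+ is zero) on a low I/O address:
+
+ sbis <io>,<bit> ; skip next insn if the bit is set
+ rjmp <label> ; reached only if the bit is zero
+
+ The long variant inverts the test and uses JMP as returned above. */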
+
+/* Worker function for `TARGET_ASM_CONSTRUCTOR'. */
+
+static void
+avr_asm_out_ctor (rtx symbol, int priority)
+{
+ fputs ("\t.global __do_global_ctors\n", asm_out_file);
+ default_ctor_section_asm_out_constructor (symbol, priority);
+}
+
+
+/* Worker function for `TARGET_ASM_DESTRUCTOR'. */
+
+static void
+avr_asm_out_dtor (rtx symbol, int priority)
+{
+ fputs ("\t.global __do_global_dtors\n", asm_out_file);
+ default_dtor_section_asm_out_destructor (symbol, priority);
+}
+
+
+/* Worker function for `TARGET_RETURN_IN_MEMORY'. */
+
+static bool
+avr_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ if (TYPE_MODE (type) == BLKmode)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ return (size == -1 || size > 8);
+ }
+ else
+ return false;
+}
+
+
+/* Implement `CASE_VALUES_THRESHOLD'. */
+/* Supply the default for --param case-values-threshold=0 */
+
+static unsigned int
+avr_case_values_threshold (void)
+{
+ /* The exact break-even point between a jump table and an if-else tree
+ depends on several factors that are not available here, e.g. whether
+ 8-bit comparisons can be used in the if-else tree, the range of the
+ case values, whether a case value can be reused, the register
+ allocation, etc. '7' appears to be a good choice. */
+
+ return 7;
+}
+
+
+/* Implement `TARGET_ADDR_SPACE_ADDRESS_MODE'. */
+
+static enum machine_mode
+avr_addr_space_address_mode (addr_space_t as)
+{
+ return avr_addrspace[as].pointer_size == 3 ? PSImode : HImode;
+}
+
+
+/* Implement `TARGET_ADDR_SPACE_POINTER_MODE'. */
+
+static enum machine_mode
+avr_addr_space_pointer_mode (addr_space_t as)
+{
+ return avr_addr_space_address_mode (as);
+}
+
+
+/* Helper for following function. */
+
+static bool
+avr_reg_ok_for_pgm_addr (rtx reg, bool strict)
+{
+ gcc_assert (REG_P (reg));
+
+ if (strict)
+ {
+ return REGNO (reg) == REG_Z;
+ }
+
+ /* Keep combine from propagating hard regs. */
+
+ if (can_create_pseudo_p()
+ && REGNO (reg) < REG_Z)
+ {
+ return false;
+ }
+
+ return true;
+}
+
+
+/* Implement `TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P'. */
+
+static bool
+avr_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
+ bool strict, addr_space_t as)
+{
+ bool ok = false;
+
+ switch (as)
+ {
+ default:
+ gcc_unreachable();
+
+ case ADDR_SPACE_GENERIC:
+ return avr_legitimate_address_p (mode, x, strict);
+
+ case ADDR_SPACE_FLASH:
+ case ADDR_SPACE_FLASH1:
+ case ADDR_SPACE_FLASH2:
+ case ADDR_SPACE_FLASH3:
+ case ADDR_SPACE_FLASH4:
+ case ADDR_SPACE_FLASH5:
+
+ switch (GET_CODE (x))
+ {
+ case REG:
+ ok = avr_reg_ok_for_pgm_addr (x, strict);
+ break;
+
+ case POST_INC:
+ ok = avr_reg_ok_for_pgm_addr (XEXP (x, 0), strict);
+ break;
+
+ default:
+ break;
+ }
+
+ break; /* FLASH */
+
+ case ADDR_SPACE_MEMX:
+ if (REG_P (x))
+ ok = (!strict
+ && can_create_pseudo_p());
+
+ if (LO_SUM == GET_CODE (x))
+ {
+ rtx hi = XEXP (x, 0);
+ rtx lo = XEXP (x, 1);
+
+ ok = (REG_P (hi)
+ && (!strict || REGNO (hi) < FIRST_PSEUDO_REGISTER)
+ && REG_P (lo)
+ && REGNO (lo) == REG_Z);
+ }
+
+ break; /* MEMX */
+ }
+
+ if (avr_log.legitimate_address_p)
+ {
+ avr_edump ("\n%?: ret=%b, mode=%m strict=%d "
+ "reload_completed=%d reload_in_progress=%d %s:",
+ ok, mode, strict, reload_completed, reload_in_progress,
+ reg_renumber ? "(reg_renumber)" : "");
+
+ if (GET_CODE (x) == PLUS
+ && REG_P (XEXP (x, 0))
+ && CONST_INT_P (XEXP (x, 1))
+ && IN_RANGE (INTVAL (XEXP (x, 1)), 0, MAX_LD_OFFSET (mode))
+ && reg_renumber)
+ {
+ avr_edump ("(r%d ---> r%d)", REGNO (XEXP (x, 0)),
+ true_regnum (XEXP (x, 0)));
+ }
+
+ avr_edump ("\n%r\n", x);
+ }
+
+ return ok;
+}
+
+
+/* Implement `TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS'. */
+
+static rtx
+avr_addr_space_legitimize_address (rtx x, rtx old_x,
+ enum machine_mode mode, addr_space_t as)
+{
+ if (ADDR_SPACE_GENERIC_P (as))
+ return avr_legitimize_address (x, old_x, mode);
+
+ if (avr_log.legitimize_address)
+ {
+ avr_edump ("\n%?: mode=%m\n %r\n", mode, old_x);
+ }
+
+ return old_x;
+}
+
+
+/* Implement `TARGET_ADDR_SPACE_CONVERT'. */
+
+static rtx
+avr_addr_space_convert (rtx src, tree type_from, tree type_to)
+{
+ addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (type_from));
+ addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type_to));
+
+ if (avr_log.progmem)
+ avr_edump ("\n%!: op = %r\nfrom = %t\nto = %t\n",
+ src, type_from, type_to);
+
+ /* Up-casting from 16-bit to 24-bit pointer. */
+
+ if (as_from != ADDR_SPACE_MEMX
+ && as_to == ADDR_SPACE_MEMX)
+ {
+ int msb;
+ rtx sym = src;
+ rtx reg = gen_reg_rtx (PSImode);
+
+ while (CONST == GET_CODE (sym) || PLUS == GET_CODE (sym))
+ sym = XEXP (sym, 0);
+
+ /* Look at the symbol flags: avr_encode_section_info also sets the
+ flags if attribute progmem was seen, so that we get the right
+ promotion for, e.g. PSTR-like strings that reside in generic space
+ but are located in flash. In that case we patch the incoming
+ address space. */
+
+ if (SYMBOL_REF == GET_CODE (sym)
+ && ADDR_SPACE_FLASH == AVR_SYMBOL_GET_ADDR_SPACE (sym))
+ {
+ as_from = ADDR_SPACE_FLASH;
+ }
+
+ /* Linearize memory: RAM has bit 23 set. */
+
+ msb = ADDR_SPACE_GENERIC_P (as_from)
+ ? 0x80
+ : avr_addrspace[as_from].segment;
+
+ src = force_reg (Pmode, src);
+
+ emit_insn (msb == 0
+ ? gen_zero_extendhipsi2 (reg, src)
+ : gen_n_extendhipsi2 (reg, gen_int_mode (msb, QImode), src));
+
+ return reg;
+ }
+
+ /* Down-casting from 24-bit to 16-bit throws away the high byte. */
+
+ if (as_from == ADDR_SPACE_MEMX
+ && as_to != ADDR_SPACE_MEMX)
+ {
+ rtx new_src = gen_reg_rtx (Pmode);
+
+ src = force_reg (PSImode, src);
+
+ emit_move_insn (new_src,
+ simplify_gen_subreg (Pmode, src, PSImode, 0));
+ return new_src;
+ }
+
+ return src;
+}
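+
+/* For instance, up-casting a 16-bit generic (RAM) pointer to __memx
+ extends it to PSImode with high byte 0x80, setting bit 23 as the
+ RAM marker, whereas a __flash pointer is extended with its segment
+ value as the high byte. */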
+
+
+/* Implement `TARGET_ADDR_SPACE_SUBSET_P'. */
+
+static bool
+avr_addr_space_subset_p (addr_space_t subset ATTRIBUTE_UNUSED,
+ addr_space_t superset ATTRIBUTE_UNUSED)
+{
+ /* Allow any kind of pointer mess. */
+
+ return true;
+}
+
+
+/* Implement `TARGET_CONVERT_TO_TYPE'. */
+
+static tree
+avr_convert_to_type (tree type, tree expr)
+{
+ /* Print a diagnostic for a pointer conversion that changes the address
+ space of the pointer target to a non-enclosing address space,
+ provided -Waddr-space-convert is on.
+
+ FIXME: Filter out cases where the target object is known to
+ be located in the right memory, like in
+
+ (const __flash*) PSTR ("text")
+
+ Also try to distinguish between explicit casts requested by
+ the user and implicit casts like
+
+ void f (const __flash char*);
+
+ void g (const char *p)
+ {
+ f ((const __flash*) p);
+ }
+
+ under the assumption that an explicit cast means that the user
+ knows what he is doing, e.g. interfacing with PSTR or old-style
+ code using progmem and pgm_read_xxx.
+ */
+
+ if (avr_warn_addr_space_convert
+ && expr != error_mark_node
+ && POINTER_TYPE_P (type)
+ && POINTER_TYPE_P (TREE_TYPE (expr)))
+ {
+ addr_space_t as_old = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (expr)));
+ addr_space_t as_new = TYPE_ADDR_SPACE (TREE_TYPE (type));
+
+ if (avr_log.progmem)
+ avr_edump ("%?: type = %t\nexpr = %t\n\n", type, expr);
+
+ if (as_new != ADDR_SPACE_MEMX
+ && as_new != as_old)
+ {
+ location_t loc = EXPR_LOCATION (expr);
+ const char *name_old = avr_addrspace[as_old].name;
+ const char *name_new = avr_addrspace[as_new].name;
+
+ warning (OPT_Waddr_space_convert,
+ "conversion from address space %qs to address space %qs",
+ ADDR_SPACE_GENERIC_P (as_old) ? "generic" : name_old,
+ ADDR_SPACE_GENERIC_P (as_new) ? "generic" : name_new);
+
+ return fold_build1_loc (loc, ADDR_SPACE_CONVERT_EXPR, type, expr);
+ }
+ }
+
+ return NULL_TREE;
+}
+
+
+/* Worker function for movmemhi expander.
+ XOP[0] Destination as MEM:BLK
+ XOP[1] Source " "
+ XOP[2] # Bytes to copy
+
+ Return TRUE if the expansion is accomplished.
+ Return FALSE if the operand combination is not supported. */
+
+bool
+avr_emit_movmemhi (rtx *xop)
+{
+ HOST_WIDE_INT count;
+ enum machine_mode loop_mode;
+ addr_space_t as = MEM_ADDR_SPACE (xop[1]);
+ rtx loop_reg, addr1, a_src, a_dest, insn, xas;
+ rtx a_hi8 = NULL_RTX;
+
+ if (avr_mem_flash_p (xop[0]))
+ return false;
+
+ if (!CONST_INT_P (xop[2]))
+ return false;
+
+ count = INTVAL (xop[2]);
+ if (count <= 0)
+ return false;
+
+ a_src = XEXP (xop[1], 0);
+ a_dest = XEXP (xop[0], 0);
+
+ if (PSImode == GET_MODE (a_src))
+ {
+ gcc_assert (as == ADDR_SPACE_MEMX);
+
+ loop_mode = (count < 0x100) ? QImode : HImode;
+ loop_reg = gen_rtx_REG (loop_mode, 24);
+ emit_move_insn (loop_reg, gen_int_mode (count, loop_mode));
+
+ addr1 = simplify_gen_subreg (HImode, a_src, PSImode, 0);
+ a_hi8 = simplify_gen_subreg (QImode, a_src, PSImode, 2);
+ }
+ else
+ {
+ int segment = avr_addrspace[as].segment;
+
+ if (segment
+ && avr_current_device->n_flash > 1)
+ {
+ a_hi8 = GEN_INT (segment);
+ emit_move_insn (rampz_rtx, a_hi8 = copy_to_mode_reg (QImode, a_hi8));
+ }
+ else if (!ADDR_SPACE_GENERIC_P (as))
+ {
+ as = ADDR_SPACE_FLASH;
+ }
+
+ addr1 = a_src;
+
+ loop_mode = (count <= 0x100) ? QImode : HImode;
+ loop_reg = copy_to_mode_reg (loop_mode, gen_int_mode (count, loop_mode));
+ }
+
+ xas = GEN_INT (as);
+
+ /* FIXME: The register allocator might come up with spill failures if
+ left on its own. Thus, we allocate the pointer registers by hand:
+ Z = source address
+ X = destination address */
+
+ emit_move_insn (lpm_addr_reg_rtx, addr1);
+ emit_move_insn (gen_rtx_REG (HImode, REG_X), a_dest);
+
+ /* FIXME: The register allocator does a bad job and might spill the
+ address register(s) inside the loop, leading to additional move
+ instructions to/from the stack which could clobber tmp_reg. Thus, do *not* emit
+ load and store as separate insns. Instead, we perform the copy
+ by means of one monolithic insn. */
+
+ gcc_assert (TMP_REGNO == LPM_REGNO);
+
+ if (as != ADDR_SPACE_MEMX)
+ {
+ /* Load instruction ([E]LPM or LD) is known at compile time:
+ Do the copy-loop inline. */
+
+ rtx (*fun) (rtx, rtx, rtx)
+ = QImode == loop_mode ? gen_movmem_qi : gen_movmem_hi;
+
+ insn = fun (xas, loop_reg, loop_reg);
+ }
+ else
+ {
+ rtx (*fun) (rtx, rtx)
+ = QImode == loop_mode ? gen_movmemx_qi : gen_movmemx_hi;
+
+ emit_move_insn (gen_rtx_REG (QImode, 23), a_hi8);
+
+ insn = fun (xas, GEN_INT (avr_addr.rampz));
+ }
+
+ set_mem_addr_space (SET_SRC (XVECEXP (insn, 0, 0)), as);
+ emit_insn (insn);
+
+ return true;
+}
+
+
+/* Print assembler for movmem_qi, movmem_hi insns...
+ $0 : Address Space
+ $1, $2 : Loop register
+ Z : Source address
+ X : Destination address
+*/
+
+const char*
+avr_out_movmem (rtx insn ATTRIBUTE_UNUSED, rtx *op, int *plen)
+{
+ addr_space_t as = (addr_space_t) INTVAL (op[0]);
+ enum machine_mode loop_mode = GET_MODE (op[1]);
+ bool sbiw_p = test_hard_reg_class (ADDW_REGS, op[1]);
+ rtx xop[3];
+
+ if (plen)
+ *plen = 0;
+
+ xop[0] = op[0];
+ xop[1] = op[1];
+ xop[2] = tmp_reg_rtx;
+
+ /* Loop label */
+
+ avr_asm_len ("0:", xop, plen, 0);
+
+ /* Load with post-increment */
+
+ switch (as)
+ {
+ default:
+ gcc_unreachable();
+
+ case ADDR_SPACE_GENERIC:
+
+ avr_asm_len ("ld %2,Z+", xop, plen, 1);
+ break;
+
+ case ADDR_SPACE_FLASH:
+
+ if (AVR_HAVE_LPMX)
+ avr_asm_len ("lpm %2,Z+", xop, plen, 1);
+ else
+ avr_asm_len ("lpm" CR_TAB
+ "adiw r30,1", xop, plen, 2);
+ break;
+
+ case ADDR_SPACE_FLASH1:
+ case ADDR_SPACE_FLASH2:
+ case ADDR_SPACE_FLASH3:
+ case ADDR_SPACE_FLASH4:
+ case ADDR_SPACE_FLASH5:
+
+ if (AVR_HAVE_ELPMX)
+ avr_asm_len ("elpm %2,Z+", xop, plen, 1);
+ else
+ avr_asm_len ("elpm" CR_TAB
+ "adiw r30,1", xop, plen, 2);
+ break;
+ }
+
+ /* Store with post-increment */
+
+ avr_asm_len ("st X+,%2", xop, plen, 1);
+
+ /* Decrement loop-counter and set Z-flag */
+
+ if (QImode == loop_mode)
+ {
+ avr_asm_len ("dec %1", xop, plen, 1);
+ }
+ else if (sbiw_p)
+ {
+ avr_asm_len ("sbiw %1,1", xop, plen, 1);
+ }
+ else
+ {
+ avr_asm_len ("subi %A1,1" CR_TAB
+ "sbci %B1,0", xop, plen, 2);
+ }
+
+ /* Loop until zero */
+
+ return avr_asm_len ("brne 0b", xop, plen, 1);
+}
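+
+/* For a QImode loop register in the generic address space the emitted
+ sequence boils down to this sketch:
+
+ 0: ld __tmp_reg__,Z+ ; load with post-increment
+ st X+,__tmp_reg__ ; store with post-increment
+ dec <loop-reg>
+ brne 0b */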
+
+
+
+/* Helper for __builtin_avr_delay_cycles */
+
+static rtx
+avr_mem_clobber (void)
+{
+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (mem) = 1;
+ return mem;
+}
+
+static void
+avr_expand_delay_cycles (rtx operands0)
+{
+ unsigned HOST_WIDE_INT cycles = UINTVAL (operands0) & GET_MODE_MASK (SImode);
+ unsigned HOST_WIDE_INT cycles_used;
+ unsigned HOST_WIDE_INT loop_count;
+
+ if (IN_RANGE (cycles, 83886082, 0xFFFFFFFF))
+ {
+ loop_count = ((cycles - 9) / 6) + 1;
+ cycles_used = ((loop_count - 1) * 6) + 9;
+ emit_insn (gen_delay_cycles_4 (gen_int_mode (loop_count, SImode),
+ avr_mem_clobber()));
+ cycles -= cycles_used;
+ }
+
+ if (IN_RANGE (cycles, 262145, 83886081))
+ {
+ loop_count = ((cycles - 7) / 5) + 1;
+ if (loop_count > 0xFFFFFF)
+ loop_count = 0xFFFFFF;
+ cycles_used = ((loop_count - 1) * 5) + 7;
+ emit_insn (gen_delay_cycles_3 (gen_int_mode (loop_count, SImode),
+ avr_mem_clobber()));
+ cycles -= cycles_used;
+ }
+
+ if (IN_RANGE (cycles, 768, 262144))
+ {
+ loop_count = ((cycles - 5) / 4) + 1;
+ if (loop_count > 0xFFFF)
+ loop_count = 0xFFFF;
+ cycles_used = ((loop_count - 1) * 4) + 5;
+ emit_insn (gen_delay_cycles_2 (gen_int_mode (loop_count, HImode),
+ avr_mem_clobber()));
+ cycles -= cycles_used;
+ }
+
+ if (IN_RANGE (cycles, 6, 767))
+ {
+ loop_count = cycles / 3;
+ if (loop_count > 255)
+ loop_count = 255;
+ cycles_used = loop_count * 3;
+ emit_insn (gen_delay_cycles_1 (gen_int_mode (loop_count, QImode),
+ avr_mem_clobber()));
+ cycles -= cycles_used;
+ }
+
+ while (cycles >= 2)
+ {
+ emit_insn (gen_nopv (GEN_INT(2)));
+ cycles -= 2;
+ }
+
+ if (cycles == 1)
+ {
+ emit_insn (gen_nopv (GEN_INT(1)));
+ cycles--;
+ }
+}
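+
+/* A worked example: for CYCLES == 10 the 6..767 range applies, hence
+ LOOP_COUNT = 10/3 = 3 burns 3 * 3 = 9 cycles in delay_cycles_1 and
+ the one remaining cycle is padded with a single NOP. */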
+
+
+/* Compute the image of x under f, i.e. perform x --> f(x) */
+
+static int
+avr_map (unsigned int f, int x)
+{
+ return x < 8 ? (f >> (4 * x)) & 0xf : 0;
+}
+
+
+/* Return some metrics of map A. */
+
+enum
+ {
+ /* Number of fixed points in { 0 ... 7 } */
+ MAP_FIXED_0_7,
+
+ /* Size of preimage of non-fixed points in { 0 ... 7 } */
+ MAP_NONFIXED_0_7,
+
+ /* Mask representing the fixed points in { 0 ... 7 } */
+ MAP_MASK_FIXED_0_7,
+
+ /* Size of the preimage of { 0 ... 7 } */
+ MAP_PREIMAGE_0_7,
+
+ /* Mask that represents the preimage of { f } */
+ MAP_MASK_PREIMAGE_F
+ };
+
+static unsigned
+avr_map_metric (unsigned int a, int mode)
+{
+ unsigned i, metric = 0;
+
+ for (i = 0; i < 8; i++)
+ {
+ unsigned ai = avr_map (a, i);
+
+ if (mode == MAP_FIXED_0_7)
+ metric += ai == i;
+ else if (mode == MAP_NONFIXED_0_7)
+ metric += ai < 8 && ai != i;
+ else if (mode == MAP_MASK_FIXED_0_7)
+ metric |= ((unsigned) (ai == i)) << i;
+ else if (mode == MAP_PREIMAGE_0_7)
+ metric += ai < 8;
+ else if (mode == MAP_MASK_PREIMAGE_F)
+ metric |= ((unsigned) (ai == 0xf)) << i;
+ else
+ gcc_unreachable();
+ }
+
+ return metric;
+}
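+
+/* For the identity map 0x76543210 this yields MAP_FIXED_0_7 == 8,
+ MAP_NONFIXED_0_7 == 0, MAP_MASK_FIXED_0_7 == 0xff and
+ MAP_MASK_PREIMAGE_F == 0; for 0x3210ffff it yields
+ MAP_NONFIXED_0_7 == 4 and MAP_MASK_PREIMAGE_F == 0x0f. */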
+
+
+/* Return true if IVAL has a 0xf in its hexadecimal representation
+ and false otherwise. Only nibbles 0..7 are taken into account.
+ Used as constraint helper for C0f and Cxf. */
+
+bool
+avr_has_nibble_0xf (rtx ival)
+{
+ unsigned int map = UINTVAL (ival) & GET_MODE_MASK (SImode);
+ return 0 != avr_map_metric (map, MAP_MASK_PREIMAGE_F);
+}
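+
+/* E.g. true for 0x3210ffff, false for the identity map 0x76543210. */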
+
+
+/* We have a set of bits that are mapped by a function F.
+ Try to decompose F by means of a second function G so that
+
+ F = F o G^-1 o G
+
+ and
+
+ cost (F o G^-1) + cost (G) < cost (F)
+
+ Example: Suppose builtin insert_bits supplies us with the map
+ F = 0x3210ffff. Instead of doing 4 bit insertions to get the high
+ nibble of the result, we can just as well rotate the bits before inserting
+ them and use the map 0x7654ffff which is cheaper than the original map.
+ For this example G = G^-1 = 0x32107654 and F o G^-1 = 0x7654ffff. */
+
+typedef struct
+{
+ /* tree code of binary function G */
+ enum tree_code code;
+
+ /* The constant second argument of G */
+ int arg;
+
+ /* G^-1, the inverse of G (*, arg) */
+ unsigned ginv;
+
+ /* The cost of applying G (*, arg) */
+ int cost;
+
+ /* The composition F o G^-1 (*, arg) for some function F */
+ unsigned int map;
+
+ /* For debug purpose only */
+ const char *str;
+} avr_map_op_t;
+
+static const avr_map_op_t avr_map_op[] =
+ {
+ { LROTATE_EXPR, 0, 0x76543210, 0, 0, "id" },
+ { LROTATE_EXPR, 1, 0x07654321, 2, 0, "<<<" },
+ { LROTATE_EXPR, 2, 0x10765432, 4, 0, "<<<" },
+ { LROTATE_EXPR, 3, 0x21076543, 4, 0, "<<<" },
+ { LROTATE_EXPR, 4, 0x32107654, 1, 0, "<<<" },
+ { LROTATE_EXPR, 5, 0x43210765, 3, 0, "<<<" },
+ { LROTATE_EXPR, 6, 0x54321076, 5, 0, "<<<" },
+ { LROTATE_EXPR, 7, 0x65432107, 3, 0, "<<<" },
+ { RSHIFT_EXPR, 1, 0x6543210c, 1, 0, ">>" },
+ { RSHIFT_EXPR, 1, 0x7543210c, 1, 0, ">>" },
+ { RSHIFT_EXPR, 2, 0x543210cc, 2, 0, ">>" },
+ { RSHIFT_EXPR, 2, 0x643210cc, 2, 0, ">>" },
+ { RSHIFT_EXPR, 2, 0x743210cc, 2, 0, ">>" },
+ { LSHIFT_EXPR, 1, 0xc7654321, 1, 0, "<<" },
+ { LSHIFT_EXPR, 2, 0xcc765432, 2, 0, "<<" }
+ };
+
+
+/* Try to decompose F as F = (F o G^-1) o G as described above.
+ The result is a struct representing F o G^-1 and G.
+ If result.cost < 0 then such a decomposition does not exist. */
+
+static avr_map_op_t
+avr_map_decompose (unsigned int f, const avr_map_op_t *g, bool val_const_p)
+{
+ int i;
+ bool val_used_p = 0 != avr_map_metric (f, MAP_MASK_PREIMAGE_F);
+ avr_map_op_t f_ginv = *g;
+ unsigned int ginv = g->ginv;
+
+ f_ginv.cost = -1;
+
+ /* Step 1: Computing F o G^-1 */
+
+ for (i = 7; i >= 0; i--)
+ {
+ int x = avr_map (f, i);
+
+ if (x <= 7)
+ {
+ x = avr_map (ginv, x);
+
+ /* The bit is not in the image of G: nothing we can do (cost = -1). */
+
+ if (x > 7)
+ return f_ginv;
+ }
+
+ f_ginv.map = (f_ginv.map << 4) + x;
+ }
+
+ /* Step 2: Compute the cost of the operations.
+ The overall cost of doing an operation prior to the insertion is
+ the cost of the insertion plus the cost of the operation. */
+
+ /* Step 2a: Compute cost of F o G^-1 */
+
+ if (0 == avr_map_metric (f_ginv.map, MAP_NONFIXED_0_7))
+ {
+ /* The mapping consists only of fixed points and can be folded
+ to AND/OR logic later on. A reasonable cost is 3. */
+
+ f_ginv.cost = 2 + (val_used_p && !val_const_p);
+ }
+ else
+ {
+ rtx xop[4];
+
+ /* Get the cost of the insn by calling the output worker with some
+ fake values. Mimic effect of reloading xop[3]: Unused operands
+ are mapped to 0 and used operands are reloaded to xop[0]. */
+
+ xop[0] = all_regs_rtx[24];
+ xop[1] = gen_int_mode (f_ginv.map, SImode);
+ xop[2] = all_regs_rtx[25];
+ xop[3] = val_used_p ? xop[0] : const0_rtx;
+
+ avr_out_insert_bits (xop, &f_ginv.cost);
+
+ f_ginv.cost += val_const_p && val_used_p ? 1 : 0;
+ }
+
+ /* Step 2b: Add cost of G */
+
+ f_ginv.cost += g->cost;
+
+ if (avr_log.builtin)
+ avr_edump (" %s%d=%d", g->str, g->arg, f_ginv.cost);
+
+ return f_ginv;
+}
+
+
+/* Insert bits from XOP[1] into XOP[0] according to MAP.
+ XOP[0] and XOP[1] don't overlap.
+ If FIXP_P = true: Move all bits according to MAP using BLD/BST sequences.
+ If FIXP_P = false: Just move the bit if its position in the destination
+ is different from its source position. */
+
+static void
+avr_move_bits (rtx *xop, unsigned int map, bool fixp_p, int *plen)
+{
+ int bit_dest, b;
+
+ /* T-flag contains this bit of the source, i.e. of XOP[1] */
+ int t_bit_src = -1;
+
+ /* We order the operations according to the requested source bit b. */
+
+ for (b = 0; b < 8; b++)
+ for (bit_dest = 0; bit_dest < 8; bit_dest++)
+ {
+ int bit_src = avr_map (map, bit_dest);
+
+ if (b != bit_src
+ || bit_src >= 8
+ /* Same position: No need to copy as requested by FIXP_P. */
+ || (bit_dest == bit_src && !fixp_p))
+ continue;
+
+ if (t_bit_src != bit_src)
+ {
+ /* Source bit is not yet in T: Store it to T. */
+
+ t_bit_src = bit_src;
+
+ xop[3] = GEN_INT (bit_src);
+ avr_asm_len ("bst %T1%T3", xop, plen, 1);
+ }
+
+ /* Load destination bit with T. */
+
+ xop[3] = GEN_INT (bit_dest);
+ avr_asm_len ("bld %T0%T3", xop, plen, 1);
+ }
+}
+
+
+/* PLEN == 0: Print assembler code for `insert_bits'.
+ PLEN != 0: Compute code length in bytes.
+
+ OP[0]: Result
+ OP[1]: The mapping composed of nibbles. If nibble no. N is
+ 0: Bit N of result is copied from bit OP[2].0
+ ... ...
+ 7: Bit N of result is copied from bit OP[2].7
+ 0xf: Bit N of result is copied from bit OP[3].N
+ OP[2]: Bits to be inserted
+ OP[3]: Target value */
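+
+/* Examples (illustrative): map 0x76543210 copies OP[2] unchanged, map
+ 0x01234567 reverses the bit order of OP[2], and map 0xffffff10
+ inserts just bits 1 and 0 of OP[2] into OP[3] while keeping the
+ remaining bits of OP[3]. */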
+
+const char*
+avr_out_insert_bits (rtx *op, int *plen)
+{
+ unsigned int map = UINTVAL (op[1]) & GET_MODE_MASK (SImode);
+ unsigned mask_fixed;
+ bool fixp_p = true;
+ rtx xop[4];
+
+ xop[0] = op[0];
+ xop[1] = op[2];
+ xop[2] = op[3];
+
+ gcc_assert (REG_P (xop[2]) || CONST_INT_P (xop[2]));
+
+ if (plen)
+ *plen = 0;
+ else if (flag_print_asm_name)
+ fprintf (asm_out_file, ASM_COMMENT_START "map = 0x%08x\n", map);
+
+ /* If MAP has fixed points it might be better to initialize the result
+ with the bits to be inserted instead of moving all bits by hand. */
+
+ mask_fixed = avr_map_metric (map, MAP_MASK_FIXED_0_7);
+
+ if (REGNO (xop[0]) == REGNO (xop[1]))
+ {
+ /* Avoid early-clobber conflicts */
+
+ avr_asm_len ("mov __tmp_reg__,%1", xop, plen, 1);
+ xop[1] = tmp_reg_rtx;
+ fixp_p = false;
+ }
+
+ if (avr_map_metric (map, MAP_MASK_PREIMAGE_F))
+ {
+ /* XOP[2] is used and reloaded to XOP[0] already */
+
+ int n_fix = 0, n_nofix = 0;
+
+ gcc_assert (REG_P (xop[2]));
+
+ /* Get the code size of the bit insertions; once with all bits
+ moved and once with fixed points omitted. */
+
+ avr_move_bits (xop, map, true, &n_fix);
+ avr_move_bits (xop, map, false, &n_nofix);
+
+ if (fixp_p && n_fix - n_nofix > 3)
+ {
+ xop[3] = gen_int_mode (~mask_fixed, QImode);
+
+ avr_asm_len ("eor %0,%1" CR_TAB
+ "andi %0,%3" CR_TAB
+ "eor %0,%1", xop, plen, 3);
+ fixp_p = false;
+ }
+ }
+ else
+ {
+ /* XOP[2] is unused */
+
+ if (fixp_p && mask_fixed)
+ {
+ avr_asm_len ("mov %0,%1", xop, plen, 1);
+ fixp_p = false;
+ }
+ }
+
+ /* Move/insert remaining bits. */
+
+ avr_move_bits (xop, map, fixp_p, plen);
+
+ return "";
+}
+
+
+/* IDs for all the AVR builtins. */
+
+enum avr_builtin_id
+ {
+#define DEF_BUILTIN(NAME, N_ARGS, TYPE, CODE, LIBNAME) \
+ AVR_BUILTIN_ ## NAME,
+#include "builtins.def"
+#undef DEF_BUILTIN
+
+ AVR_BUILTIN_COUNT
+ };
+
+struct GTY(()) avr_builtin_description
+{
+ enum insn_code icode;
+ int n_args;
+ tree fndecl;
+};
+
+
+/* Notice that avr_bdesc[] and avr_builtin_id are initialized in such a way
+ that a built-in's ID can be used to access the built-in by means of
+ avr_bdesc[ID]. */
+
+static GTY(()) struct avr_builtin_description
+avr_bdesc[AVR_BUILTIN_COUNT] =
+ {
+#define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, LIBNAME) \
+ { (enum insn_code) CODE_FOR_ ## ICODE, N_ARGS, NULL_TREE },
+#include "builtins.def"
+#undef DEF_BUILTIN
+ };
+
+
+/* Implement `TARGET_BUILTIN_DECL'. */
+
+static tree
+avr_builtin_decl (unsigned id, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ if (id < AVR_BUILTIN_COUNT)
+ return avr_bdesc[id].fndecl;
+
+ return error_mark_node;
+}
+
+
+static void
+avr_init_builtin_int24 (void)
+{
+ tree int24_type = make_signed_type (GET_MODE_BITSIZE (PSImode));
+ tree uint24_type = make_unsigned_type (GET_MODE_BITSIZE (PSImode));
+
+ lang_hooks.types.register_builtin_type (int24_type, "__int24");
+ lang_hooks.types.register_builtin_type (uint24_type, "__uint24");
+}
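+
+/* After this, user code can declare e.g. "__uint24 addr;" to get a
+ 24-bit (PSImode) integer type. */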
+
+
+/* Implement `TARGET_INIT_BUILTINS' */
+/* Set up all builtin functions for this target. */
+
+static void
+avr_init_builtins (void)
+{
+ tree void_ftype_void
+ = build_function_type_list (void_type_node, NULL_TREE);
+ tree uchar_ftype_uchar
+ = build_function_type_list (unsigned_char_type_node,
+ unsigned_char_type_node,
+ NULL_TREE);
+ tree uint_ftype_uchar_uchar
+ = build_function_type_list (unsigned_type_node,
+ unsigned_char_type_node,
+ unsigned_char_type_node,
+ NULL_TREE);
+ tree int_ftype_char_char
+ = build_function_type_list (integer_type_node,
+ char_type_node,
+ char_type_node,
+ NULL_TREE);
+ tree int_ftype_char_uchar
+ = build_function_type_list (integer_type_node,
+ char_type_node,
+ unsigned_char_type_node,
+ NULL_TREE);
+ tree void_ftype_ulong
+ = build_function_type_list (void_type_node,
+ long_unsigned_type_node,
+ NULL_TREE);
+
+ tree uchar_ftype_ulong_uchar_uchar
+ = build_function_type_list (unsigned_char_type_node,
+ long_unsigned_type_node,
+ unsigned_char_type_node,
+ unsigned_char_type_node,
+ NULL_TREE);
+
+ tree const_memx_void_node
+ = build_qualified_type (void_type_node,
+ TYPE_QUAL_CONST
+ | ENCODE_QUAL_ADDR_SPACE (ADDR_SPACE_MEMX));
+
+ tree const_memx_ptr_type_node
+ = build_pointer_type_for_mode (const_memx_void_node, PSImode, false);
+
+ tree char_ftype_const_memx_ptr
+ = build_function_type_list (char_type_node,
+ const_memx_ptr_type_node,
+ NULL);
+
+#define ITYP(T) \
+ lang_hooks.types.type_for_size (TYPE_PRECISION (T), TYPE_UNSIGNED (T))
+
+#define FX_FTYPE_FX(fx) \
+ tree fx##r_ftype_##fx##r \
+ = build_function_type_list (node_##fx##r, node_##fx##r, NULL); \
+ tree fx##k_ftype_##fx##k \
+ = build_function_type_list (node_##fx##k, node_##fx##k, NULL)
+
+#define FX_FTYPE_FX_INT(fx) \
+ tree fx##r_ftype_##fx##r_int \
+ = build_function_type_list (node_##fx##r, node_##fx##r, \
+ integer_type_node, NULL); \
+ tree fx##k_ftype_##fx##k_int \
+ = build_function_type_list (node_##fx##k, node_##fx##k, \
+ integer_type_node, NULL)
+
+#define INT_FTYPE_FX(fx) \
+ tree int_ftype_##fx##r \
+ = build_function_type_list (integer_type_node, node_##fx##r, NULL); \
+ tree int_ftype_##fx##k \
+ = build_function_type_list (integer_type_node, node_##fx##k, NULL)
+
+#define INTX_FTYPE_FX(fx) \
+ tree int##fx##r_ftype_##fx##r \
+ = build_function_type_list (ITYP (node_##fx##r), node_##fx##r, NULL); \
+ tree int##fx##k_ftype_##fx##k \
+ = build_function_type_list (ITYP (node_##fx##k), node_##fx##k, NULL)
+
+#define FX_FTYPE_INTX(fx) \
+ tree fx##r_ftype_int##fx##r \
+ = build_function_type_list (node_##fx##r, ITYP (node_##fx##r), NULL); \
+ tree fx##k_ftype_int##fx##k \
+ = build_function_type_list (node_##fx##k, ITYP (node_##fx##k), NULL)
+
+ tree node_hr = short_fract_type_node;
+ tree node_nr = fract_type_node;
+ tree node_lr = long_fract_type_node;
+ tree node_llr = long_long_fract_type_node;
+
+ tree node_uhr = unsigned_short_fract_type_node;
+ tree node_unr = unsigned_fract_type_node;
+ tree node_ulr = unsigned_long_fract_type_node;
+ tree node_ullr = unsigned_long_long_fract_type_node;
+
+ tree node_hk = short_accum_type_node;
+ tree node_nk = accum_type_node;
+ tree node_lk = long_accum_type_node;
+ tree node_llk = long_long_accum_type_node;
+
+ tree node_uhk = unsigned_short_accum_type_node;
+ tree node_unk = unsigned_accum_type_node;
+ tree node_ulk = unsigned_long_accum_type_node;
+ tree node_ullk = unsigned_long_long_accum_type_node;
+
+
+ /* For absfx builtins. */
+
+ FX_FTYPE_FX (h);
+ FX_FTYPE_FX (n);
+ FX_FTYPE_FX (l);
+ FX_FTYPE_FX (ll);
+
+ /* For roundfx builtins. */
+
+ FX_FTYPE_FX_INT (h);
+ FX_FTYPE_FX_INT (n);
+ FX_FTYPE_FX_INT (l);
+ FX_FTYPE_FX_INT (ll);
+
+ FX_FTYPE_FX_INT (uh);
+ FX_FTYPE_FX_INT (un);
+ FX_FTYPE_FX_INT (ul);
+ FX_FTYPE_FX_INT (ull);
+
+ /* For countlsfx builtins. */
+
+ INT_FTYPE_FX (h);
+ INT_FTYPE_FX (n);
+ INT_FTYPE_FX (l);
+ INT_FTYPE_FX (ll);
+
+ INT_FTYPE_FX (uh);
+ INT_FTYPE_FX (un);
+ INT_FTYPE_FX (ul);
+ INT_FTYPE_FX (ull);
+
+ /* For bitsfx builtins. */
+
+ INTX_FTYPE_FX (h);
+ INTX_FTYPE_FX (n);
+ INTX_FTYPE_FX (l);
+ INTX_FTYPE_FX (ll);
+
+ INTX_FTYPE_FX (uh);
+ INTX_FTYPE_FX (un);
+ INTX_FTYPE_FX (ul);
+ INTX_FTYPE_FX (ull);
+
+ /* For fxbits builtins. */
+
+ FX_FTYPE_INTX (h);
+ FX_FTYPE_INTX (n);
+ FX_FTYPE_INTX (l);
+ FX_FTYPE_INTX (ll);
+
+ FX_FTYPE_INTX (uh);
+ FX_FTYPE_INTX (un);
+ FX_FTYPE_INTX (ul);
+ FX_FTYPE_INTX (ull);
+
+
+#define DEF_BUILTIN(NAME, N_ARGS, TYPE, CODE, LIBNAME) \
+ { \
+ int id = AVR_BUILTIN_ ## NAME; \
+ const char *Name = "__builtin_avr_" #NAME; \
+ char *name = (char*) alloca (1 + strlen (Name)); \
+ \
+ gcc_assert (id < AVR_BUILTIN_COUNT); \
+ avr_bdesc[id].fndecl \
+ = add_builtin_function (avr_tolower (name, Name), TYPE, id, \
+ BUILT_IN_MD, LIBNAME, NULL_TREE); \
+ }
+#include "builtins.def"
+#undef DEF_BUILTIN
+
+ avr_init_builtin_int24 ();
+}
+
+
+/* Subroutine of avr_expand_builtin to expand vanilla builtins
+ with non-void result and 1 ... 3 arguments. */
+
+static rtx
+avr_default_expand_builtin (enum insn_code icode, tree exp, rtx target)
+{
+ rtx pat, xop[3];
+ int n, n_args = call_expr_nargs (exp);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+
+ gcc_assert (n_args >= 1 && n_args <= 3);
+
+ if (target == NULL_RTX
+ || GET_MODE (target) != tmode
+ || !insn_data[icode].operand[0].predicate (target, tmode))
+ {
+ target = gen_reg_rtx (tmode);
+ }
+
+ for (n = 0; n < n_args; n++)
+ {
+ tree arg = CALL_EXPR_ARG (exp, n);
+ rtx op = expand_expr (arg, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ enum machine_mode opmode = GET_MODE (op);
+ enum machine_mode mode = insn_data[icode].operand[n+1].mode;
+
+ if ((opmode == SImode || opmode == VOIDmode) && mode == HImode)
+ {
+ opmode = HImode;
+ op = gen_lowpart (HImode, op);
+ }
+
+ /* In case the insn wants input operands in modes different from
+ the result, abort. */
+
+ gcc_assert (opmode == mode || opmode == VOIDmode);
+
+ if (!insn_data[icode].operand[n+1].predicate (op, mode))
+ op = copy_to_mode_reg (mode, op);
+
+ xop[n] = op;
+ }
+
+ switch (n_args)
+ {
+ case 1: pat = GEN_FCN (icode) (target, xop[0]); break;
+ case 2: pat = GEN_FCN (icode) (target, xop[0], xop[1]); break;
+ case 3: pat = GEN_FCN (icode) (target, xop[0], xop[1], xop[2]); break;
+
+ default:
+ gcc_unreachable();
+ }
+
+ if (pat == NULL_RTX)
+ return NULL_RTX;
+
+ emit_insn (pat);
+
+ return target;
+}
+
+
+/* Implement `TARGET_EXPAND_BUILTIN'. */
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+avr_expand_builtin (tree exp, rtx target,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore)
+{
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ const char *bname = IDENTIFIER_POINTER (DECL_NAME (fndecl));
+ unsigned int id = DECL_FUNCTION_CODE (fndecl);
+ const struct avr_builtin_description *d = &avr_bdesc[id];
+ tree arg0;
+ rtx op0;
+
+ gcc_assert (id < AVR_BUILTIN_COUNT);
+
+ switch (id)
+ {
+ case AVR_BUILTIN_NOP:
+ emit_insn (gen_nopv (GEN_INT(1)));
+ return 0;
+
+ case AVR_BUILTIN_DELAY_CYCLES:
+ {
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+
+ if (!CONST_INT_P (op0))
+ error ("%s expects a compile time integer constant", bname);
+ else
+ avr_expand_delay_cycles (op0);
+
+ return NULL_RTX;
+ }
+
+ case AVR_BUILTIN_INSERT_BITS:
+ {
+ arg0 = CALL_EXPR_ARG (exp, 0);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+
+ if (!CONST_INT_P (op0))
+ {
+ error ("%s expects a compile time long integer constant"
+ " as first argument", bname);
+ return target;
+ }
+
+ break;
+ }
+
+ case AVR_BUILTIN_ROUNDHR: case AVR_BUILTIN_ROUNDUHR:
+ case AVR_BUILTIN_ROUNDR: case AVR_BUILTIN_ROUNDUR:
+ case AVR_BUILTIN_ROUNDLR: case AVR_BUILTIN_ROUNDULR:
+ case AVR_BUILTIN_ROUNDLLR: case AVR_BUILTIN_ROUNDULLR:
+
+ case AVR_BUILTIN_ROUNDHK: case AVR_BUILTIN_ROUNDUHK:
+ case AVR_BUILTIN_ROUNDK: case AVR_BUILTIN_ROUNDUK:
+ case AVR_BUILTIN_ROUNDLK: case AVR_BUILTIN_ROUNDULK:
+ case AVR_BUILTIN_ROUNDLLK: case AVR_BUILTIN_ROUNDULLK:
+
+ /* Warn about odd rounding. Rounding points >= FBIT will have
+ no effect. */
+
+ if (TREE_CODE (CALL_EXPR_ARG (exp, 1)) != INTEGER_CST)
+ break;
+
+ int rbit = (int) TREE_INT_CST_LOW (CALL_EXPR_ARG (exp, 1));
+
+ if (rbit >= (int) GET_MODE_FBIT (mode))
+ {
+ warning (OPT_Wextra, "rounding to %d bits has no effect for "
+ "fixed-point value with %d fractional bits",
+ rbit, GET_MODE_FBIT (mode));
+
+ return expand_expr (CALL_EXPR_ARG (exp, 0), NULL_RTX, mode,
+ EXPAND_NORMAL);
+ }
+ else if (rbit <= - (int) GET_MODE_IBIT (mode))
+ {
+ warning (0, "rounding result will always be 0");
+ return CONST0_RTX (mode);
+ }
+
+ /* The rounding point RP now satisfies -IBIT < RP < FBIT.
+
+ TR 18037 only specifies results for RP > 0. However, the
+ remaining cases of -IBIT < RP <= 0 can easily be supported
+ without any additional overhead. */
+
+ break; /* round */
+ }
+
+ /* No fold found and no insn: Call support function from libgcc. */
+
+ if (d->icode == CODE_FOR_nothing
+ && DECL_ASSEMBLER_NAME (get_callee_fndecl (exp)) != NULL_TREE)
+ {
+ return expand_call (exp, target, ignore);
+ }
+
+ /* No special treatment needed: vanilla expand. */
+
+ gcc_assert (d->icode != CODE_FOR_nothing);
+ gcc_assert (d->n_args == call_expr_nargs (exp));
+
+ if (d->n_args == 0)
+ {
+ emit_insn ((GEN_FCN (d->icode)) (target));
+ return NULL_RTX;
+ }
+
+ return avr_default_expand_builtin (d->icode, exp, target);
+}
+
+
+/* Helper for `avr_fold_builtin' that folds absfx (FIXED_CST). */
+
+static tree
+avr_fold_absfx (tree tval)
+{
+ if (FIXED_CST != TREE_CODE (tval))
+ return NULL_TREE;
+
+ /* Our fixed-points have no padding: Use double_int payload directly. */
+
+ FIXED_VALUE_TYPE fval = TREE_FIXED_CST (tval);
+ unsigned int bits = GET_MODE_BITSIZE (fval.mode);
+ double_int ival = fval.data.sext (bits);
+
+ if (!ival.is_negative())
+ return tval;
+
+ /* ISO/IEC TR 18037, 7.18a.6.2: The absfx functions are saturating. */
+
+ fval.data = (ival == double_int::min_value (bits, false).sext (bits))
+ ? double_int::max_value (bits, false)
+ : -ival;
+
+ return build_fixed (TREE_TYPE (tval), fval);
+}
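+
+/* For example, for signed _Fract the most negative value -1.0r has no
+ representable positive counterpart, so the fold above saturates it to
+ the maximum 0.99...r instead of letting it wrap around. */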
+
+
+/* Implement `TARGET_FOLD_BUILTIN'. */
+
+static tree
+avr_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *arg,
+ bool ignore ATTRIBUTE_UNUSED)
+{
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+ tree val_type = TREE_TYPE (TREE_TYPE (fndecl));
+
+ if (!optimize)
+ return NULL_TREE;
+
+ switch (fcode)
+ {
+ default:
+ break;
+
+ case AVR_BUILTIN_SWAP:
+ {
+ return fold_build2 (LROTATE_EXPR, val_type, arg[0],
+ build_int_cst (val_type, 4));
+ }
+
+ case AVR_BUILTIN_ABSHR:
+ case AVR_BUILTIN_ABSR:
+ case AVR_BUILTIN_ABSLR:
+ case AVR_BUILTIN_ABSLLR:
+
+ case AVR_BUILTIN_ABSHK:
+ case AVR_BUILTIN_ABSK:
+ case AVR_BUILTIN_ABSLK:
+ case AVR_BUILTIN_ABSLLK:
+ /* GCC is not good at folding ABS for fixed-point. Do it by hand. */
+
+ return avr_fold_absfx (arg[0]);
+
+ case AVR_BUILTIN_BITSHR: case AVR_BUILTIN_HRBITS:
+ case AVR_BUILTIN_BITSHK: case AVR_BUILTIN_HKBITS:
+ case AVR_BUILTIN_BITSUHR: case AVR_BUILTIN_UHRBITS:
+ case AVR_BUILTIN_BITSUHK: case AVR_BUILTIN_UHKBITS:
+
+ case AVR_BUILTIN_BITSR: case AVR_BUILTIN_RBITS:
+ case AVR_BUILTIN_BITSK: case AVR_BUILTIN_KBITS:
+ case AVR_BUILTIN_BITSUR: case AVR_BUILTIN_URBITS:
+ case AVR_BUILTIN_BITSUK: case AVR_BUILTIN_UKBITS:
+
+ case AVR_BUILTIN_BITSLR: case AVR_BUILTIN_LRBITS:
+ case AVR_BUILTIN_BITSLK: case AVR_BUILTIN_LKBITS:
+ case AVR_BUILTIN_BITSULR: case AVR_BUILTIN_ULRBITS:
+ case AVR_BUILTIN_BITSULK: case AVR_BUILTIN_ULKBITS:
+
+ case AVR_BUILTIN_BITSLLR: case AVR_BUILTIN_LLRBITS:
+ case AVR_BUILTIN_BITSLLK: case AVR_BUILTIN_LLKBITS:
+ case AVR_BUILTIN_BITSULLR: case AVR_BUILTIN_ULLRBITS:
+ case AVR_BUILTIN_BITSULLK: case AVR_BUILTIN_ULLKBITS:
+
+ gcc_assert (TYPE_PRECISION (val_type)
+ == TYPE_PRECISION (TREE_TYPE (arg[0])));
+
+ return build1 (VIEW_CONVERT_EXPR, val_type, arg[0]);
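+
+      /* Note on the cases above (illustration, TR 18037 naming):
+         __builtin_avr_bitsk yields the integer with the same bit pattern
+         as its _Accum argument, and __builtin_avr_kbits is the inverse.
+         Source and destination have the same precision, so no code is
+         needed and a VIEW_CONVERT_EXPR suffices.  */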
+
+ case AVR_BUILTIN_INSERT_BITS:
+ {
+ tree tbits = arg[1];
+ tree tval = arg[2];
+ tree tmap;
+ tree map_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
+ unsigned int map;
+ bool changed = false;
+ unsigned i;
+ avr_map_op_t best_g;
+
+ if (TREE_CODE (arg[0]) != INTEGER_CST)
+ {
+            /* No constant as first argument: Don't fold this; the error
+               will be diagnosed in avr_expand_builtin.  */
+
+ break;
+ }
+
+ tmap = double_int_to_tree (map_type, tree_to_double_int (arg[0]));
+ map = TREE_INT_CST_LOW (tmap);
+
+ if (TREE_CODE (tval) != INTEGER_CST
+ && 0 == avr_map_metric (map, MAP_MASK_PREIMAGE_F))
+ {
+            /* There is no F in the map, i.e. the 3rd operand is unused.
+               Replace that argument with some constant to render the
+               respective input unused.  */
+
+ tval = build_int_cst (val_type, 0);
+ changed = true;
+ }
+
+ if (TREE_CODE (tbits) != INTEGER_CST
+ && 0 == avr_map_metric (map, MAP_PREIMAGE_0_7))
+ {
+ /* Similar for the bits to be inserted. If they are unused,
+ we can just as well pass 0. */
+
+ tbits = build_int_cst (val_type, 0);
+ }
+
+ if (TREE_CODE (tbits) == INTEGER_CST)
+ {
+ /* Inserting bits known at compile time is easy and can be
+ performed by AND and OR with appropriate masks. */
+
+ int bits = TREE_INT_CST_LOW (tbits);
+ int mask_ior = 0, mask_and = 0xff;
+
+ for (i = 0; i < 8; i++)
+ {
+ int mi = avr_map (map, i);
+
+ if (mi < 8)
+ {
+ if (bits & (1 << mi)) mask_ior |= (1 << i);
+ else mask_and &= ~(1 << i);
+ }
+ }
+
+ tval = fold_build2 (BIT_IOR_EXPR, val_type, tval,
+ build_int_cst (val_type, mask_ior));
+ return fold_build2 (BIT_AND_EXPR, val_type, tval,
+ build_int_cst (val_type, mask_and));
+ }
+
+ if (changed)
+ return build_call_expr (fndecl, 3, tmap, tbits, tval);
+
+ /* If bits don't change their position we can use vanilla logic
+ to merge the two arguments. */
+
+ if (0 == avr_map_metric (map, MAP_NONFIXED_0_7))
+ {
+ int mask_f = avr_map_metric (map, MAP_MASK_PREIMAGE_F);
+ tree tres, tmask = build_int_cst (val_type, mask_f ^ 0xff);
+
+ tres = fold_build2 (BIT_XOR_EXPR, val_type, tbits, tval);
+ tres = fold_build2 (BIT_AND_EXPR, val_type, tres, tmask);
+ return fold_build2 (BIT_XOR_EXPR, val_type, tres, tval);
+ }
+
+        /* Try decomposing the map to reduce the overall cost.  */
+
+ if (avr_log.builtin)
+ avr_edump ("\n%?: %x\n%?: ROL cost: ", map);
+
+ best_g = avr_map_op[0];
+ best_g.cost = 1000;
+
+ for (i = 0; i < sizeof (avr_map_op) / sizeof (*avr_map_op); i++)
+ {
+ avr_map_op_t g
+ = avr_map_decompose (map, avr_map_op + i,
+ TREE_CODE (tval) == INTEGER_CST);
+
+ if (g.cost >= 0 && g.cost < best_g.cost)
+ best_g = g;
+ }
+
+ if (avr_log.builtin)
+ avr_edump ("\n");
+
+ if (best_g.arg == 0)
+ /* No optimization found */
+ break;
+
+ /* Apply operation G to the 2nd argument. */
+
+ if (avr_log.builtin)
+ avr_edump ("%?: using OP(%s%d, %x) cost %d\n",
+ best_g.str, best_g.arg, best_g.map, best_g.cost);
+
+ /* Do right-shifts arithmetically: They copy the MSB instead of
+ shifting in a non-usable value (0) as with logic right-shift. */
+
+ tbits = fold_convert (signed_char_type_node, tbits);
+ tbits = fold_build2 (best_g.code, signed_char_type_node, tbits,
+ build_int_cst (val_type, best_g.arg));
+ tbits = fold_convert (val_type, tbits);
+
+ /* Use map o G^-1 instead of original map to undo the effect of G. */
+
+ tmap = double_int_to_tree (map_type,
+ double_int::from_uhwi (best_g.map));
+
+ return build_call_expr (fndecl, 3, tmap, tbits, tval);
+ } /* AVR_BUILTIN_INSERT_BITS */
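+
+      /* Map encoding used above (cf. the documentation of
+         __builtin_avr_insert_bits): nibble N of MAP says where result
+         bit N comes from; a digit 0-7 picks that bit of BITS, 0xf
+         keeps bit N of VAL.  Thus map 0x76543210 returns BITS unchanged
+         and map 0xffffffff returns VAL; both are folded away above.  */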
+ }
+
+ return NULL_TREE;
+}
+
+
+
+/* Initialize the GCC target structure. */
+
+#undef TARGET_ASM_ALIGNED_HI_OP
+#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
+#undef TARGET_ASM_ALIGNED_SI_OP
+#define TARGET_ASM_ALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.word\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER avr_assemble_integer
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START avr_file_start
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END avr_file_end
+
+#undef TARGET_ASM_FUNCTION_END_PROLOGUE
+#define TARGET_ASM_FUNCTION_END_PROLOGUE avr_asm_function_end_prologue
+#undef TARGET_ASM_FUNCTION_BEGIN_EPILOGUE
+#define TARGET_ASM_FUNCTION_BEGIN_EPILOGUE avr_asm_function_begin_epilogue
+
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE avr_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE avr_libcall_value
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P avr_function_value_regno_p
+
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE avr_attribute_table
+#undef TARGET_INSERT_ATTRIBUTES
+#define TARGET_INSERT_ATTRIBUTES avr_insert_attributes
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS avr_section_type_flags
+
+#undef TARGET_ASM_NAMED_SECTION
+#define TARGET_ASM_NAMED_SECTION avr_asm_named_section
+#undef TARGET_ASM_INIT_SECTIONS
+#define TARGET_ASM_INIT_SECTIONS avr_asm_init_sections
+#undef TARGET_ENCODE_SECTION_INFO
+#define TARGET_ENCODE_SECTION_INFO avr_encode_section_info
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION avr_asm_select_section
+
+#undef TARGET_REGISTER_MOVE_COST
+#define TARGET_REGISTER_MOVE_COST avr_register_move_cost
+#undef TARGET_MEMORY_MOVE_COST
+#define TARGET_MEMORY_MOVE_COST avr_memory_move_cost
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS avr_rtx_costs
+#undef TARGET_ADDRESS_COST
+#define TARGET_ADDRESS_COST avr_address_cost
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG avr_reorg
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG avr_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE avr_function_arg_advance
+
+#undef TARGET_SET_CURRENT_FUNCTION
+#define TARGET_SET_CURRENT_FUNCTION avr_set_current_function
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY avr_return_in_memory
+
+#undef TARGET_STRICT_ARGUMENT_NAMING
+#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+
+#undef TARGET_BUILTIN_SETJMP_FRAME_VALUE
+#define TARGET_BUILTIN_SETJMP_FRAME_VALUE avr_builtin_setjmp_frame_value
+
+#undef TARGET_HARD_REGNO_SCRATCH_OK
+#define TARGET_HARD_REGNO_SCRATCH_OK avr_hard_regno_scratch_ok
+#undef TARGET_CASE_VALUES_THRESHOLD
+#define TARGET_CASE_VALUES_THRESHOLD avr_case_values_threshold
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED avr_frame_pointer_required_p
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE avr_can_eliminate
+
+#undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
+#define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS avr_allocate_stack_slots_for_args
+
+#undef TARGET_WARN_FUNC_RETURN
+#define TARGET_WARN_FUNC_RETURN avr_warn_func_return
+
+#undef TARGET_CLASS_LIKELY_SPILLED_P
+#define TARGET_CLASS_LIKELY_SPILLED_P avr_class_likely_spilled_p
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE avr_option_override
+
+#undef TARGET_CANNOT_MODIFY_JUMPS_P
+#define TARGET_CANNOT_MODIFY_JUMPS_P avr_cannot_modify_jumps_p
+
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL avr_function_ok_for_sibcall
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS avr_init_builtins
+
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL avr_builtin_decl
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN avr_expand_builtin
+
+#undef TARGET_FOLD_BUILTIN
+#define TARGET_FOLD_BUILTIN avr_fold_builtin
+
+#undef TARGET_ASM_FUNCTION_RODATA_SECTION
+#define TARGET_ASM_FUNCTION_RODATA_SECTION avr_asm_function_rodata_section
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P avr_scalar_mode_supported_p
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST avr_build_builtin_va_list
+
+#undef TARGET_FIXED_POINT_SUPPORTED_P
+#define TARGET_FIXED_POINT_SUPPORTED_P hook_bool_void_true
+
+#undef TARGET_CONVERT_TO_TYPE
+#define TARGET_CONVERT_TO_TYPE avr_convert_to_type
+
+#undef TARGET_ADDR_SPACE_SUBSET_P
+#define TARGET_ADDR_SPACE_SUBSET_P avr_addr_space_subset_p
+
+#undef TARGET_ADDR_SPACE_CONVERT
+#define TARGET_ADDR_SPACE_CONVERT avr_addr_space_convert
+
+#undef TARGET_ADDR_SPACE_ADDRESS_MODE
+#define TARGET_ADDR_SPACE_ADDRESS_MODE avr_addr_space_address_mode
+
+#undef TARGET_ADDR_SPACE_POINTER_MODE
+#define TARGET_ADDR_SPACE_POINTER_MODE avr_addr_space_pointer_mode
+
+#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
+#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
+ avr_addr_space_legitimate_address_p
+
+#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
+#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS avr_addr_space_legitimize_address
+
+#undef TARGET_MODE_DEPENDENT_ADDRESS_P
+#define TARGET_MODE_DEPENDENT_ADDRESS_P avr_mode_dependent_address_p
+
+#undef TARGET_SECONDARY_RELOAD
+#define TARGET_SECONDARY_RELOAD avr_secondary_reload
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND avr_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS avr_print_operand_address
+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P avr_print_operand_punct_valid_p
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+
+#include "gt-avr.h"
diff --git a/gcc-4.9/gcc/config/avr/avr.h b/gcc-4.9/gcc/config/avr/avr.h
new file mode 100644
index 000000000..74be83c8a
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr.h
@@ -0,0 +1,606 @@
+/* Definitions of target machine for GNU compiler,
+ for ATMEL AVR at90s8515, ATmega103/103L, ATmega603/603L microcontrollers.
+ Copyright (C) 1998-2014 Free Software Foundation, Inc.
+ Contributed by Denis Chertykov (chertykov@gmail.com)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+typedef struct
+{
+ /* Id of the address space as used in c_register_addr_space */
+ unsigned char id;
+
+ /* Flavour of memory: 0 = RAM, 1 = Flash */
+ int memory_class;
+
+ /* Width of pointer (in bytes) */
+ int pointer_size;
+
+ /* Name of the address space as visible to the user */
+ const char *name;
+
+ /* Segment (i.e. 64k memory chunk) number. */
+ int segment;
+
+ /* Section prefix, e.g. ".progmem1.data" */
+ const char *section_name;
+} avr_addrspace_t;
+
+extern const avr_addrspace_t avr_addrspace[];
+
+/* Known address spaces */
+
+enum
+ {
+ ADDR_SPACE_RAM, /* ADDR_SPACE_GENERIC */
+ ADDR_SPACE_FLASH,
+ ADDR_SPACE_FLASH1,
+ ADDR_SPACE_FLASH2,
+ ADDR_SPACE_FLASH3,
+ ADDR_SPACE_FLASH4,
+ ADDR_SPACE_FLASH5,
+ ADDR_SPACE_MEMX,
+ /* Sentinel */
+ ADDR_SPACE_COUNT
+ };
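+
+/* Illustration: user code selects these address spaces with the named
+   address space keywords, e.g.
+
+       const __flash char tab[] = { 1, 2, 3 };  // ADDR_SPACE_FLASH, read via LPM
+       const __memx char *p = tab;              // ADDR_SPACE_MEMX, 24-bit pointer
+
+   while ordinary objects live in ADDR_SPACE_RAM, the generic space.  */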
+
+#define TARGET_CPU_CPP_BUILTINS() avr_cpu_cpp_builtins (pfile)
+
+#define AVR_HAVE_JMP_CALL (avr_current_arch->have_jmp_call)
+#define AVR_HAVE_MUL (avr_current_arch->have_mul)
+#define AVR_HAVE_MOVW (avr_current_arch->have_movw_lpmx)
+#define AVR_HAVE_LPMX (avr_current_arch->have_movw_lpmx)
+#define AVR_HAVE_ELPM (avr_current_arch->have_elpm)
+#define AVR_HAVE_ELPMX (avr_current_arch->have_elpmx)
+#define AVR_HAVE_RAMPD (avr_current_arch->have_rampd)
+#define AVR_HAVE_RAMPX (avr_current_arch->have_rampd)
+#define AVR_HAVE_RAMPY (avr_current_arch->have_rampd)
+#define AVR_HAVE_RAMPZ (avr_current_arch->have_elpm \
+ || avr_current_arch->have_rampd)
+#define AVR_HAVE_EIJMP_EICALL (avr_current_arch->have_eijmp_eicall)
+
+/* Handling of 8-bit SP versus 16-bit SP is as follows:
+
+ -msp8 is used internally to select the right multilib for targets with
+ 8-bit SP. -msp8 is set automatically by DRIVER_SELF_SPECS for devices
+ with 8-bit SP or by multilib generation machinery. If a frame pointer is
+ needed and SP is only 8 bits wide, SP is zero-extended to get FP.
+
+ TARGET_TINY_STACK is triggered by -mtiny-stack which is a user option.
+ This option has no effect on multilib selection. It serves to save some
+ bytes on 16-bit SP devices by only changing SP_L and leaving SP_H alone.
+
+   These two properties are reflected by the built-in macros __AVR_SP8__
+   and __AVR_HAVE_8BIT_SP__ / __AVR_HAVE_16BIT_SP__, respectively.  During
+   multilib generation there is always __AVR_SP8__ == __AVR_HAVE_8BIT_SP__.  */
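+
+/* For instance, source code can test the corresponding built-in macros
+   (illustration; the macros are defined in avr_cpu_cpp_builtins):
+
+       #ifdef __AVR_HAVE_8BIT_SP__
+       // only SP_L is used; SP_H is absent or ignored
+       #endif
+*/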
+
+#define AVR_HAVE_8BIT_SP \
+ (avr_current_device->short_sp || TARGET_TINY_STACK || avr_sp8)
+
+#define AVR_HAVE_SPH (!avr_sp8)
+
+#define AVR_2_BYTE_PC (!AVR_HAVE_EIJMP_EICALL)
+#define AVR_3_BYTE_PC (AVR_HAVE_EIJMP_EICALL)
+
+#define AVR_XMEGA (avr_current_arch->xmega_p)
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 0
+#define WORDS_BIG_ENDIAN 0
+
+#ifdef IN_LIBGCC2
+/* This is to get correct SI and DI modes in libgcc2.c (32 and 64 bits). */
+#define UNITS_PER_WORD 4
+#else
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD 1
+#endif
+
+#define POINTER_SIZE 16
+
+
+/* Maximum size of a reasonable data type:
+   DImode or DFmode ... */
+#define MAX_FIXED_MODE_SIZE 32
+
+#define PARM_BOUNDARY 8
+
+#define FUNCTION_BOUNDARY 8
+
+#define EMPTY_FIELD_BOUNDARY 8
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 8
+
+#define TARGET_VTABLE_ENTRY_ALIGN 8
+
+#define STRICT_ALIGNMENT 0
+
+#define INT_TYPE_SIZE (TARGET_INT8 ? 8 : 16)
+#define SHORT_TYPE_SIZE (INT_TYPE_SIZE == 8 ? INT_TYPE_SIZE : 16)
+#define LONG_TYPE_SIZE (INT_TYPE_SIZE == 8 ? 16 : 32)
+#define LONG_LONG_TYPE_SIZE (INT_TYPE_SIZE == 8 ? 32 : 64)
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 32
+#define LONG_DOUBLE_TYPE_SIZE 32
+#define LONG_LONG_ACCUM_TYPE_SIZE 64
+
+#define DEFAULT_SIGNED_CHAR 1
+
+#define SIZE_TYPE (INT_TYPE_SIZE == 8 ? "long unsigned int" : "unsigned int")
+#define PTRDIFF_TYPE (INT_TYPE_SIZE == 8 ? "long int" : "int")
+
+#define WCHAR_TYPE_SIZE 16
+
+#define FIRST_PSEUDO_REGISTER 36
+
+#define FIXED_REGISTERS {\
+ 1,1,/* r0 r1 */\
+ 0,0,/* r2 r3 */\
+ 0,0,/* r4 r5 */\
+ 0,0,/* r6 r7 */\
+ 0,0,/* r8 r9 */\
+ 0,0,/* r10 r11 */\
+ 0,0,/* r12 r13 */\
+ 0,0,/* r14 r15 */\
+ 0,0,/* r16 r17 */\
+ 0,0,/* r18 r19 */\
+ 0,0,/* r20 r21 */\
+ 0,0,/* r22 r23 */\
+ 0,0,/* r24 r25 */\
+ 0,0,/* r26 r27 */\
+ 0,0,/* r28 r29 */\
+ 0,0,/* r30 r31 */\
+ 1,1,/* STACK */\
+ 1,1 /* arg pointer */ }
+
+#define CALL_USED_REGISTERS { \
+ 1,1,/* r0 r1 */ \
+ 0,0,/* r2 r3 */ \
+ 0,0,/* r4 r5 */ \
+ 0,0,/* r6 r7 */ \
+ 0,0,/* r8 r9 */ \
+ 0,0,/* r10 r11 */ \
+ 0,0,/* r12 r13 */ \
+ 0,0,/* r14 r15 */ \
+ 0,0,/* r16 r17 */ \
+ 1,1,/* r18 r19 */ \
+ 1,1,/* r20 r21 */ \
+ 1,1,/* r22 r23 */ \
+ 1,1,/* r24 r25 */ \
+ 1,1,/* r26 r27 */ \
+ 0,0,/* r28 r29 */ \
+ 1,1,/* r30 r31 */ \
+ 1,1,/* STACK */ \
+ 1,1 /* arg pointer */ }
+
+#define REG_ALLOC_ORDER { \
+ 24,25, \
+ 18,19, \
+ 20,21, \
+ 22,23, \
+ 30,31, \
+ 26,27, \
+ 28,29, \
+ 17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2, \
+ 0,1, \
+ 32,33,34,35 \
+ }
+
+#define ADJUST_REG_ALLOC_ORDER avr_adjust_reg_alloc_order()
+
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) avr_hard_regno_mode_ok(REGNO, MODE)
+
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+enum reg_class {
+ NO_REGS,
+ R0_REG, /* r0 */
+ POINTER_X_REGS, /* r26 - r27 */
+ POINTER_Y_REGS, /* r28 - r29 */
+ POINTER_Z_REGS, /* r30 - r31 */
+ STACK_REG, /* STACK */
+ BASE_POINTER_REGS, /* r28 - r31 */
+ POINTER_REGS, /* r26 - r31 */
+ ADDW_REGS, /* r24 - r31 */
+ SIMPLE_LD_REGS, /* r16 - r23 */
+ LD_REGS, /* r16 - r31 */
+ NO_LD_REGS, /* r0 - r15 */
+ GENERAL_REGS, /* r0 - r31 */
+ ALL_REGS, LIM_REG_CLASSES
+};
+
+
+#define N_REG_CLASSES (int)LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES { \
+ "NO_REGS", \
+ "R0_REG", /* r0 */ \
+ "POINTER_X_REGS", /* r26 - r27 */ \
+ "POINTER_Y_REGS", /* r28 - r29 */ \
+ "POINTER_Z_REGS", /* r30 - r31 */ \
+ "STACK_REG", /* STACK */ \
+ "BASE_POINTER_REGS", /* r28 - r31 */ \
+ "POINTER_REGS", /* r26 - r31 */ \
+ "ADDW_REGS", /* r24 - r31 */ \
+ "SIMPLE_LD_REGS", /* r16 - r23 */ \
+ "LD_REGS", /* r16 - r31 */ \
+ "NO_LD_REGS", /* r0 - r15 */ \
+ "GENERAL_REGS", /* r0 - r31 */ \
+ "ALL_REGS" }
+
+#define REG_CLASS_CONTENTS { \
+ {0x00000000,0x00000000}, /* NO_REGS */ \
+ {0x00000001,0x00000000}, /* R0_REG */ \
+ {3 << REG_X,0x00000000}, /* POINTER_X_REGS, r26 - r27 */ \
+ {3 << REG_Y,0x00000000}, /* POINTER_Y_REGS, r28 - r29 */ \
+ {3 << REG_Z,0x00000000}, /* POINTER_Z_REGS, r30 - r31 */ \
+ {0x00000000,0x00000003}, /* STACK_REG, STACK */ \
+ {(3 << REG_Y) | (3 << REG_Z), \
+ 0x00000000}, /* BASE_POINTER_REGS, r28 - r31 */ \
+ {(3 << REG_X) | (3 << REG_Y) | (3 << REG_Z), \
+ 0x00000000}, /* POINTER_REGS, r26 - r31 */ \
+ {(3 << REG_X) | (3 << REG_Y) | (3 << REG_Z) | (3 << REG_W), \
+ 0x00000000}, /* ADDW_REGS, r24 - r31 */ \
+ {0x00ff0000,0x00000000}, /* SIMPLE_LD_REGS r16 - r23 */ \
+ {(3 << REG_X)|(3 << REG_Y)|(3 << REG_Z)|(3 << REG_W)|(0xff << 16), \
+ 0x00000000}, /* LD_REGS, r16 - r31 */ \
+ {0x0000ffff,0x00000000}, /* NO_LD_REGS r0 - r15 */ \
+ {0xffffffff,0x00000000}, /* GENERAL_REGS, r0 - r31 */ \
+ {0xffffffff,0x00000003} /* ALL_REGS */ \
+}
+
+#define REGNO_REG_CLASS(R) avr_regno_reg_class(R)
+
+#define MODE_CODE_BASE_REG_CLASS(mode, as, outer_code, index_code) \
+ avr_mode_code_base_reg_class (mode, as, outer_code, index_code)
+
+#define INDEX_REG_CLASS NO_REGS
+
+#define REGNO_MODE_CODE_OK_FOR_BASE_P(num, mode, as, outer_code, index_code) \
+ avr_regno_mode_code_ok_for_base_p (num, mode, as, outer_code, index_code)
+
+#define REGNO_OK_FOR_INDEX_P(NUM) 0
+
+#define HARD_REGNO_CALL_PART_CLOBBERED(REGNO, MODE) \
+ avr_hard_regno_call_part_clobbered (REGNO, MODE)
+
+#define TARGET_SMALL_REGISTER_CLASSES_FOR_MODE_P hook_bool_mode_true
+
+#define STACK_PUSH_CODE POST_DEC
+
+#define STACK_GROWS_DOWNWARD
+
+#define STARTING_FRAME_OFFSET avr_starting_frame_offset()
+
+#define STACK_POINTER_OFFSET 1
+
+#define FIRST_PARM_OFFSET(FUNDECL) 0
+
+#define STACK_BOUNDARY 8
+
+#define STACK_POINTER_REGNUM 32
+
+#define FRAME_POINTER_REGNUM REG_Y
+
+#define ARG_POINTER_REGNUM 34
+
+#define STATIC_CHAIN_REGNUM 2
+
+#define ELIMINABLE_REGS { \
+ {ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM} \
+ ,{FRAME_POINTER_REGNUM+1,STACK_POINTER_REGNUM+1}}
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ OFFSET = avr_initial_elimination_offset (FROM, TO)
+
+#define RETURN_ADDR_RTX(count, tem) avr_return_addr_rtx (count, tem)
+
+/* Don't use push rounding: expr.c's emit_single_push_insn is broken
+ for POST_DEC targets (PR27386). */
+/*#define PUSH_ROUNDING(NPUSHED) (NPUSHED)*/
+
+typedef struct avr_args
+{
+ /* # Registers available for passing */
+ int nregs;
+
+ /* Next available register number */
+ int regno;
+} CUMULATIVE_ARGS;
+
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
+ avr_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL)
+
+#define FUNCTION_ARG_REGNO_P(r) avr_function_arg_regno_p(r)
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define EPILOGUE_USES(REGNO) avr_epilogue_uses(REGNO)
+
+#define HAVE_POST_INCREMENT 1
+#define HAVE_PRE_DECREMENT 1
+
+#define MAX_REGS_PER_ADDRESS 1
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_L,WIN) \
+ do { \
+ rtx new_x = avr_legitimize_reload_address (&(X), MODE, OPNUM, TYPE, \
+ ADDR_TYPE (TYPE), \
+ IND_L, make_memloc); \
+ if (new_x) \
+ { \
+ X = new_x; \
+ goto WIN; \
+ } \
+ } while (0)
+
+#define BRANCH_COST(speed_p, predictable_p) avr_branch_cost
+
+#define SLOW_BYTE_ACCESS 0
+
+#define NO_FUNCTION_CSE
+
+#define REGISTER_TARGET_PRAGMAS() \
+ do { \
+ avr_register_target_pragmas(); \
+ } while (0)
+
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+#define DATA_SECTION_ASM_OP "\t.data"
+
+#define BSS_SECTION_ASM_OP "\t.section .bss"
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+ There are no shared libraries on this target, and these sections are
+ placed in the read-only program memory, so they are not writable. */
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP "\t.section .ctors,\"a\",@progbits"
+
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP "\t.section .dtors,\"a\",@progbits"
+
+#define TARGET_ASM_CONSTRUCTOR avr_asm_out_ctor
+
+#define TARGET_ASM_DESTRUCTOR avr_asm_out_dtor
+
+#define SUPPORTS_INIT_PRIORITY 0
+
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+#define ASM_COMMENT_START " ; "
+
+#define ASM_APP_ON "/* #APP */\n"
+
+#define ASM_APP_OFF "/* #NOAPP */\n"
+
+#define IS_ASM_LOGICAL_LINE_SEPARATOR(C, STR) ((C) == '\n' || ((C) == '$'))
+
+#define ASM_OUTPUT_ALIGNED_DECL_COMMON(STREAM, DECL, NAME, SIZE, ALIGN) \
+ avr_asm_output_aligned_decl_common (STREAM, DECL, NAME, SIZE, ALIGN, false)
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(STREAM, DECL, NAME, SIZE, ALIGN) \
+ avr_asm_output_aligned_decl_common (STREAM, DECL, NAME, SIZE, ALIGN, true)
+
+/* Globalizing directive for a label. */
+#define GLOBAL_ASM_OP ".global\t"
+
+#define SUPPORTS_WEAK 1
+
+#define HAS_INIT_SECTION 1
+
+#define REGISTER_NAMES { \
+ "r0","r1","r2","r3","r4","r5","r6","r7", \
+ "r8","r9","r10","r11","r12","r13","r14","r15", \
+ "r16","r17","r18","r19","r20","r21","r22","r23", \
+ "r24","r25","r26","r27","r28","r29","r30","r31", \
+ "__SP_L__","__SP_H__","argL","argH"}
+
+#define FINAL_PRESCAN_INSN(insn, operand, nop) \
+ avr_final_prescan_insn (insn, operand,nop)
+
+#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \
+{ \
+ gcc_assert (REGNO < 32); \
+ fprintf (STREAM, "\tpush\tr%d", REGNO); \
+}
+
+#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \
+{ \
+ gcc_assert (REGNO < 32); \
+ fprintf (STREAM, "\tpop\tr%d", REGNO); \
+}
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(STREAM, VALUE) \
+ avr_output_addr_vec_elt (STREAM, VALUE)
+
+#define ASM_OUTPUT_ALIGN(STREAM, POWER) \
+ do { \
+ if ((POWER) > 0) \
+ fprintf (STREAM, "\t.p2align\t%d\n", POWER); \
+ } while (0)
+
+#define CASE_VECTOR_MODE HImode
+
+#undef WORD_REGISTER_OPERATIONS
+
+#define MOVE_MAX 4
+
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+#define Pmode HImode
+
+#define FUNCTION_MODE HImode
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+#define TRAMPOLINE_SIZE 4
+
+/* Store in cc_status the expressions
+ that the condition codes will describe
+ after execution of an instruction whose pattern is EXP.
+ Do not alter them if the instruction would not alter the cc's. */
+
+#define NOTICE_UPDATE_CC(EXP, INSN) avr_notice_update_cc (EXP, INSN)
+
+/* The add insns don't set overflow in a usable way. */
+#define CC_OVERFLOW_UNUSABLE 01000
+/* The mov,and,or,xor insns don't set carry. That's ok though as the
+ Z bit is all we need when doing unsigned comparisons on the result of
+ these insns (since they're always with 0). However, conditions.h has
+ CC_NO_OVERFLOW defined for this purpose. Rename it to something more
+ understandable. */
+#define CC_NO_CARRY CC_NO_OVERFLOW
+
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "/* profiler %d */", (LABELNO))
+
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
+ (LENGTH = avr_adjust_insn_length (INSN, LENGTH))
+
+extern const char *avr_device_to_as (int argc, const char **argv);
+extern const char *avr_device_to_ld (int argc, const char **argv);
+extern const char *avr_device_to_data_start (int argc, const char **argv);
+extern const char *avr_device_to_startfiles (int argc, const char **argv);
+extern const char *avr_device_to_devicelib (int argc, const char **argv);
+extern const char *avr_device_to_sp8 (int argc, const char **argv);
+
+#define EXTRA_SPEC_FUNCTIONS \
+ { "device_to_as", avr_device_to_as }, \
+ { "device_to_ld", avr_device_to_ld }, \
+ { "device_to_data_start", avr_device_to_data_start }, \
+ { "device_to_startfile", avr_device_to_startfiles }, \
+ { "device_to_devicelib", avr_device_to_devicelib }, \
+ { "device_to_sp8", avr_device_to_sp8 },
+
+#define DRIVER_SELF_SPECS " %:device_to_sp8(%{mmcu=*:%*}) "
+#define CPP_SPEC ""
+
+#define CC1_SPEC ""
+
+#define CC1PLUS_SPEC "%{!frtti:-fno-rtti} \
+ %{!fenforce-eh-specs:-fno-enforce-eh-specs} \
+ %{!fexceptions:-fno-exceptions}"
+
+#define ASM_SPEC "%:device_to_as(%{mmcu=*:%*}) "
+
+#define LINK_SPEC "\
+%{mrelax:--relax\
+ %{mpmem-wrap-around:%{mmcu=at90usb8*:--pmem-wrap-around=8k}\
+ %{mmcu=atmega16*:--pmem-wrap-around=16k}\
+ %{mmcu=atmega32*|\
+ mmcu=at90can32*:--pmem-wrap-around=32k}\
+ %{mmcu=atmega64*|\
+ mmcu=at90can64*|\
+ mmcu=at90usb64*:--pmem-wrap-around=64k}}}\
+%:device_to_ld(%{mmcu=*:%*})\
+%:device_to_data_start(%{mmcu=*:%*})\
+%{shared:%eshared is not supported}"
+
+#define LIB_SPEC \
+ "%{!mmcu=at90s1*:%{!mmcu=attiny11:%{!mmcu=attiny12:%{!mmcu=attiny15:%{!mmcu=attiny28: -lc }}}}}"
+
+#define LIBSTDCXX "gcc"
+/* No libstdc++ for now. Empty string doesn't work. */
+
+#define LIBGCC_SPEC \
+ "%{!mmcu=at90s1*:%{!mmcu=attiny11:%{!mmcu=attiny12:%{!mmcu=attiny15:%{!mmcu=attiny28: -lgcc }}}}}"
+
+#define STARTFILE_SPEC "%:device_to_startfile(%{mmcu=*:%*})"
+
+#define ENDFILE_SPEC ""
+
+/* This is the default without any -mmcu=* option (AT90S*). */
+#define MULTILIB_DEFAULTS { "mmcu=avr2" }
+
+#define TEST_HARD_REG_CLASS(CLASS, REGNO) \
+ TEST_HARD_REG_BIT (reg_class_contents[ (int) (CLASS)], REGNO)
+
+#define CR_TAB "\n\t"
+
+#define DWARF2_ADDR_SIZE 4
+
+#define INCOMING_RETURN_ADDR_RTX avr_incoming_return_addr_rtx ()
+#define INCOMING_FRAME_SP_OFFSET (AVR_3_BYTE_PC ? 3 : 2)
+
+/* The caller's stack pointer value immediately before the call
+ is one byte below the first argument. */
+#define ARG_POINTER_CFA_OFFSET(FNDECL) -1
+
+#define HARD_REGNO_RENAME_OK(OLD_REG, NEW_REG) \
+ avr_hard_regno_rename_ok (OLD_REG, NEW_REG)
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+struct GTY(()) machine_function
+{
+ /* 'true' - if current function is a naked function. */
+ int is_naked;
+
+ /* 'true' - if current function is an interrupt function
+ as specified by the "interrupt" attribute. */
+ int is_interrupt;
+
+ /* 'true' - if current function is a signal function
+ as specified by the "signal" attribute. */
+ int is_signal;
+
+ /* 'true' - if current function is a 'task' function
+ as specified by the "OS_task" attribute. */
+ int is_OS_task;
+
+ /* 'true' - if current function is a 'main' function
+ as specified by the "OS_main" attribute. */
+ int is_OS_main;
+
+ /* Current function stack size. */
+ int stack_usage;
+
+ /* 'true' if a callee might be tail called */
+ int sibcall_fails;
+
+  /* 'true' if the above is_foo predicates are sanity-checked to avoid
+     multiple diagnostics for the same function.  */
+ int attributes_checked_p;
+};
+
+/* AVR does not round pushes, but the existence of this macro is
+ required in order for pushes to be generated. */
+#define PUSH_ROUNDING(X) (X)
+
+/* Define prototype here to avoid build warning. Some files using
+ ACCUMULATE_OUTGOING_ARGS (directly or indirectly) include
+ tm.h but not tm_p.h. */
+extern int avr_accumulate_outgoing_args (void);
+#define ACCUMULATE_OUTGOING_ARGS avr_accumulate_outgoing_args()
+
+#define INIT_EXPANDERS avr_init_expanders()
diff --git a/gcc-4.9/gcc/config/avr/avr.md b/gcc-4.9/gcc/config/avr/avr.md
new file mode 100644
index 000000000..f2d8605cd
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr.md
@@ -0,0 +1,6358 @@
+;; Machine description for GNU compiler,
+;; for ATMEL AVR micro controllers.
+;; Copyright (C) 1998-2014 Free Software Foundation, Inc.
+;; Contributed by Denis Chertykov (chertykov@gmail.com)
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Special characters after '%':
+;; A No effect (add 0).
+;; B Add 1 to REG number, MEM address or CONST_INT.
+;; C Add 2.
+;; D Add 3.
+;; j Branch condition.
+;; k Reverse branch condition.
+;;..m..Constant Direct Data memory address.
+;;  i  Print the SFR address equivalent of a CONST_INT or a CONST_INT
+;; RAM address. The resulting address is suitable to be used in IN/OUT.
+;; o Displacement for (mem (plus (reg) (const_int))) operands.
+;; p POST_INC or PRE_DEC address as a pointer (X, Y, Z)
+;; r POST_INC or PRE_DEC address as a register (r26, r28, r30)
+;; r Print a REG without the register prefix 'r'.
+;; T/T Print operand suitable for BLD/BST instruction, i.e. register and
+;; bit number. This gets 2 operands: The first %T gets a REG_P and
+;;     just caches the operand for the next %T.  The second %T gets
+;; a CONST_INT that represents a bit position.
+;; Example: With %0 = (reg:HI 18) and %1 = (const_int 13)
+;; "%T0%T1" it will print "r19,5".
+;; Notice that you must not write a comma between %T0 and %T1.
+;; T/t Similar to above, but don't print the comma and the bit number.
+;; Example: With %0 = (reg:HI 18) and %1 = (const_int 13)
+;; "%T0%t1" it will print "r19".
+;;..x..Constant Direct Program memory address.
+;; ~ Output 'r' if not AVR_HAVE_JMP_CALL.
+;; ! Output 'e' if AVR_HAVE_EIJMP_EICALL.
+
+
+(define_constants
+ [(REG_X 26)
+ (REG_Y 28)
+ (REG_Z 30)
+ (REG_W 24)
+ (REG_SP 32)
+ (LPM_REGNO 0) ; implicit target register of LPM
+ (TMP_REGNO 0) ; temporary register r0
+ (ZERO_REGNO 1) ; zero register r1
+ ])
+
+(define_c_enum "unspec"
+ [UNSPEC_STRLEN
+ UNSPEC_MOVMEM
+ UNSPEC_INDEX_JMP
+ UNSPEC_FMUL
+ UNSPEC_FMULS
+ UNSPEC_FMULSU
+ UNSPEC_COPYSIGN
+ UNSPEC_IDENTITY
+ UNSPEC_INSERT_BITS
+ UNSPEC_ROUND
+ ])
+
+(define_c_enum "unspecv"
+ [UNSPECV_PROLOGUE_SAVES
+ UNSPECV_EPILOGUE_RESTORES
+ UNSPECV_WRITE_SP
+ UNSPECV_GOTO_RECEIVER
+ UNSPECV_ENABLE_IRQS
+ UNSPECV_MEMORY_BARRIER
+ UNSPECV_NOP
+ UNSPECV_SLEEP
+ UNSPECV_WDR
+ UNSPECV_DELAY_CYCLES
+ ])
+
+
+(include "predicates.md")
+(include "constraints.md")
+
+;; Condition code settings.
+(define_attr "cc" "none,set_czn,set_zn,set_n,compare,clobber,
+ plus,ldi"
+ (const_string "none"))
+
+(define_attr "type" "branch,branch1,arith,xcall"
+ (const_string "arith"))
+
+;; The size of instructions in bytes.
+;; XXX may depend on "cc"
+
+(define_attr "length" ""
+ (cond [(eq_attr "type" "branch")
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -63))
+ (le (minus (pc) (match_dup 0))
+ (const_int 62)))
+ (const_int 1)
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -2045))
+ (le (minus (pc) (match_dup 0))
+ (const_int 2045)))
+ (const_int 2)
+ (const_int 3)))
+ (eq_attr "type" "branch1")
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -62))
+ (le (minus (pc) (match_dup 0))
+ (const_int 61)))
+ (const_int 2)
+ (if_then_else (and (ge (minus (pc) (match_dup 0))
+ (const_int -2044))
+ (le (minus (pc) (match_dup 0))
+ (const_int 2043)))
+ (const_int 3)
+ (const_int 4)))
+ (eq_attr "type" "xcall")
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 1)
+ (const_int 2))]
+ (const_int 2)))
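+
+;; Illustration, taking a conditional branch like BREQ: within -63...+62
+;; words the branch is 1 word (direct BREQ); within roughly +-2K words it
+;; is 2 words (inverted BRNE skipping an RJMP); otherwise it is 3 words
+;; (inverted BRNE skipping a 2-word JMP).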
+
+;; Lengths of several insns are adjusted in avr.c:adjust_insn_length().
+;; Following insn attribute tells if and how the adjustment has to be
+;; done:
+;; no No adjustment needed; attribute "length" is fine.
+;; Otherwise do special processing depending on the attribute.
+
+(define_attr "adjust_len"
+ "out_bitop, plus, addto_sp,
+ tsthi, tstpsi, tstsi, compare, compare64, call,
+ mov8, mov16, mov24, mov32, reload_in16, reload_in24, reload_in32,
+ ufract, sfract, round,
+ xload, lpm, movmem,
+ ashlqi, ashrqi, lshrqi,
+ ashlhi, ashrhi, lshrhi,
+ ashlsi, ashrsi, lshrsi,
+ ashlpsi, ashrpsi, lshrpsi,
+ insert_bits,
+ no"
+ (const_string "no"))
+
+;; Flavours of instruction set architecture (ISA), used in enabled attribute
+
+;; mov : ISA has no MOVW movw : ISA has MOVW
+;; rjmp : ISA has no CALL/JMP jmp : ISA has CALL/JMP
+;; ijmp : ISA has no EICALL/EIJMP eijmp : ISA has EICALL/EIJMP
+;; lpm : ISA has no LPMX lpmx : ISA has LPMX
+;; elpm : ISA has ELPM but no ELPMX elpmx : ISA has ELPMX
+;; no_xmega: non-XMEGA core xmega : XMEGA core
+
+(define_attr "isa"
+ "mov,movw, rjmp,jmp, ijmp,eijmp, lpm,lpmx, elpm,elpmx, no_xmega,xmega,
+ standard"
+ (const_string "standard"))
+
+(define_attr "enabled" ""
+ (cond [(eq_attr "isa" "standard")
+ (const_int 1)
+
+ (and (eq_attr "isa" "mov")
+ (match_test "!AVR_HAVE_MOVW"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "movw")
+ (match_test "AVR_HAVE_MOVW"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "rjmp")
+ (match_test "!AVR_HAVE_JMP_CALL"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "jmp")
+ (match_test "AVR_HAVE_JMP_CALL"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "ijmp")
+ (match_test "!AVR_HAVE_EIJMP_EICALL"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "eijmp")
+ (match_test "AVR_HAVE_EIJMP_EICALL"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "lpm")
+ (match_test "!AVR_HAVE_LPMX"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "lpmx")
+ (match_test "AVR_HAVE_LPMX"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "elpm")
+ (match_test "AVR_HAVE_ELPM && !AVR_HAVE_ELPMX"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "elpmx")
+ (match_test "AVR_HAVE_ELPMX"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "xmega")
+ (match_test "AVR_XMEGA"))
+ (const_int 1)
+
+ (and (eq_attr "isa" "no_xmega")
+ (match_test "!AVR_XMEGA"))
+ (const_int 1)
+ ] (const_int 0)))
+
+
+;; Define mode iterators
+(define_mode_iterator QIHI [QI HI])
+(define_mode_iterator QIHI2 [QI HI])
+(define_mode_iterator QISI [QI HI PSI SI])
+(define_mode_iterator QIDI [QI HI PSI SI DI])
+(define_mode_iterator HISI [HI PSI SI])
+
+(define_mode_iterator ALL1 [QI QQ UQQ])
+(define_mode_iterator ALL2 [HI HQ UHQ HA UHA])
+(define_mode_iterator ALL4 [SI SQ USQ SA USA])
+
+;; All supported move-modes
+(define_mode_iterator MOVMODE [QI QQ UQQ
+ HI HQ UHQ HA UHA
+ SI SQ USQ SA USA
+ SF PSI])
+
+;; Supported ordered modes that are 2, 3, 4 bytes wide
+(define_mode_iterator ORDERED234 [HI SI PSI
+ HQ UHQ HA UHA
+ SQ USQ SA USA])
+
+;; Define code iterators
+;; Define two incarnations so that we can build the cross product.
+(define_code_iterator any_extend [sign_extend zero_extend])
+(define_code_iterator any_extend2 [sign_extend zero_extend])
+
+(define_code_iterator xior [xor ior])
+(define_code_iterator eqne [eq ne])
+
+(define_code_iterator ss_addsub [ss_plus ss_minus])
+(define_code_iterator us_addsub [us_plus us_minus])
+(define_code_iterator ss_abs_neg [ss_abs ss_neg])
+
+;; Define code attributes
+(define_code_attr extend_su
+ [(sign_extend "s")
+ (zero_extend "u")])
+
+(define_code_attr extend_u
+ [(sign_extend "")
+ (zero_extend "u")])
+
+(define_code_attr extend_s
+ [(sign_extend "s")
+ (zero_extend "")])
+
+;; Constrain input operand of widening multiply, i.e. MUL resp. MULS.
+(define_code_attr mul_r_d
+ [(zero_extend "r")
+ (sign_extend "d")])
+
+(define_code_attr abelian
+ [(ss_minus "") (us_minus "")
+ (ss_plus "%") (us_plus "%")])
+
+;; Map RTX code to its standard insn name
+(define_code_attr code_stdname
+ [(ashift "ashl")
+ (ashiftrt "ashr")
+ (lshiftrt "lshr")
+ (ior "ior")
+ (xor "xor")
+ (rotate "rotl")
+ (ss_plus "ssadd") (ss_minus "sssub") (ss_neg "ssneg") (ss_abs "ssabs")
+ (us_plus "usadd") (us_minus "ussub") (us_neg "usneg")
+ ])
+
+;;========================================================================
+;; The following is used by nonlocal_goto and setjmp.
+;; The receiver pattern will create no instructions since internally
+;; virtual_stack_vars = hard_frame_pointer + 1, so the RTL becomes R28 = R28.
+;; This avoids creating add/sub offsets in frame_pointer save/restore.
+;; The 'null' receiver also avoids problems with the optimizers
+;; not recognizing the incoming jump and removing code that resets frame_pointer.
+;; The code is derived from builtins.c.
+
+(define_expand "nonlocal_goto_receiver"
+ [(set (reg:HI REG_Y)
+ (unspec_volatile:HI [(const_int 0)] UNSPECV_GOTO_RECEIVER))]
+ ""
+ {
+ emit_move_insn (virtual_stack_vars_rtx,
+ gen_rtx_PLUS (Pmode, hard_frame_pointer_rtx,
+ gen_int_mode (STARTING_FRAME_OFFSET,
+ Pmode)));
+ /* ; This might change the hard frame pointer in ways that aren't
+ ; apparent to early optimization passes, so force a clobber. */
+ emit_clobber (hard_frame_pointer_rtx);
+ DONE;
+ })
+
+
+;; Defining nonlocal_goto_receiver means we must also define this,
+;; even though its function is identical to that in builtins.c.
+
+(define_expand "nonlocal_goto"
+ [(use (match_operand 0 "general_operand"))
+ (use (match_operand 1 "general_operand"))
+ (use (match_operand 2 "general_operand"))
+ (use (match_operand 3 "general_operand"))]
+ ""
+ {
+ rtx r_label = copy_to_reg (operands[1]);
+ rtx r_fp = operands[3];
+ rtx r_sp = operands[2];
+
+ emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
+
+ emit_clobber (gen_rtx_MEM (BLKmode, hard_frame_pointer_rtx));
+
+ emit_move_insn (hard_frame_pointer_rtx, r_fp);
+ emit_stack_restore (SAVE_NONLOCAL, r_sp);
+
+ emit_use (hard_frame_pointer_rtx);
+ emit_use (stack_pointer_rtx);
+
+ emit_indirect_jump (r_label);
+
+ DONE;
+ })
+
+;; "pushqi1"
+;; "pushqq1" "pushuqq1"
+(define_insn "push<mode>1"
+ [(set (mem:ALL1 (post_dec:HI (reg:HI REG_SP)))
+ (match_operand:ALL1 0 "reg_or_0_operand" "r,Y00"))]
+ ""
+ "@
+ push %0
+ push __zero_reg__"
+ [(set_attr "length" "1,1")])
+
+;; All modes for a multi-byte push. We must include complex modes here too,
+;; lest emit_single_push_insn "helpfully" create the auto-inc itself.
+(define_mode_iterator MPUSH
+ [CQI
+ HI CHI HA UHA HQ UHQ
+ SI CSI SA USA SQ USQ
+ DI CDI DA UDA DQ UDQ
+ TA UTA
+ SF SC
+ PSI])
+
+(define_expand "push<mode>1"
+ [(match_operand:MPUSH 0 "" "")]
+ ""
+ {
+ int i;
+ for (i = GET_MODE_SIZE (<MODE>mode) - 1; i >= 0; --i)
+ {
+ rtx part = simplify_gen_subreg (QImode, operands[0], <MODE>mode, i);
+ if (part != const0_rtx)
+ part = force_reg (QImode, part);
+ emit_insn (gen_pushqi1 (part));
+ }
+ DONE;
+ })
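+
+;; Illustration: pushing an SImode value thus expands to four single-byte
+;; pushes, most significant byte first (the loop counts down from
+;; GET_MODE_SIZE - 1), and known-zero bytes are pushed directly from
+;; __zero_reg__ via the Y00 alternative of pushqi1.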
+
+;; Notice a special-case when adding N to SP where N results in a
+;; zero REG_ARGS_SIZE. This is equivalent to a move from FP.
+(define_split
+ [(set (reg:HI REG_SP)
+ (match_operand:HI 0 "register_operand" ""))]
+ "reload_completed
+ && frame_pointer_needed
+ && !cfun->calls_alloca
+ && find_reg_note (insn, REG_ARGS_SIZE, const0_rtx)"
+ [(set (reg:HI REG_SP)
+ (reg:HI REG_Y))])
+
+;;========================================================================
+;; Move stuff around
+
+;; Secondary input reload from non-generic 16-bit address spaces
+(define_insn "reload_in<mode>"
+ [(set (match_operand:MOVMODE 0 "register_operand" "=r")
+ (match_operand:MOVMODE 1 "flash_operand" "m"))
+ (clobber (match_operand:QI 2 "d_register_operand" "=d"))]
+  ;; FIXME: The insn condition must not test the address space,
+  ;;   because the gen tools refuse to generate insns for address spaces
+  ;;   and would generate insn-codes.h to look like:
+ ;; #define CODE_FOR_reload_inhi CODE_FOR_nothing
+ "reload_completed || reload_in_progress"
+ {
+ return avr_out_lpm (insn, operands, NULL);
+ }
+ [(set_attr "adjust_len" "lpm")
+ (set_attr "cc" "clobber")])
+
+
+;; "loadqi_libgcc"
+;; "loadhi_libgcc"
+;; "loadpsi_libgcc"
+;; "loadsi_libgcc"
+;; "loadsf_libgcc"
+(define_expand "load<mode>_libgcc"
+ [(set (match_dup 3)
+ (match_dup 2))
+ (set (reg:MOVMODE 22)
+ (match_operand:MOVMODE 1 "memory_operand" ""))
+ (set (match_operand:MOVMODE 0 "register_operand" "")
+ (reg:MOVMODE 22))]
+ "avr_load_libgcc_p (operands[1])"
+ {
+ operands[3] = gen_rtx_REG (HImode, REG_Z);
+ operands[2] = force_operand (XEXP (operands[1], 0), NULL_RTX);
+ operands[1] = replace_equiv_address (operands[1], operands[3]);
+ set_mem_addr_space (operands[1], ADDR_SPACE_FLASH);
+ })
+
+;; "load_qi_libgcc"
+;; "load_hi_libgcc"
+;; "load_psi_libgcc"
+;; "load_si_libgcc"
+;; "load_sf_libgcc"
+(define_insn "load_<mode>_libgcc"
+ [(set (reg:MOVMODE 22)
+ (match_operand:MOVMODE 0 "memory_operand" "m,m"))]
+ "avr_load_libgcc_p (operands[0])
+ && REG_P (XEXP (operands[0], 0))
+ && REG_Z == REGNO (XEXP (operands[0], 0))"
+ {
+ operands[0] = GEN_INT (GET_MODE_SIZE (<MODE>mode));
+ return "%~call __load_%0";
+ }
+ [(set_attr "length" "1,2")
+ (set_attr "isa" "rjmp,jmp")
+ (set_attr "cc" "clobber")])
+
+
+;; "xload8qi_A"
+;; "xload8qq_A" "xload8uqq_A"
+(define_insn_and_split "xload8<mode>_A"
+ [(set (match_operand:ALL1 0 "register_operand" "=r")
+ (match_operand:ALL1 1 "memory_operand" "m"))
+ (clobber (reg:HI REG_Z))]
+ "can_create_pseudo_p()
+ && !avr_xload_libgcc_p (<MODE>mode)
+ && avr_mem_memx_p (operands[1])
+ && REG_P (XEXP (operands[1], 0))"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(clobber (const_int 0))]
+ {
+ /* ; Split away the high part of the address. GCC's register allocator
+       ; is not able to allocate segment registers and reload the resulting
+ ; expressions. Notice that no address register can hold a PSImode. */
+
+ rtx insn, addr = XEXP (operands[1], 0);
+ rtx hi8 = gen_reg_rtx (QImode);
+ rtx reg_z = gen_rtx_REG (HImode, REG_Z);
+
+ emit_move_insn (reg_z, simplify_gen_subreg (HImode, addr, PSImode, 0));
+ emit_move_insn (hi8, simplify_gen_subreg (QImode, addr, PSImode, 2));
+
+ insn = emit_insn (gen_xload<mode>_8 (operands[0], hi8));
+ set_mem_addr_space (SET_SRC (single_set (insn)),
+ MEM_ADDR_SPACE (operands[1]));
+ DONE;
+ })
+
+;; "xloadqi_A" "xloadqq_A" "xloaduqq_A"
+;; "xloadhi_A" "xloadhq_A" "xloaduhq_A" "xloadha_A" "xloaduha_A"
+;; "xloadsi_A" "xloadsq_A" "xloadusq_A" "xloadsa_A" "xloadusa_A"
+;; "xloadpsi_A"
+;; "xloadsf_A"
+(define_insn_and_split "xload<mode>_A"
+ [(set (match_operand:MOVMODE 0 "register_operand" "=r")
+ (match_operand:MOVMODE 1 "memory_operand" "m"))
+ (clobber (reg:MOVMODE 22))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI REG_Z))]
+ "can_create_pseudo_p()
+ && avr_mem_memx_p (operands[1])
+ && REG_P (XEXP (operands[1], 0))"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(clobber (const_int 0))]
+ {
+ rtx addr = XEXP (operands[1], 0);
+ rtx reg_z = gen_rtx_REG (HImode, REG_Z);
+ rtx addr_hi8 = simplify_gen_subreg (QImode, addr, PSImode, 2);
+ addr_space_t as = MEM_ADDR_SPACE (operands[1]);
+ rtx insn;
+
+ /* Split the address to R21:Z */
+ emit_move_insn (reg_z, simplify_gen_subreg (HImode, addr, PSImode, 0));
+ emit_move_insn (gen_rtx_REG (QImode, 21), addr_hi8);
+
+ /* Load with code from libgcc */
+ insn = emit_insn (gen_xload_<mode>_libgcc ());
+ set_mem_addr_space (SET_SRC (single_set (insn)), as);
+
+ /* Move to destination */
+ emit_move_insn (operands[0], gen_rtx_REG (<MODE>mode, 22));
+
+ DONE;
+ })
+
+;; Move value from address space memx to a register
+;; These insns must be prior to respective generic move insn.
+
+;; "xloadqi_8"
+;; "xloadqq_8" "xloaduqq_8"
+(define_insn "xload<mode>_8"
+ [(set (match_operand:ALL1 0 "register_operand" "=&r,r")
+ (mem:ALL1 (lo_sum:PSI (match_operand:QI 1 "register_operand" "r,r")
+ (reg:HI REG_Z))))]
+ "!avr_xload_libgcc_p (<MODE>mode)"
+ {
+ return avr_out_xload (insn, operands, NULL);
+ }
+ [(set_attr "length" "4,4")
+ (set_attr "adjust_len" "*,xload")
+ (set_attr "isa" "lpmx,lpm")
+ (set_attr "cc" "none")])
+
+;; R21:Z : 24-bit source address
+;; R22 : 1-4 byte output
+
+;; "xload_qi_libgcc" "xload_qq_libgcc" "xload_uqq_libgcc"
+;; "xload_hi_libgcc" "xload_hq_libgcc" "xload_uhq_libgcc" "xload_ha_libgcc" "xload_uha_libgcc"
+;; "xload_si_libgcc" "xload_sq_libgcc" "xload_usq_libgcc" "xload_sa_libgcc" "xload_usa_libgcc"
+;; "xload_sf_libgcc"
+;; "xload_psi_libgcc"
+(define_insn "xload_<mode>_libgcc"
+ [(set (reg:MOVMODE 22)
+ (mem:MOVMODE (lo_sum:PSI (reg:QI 21)
+ (reg:HI REG_Z))))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI REG_Z))]
+ "avr_xload_libgcc_p (<MODE>mode)"
+ {
+ rtx x_bytes = GEN_INT (GET_MODE_SIZE (<MODE>mode));
+
+ output_asm_insn ("%~call __xload_%0", &x_bytes);
+ return "";
+ }
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; General move expanders
+
+;; "movqi" "movqq" "movuqq"
+;; "movhi" "movhq" "movuhq" "movha" "movuha"
+;; "movsi" "movsq" "movusq" "movsa" "movusa"
+;; "movsf"
+;; "movpsi"
+(define_expand "mov<mode>"
+ [(set (match_operand:MOVMODE 0 "nonimmediate_operand" "")
+ (match_operand:MOVMODE 1 "general_operand" ""))]
+ ""
+ {
+ rtx dest = operands[0];
+ rtx src = operands[1];
+
+ if (avr_mem_flash_p (dest))
+ DONE;
+
+ /* One of the operands has to be in a register. */
+ if (!register_operand (dest, <MODE>mode)
+ && !reg_or_0_operand (src, <MODE>mode))
+ {
+ operands[1] = src = copy_to_mode_reg (<MODE>mode, src);
+ }
+
+ if (avr_mem_memx_p (src))
+ {
+ rtx addr = XEXP (src, 0);
+
+ if (!REG_P (addr))
+ src = replace_equiv_address (src, copy_to_mode_reg (PSImode, addr));
+
+ if (!avr_xload_libgcc_p (<MODE>mode))
+ /* ; No <mode> here because gen_xload8<mode>_A only iterates over ALL1.
+ ; insn-emit does not depend on the mode, it's all about operands. */
+ emit_insn (gen_xload8qi_A (dest, src));
+ else
+ emit_insn (gen_xload<mode>_A (dest, src));
+
+ DONE;
+ }
+
+ if (avr_load_libgcc_p (src))
+ {
+ /* For the small devices, do loads per libgcc call. */
+ emit_insn (gen_load<mode>_libgcc (dest, src));
+ DONE;
+ }
+ })
+
+;;========================================================================
+;; move byte
+;; The last alternative (any immediate constant to any register) is
+;; very expensive. It should be optimized by peephole2 if a scratch
+;; register is available, but then that register could just as well be
+;; allocated for the variable we are loading. But, most of NO_LD_REGS
+;; are call-saved registers, and most of LD_REGS are call-used registers,
+;; so this may still be a win for registers live across function calls.
+
+;; "movqi_insn"
+;; "movqq_insn" "movuqq_insn"
+(define_insn "mov<mode>_insn"
+ [(set (match_operand:ALL1 0 "nonimmediate_operand" "=r ,d ,Qm ,r ,q,r,*r")
+ (match_operand:ALL1 1 "nox_general_operand" "r Y00,n Ynn,r Y00,Qm,r,q,i"))]
+ "register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode)"
+ {
+ return output_movqi (insn, operands, NULL);
+ }
+ [(set_attr "length" "1,1,5,5,1,1,4")
+ (set_attr "adjust_len" "mov8")
+ (set_attr "cc" "ldi,none,clobber,clobber,none,none,clobber")])
+
+;; This is used in peephole2 to optimize loading immediate constants
+;; if a scratch register from LD_REGS happens to be available.
+
+;; "*reload_inqi"
+;; "*reload_inqq" "*reload_inuqq"
+(define_insn "*reload_in<mode>"
+ [(set (match_operand:ALL1 0 "register_operand" "=l")
+ (match_operand:ALL1 1 "const_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ "ldi %2,lo8(%1)
+ mov %0,%2"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+(define_peephole2
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:ALL1 0 "l_register_operand" "")
+ (match_operand:ALL1 1 "const_operand" ""))]
+ ; No need for a clobber reg for 0x0, 0x01 or 0xff
+ "!satisfies_constraint_Y00 (operands[1])
+ && !satisfies_constraint_Y01 (operands[1])
+ && !satisfies_constraint_Ym1 (operands[1])"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (match_dup 2))])])
+
+;;============================================================================
+;; move word (16 bit)
+
+;; Move register $1 to the Stack Pointer register SP.
+;; This insn is emitted during function prologue/epilogue generation.
+;; $2 = 0: We know that IRQs are off
+;; $2 = 1: We know that IRQs are on
+;; $2 = 2: SP has 8 bits only, IRQ state does not matter
+;; $2 = -1: We don't know anything about IRQ on/off
+;; Always write SP via unspec, see PR50063
+
+(define_insn "movhi_sp_r"
+ [(set (match_operand:HI 0 "stack_register_operand" "=q,q,q,q,q")
+ (unspec_volatile:HI [(match_operand:HI 1 "register_operand" "r,r,r,r,r")
+ (match_operand:HI 2 "const_int_operand" "L,P,N,K,LPN")]
+ UNSPECV_WRITE_SP))]
+ ""
+ "@
+ out %B0,%B1\;out %A0,%A1
+ cli\;out %B0,%B1\;sei\;out %A0,%A1
+ in __tmp_reg__,__SREG__\;cli\;out %B0,%B1\;out __SREG__,__tmp_reg__\;out %A0,%A1
+ out %A0,%A1
+ out %A0,%A1\;out %B0,%B1"
+ [(set_attr "length" "2,4,5,1,2")
+ (set_attr "isa" "no_xmega,no_xmega,no_xmega,*,xmega")
+ (set_attr "cc" "none")])
+
+(define_peephole2
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:ALL2 0 "l_register_operand" "")
+ (match_operand:ALL2 1 "const_or_immediate_operand" ""))]
+ "operands[1] != CONST0_RTX (<MODE>mode)"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (match_dup 2))])])
+
+;; '*' because it is not used in rtl generation, only in above peephole
+;; "*reload_inhi"
+;; "*reload_inhq" "*reload_inuhq"
+;; "*reload_inha" "*reload_inuha"
+(define_insn "*reload_in<mode>"
+ [(set (match_operand:ALL2 0 "l_register_operand" "=l")
+ (match_operand:ALL2 1 "immediate_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ {
+ return output_reload_inhi (operands, operands[2], NULL);
+ }
+ [(set_attr "length" "4")
+ (set_attr "adjust_len" "reload_in16")
+ (set_attr "cc" "clobber")])
+
+;; "*movhi"
+;; "*movhq" "*movuhq"
+;; "*movha" "*movuha"
+(define_insn "*mov<mode>"
+ [(set (match_operand:ALL2 0 "nonimmediate_operand" "=r,r ,r,m ,d,*r,q,r")
+ (match_operand:ALL2 1 "nox_general_operand" "r,Y00,m,r Y00,i,i ,r,q"))]
+ "register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode)"
+ {
+ return output_movhi (insn, operands, NULL);
+ }
+ [(set_attr "length" "2,2,6,7,2,6,5,2")
+ (set_attr "adjust_len" "mov16")
+ (set_attr "cc" "none,none,clobber,clobber,none,clobber,none,none")])
+
+(define_peephole2 ; movw
+ [(set (match_operand:ALL1 0 "even_register_operand" "")
+ (match_operand:ALL1 1 "even_register_operand" ""))
+ (set (match_operand:ALL1 2 "odd_register_operand" "")
+ (match_operand:ALL1 3 "odd_register_operand" ""))]
+ "AVR_HAVE_MOVW
+ && REGNO (operands[0]) == REGNO (operands[2]) - 1
+ && REGNO (operands[1]) == REGNO (operands[3]) - 1"
+ [(set (match_dup 4)
+ (match_dup 5))]
+ {
+ operands[4] = gen_rtx_REG (HImode, REGNO (operands[0]));
+ operands[5] = gen_rtx_REG (HImode, REGNO (operands[1]));
+ })
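+
+;; Illustration: the peephole above (and the reversed-order variant
+;; below) turns a pair like
+;;     mov r24,r20
+;;     mov r25,r21
+;; into a single movw r24,r20 on cores that have the MOVW instruction.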
+
+(define_peephole2 ; movw_r
+ [(set (match_operand:ALL1 0 "odd_register_operand" "")
+ (match_operand:ALL1 1 "odd_register_operand" ""))
+ (set (match_operand:ALL1 2 "even_register_operand" "")
+ (match_operand:ALL1 3 "even_register_operand" ""))]
+ "AVR_HAVE_MOVW
+ && REGNO (operands[2]) == REGNO (operands[0]) - 1
+ && REGNO (operands[3]) == REGNO (operands[1]) - 1"
+ [(set (match_dup 4)
+ (match_dup 5))]
+ {
+ operands[4] = gen_rtx_REG (HImode, REGNO (operands[2]));
+ operands[5] = gen_rtx_REG (HImode, REGNO (operands[3]));
+ })
+
+;; For LPM loads from AS1 we split
+;; R = *Z
+;; to
+;; R = *Z++
+;; Z = Z - sizeof (R)
+;;
+;; so that the second instruction can be optimized out.
+
+(define_split ; "split-lpmx"
+ [(set (match_operand:HISI 0 "register_operand" "")
+ (match_operand:HISI 1 "memory_operand" ""))]
+ "reload_completed
+ && AVR_HAVE_LPMX"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 3)
+ (plus:HI (match_dup 3)
+ (match_dup 4)))]
+ {
+ rtx addr = XEXP (operands[1], 0);
+
+ if (!avr_mem_flash_p (operands[1])
+ || !REG_P (addr)
+ || reg_overlap_mentioned_p (addr, operands[0]))
+ {
+ FAIL;
+ }
+
+ operands[2] = replace_equiv_address (operands[1],
+ gen_rtx_POST_INC (Pmode, addr));
+ operands[3] = addr;
+ operands[4] = gen_int_mode (-GET_MODE_SIZE (<MODE>mode), HImode);
+ })
+
+;;==========================================================================
+;; xpointer move (24 bit)
+
+(define_peephole2 ; *reload_inpsi
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:PSI 0 "l_register_operand" "")
+ (match_operand:PSI 1 "immediate_operand" ""))
+ (match_dup 2)]
+ "operands[1] != const0_rtx
+ && operands[1] != constm1_rtx"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (match_dup 2))])])
+
+;; '*' because it is not used in rtl generation.
+(define_insn "*reload_inpsi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (match_operand:PSI 1 "immediate_operand" "i"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ {
+ return avr_out_reload_inpsi (operands, operands[2], NULL);
+ }
+ [(set_attr "length" "6")
+ (set_attr "adjust_len" "reload_in24")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*movpsi"
+ [(set (match_operand:PSI 0 "nonimmediate_operand" "=r,r,r ,Qm,!d,r")
+ (match_operand:PSI 1 "nox_general_operand" "r,L,Qm,rL,i ,i"))]
+ "register_operand (operands[0], PSImode)
+ || register_operand (operands[1], PSImode)
+ || const0_rtx == operands[1]"
+ {
+ return avr_out_movpsi (insn, operands, NULL);
+ }
+ [(set_attr "length" "3,3,8,9,4,10")
+ (set_attr "adjust_len" "mov24")
+ (set_attr "cc" "none,none,clobber,clobber,none,clobber")])
+
+;;==========================================================================
+;; move double word (32 bit)
+
+(define_peephole2 ; *reload_insi
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:ALL4 0 "l_register_operand" "")
+ (match_operand:ALL4 1 "immediate_operand" ""))
+ (match_dup 2)]
+ "operands[1] != CONST0_RTX (<MODE>mode)"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (match_dup 2))])])
+
+;; '*' because it is not used in rtl generation.
+;; "*reload_insi"
+;; "*reload_insq" "*reload_inusq"
+;; "*reload_insa" "*reload_inusa"
+(define_insn "*reload_insi"
+ [(set (match_operand:ALL4 0 "register_operand" "=r")
+ (match_operand:ALL4 1 "immediate_operand" "n Ynn"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ {
+ return output_reload_insisf (operands, operands[2], NULL);
+ }
+ [(set_attr "length" "8")
+ (set_attr "adjust_len" "reload_in32")
+ (set_attr "cc" "clobber")])
+
+
+;; "*movsi"
+;; "*movsq" "*movusq"
+;; "*movsa" "*movusa"
+(define_insn "*mov<mode>"
+ [(set (match_operand:ALL4 0 "nonimmediate_operand" "=r,r ,r ,Qm ,!d,r")
+ (match_operand:ALL4 1 "nox_general_operand" "r,Y00,Qm,r Y00,i ,i"))]
+ "register_operand (operands[0], <MODE>mode)
+ || reg_or_0_operand (operands[1], <MODE>mode)"
+ {
+ return output_movsisf (insn, operands, NULL);
+ }
+ [(set_attr "length" "4,4,8,9,4,10")
+ (set_attr "adjust_len" "mov32")
+ (set_attr "cc" "none,none,clobber,clobber,none,clobber")])
+
+;; fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
+;; move floating point numbers (32 bit)
+
+(define_insn "*movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,r ,Qm,!d,r")
+ (match_operand:SF 1 "nox_general_operand" "r,G,Qm,rG,F ,F"))]
+ "register_operand (operands[0], SFmode)
+ || reg_or_0_operand (operands[1], SFmode)"
+ {
+ return output_movsisf (insn, operands, NULL);
+ }
+ [(set_attr "length" "4,4,8,9,4,10")
+ (set_attr "adjust_len" "mov32")
+ (set_attr "cc" "none,none,clobber,clobber,none,clobber")])
+
+(define_peephole2 ; *reload_insf
+ [(match_scratch:QI 2 "d")
+ (set (match_operand:SF 0 "l_register_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))
+ (match_dup 2)]
+ "operands[1] != CONST0_RTX (SFmode)"
+ [(parallel [(set (match_dup 0)
+ (match_dup 1))
+ (clobber (match_dup 2))])])
+
+;; '*' because it is not used in rtl generation.
+(define_insn "*reload_insf"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (match_operand:SF 1 "const_double_operand" "F"))
+ (clobber (match_operand:QI 2 "register_operand" "=&d"))]
+ "reload_completed"
+ {
+ return output_reload_insisf (operands, operands[2], NULL);
+ }
+ [(set_attr "length" "8")
+ (set_attr "adjust_len" "reload_in32")
+ (set_attr "cc" "clobber")])
+
+;;=========================================================================
+;; move string (like memcpy)
+
+(define_expand "movmemhi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:HI 2 "const_int_operand" ""))
+ (use (match_operand:HI 3 "const_int_operand" ""))])]
+ ""
+ {
+ if (avr_emit_movmemhi (operands))
+ DONE;
+
+ FAIL;
+ })
+
+(define_mode_attr MOVMEM_r_d [(QI "r")
+ (HI "wd")])
+
+;; $0 : Address Space
+;; $1, $2 : Loop register
+;; R30 : source address
+;; R26 : destination address
+
+;; "movmem_qi"
+;; "movmem_hi"
+(define_insn "movmem_<mode>"
+ [(set (mem:BLK (reg:HI REG_X))
+ (mem:BLK (reg:HI REG_Z)))
+ (unspec [(match_operand:QI 0 "const_int_operand" "n")]
+ UNSPEC_MOVMEM)
+ (use (match_operand:QIHI 1 "register_operand" "<MOVMEM_r_d>"))
+ (clobber (reg:HI REG_X))
+ (clobber (reg:HI REG_Z))
+ (clobber (reg:QI LPM_REGNO))
+ (clobber (match_operand:QIHI 2 "register_operand" "=1"))]
+ ""
+ {
+ return avr_out_movmem (insn, operands, NULL);
+ }
+ [(set_attr "adjust_len" "movmem")
+ (set_attr "cc" "clobber")])
+
+
+;; $0 : Address Space
+;; $1 : RAMPZ RAM address
+;; R24 : #bytes and loop register
+;; R23:Z : 24-bit source address
+;; R26 : 16-bit destination address
+
+;; "movmemx_qi"
+;; "movmemx_hi"
+(define_insn "movmemx_<mode>"
+ [(set (mem:BLK (reg:HI REG_X))
+ (mem:BLK (lo_sum:PSI (reg:QI 23)
+ (reg:HI REG_Z))))
+ (unspec [(match_operand:QI 0 "const_int_operand" "n")]
+ UNSPEC_MOVMEM)
+ (use (reg:QIHI 24))
+ (clobber (reg:HI REG_X))
+ (clobber (reg:HI REG_Z))
+ (clobber (reg:QI LPM_REGNO))
+ (clobber (reg:HI 24))
+ (clobber (reg:QI 23))
+ (clobber (mem:QI (match_operand:QI 1 "io_address_operand" "n")))]
+ ""
+ "%~call __movmemx_<mode>"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2 =%2
+;; memset (%0, %2, %1)
+
+(define_expand "setmemhi"
+ [(parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (match_operand 2 "const_int_operand" ""))
+ (use (match_operand:HI 1 "const_int_operand" ""))
+ (use (match_operand:HI 3 "const_int_operand" ""))
+ (clobber (match_scratch:HI 4 ""))
+ (clobber (match_dup 5))])]
+ ""
+ {
+ rtx addr0;
+ enum machine_mode mode;
+
+ /* If the value to set is not zero, use the library routine. */
+ if (operands[2] != const0_rtx)
+ FAIL;
+
+ if (!CONST_INT_P (operands[1]))
+ FAIL;
+
+ mode = u8_operand (operands[1], VOIDmode) ? QImode : HImode;
+ operands[5] = gen_rtx_SCRATCH (mode);
+ operands[1] = copy_to_mode_reg (mode,
+ gen_int_mode (INTVAL (operands[1]), mode));
+ addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+ operands[0] = gen_rtx_MEM (BLKmode, addr0);
+ })
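+
+;; A minimal sketch (illustrative only): the expander above only handles
+;; zero-fills of known constant length; everything else FAILs into the
+;; memset library call.
+;;
+;; void clear16 (char *p)
+;; {
+;; __builtin_memset (p, 0, 16); /* may map onto *clrmemqi below */
+;; }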
+
+
+(define_insn "*clrmemqi"
+ [(set (mem:BLK (match_operand:HI 0 "register_operand" "e"))
+ (const_int 0))
+ (use (match_operand:QI 1 "register_operand" "r"))
+ (use (match_operand:QI 2 "const_int_operand" "n"))
+ (clobber (match_scratch:HI 3 "=0"))
+ (clobber (match_scratch:QI 4 "=&1"))]
+ ""
+ "0:\;st %a0+,__zero_reg__\;dec %1\;brne 0b"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+
+(define_insn "*clrmemhi"
+ [(set (mem:BLK (match_operand:HI 0 "register_operand" "e,e"))
+ (const_int 0))
+ (use (match_operand:HI 1 "register_operand" "!w,d"))
+ (use (match_operand:HI 2 "const_int_operand" "n,n"))
+ (clobber (match_scratch:HI 3 "=0,0"))
+ (clobber (match_scratch:HI 4 "=&1,&1"))]
+ ""
+ "@
+ 0:\;st %a0+,__zero_reg__\;sbiw %A1,1\;brne 0b
+ 0:\;st %a0+,__zero_reg__\;subi %A1,1\;sbci %B1,0\;brne 0b"
+ [(set_attr "length" "3,4")
+ (set_attr "cc" "clobber,clobber")])
+
+(define_expand "strlenhi"
+ [(set (match_dup 4)
+ (unspec:HI [(match_operand:BLK 1 "memory_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")
+ (match_operand:HI 3 "immediate_operand" "")]
+ UNSPEC_STRLEN))
+ (set (match_dup 4)
+ (plus:HI (match_dup 4)
+ (const_int -1)))
+ (parallel [(set (match_operand:HI 0 "register_operand" "")
+ (minus:HI (match_dup 4)
+ (match_dup 5)))
+ (clobber (scratch:QI))])]
+ ""
+ {
+ rtx addr;
+ if (operands[2] != const0_rtx)
+ FAIL;
+ addr = copy_to_mode_reg (Pmode, XEXP (operands[1], 0));
+ operands[1] = gen_rtx_MEM (BLKmode, addr);
+ operands[5] = addr;
+ operands[4] = gen_reg_rtx (HImode);
+ })
+
+(define_insn "*strlenhi"
+ [(set (match_operand:HI 0 "register_operand" "=e")
+ (unspec:HI [(mem:BLK (match_operand:HI 1 "register_operand" "0"))
+ (const_int 0)
+ (match_operand:HI 2 "immediate_operand" "i")]
+ UNSPEC_STRLEN))]
+ ""
+ "0:\;ld __tmp_reg__,%a0+\;tst __tmp_reg__\;brne 0b"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
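+
+;; Sketch (function name hypothetical): a plain strlen can be expanded
+;; inline via "strlenhi".  The post-increment load in *strlenhi above
+;; overshoots the terminating zero by one, which the expander corrects
+;; with its (plus ... (const_int -1)) step.
+;;
+;; unsigned int len (const char *s)
+;; {
+;; return __builtin_strlen (s);
+;; }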
+
+;+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+; add bytes
+
+;; "addqi3"
+;; "addqq3" "adduqq3"
+(define_insn "add<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "=r,d ,r ,r ,r ,r")
+ (plus:ALL1 (match_operand:ALL1 1 "register_operand" "%0,0 ,0 ,0 ,0 ,0")
+ (match_operand:ALL1 2 "nonmemory_operand" "r,n Ynn,Y01,Ym1,Y02,Ym2")))]
+ ""
+ "@
+ add %0,%2
+ subi %0,lo8(-(%2))
+ inc %0
+ dec %0
+ inc %0\;inc %0
+ dec %0\;dec %0"
+ [(set_attr "length" "1,1,1,1,2,2")
+ (set_attr "cc" "set_czn,set_czn,set_zn,set_zn,set_zn,set_zn")])
+
+;; "addhi3"
+;; "addhq3" "adduhq3"
+;; "addha3" "adduha3"
+(define_expand "add<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "")
+ (plus:ALL2 (match_operand:ALL2 1 "register_operand" "")
+ (match_operand:ALL2 2 "nonmemory_or_const_operand" "")))]
+ ""
+ {
+ if (CONST_INT_P (operands[2]))
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), HImode);
+
+ if (can_create_pseudo_p()
+ && !stack_register_operand (operands[0], HImode)
+ && !stack_register_operand (operands[1], HImode)
+ && !d_register_operand (operands[0], HImode)
+ && !d_register_operand (operands[1], HImode))
+ {
+ emit_insn (gen_addhi3_clobber (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ }
+
+ if (CONST_FIXED_P (operands[2]))
+ {
+ emit_insn (gen_add<mode>3_clobber (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ })
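+
+;; Illustration (hedged; assumes the operands end up outside LD_REGS and
+;; away from SP): a constant addition like the one below may be routed
+;; through "addhi3_clobber" so that a d-register scratch can hold the
+;; constant.
+;;
+;; int bump (int x)
+;; {
+;; return x + 1234;
+;; }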
+
+
+(define_insn "*addhi3_zero_extend"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%1\;adc %B0,__zero_reg__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addhi3_zero_extend1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (match_operand:HI 1 "register_operand" "0")
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "add %A0,%2\;adc %B0,__zero_reg__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addhi3.sign_extend1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "register_operand" "0")))]
+ ""
+ {
+ return reg_overlap_mentioned_p (operands[0], operands[1])
+ ? "mov __tmp_reg__,%1\;add %A0,%1\;adc %B0,__zero_reg__\;sbrc __tmp_reg__,7\;dec %B0"
+ : "add %A0,%1\;adc %B0,__zero_reg__\;sbrc %1,7\;dec %B0";
+ }
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*addhi3_sp"
+ [(set (match_operand:HI 1 "stack_register_operand" "=q")
+ (plus:HI (match_operand:HI 2 "stack_register_operand" "q")
+ (match_operand:HI 0 "avr_sp_immediate_operand" "Csp")))]
+ ""
+ {
+ return avr_out_addto_sp (operands, NULL);
+ }
+ [(set_attr "length" "6")
+ (set_attr "adjust_len" "addto_sp")])
+
+;; "*addhi3"
+;; "*addhq3" "*adduhq3"
+;; "*addha3" "*adduha3"
+(define_insn "*add<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=??r,d,!w ,d")
+ (plus:ALL2 (match_operand:ALL2 1 "register_operand" "%0,0,0 ,0")
+ (match_operand:ALL2 2 "nonmemory_or_const_operand" "r,s,IJ YIJ,n Ynn")))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "length" "2")
+ (set_attr "adjust_len" "plus")
+ (set_attr "cc" "plus")])
+
+;; Adding a constant to NO_LD_REGS might have led to a reload of
+;; that constant to LD_REGS. We don't add a scratch to *addhi3
+;; itself because that insn is special to reload.
+
+(define_peephole2 ; addhi3_clobber
+ [(set (match_operand:ALL2 0 "d_register_operand" "")
+ (match_operand:ALL2 1 "const_operand" ""))
+ (set (match_operand:ALL2 2 "l_register_operand" "")
+ (plus:ALL2 (match_dup 2)
+ (match_dup 0)))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(parallel [(set (match_dup 2)
+ (plus:ALL2 (match_dup 2)
+ (match_dup 1)))
+ (clobber (match_dup 3))])]
+ {
+ operands[3] = simplify_gen_subreg (QImode, operands[0], <MODE>mode, 0);
+ })
+
+;; Same, but with reload to NO_LD_REGS
+;; Combine *reload_inhi with *addhi3
+
+(define_peephole2 ; addhi3_clobber
+ [(parallel [(set (match_operand:ALL2 0 "l_register_operand" "")
+ (match_operand:ALL2 1 "const_operand" ""))
+ (clobber (match_operand:QI 2 "d_register_operand" ""))])
+ (set (match_operand:ALL2 3 "l_register_operand" "")
+ (plus:ALL2 (match_dup 3)
+ (match_dup 0)))]
+ "peep2_reg_dead_p (2, operands[0])"
+ [(parallel [(set (match_dup 3)
+ (plus:ALL2 (match_dup 3)
+ (match_dup 1)))
+ (clobber (match_dup 2))])])
+
+;; "addhi3_clobber"
+;; "addhq3_clobber" "adduhq3_clobber"
+;; "addha3_clobber" "adduha3_clobber"
+(define_insn "add<mode>3_clobber"
+ [(set (match_operand:ALL2 0 "register_operand" "=!w ,d ,r")
+ (plus:ALL2 (match_operand:ALL2 1 "register_operand" "%0 ,0 ,0")
+ (match_operand:ALL2 2 "const_operand" "IJ YIJ,n Ynn,n Ynn")))
+ (clobber (match_scratch:QI 3 "=X ,X ,&d"))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "length" "4")
+ (set_attr "adjust_len" "plus")
+ (set_attr "cc" "plus")])
+
+
+;; "addsi3"
+;; "addsq3" "addusq3"
+;; "addsa3" "addusa3"
+(define_insn "add<mode>3"
+ [(set (match_operand:ALL4 0 "register_operand" "=??r,d ,r")
+ (plus:ALL4 (match_operand:ALL4 1 "register_operand" "%0,0 ,0")
+ (match_operand:ALL4 2 "nonmemory_operand" "r,i ,n Ynn")))
+ (clobber (match_scratch:QI 3 "=X,X ,&d"))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "length" "4")
+ (set_attr "adjust_len" "plus")
+ (set_attr "cc" "plus")])
+
+(define_insn "*addpsi3_zero_extend.qi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (plus:PSI (zero_extend:PSI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:PSI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%A1\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addpsi3_zero_extend.hi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (plus:PSI (zero_extend:PSI (match_operand:HI 1 "register_operand" "r"))
+ (match_operand:PSI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%A1\;adc %B0,%B1\;adc %C0,__zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addpsi3_sign_extend.hi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (plus:PSI (sign_extend:PSI (match_operand:HI 1 "register_operand" "r"))
+ (match_operand:PSI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%1\;adc %B0,%B1\;adc %C0,__zero_reg__\;sbrc %B1,7\;dec %C0"
+ [(set_attr "length" "5")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addsi3_zero_extend"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%1\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+(define_insn "*addsi3_zero_extend.hi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (zero_extend:SI (match_operand:HI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "add %A0,%1\;adc %B0,%B1\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+(define_insn "addpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=??r,d ,d,r")
+ (plus:PSI (match_operand:PSI 1 "register_operand" "%0,0 ,0,0")
+ (match_operand:PSI 2 "nonmemory_operand" "r,s ,n,n")))
+ (clobber (match_scratch:QI 3 "=X,X ,X,&d"))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "length" "3")
+ (set_attr "adjust_len" "plus")
+ (set_attr "cc" "plus")])
+
+(define_insn "subpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (minus:PSI (match_operand:PSI 1 "register_operand" "0")
+ (match_operand:PSI 2 "register_operand" "r")))]
+ ""
+ "sub %0,%2\;sbc %B0,%B2\;sbc %C0,%C2"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "*subpsi3_zero_extend.qi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (minus:PSI (match_operand:PSI 1 "register_operand" "0")
+ (zero_extend:PSI (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2\;sbc %B0,__zero_reg__\;sbc %C0,__zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "*subpsi3_zero_extend.hi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (minus:PSI (match_operand:PSI 1 "register_operand" "0")
+ (zero_extend:PSI (match_operand:HI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2\;sbc %B0,%B2\;sbc %C0,__zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "*subpsi3_sign_extend.hi"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (minus:PSI (match_operand:PSI 1 "register_operand" "0")
+ (sign_extend:PSI (match_operand:HI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%A2\;sbc %B0,%B2\;sbc %C0,__zero_reg__\;sbrc %B2,7\;inc %C0"
+ [(set_attr "length" "5")
+ (set_attr "cc" "set_czn")])
+
+;-----------------------------------------------------------------------------
+; sub bytes
+
+;; "subqi3"
+;; "subqq3" "subuqq3"
+(define_insn "sub<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "=??r,d ,r ,r ,r ,r")
+ (minus:ALL1 (match_operand:ALL1 1 "register_operand" "0,0 ,0 ,0 ,0 ,0")
+ (match_operand:ALL1 2 "nonmemory_or_const_operand" "r,n Ynn,Y01,Ym1,Y02,Ym2")))]
+ ""
+ "@
+ sub %0,%2
+ subi %0,lo8(%2)
+ dec %0
+ inc %0
+ dec %0\;dec %0
+ inc %0\;inc %0"
+ [(set_attr "length" "1,1,1,1,2,2")
+ (set_attr "cc" "set_czn,set_czn,set_zn,set_zn,set_zn,set_zn")])
+
+;; "subhi3"
+;; "subhq3" "subuhq3"
+;; "subha3" "subuha3"
+(define_insn "sub<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=??r,d ,*r")
+ (minus:ALL2 (match_operand:ALL2 1 "register_operand" "0,0 ,0")
+ (match_operand:ALL2 2 "nonmemory_or_const_operand" "r,i Ynn,Ynn")))
+ (clobber (match_scratch:QI 3 "=X,X ,&d"))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "adjust_len" "plus")
+ (set_attr "cc" "plus")])
+
+(define_insn "*subhi3_zero_extend1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 1 "register_operand" "0")
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2\;sbc %B0,__zero_reg__"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "*subhi3.sign_extend2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 1 "register_operand" "0")
+ (sign_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ {
+ return reg_overlap_mentioned_p (operands[0], operands[2])
+ ? "mov __tmp_reg__,%2\;sub %A0,%2\;sbc %B0,__zero_reg__\;sbrc __tmp_reg__,7\;inc %B0"
+ : "sub %A0,%2\;sbc %B0,__zero_reg__\;sbrc %2,7\;inc %B0";
+ }
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+;; "subsi3"
+;; "subsq3" "subusq3"
+;; "subsa3" "subusa3"
+(define_insn "sub<mode>3"
+ [(set (match_operand:ALL4 0 "register_operand" "=??r,d ,r")
+ (minus:ALL4 (match_operand:ALL4 1 "register_operand" "0,0 ,0")
+ (match_operand:ALL4 2 "nonmemory_or_const_operand" "r,n Ynn,Ynn")))
+ (clobber (match_scratch:QI 3 "=X,X ,&d"))]
+ ""
+ {
+ return avr_out_plus (insn, operands);
+ }
+ [(set_attr "adjust_len" "plus")
+ (set_attr "cc" "plus")])
+
+(define_insn "*subsi3_zero_extend"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (zero_extend:SI (match_operand:QI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2\;sbc %B0,__zero_reg__\;sbc %C0,__zero_reg__\;sbc %D0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "*subsi3_zero_extend.hi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "0")
+ (zero_extend:SI (match_operand:HI 2 "register_operand" "r"))))]
+ ""
+ "sub %A0,%2\;sbc %B0,%B2\;sbc %C0,__zero_reg__\;sbc %D0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_czn")])
+
+;******************************************************************************
+; mul
+
+(define_expand "mulqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (mult:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "register_operand" "")))]
+ ""
+ {
+ if (!AVR_HAVE_MUL)
+ {
+ emit_insn (gen_mulqi3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ })
+
+(define_insn "*mulqi3_enh"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (mult:QI (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2
+ mov %0,r0
+ clr r1"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulqi3_call"
+ [(set (reg:QI 24) (match_operand:QI 1 "register_operand" ""))
+ (set (reg:QI 22) (match_operand:QI 2 "register_operand" ""))
+ (parallel [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))])
+ (set (match_operand:QI 0 "register_operand" "") (reg:QI 24))])
+
+(define_insn "*mulqi3_call"
+ [(set (reg:QI 24) (mult:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))]
+ "!AVR_HAVE_MUL"
+ "%~call __mulqi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "umulqi3_highpart"
+;; "smulqi3_highpart"
+(define_insn "<extend_su>mulqi3_highpart"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (truncate:QI
+ (lshiftrt:HI (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (any_extend:HI (match_operand:QI 2 "register_operand" "<mul_r_d>")))
+ (const_int 8))))]
+ "AVR_HAVE_MUL"
+ "mul<extend_s> %1,%2
+ mov %0,r1
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+
+;; Used when expanding div or mod inline for some special values
+(define_insn "*subqi3.ashiftrt7"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (minus:QI (match_operand:QI 1 "register_operand" "0")
+ (ashiftrt:QI (match_operand:QI 2 "register_operand" "r")
+ (const_int 7))))]
+ ""
+ "sbrc %2,7\;inc %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*addqi3.lt0"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (plus:QI (lt:QI (match_operand:QI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:QI 2 "register_operand" "0")))]
+ ""
+ "sbrc %1,7\;inc %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*addhi3.lt0"
+ [(set (match_operand:HI 0 "register_operand" "=w,r")
+ (plus:HI (lt:HI (match_operand:QI 1 "register_operand" "r,r")
+ (const_int 0))
+ (match_operand:HI 2 "register_operand" "0,0")))
+ (clobber (match_scratch:QI 3 "=X,&1"))]
+ ""
+ "@
+ sbrc %1,7\;adiw %0,1
+ lsl %1\;adc %A0,__zero_reg__\;adc %B0,__zero_reg__"
+ [(set_attr "length" "2,3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*addpsi3.lt0"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (plus:PSI (lshiftrt:PSI (match_operand:PSI 1 "register_operand" "r")
+ (const_int 23))
+ (match_operand:PSI 2 "register_operand" "0")))]
+ ""
+ "mov __tmp_reg__,%C1\;lsl __tmp_reg__
+ adc %A0,__zero_reg__\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__"
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*addsi3.lt0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 31))
+ (match_operand:SI 2 "register_operand" "0")))]
+ ""
+ "mov __tmp_reg__,%D1\;lsl __tmp_reg__
+ adc %A0,__zero_reg__\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*umulqihi3.call"
+ [(set (reg:HI 24)
+ (mult:HI (zero_extend:HI (reg:QI 22))
+ (zero_extend:HI (reg:QI 24))))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))]
+ "!AVR_HAVE_MUL"
+ "%~call __umulqihi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "umulqihi3"
+;; "mulqihi3"
+(define_insn "<extend_u>mulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (any_extend:HI (match_operand:QI 2 "register_operand" "<mul_r_d>"))))]
+ "AVR_HAVE_MUL"
+ "mul<extend_s> %1,%2
+ movw %0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "usmulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (sign_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
+ "AVR_HAVE_MUL"
+ "mulsu %2,%1
+ movw %0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+;; The insn above is not canonicalized by insn combine, so here is a
+;; version with the operands swapped.
+
+(define_insn "*sumulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
+ "AVR_HAVE_MUL"
+ "mulsu %1,%2
+ movw %0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+;; One-extend operand 1
+
+(define_insn "*osmulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (not:HI (zero_extend:HI (not:QI (match_operand:QI 1 "register_operand" "a"))))
+ (sign_extend:HI (match_operand:QI 2 "register_operand" "a"))))]
+ "AVR_HAVE_MUL"
+ "mulsu %2,%1
+ movw %0,r0
+ sub %B0,%2
+ clr __zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*oumulqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (not:HI (zero_extend:HI (not:QI (match_operand:QI 1 "register_operand" "r"))))
+ (zero_extend:HI (match_operand:QI 2 "register_operand" "r"))))]
+ "AVR_HAVE_MUL"
+ "mul %2,%1
+ movw %0,r0
+ sub %B0,%2
+ clr __zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;******************************************************************************
+; multiply-add/sub QI: $0 = $3 +/- $1*$2
+;******************************************************************************
+
+(define_insn "*maddqi4"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (plus:QI (mult:QI (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "register_operand" "r"))
+ (match_operand:QI 3 "register_operand" "0")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2
+ add %A0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*msubqi4"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (minus:QI (match_operand:QI 3 "register_operand" "0")
+ (mult:QI (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "register_operand" "r"))))]
+ "AVR_HAVE_MUL"
+ "mul %1,%2
+ sub %A0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "*maddqi4.const"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (plus:QI (mult:QI (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "const_int_operand" "n"))
+ (match_operand:QI 3 "register_operand" "0")))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *maddqi4
+ (set (match_dup 0)
+ (plus:QI (mult:QI (match_dup 1)
+ (match_dup 4))
+ (match_dup 3)))])
+
+(define_insn_and_split "*msubqi4.const"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (minus:QI (match_operand:QI 3 "register_operand" "0")
+ (mult:QI (match_operand:QI 1 "register_operand" "r")
+ (match_operand:QI 2 "const_int_operand" "n"))))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *msubqi4
+ (set (match_dup 0)
+ (minus:QI (match_dup 3)
+ (mult:QI (match_dup 1)
+ (match_dup 4))))])
+
+
+;******************************************************************************
+; multiply-add/sub HI: $0 = $3 +/- $1*$2 with 8-bit values $1, $2
+;******************************************************************************
+
+;; We don't use standard insns/expanders as they lead to cumbersome code for,
+;; e.g.,
+;;
+;; int foo (unsigned char z)
+;; {
+;; extern int aInt[];
+;; return aInt[3*z+2];
+;; }
+;;
+;; because the constant +4 then is added explicitly instead of being folded
+;; into the aInt symbol. Therefore, we rely on insn combine, which takes costs
+;; into account more accurately and doesn't do brute-force multiply-add/sub.
+;; The implementation effort is the same, so we are fine with that approach.
+
+
+;; "*maddqihi4"
+;; "*umaddqihi4"
+(define_insn "*<extend_u>maddqihi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (any_extend:HI (match_operand:QI 2 "register_operand" "<mul_r_d>")))
+ (match_operand:HI 3 "register_operand" "0")))]
+ "AVR_HAVE_MUL"
+ "mul<extend_s> %1,%2
+ add %A0,r0
+ adc %B0,r1
+ clr __zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; "*msubqihi4"
+;; "*umsubqihi4"
+(define_insn "*<extend_u>msubqihi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 3 "register_operand" "0")
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (any_extend:HI (match_operand:QI 2 "register_operand" "<mul_r_d>")))))]
+ "AVR_HAVE_MUL"
+ "mul<extend_s> %1,%2
+ sub %A0,r0
+ sbc %B0,r1
+ clr __zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; "*usmaddqihi4"
+;; "*sumaddqihi4"
+(define_insn "*<any_extend:extend_su><any_extend2:extend_su>msubqihi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (any_extend2:HI (match_operand:QI 2 "register_operand" "a")))
+ (match_operand:HI 3 "register_operand" "0")))]
+ "AVR_HAVE_MUL
+ && reload_completed
+ && <any_extend:CODE> != <any_extend2:CODE>"
+ {
+ output_asm_insn (<any_extend:CODE> == SIGN_EXTEND
+ ? "mulsu %1,%2" : "mulsu %2,%1", operands);
+
+ return "add %A0,r0\;adc %B0,r1\;clr __zero_reg__";
+ }
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; "*usmsubqihi4"
+;; "*sumsubqihi4"
+(define_insn "*<any_extend:extend_su><any_extend2:extend_su>msubqihi4"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 3 "register_operand" "0")
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (any_extend2:HI (match_operand:QI 2 "register_operand" "a")))))]
+ "AVR_HAVE_MUL
+ && reload_completed
+ && <any_extend:CODE> != <any_extend2:CODE>"
+ {
+ output_asm_insn (<any_extend:CODE> == SIGN_EXTEND
+ ? "mulsu %1,%2" : "mulsu %2,%1", operands);
+
+ return "sub %A0,r0\;sbc %B0,r1\;clr __zero_reg__";
+ }
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+;; Handle small constants
+
+;; Special case of a += 2*b as frequently seen with accesses to int arrays.
+;; This is shorter and faster than MUL and has lower register pressure.
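+;;
+;; A sketch of the kind of source this matches (names illustrative):
+;;
+;; int a_plus_2b (int a, unsigned char b)
+;; {
+;; return a + 2 * b; /* split into two *addhi3_zero_extend adds */
+;; }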
+
+(define_insn_and_split "*umaddqihi4.2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (const_int 2))
+ (match_operand:HI 2 "register_operand" "r")))]
+ "!reload_completed
+ && !reg_overlap_mentioned_p (operands[0], operands[1])"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (match_dup 0)
+ (match_dup 2))
+ ; *addhi3_zero_extend
+ (set (match_dup 0)
+ (plus:HI (zero_extend:HI (match_dup 1))
+ (match_dup 0)))
+ ; *addhi3_zero_extend
+ (set (match_dup 0)
+ (plus:HI (zero_extend:HI (match_dup 1))
+ (match_dup 0)))])
+
+;; "umaddqihi4.uconst"
+;; "maddqihi4.sconst"
+(define_insn_and_split "*<extend_u>maddqihi4.<extend_su>const"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (match_operand:HI 2 "<extend_su>8_operand" "n"))
+ (match_operand:HI 3 "register_operand" "0")))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *umaddqihi4 resp. *maddqihi4
+ (set (match_dup 0)
+ (plus:HI (mult:HI (any_extend:HI (match_dup 1))
+ (any_extend:HI (match_dup 4)))
+ (match_dup 3)))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+;; "*umsubqihi4.uconst"
+;; "*msubqihi4.sconst"
+(define_insn_and_split "*<extend_u>msubqihi4.<extend_su>const"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 3 "register_operand" "0")
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (match_operand:HI 2 "<extend_su>8_operand" "n"))))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *umsubqihi4 resp. *msubqihi4
+ (set (match_dup 0)
+ (minus:HI (match_dup 3)
+ (mult:HI (any_extend:HI (match_dup 1))
+ (any_extend:HI (match_dup 4)))))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+;; Same as the insn above, but the combiner tries versions canonicalized to
+;; ASHIFT for MULT by a power of 2 and hence skips the MULT insn above.
+
+(define_insn_and_split "*umsubqihi4.uconst.ashift"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 3 "register_operand" "0")
+ (ashift:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "const_2_to_7_operand" "n"))))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *umsubqihi4
+ (set (match_dup 0)
+ (minus:HI (match_dup 3)
+ (mult:HI (zero_extend:HI (match_dup 1))
+ (zero_extend:HI (match_dup 4)))))]
+ {
+ operands[2] = gen_int_mode (1 << INTVAL (operands[2]), QImode);
+ })
+
+;; Same as the insn above, but the combiner tries versions canonicalized to
+;; ASHIFT for MULT by a power of 2 and hence skips the MULT insn above.
+;; We omit 128 because that would require an extra pattern for just one value.
+
+(define_insn_and_split "*msubqihi4.sconst.ashift"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 3 "register_operand" "0")
+ (ashift:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
+ (match_operand:HI 2 "const_1_to_6_operand" "M"))))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *smsubqihi4
+ (set (match_dup 0)
+ (minus:HI (match_dup 3)
+ (mult:HI (sign_extend:HI (match_dup 1))
+ (sign_extend:HI (match_dup 4)))))]
+ {
+ operands[2] = gen_int_mode (1 << INTVAL (operands[2]), QImode);
+ })
+
+;; For signed/unsigned combinations that require the narrow constraint "a",
+;; only provide a pattern if the signed/unsigned combination is actually needed.
+
+(define_insn_and_split "*sumaddqihi4.uconst"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (plus:HI (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (match_operand:HI 2 "u8_operand" "M"))
+ (match_operand:HI 3 "register_operand" "0")))
+ (clobber (match_scratch:QI 4 "=&a"))]
+ "AVR_HAVE_MUL
+ && !s8_operand (operands[2], VOIDmode)"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *sumaddqihi4
+ (set (match_dup 0)
+ (plus:HI (mult:HI (sign_extend:HI (match_dup 1))
+ (zero_extend:HI (match_dup 4)))
+ (match_dup 3)))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+(define_insn_and_split "*sumsubqihi4.uconst"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (minus:HI (match_operand:HI 3 "register_operand" "0")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (match_operand:HI 2 "u8_operand" "M"))))
+ (clobber (match_scratch:QI 4 "=&a"))]
+ "AVR_HAVE_MUL
+ && !s8_operand (operands[2], VOIDmode)"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (match_dup 2))
+ ; *sumsubqihi4
+ (set (match_dup 0)
+ (minus:HI (match_dup 3)
+ (mult:HI (sign_extend:HI (match_dup 1))
+ (zero_extend:HI (match_dup 4)))))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+;******************************************************************************
+; mul HI: $1 = sign/zero-extend, $2 = small constant
+;******************************************************************************
+
+;; "*muluqihi3.uconst"
+;; "*mulsqihi3.sconst"
+(define_insn_and_split "*mul<extend_su>qihi3.<extend_su>const"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (any_extend:HI (match_operand:QI 1 "register_operand" "<mul_r_d>"))
+ (match_operand:HI 2 "<extend_su>8_operand" "n")))
+ (clobber (match_scratch:QI 3 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (match_dup 2))
+ ; umulqihi3 resp. mulqihi3
+ (set (match_dup 0)
+ (mult:HI (any_extend:HI (match_dup 1))
+ (any_extend:HI (match_dup 3))))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+(define_insn_and_split "*muluqihi3.sconst"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (match_operand:HI 2 "s8_operand" "n")))
+ (clobber (match_scratch:QI 3 "=&a"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (match_dup 2))
+ ; usmulqihi3
+ (set (match_dup 0)
+ (mult:HI (zero_extend:HI (match_dup 1))
+ (sign_extend:HI (match_dup 3))))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+(define_insn_and_split "*mulsqihi3.uconst"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (match_operand:HI 2 "u8_operand" "M")))
+ (clobber (match_scratch:QI 3 "=&a"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (match_dup 2))
+ ; usmulqihi3
+ (set (match_dup 0)
+ (mult:HI (zero_extend:HI (match_dup 3))
+ (sign_extend:HI (match_dup 1))))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+(define_insn_and_split "*mulsqihi3.oconst"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (match_operand:HI 2 "o8_operand" "n")))
+ (clobber (match_scratch:QI 3 "=&a"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (match_dup 2))
+ ; *osmulqihi3
+ (set (match_dup 0)
+ (mult:HI (not:HI (zero_extend:HI (not:QI (match_dup 3))))
+ (sign_extend:HI (match_dup 1))))]
+ {
+ operands[2] = gen_int_mode (INTVAL (operands[2]), QImode);
+ })
+
+;; The EXTEND of $1 only appears in combine; we don't see it in expand, so
+;; expand decides to use ASHIFT instead of MUL because ASHIFT costs are cheaper
+;; at that time. Fix that up here.
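+;;
+;; For example (hedged; function name hypothetical), with AVR_HAVE_MUL a
+;; widened shift such as
+;;
+;; int lsh3 (signed char c)
+;; {
+;; return c << 3;
+;; }
+;;
+;; can be rewritten by the splitters below as a widening multiply by 8.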
+
+(define_insn "*ashiftqihi2.signx.1"
+ [(set (match_operand:HI 0 "register_operand" "=r,*r")
+ (ashift:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "0,r"))
+ (const_int 1)))]
+ ""
+ "@
+ lsl %A0\;sbc %B0,%B0
+ mov %A0,%1\;lsl %A0\;sbc %B0,%B0"
+ [(set_attr "length" "2,3")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "*ashifthi3.signx.const"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (ashift:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "d"))
+ (match_operand:HI 2 "const_2_to_6_operand" "I")))
+ (clobber (match_scratch:QI 3 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (match_dup 2))
+ ; mulqihi3
+ (set (match_dup 0)
+ (mult:HI (sign_extend:HI (match_dup 1))
+ (sign_extend:HI (match_dup 3))))]
+ {
+ operands[2] = GEN_INT (1 << INTVAL (operands[2]));
+ })
+
+(define_insn_and_split "*ashifthi3.signx.const7"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (ashift:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (const_int 7)))
+ (clobber (match_scratch:QI 2 "=&a"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 2)
+ (match_dup 3))
+ ; usmulqihi3
+ (set (match_dup 0)
+ (mult:HI (zero_extend:HI (match_dup 2))
+ (sign_extend:HI (match_dup 1))))]
+ {
+ operands[3] = gen_int_mode (1 << 7, QImode);
+ })
+
+(define_insn_and_split "*ashifthi3.zerox.const"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (ashift:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "const_2_to_7_operand" "I")))
+ (clobber (match_scratch:QI 3 "=&d"))]
+ "AVR_HAVE_MUL"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 3)
+ (match_dup 2))
+ ; umulqihi3
+ (set (match_dup 0)
+ (mult:HI (zero_extend:HI (match_dup 1))
+ (zero_extend:HI (match_dup 3))))]
+ {
+ operands[2] = gen_int_mode (1 << INTVAL (operands[2]), QImode);
+ })
+
+;******************************************************************************
+; mul HI: $1 = sign-/zero-/one-extend, $2 = reg
+;******************************************************************************
+
+(define_insn "mulsqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "a"))
+ (match_operand:HI 2 "register_operand" "a")))]
+ "AVR_HAVE_MUL"
+ "mulsu %1,%A2
+ movw %0,r0
+ mul %1,%B2
+ add %B0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+(define_insn "muluqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HI 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%A2
+ movw %0,r0
+ mul %1,%B2
+ add %B0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+;; one-extend operand 1
+
+(define_insn "muloqihi3"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (not:HI (zero_extend:HI (not:QI (match_operand:QI 1 "register_operand" "r"))))
+ (match_operand:HI 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ "mul %1,%A2
+ movw %0,r0
+ mul %1,%B2
+ add %B0,r0
+ sub %B0,%A2
+ clr __zero_reg__"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+;******************************************************************************
+
+(define_expand "mulhi3"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (mult:HI (match_operand:HI 1 "register_operand" "")
+ (match_operand:HI 2 "register_or_s9_operand" "")))]
+ ""
+ {
+ if (!AVR_HAVE_MUL)
+ {
+ if (!register_operand (operands[2], HImode))
+ operands[2] = force_reg (HImode, operands[2]);
+
+ emit_insn (gen_mulhi3_call (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+
+ /* For small constants we can do better by extending them on the fly.
+ The constant can be loaded in one instruction and the widening
+ multiplication is shorter. First try the unsigned variant because it
+ allows constraint "d" instead of "a" for the signed version. */
+
+ if (s9_operand (operands[2], HImode))
+ {
+ rtx reg = force_reg (QImode, gen_int_mode (INTVAL (operands[2]), QImode));
+
+ if (u8_operand (operands[2], HImode))
+ {
+ emit_insn (gen_muluqihi3 (operands[0], reg, operands[1]));
+ }
+ else if (s8_operand (operands[2], HImode))
+ {
+ emit_insn (gen_mulsqihi3 (operands[0], reg, operands[1]));
+ }
+ else
+ {
+ emit_insn (gen_muloqihi3 (operands[0], reg, operands[1]));
+ }
+
+ DONE;
+ }
+
+ if (!register_operand (operands[2], HImode))
+ operands[2] = force_reg (HImode, operands[2]);
+ })
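+
+;; Illustration of the small-constant path (function name hypothetical):
+;;
+;; int scale (int x)
+;; {
+;; return 100 * x; /* u8_operand: may expand via muluqihi3 */
+;; }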
+
+(define_insn "*mulhi3_enh"
+ [(set (match_operand:HI 0 "register_operand" "=&r")
+ (mult:HI (match_operand:HI 1 "register_operand" "r")
+ (match_operand:HI 2 "register_operand" "r")))]
+ "AVR_HAVE_MUL"
+ {
+ return REGNO (operands[1]) == REGNO (operands[2])
+ ? "mul %A1,%A1\;movw %0,r0\;mul %A1,%B1\;add %B0,r0\;add %B0,r0\;clr r1"
+ : "mul %A1,%A2\;movw %0,r0\;mul %A1,%B2\;add %B0,r0\;mul %B1,%A2\;add %B0,r0\;clr r1";
+ }
+ [(set_attr "length" "7")
+ (set_attr "cc" "clobber")])
+
+(define_expand "mulhi3_call"
+ [(set (reg:HI 24) (match_operand:HI 1 "register_operand" ""))
+ (set (reg:HI 22) (match_operand:HI 2 "register_operand" ""))
+ (parallel [(set (reg:HI 24) (mult:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 22))
+ (clobber (reg:QI 21))])
+ (set (match_operand:HI 0 "register_operand" "") (reg:HI 24))])
+
+(define_insn "*mulhi3_call"
+ [(set (reg:HI 24) (mult:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 22))
+ (clobber (reg:QI 21))]
+ "!AVR_HAVE_MUL"
+ "%~call __mulhi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; To support widening multiplication with a constant, we postpone
+;; expanding to the implicit library call until after combine and
+;; prior to register allocation. Clobber all hard registers that
+;; might be used by the (widening) multiply until it is split and
+;; its final register footprint is worked out.
+
+(define_expand "mulsi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "nonmemory_operand" "")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))])]
+ "AVR_HAVE_MUL"
+ {
+ if (u16_operand (operands[2], SImode))
+ {
+ operands[2] = force_reg (HImode, gen_int_mode (INTVAL (operands[2]), HImode));
+ emit_insn (gen_muluhisi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ if (o16_operand (operands[2], SImode))
+ {
+ operands[2] = force_reg (HImode, gen_int_mode (INTVAL (operands[2]), HImode));
+ emit_insn (gen_mulohisi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+ })
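+
+;; Sketch (illustrative): a 32-bit multiply by an unsigned 16-bit
+;; constant may be routed through the widening muluhisi3 instead of a
+;; full 32x32 multiply.
+;;
+;; long scaled (long x)
+;; {
+;; return 60000L * x; /* u16_operand */
+;; }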
+
+(define_insn_and_split "*mulsi3"
+ [(set (match_operand:SI 0 "pseudo_register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "pseudo_register_operand" "r")
+ (match_operand:SI 2 "pseudo_register_or_const_int_operand" "rn")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:SI 18)
+ (match_dup 1))
+ (set (reg:SI 22)
+ (match_dup 2))
+ (parallel [(set (reg:SI 22)
+ (mult:SI (reg:SI 22)
+ (reg:SI 18)))
+ (clobber (reg:HI 26))])
+ (set (match_dup 0)
+ (reg:SI 22))]
+ {
+ if (u16_operand (operands[2], SImode))
+ {
+ operands[2] = force_reg (HImode, gen_int_mode (INTVAL (operands[2]), HImode));
+ emit_insn (gen_muluhisi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+
+ if (o16_operand (operands[2], SImode))
+ {
+ operands[2] = force_reg (HImode, gen_int_mode (INTVAL (operands[2]), HImode));
+ emit_insn (gen_mulohisi3 (operands[0], operands[2], operands[1]));
+ DONE;
+ }
+ })
+
+;; "muluqisi3"
+;; "muluhisi3"
+(define_insn_and_split "mulu<mode>si3"
+ [(set (match_operand:SI 0 "pseudo_register_operand" "=r")
+ (mult:SI (zero_extend:SI (match_operand:QIHI 1 "pseudo_register_operand" "r"))
+ (match_operand:SI 2 "pseudo_register_or_const_int_operand" "rn")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:HI 26)
+ (match_dup 1))
+ (set (reg:SI 18)
+ (match_dup 2))
+ (set (reg:SI 22)
+ (mult:SI (zero_extend:SI (reg:HI 26))
+ (reg:SI 18)))
+ (set (match_dup 0)
+ (reg:SI 22))]
+ {
+ /* Do the QI -> HI extension explicitly before the multiplication. */
+ /* Do the HI -> SI extension implicitly, after the multiplication. */
+
+ if (QImode == <MODE>mode)
+ operands[1] = gen_rtx_ZERO_EXTEND (HImode, operands[1]);
+
+ if (u16_operand (operands[2], SImode))
+ {
+ operands[1] = force_reg (HImode, operands[1]);
+ operands[2] = force_reg (HImode, gen_int_mode (INTVAL (operands[2]), HImode));
+ emit_insn (gen_umulhisi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ })
+
+;; "mulsqisi3"
+;; "mulshisi3"
+(define_insn_and_split "muls<mode>si3"
+ [(set (match_operand:SI 0 "pseudo_register_operand" "=r")
+ (mult:SI (sign_extend:SI (match_operand:QIHI 1 "pseudo_register_operand" "r"))
+ (match_operand:SI 2 "pseudo_register_or_const_int_operand" "rn")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:HI 26)
+ (match_dup 1))
+ (set (reg:SI 18)
+ (match_dup 2))
+ (set (reg:SI 22)
+ (mult:SI (sign_extend:SI (reg:HI 26))
+ (reg:SI 18)))
+ (set (match_dup 0)
+ (reg:SI 22))]
+ {
+ /* Do the QI -> HI extension explicitly before the multiplication. */
+ /* Do the HI -> SI extension implicitly, after the multiplication. */
+
+ if (QImode == <MODE>mode)
+ operands[1] = gen_rtx_SIGN_EXTEND (HImode, operands[1]);
+
+ if (u16_operand (operands[2], SImode)
+ || s16_operand (operands[2], SImode))
+ {
+ rtx xop2 = force_reg (HImode, gen_int_mode (INTVAL (operands[2]), HImode));
+
+ operands[1] = force_reg (HImode, operands[1]);
+
+ if (u16_operand (operands[2], SImode))
+ emit_insn (gen_usmulhisi3 (operands[0], xop2, operands[1]));
+ else
+ emit_insn (gen_mulhisi3 (operands[0], operands[1], xop2));
+
+ DONE;
+ }
+ })
+
+;; One-extend operand 1
+
+(define_insn_and_split "mulohisi3"
+ [(set (match_operand:SI 0 "pseudo_register_operand" "=r")
+ (mult:SI (not:SI (zero_extend:SI
+ (not:HI (match_operand:HI 1 "pseudo_register_operand" "r"))))
+ (match_operand:SI 2 "pseudo_register_or_const_int_operand" "rn")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:HI 26)
+ (match_dup 1))
+ (set (reg:SI 18)
+ (match_dup 2))
+ (set (reg:SI 22)
+ (mult:SI (not:SI (zero_extend:SI (not:HI (reg:HI 26))))
+ (reg:SI 18)))
+ (set (match_dup 0)
+ (reg:SI 22))])
+
+;; "mulhisi3"
+;; "umulhisi3"
+(define_expand "<extend_u>mulhisi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (any_extend:SI (match_operand:HI 1 "register_operand" ""))
+ (any_extend:SI (match_operand:HI 2 "register_operand" ""))))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))])]
+ "AVR_HAVE_MUL")
+
+(define_expand "usmulhisi3"
+ [(parallel [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" ""))
+ (sign_extend:SI (match_operand:HI 2 "register_operand" ""))))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))])]
+ "AVR_HAVE_MUL")
+
+;; "*uumulqihisi3" "*uumulhiqisi3" "*uumulhihisi3" "*uumulqiqisi3"
+;; "*usmulqihisi3" "*usmulhiqisi3" "*usmulhihisi3" "*usmulqiqisi3"
+;; "*sumulqihisi3" "*sumulhiqisi3" "*sumulhihisi3" "*sumulqiqisi3"
+;; "*ssmulqihisi3" "*ssmulhiqisi3" "*ssmulhihisi3" "*ssmulqiqisi3"
+(define_insn_and_split
+ "*<any_extend:extend_su><any_extend2:extend_su>mul<QIHI:mode><QIHI2:mode>si3"
+ [(set (match_operand:SI 0 "pseudo_register_operand" "=r")
+ (mult:SI (any_extend:SI (match_operand:QIHI 1 "pseudo_register_operand" "r"))
+ (any_extend2:SI (match_operand:QIHI2 2 "pseudo_register_operand" "r"))))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:HI 18)
+ (match_dup 1))
+ (set (reg:HI 26)
+ (match_dup 2))
+ (set (reg:SI 22)
+ (mult:SI (match_dup 3)
+ (match_dup 4)))
+ (set (match_dup 0)
+ (reg:SI 22))]
+ {
+ rtx xop1 = operands[1];
+ rtx xop2 = operands[2];
+
+ /* Do the QI -> HI extension explicitly before the multiplication. */
+ /* Do the HI -> SI extension implicitly, after the multiplication. */
+
+ if (QImode == <QIHI:MODE>mode)
+ xop1 = gen_rtx_fmt_e (<any_extend:CODE>, HImode, xop1);
+
+ if (QImode == <QIHI2:MODE>mode)
+ xop2 = gen_rtx_fmt_e (<any_extend2:CODE>, HImode, xop2);
+
+ if (<any_extend:CODE> == <any_extend2:CODE>
+ || <any_extend:CODE> == ZERO_EXTEND)
+ {
+ operands[1] = xop1;
+ operands[2] = xop2;
+ operands[3] = gen_rtx_fmt_e (<any_extend:CODE>, SImode, gen_rtx_REG (HImode, 18));
+ operands[4] = gen_rtx_fmt_e (<any_extend2:CODE>, SImode, gen_rtx_REG (HImode, 26));
+ }
+ else
+ {
+ /* <any_extend:CODE> = SIGN_EXTEND */
+ /* <any_extend2:CODE> = ZERO_EXTEND */
+
+ operands[1] = xop2;
+ operands[2] = xop1;
+ operands[3] = gen_rtx_ZERO_EXTEND (SImode, gen_rtx_REG (HImode, 18));
+ operands[4] = gen_rtx_SIGN_EXTEND (SImode, gen_rtx_REG (HImode, 26));
+ }
+ })
+
+;; "smulhi3_highpart"
+;; "umulhi3_highpart"
+(define_expand "<extend_su>mulhi3_highpart"
+ [(set (reg:HI 18)
+ (match_operand:HI 1 "nonmemory_operand" ""))
+ (set (reg:HI 26)
+ (match_operand:HI 2 "nonmemory_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (truncate:HI (lshiftrt:SI (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 26)))
+ (const_int 16))))
+ (clobber (reg:HI 22))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 24))]
+ "AVR_HAVE_MUL")
+
+
+(define_insn "*mulsi3_call"
+ [(set (reg:SI 22)
+ (mult:SI (reg:SI 22)
+ (reg:SI 18)))
+ (clobber (reg:HI 26))]
+ "AVR_HAVE_MUL"
+ "%~call __mulsi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "*mulhisi3_call"
+;; "*umulhisi3_call"
+(define_insn "*<extend_u>mulhisi3_call"
+ [(set (reg:SI 22)
+ (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 26))))]
+ "AVR_HAVE_MUL"
+ "%~call __<extend_u>mulhisi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; "*umulhi3_highpart_call"
+;; "*smulhi3_highpart_call"
+(define_insn "*<extend_su>mulhi3_highpart_call"
+ [(set (reg:HI 24)
+ (truncate:HI (lshiftrt:SI (mult:SI (any_extend:SI (reg:HI 18))
+ (any_extend:SI (reg:HI 26)))
+ (const_int 16))))
+ (clobber (reg:HI 22))]
+ "AVR_HAVE_MUL"
+ "%~call __<extend_u>mulhisi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*usmulhisi3_call"
+ [(set (reg:SI 22)
+ (mult:SI (zero_extend:SI (reg:HI 18))
+ (sign_extend:SI (reg:HI 26))))]
+ "AVR_HAVE_MUL"
+ "%~call __usmulhisi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*mul<extend_su>hisi3_call"
+ [(set (reg:SI 22)
+ (mult:SI (any_extend:SI (reg:HI 26))
+ (reg:SI 18)))]
+ "AVR_HAVE_MUL"
+ "%~call __mul<extend_su>hisi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*mulohisi3_call"
+ [(set (reg:SI 22)
+ (mult:SI (not:SI (zero_extend:SI (not:HI (reg:HI 26))))
+ (reg:SI 18)))]
+ "AVR_HAVE_MUL"
+ "%~call __mulohisi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+; / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / % / %
+; divmod
+
+;; Generate lib1funcs.S calls ourselves, because:
+;; - we know exactly which registers are clobbered (for QI and HI
+;; modes, some of the call-used registers are preserved)
+;; - we get both the quotient and the remainder at no extra cost
+;; - we split the patterns only after the first CSE passes, because
+;; CSE has trouble operating on hard regs.
+;;
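+;; For instance (a sketch; names illustrative), both results in
+;;
+;; unsigned int q, r;
+;; void divmod16 (unsigned int a, unsigned int b)
+;; {
+;; q = a / b;
+;; r = a % b;
+;; }
+;;
+;; can come from a single __udivmodhi4 call.
+;;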
+(define_insn_and_split "divmodqi4"
+ [(parallel [(set (match_operand:QI 0 "pseudo_register_operand" "")
+ (div:QI (match_operand:QI 1 "pseudo_register_operand" "")
+ (match_operand:QI 2 "pseudo_register_operand" "")))
+ (set (match_operand:QI 3 "pseudo_register_operand" "")
+ (mod:QI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))
+ (clobber (reg:QI 24))
+ (clobber (reg:QI 25))])]
+ ""
+ "this divmodqi4 pattern should have been splitted;"
+ ""
+ [(set (reg:QI 24) (match_dup 1))
+ (set (reg:QI 22) (match_dup 2))
+ (parallel [(set (reg:QI 24) (div:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (mod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))])
+ (set (match_dup 0) (reg:QI 24))
+ (set (match_dup 3) (reg:QI 25))])
+
+(define_insn "*divmodqi4_call"
+ [(set (reg:QI 24) (div:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (mod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))]
+ ""
+ "%~call __divmodqi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodqi4"
+ [(parallel [(set (match_operand:QI 0 "pseudo_register_operand" "")
+ (udiv:QI (match_operand:QI 1 "pseudo_register_operand" "")
+ (match_operand:QI 2 "pseudo_register_operand" "")))
+ (set (match_operand:QI 3 "pseudo_register_operand" "")
+ (umod:QI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 23))
+ (clobber (reg:QI 24))
+ (clobber (reg:QI 25))])]
+ ""
+ "this udivmodqi4 pattern should have been splitted;"
+ ""
+ [(set (reg:QI 24) (match_dup 1))
+ (set (reg:QI 22) (match_dup 2))
+ (parallel [(set (reg:QI 24) (udiv:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (umod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 23))])
+ (set (match_dup 0) (reg:QI 24))
+ (set (match_dup 3) (reg:QI 25))])
+
+(define_insn "*udivmodqi4_call"
+ [(set (reg:QI 24) (udiv:QI (reg:QI 24) (reg:QI 22)))
+ (set (reg:QI 25) (umod:QI (reg:QI 24) (reg:QI 22)))
+ (clobber (reg:QI 23))]
+ ""
+ "%~call __udivmodqi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "divmodhi4"
+ [(parallel [(set (match_operand:HI 0 "pseudo_register_operand" "")
+ (div:HI (match_operand:HI 1 "pseudo_register_operand" "")
+ (match_operand:HI 2 "pseudo_register_operand" "")))
+ (set (match_operand:HI 3 "pseudo_register_operand" "")
+ (mod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))
+ (clobber (reg:HI 24))
+ (clobber (reg:HI 26))])]
+ ""
+ "this should have been splitted;"
+ ""
+ [(set (reg:HI 24) (match_dup 1))
+ (set (reg:HI 22) (match_dup 2))
+ (parallel [(set (reg:HI 22) (div:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (mod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))])
+ (set (match_dup 0) (reg:HI 22))
+ (set (match_dup 3) (reg:HI 24))])
+
+(define_insn "*divmodhi4_call"
+ [(set (reg:HI 22) (div:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (mod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __divmodhi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodhi4"
+ [(parallel [(set (match_operand:HI 0 "pseudo_register_operand" "")
+ (udiv:HI (match_operand:HI 1 "pseudo_register_operand" "")
+ (match_operand:HI 2 "pseudo_register_operand" "")))
+ (set (match_operand:HI 3 "pseudo_register_operand" "")
+ (umod:HI (match_dup 1) (match_dup 2)))
+ (clobber (reg:QI 21))
+ (clobber (reg:HI 22))
+ (clobber (reg:HI 24))
+ (clobber (reg:HI 26))])]
+ ""
+ "this udivmodhi4 pattern should have been splitted.;"
+ ""
+ [(set (reg:HI 24) (match_dup 1))
+ (set (reg:HI 22) (match_dup 2))
+ (parallel [(set (reg:HI 22) (udiv:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (umod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))])
+ (set (match_dup 0) (reg:HI 22))
+ (set (match_dup 3) (reg:HI 24))])
+
+(define_insn "*udivmodhi4_call"
+ [(set (reg:HI 22) (udiv:HI (reg:HI 24) (reg:HI 22)))
+ (set (reg:HI 24) (umod:HI (reg:HI 24) (reg:HI 22)))
+ (clobber (reg:HI 26))
+ (clobber (reg:QI 21))]
+ ""
+ "%~call __udivmodhi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; 24-bit multiply
+
+;; To support widening multiplication with a constant, we postpone
+;; expanding to the implicit library call until after combine and
+;; prior to register allocation. Clobber all hard registers that
+;; might be used by the (widening) multiply until it is split and
+;; its final register footprint is worked out.
+
+(define_expand "mulpsi3"
+ [(parallel [(set (match_operand:PSI 0 "register_operand" "")
+ (mult:PSI (match_operand:PSI 1 "register_operand" "")
+ (match_operand:PSI 2 "nonmemory_operand" "")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))])]
+ "AVR_HAVE_MUL"
+ {
+ if (s8_operand (operands[2], PSImode))
+ {
+ rtx reg = force_reg (QImode, gen_int_mode (INTVAL (operands[2]), QImode));
+ emit_insn (gen_mulsqipsi3 (operands[0], reg, operands[1]));
+ DONE;
+ }
+ })
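+
+;; Sketch (illustrative; __int24 is avr-gcc's native 24-bit type):
+;;
+;; __int24 tripled (__int24 x)
+;; {
+;; return 3 * x; /* s8_operand: may expand via mulsqipsi3 */
+;; }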
+
+(define_insn "*umulqihipsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=&r")
+ (mult:PSI (zero_extend:PSI (match_operand:QI 1 "register_operand" "r"))
+ (zero_extend:PSI (match_operand:HI 2 "register_operand" "r"))))]
+ "AVR_HAVE_MUL"
+ "mul %1,%A2
+ movw %A0,r0
+ mul %1,%B2
+ clr %C0
+ add %B0,r0
+ adc %C0,r1
+ clr __zero_reg__"
+ [(set_attr "length" "7")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*umulhiqipsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=&r")
+ (mult:PSI (zero_extend:PSI (match_operand:HI 2 "register_operand" "r"))
+ (zero_extend:PSI (match_operand:QI 1 "register_operand" "r"))))]
+ "AVR_HAVE_MUL"
+ "mul %1,%A2
+ movw %A0,r0
+ mul %1,%B2
+ add %B0,r0
+ mov %C0,r1
+ clr __zero_reg__
+ adc %C0,__zero_reg__"
+ [(set_attr "length" "7")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "mulsqipsi3"
+ [(set (match_operand:PSI 0 "pseudo_register_operand" "=r")
+ (mult:PSI (sign_extend:PSI (match_operand:QI 1 "pseudo_register_operand" "r"))
+ (match_operand:PSI 2 "pseudo_register_or_const_int_operand" "rn")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:QI 25)
+ (match_dup 1))
+ (set (reg:PSI 22)
+ (match_dup 2))
+ (set (reg:PSI 18)
+ (mult:PSI (sign_extend:PSI (reg:QI 25))
+ (reg:PSI 22)))
+ (set (match_dup 0)
+ (reg:PSI 18))])
+
+(define_insn_and_split "*mulpsi3"
+ [(set (match_operand:PSI 0 "pseudo_register_operand" "=r")
+ (mult:PSI (match_operand:PSI 1 "pseudo_register_operand" "r")
+ (match_operand:PSI 2 "pseudo_register_or_const_int_operand" "rn")))
+ (clobber (reg:HI 26))
+ (clobber (reg:DI 18))]
+ "AVR_HAVE_MUL && !reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:PSI 18)
+ (match_dup 1))
+ (set (reg:PSI 22)
+ (match_dup 2))
+ (parallel [(set (reg:PSI 22)
+ (mult:PSI (reg:PSI 22)
+ (reg:PSI 18)))
+ (clobber (reg:QI 21))
+ (clobber (reg:QI 25))
+ (clobber (reg:HI 26))])
+ (set (match_dup 0)
+ (reg:PSI 22))]
+ {
+ if (s8_operand (operands[2], PSImode))
+ {
+ rtx reg = force_reg (QImode, gen_int_mode (INTVAL (operands[2]), QImode));
+ emit_insn (gen_mulsqipsi3 (operands[0], reg, operands[1]));
+ DONE;
+ }
+ })
+
+(define_insn "*mulsqipsi3.libgcc"
+ [(set (reg:PSI 18)
+ (mult:PSI (sign_extend:PSI (reg:QI 25))
+ (reg:PSI 22)))]
+ "AVR_HAVE_MUL"
+ "%~call __mulsqipsi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*mulpsi3.libgcc"
+ [(set (reg:PSI 22)
+ (mult:PSI (reg:PSI 22)
+ (reg:PSI 18)))
+ (clobber (reg:QI 21))
+ (clobber (reg:QI 25))
+ (clobber (reg:HI 26))]
+ "AVR_HAVE_MUL"
+ "%~call __mulpsi3"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;; 24-bit signed/unsigned division and modulo.
+;; Notice that the libgcc implementation returns the quotient in R22
+;; and the remainder in R18, whereas the 32-bit [u]divmodsi4
+;; implementation works the other way round.
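+;;
+;; From the patterns below, the assumed register layout is:
+;;
+;;     __[u]divmodpsi4:  quotient in R22..R24, remainder in R18..R20
+;;     __[u]divmodsi4:   quotient in R18..R21, remainder in R22..R25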
+
+(define_insn_and_split "divmodpsi4"
+ [(parallel [(set (match_operand:PSI 0 "pseudo_register_operand" "")
+ (div:PSI (match_operand:PSI 1 "pseudo_register_operand" "")
+ (match_operand:PSI 2 "pseudo_register_operand" "")))
+ (set (match_operand:PSI 3 "pseudo_register_operand" "")
+ (mod:PSI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:DI 18))
+ (clobber (reg:QI 26))])]
+ ""
+ { gcc_unreachable(); }
+ ""
+ [(set (reg:PSI 22) (match_dup 1))
+ (set (reg:PSI 18) (match_dup 2))
+ (parallel [(set (reg:PSI 22) (div:PSI (reg:PSI 22) (reg:PSI 18)))
+ (set (reg:PSI 18) (mod:PSI (reg:PSI 22) (reg:PSI 18)))
+ (clobber (reg:QI 21))
+ (clobber (reg:QI 25))
+ (clobber (reg:QI 26))])
+ (set (match_dup 0) (reg:PSI 22))
+ (set (match_dup 3) (reg:PSI 18))])
+
+(define_insn "*divmodpsi4_call"
+ [(set (reg:PSI 22) (div:PSI (reg:PSI 22) (reg:PSI 18)))
+ (set (reg:PSI 18) (mod:PSI (reg:PSI 22) (reg:PSI 18)))
+ (clobber (reg:QI 21))
+ (clobber (reg:QI 25))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __divmodpsi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodpsi4"
+ [(parallel [(set (match_operand:PSI 0 "pseudo_register_operand" "")
+ (udiv:PSI (match_operand:PSI 1 "pseudo_register_operand" "")
+ (match_operand:PSI 2 "pseudo_register_operand" "")))
+ (set (match_operand:PSI 3 "pseudo_register_operand" "")
+ (umod:PSI (match_dup 1)
+ (match_dup 2)))
+ (clobber (reg:DI 18))
+ (clobber (reg:QI 26))])]
+ ""
+ { gcc_unreachable(); }
+ ""
+ [(set (reg:PSI 22) (match_dup 1))
+ (set (reg:PSI 18) (match_dup 2))
+ (parallel [(set (reg:PSI 22) (udiv:PSI (reg:PSI 22) (reg:PSI 18)))
+ (set (reg:PSI 18) (umod:PSI (reg:PSI 22) (reg:PSI 18)))
+ (clobber (reg:QI 21))
+ (clobber (reg:QI 25))
+ (clobber (reg:QI 26))])
+ (set (match_dup 0) (reg:PSI 22))
+ (set (match_dup 3) (reg:PSI 18))])
+
+(define_insn "*udivmodpsi4_call"
+ [(set (reg:PSI 22) (udiv:PSI (reg:PSI 22) (reg:PSI 18)))
+ (set (reg:PSI 18) (umod:PSI (reg:PSI 22) (reg:PSI 18)))
+ (clobber (reg:QI 21))
+ (clobber (reg:QI 25))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __udivmodpsi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+
+(define_insn_and_split "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "pseudo_register_operand" "")
+ (div:SI (match_operand:SI 1 "pseudo_register_operand" "")
+ (match_operand:SI 2 "pseudo_register_operand" "")))
+ (set (match_operand:SI 3 "pseudo_register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:SI 18))
+ (clobber (reg:SI 22))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])]
+ ""
+ "this divmodsi4 pattern should have been splitted;"
+ ""
+ [(set (reg:SI 22) (match_dup 1))
+ (set (reg:SI 18) (match_dup 2))
+ (parallel [(set (reg:SI 18) (div:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (mod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_dup 0) (reg:SI 18))
+ (set (match_dup 3) (reg:SI 22))])
+
+(define_insn "*divmodsi4_call"
+ [(set (reg:SI 18) (div:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (mod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __divmodsi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "udivmodsi4"
+ [(parallel [(set (match_operand:SI 0 "pseudo_register_operand" "")
+ (udiv:SI (match_operand:SI 1 "pseudo_register_operand" "")
+ (match_operand:SI 2 "pseudo_register_operand" "")))
+ (set (match_operand:SI 3 "pseudo_register_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:SI 18))
+ (clobber (reg:SI 22))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])]
+ ""
+ "this udivmodsi4 pattern should have been splitted;"
+ ""
+ [(set (reg:SI 22) (match_dup 1))
+ (set (reg:SI 18) (match_dup 2))
+ (parallel [(set (reg:SI 18) (udiv:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (umod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))])
+ (set (match_dup 0) (reg:SI 18))
+ (set (match_dup 3) (reg:SI 22))])
+
+(define_insn "*udivmodsi4_call"
+ [(set (reg:SI 18) (udiv:SI (reg:SI 22) (reg:SI 18)))
+ (set (reg:SI 22) (umod:SI (reg:SI 22) (reg:SI 18)))
+ (clobber (reg:HI 26))
+ (clobber (reg:HI 30))]
+ ""
+ "%~call __udivmodsi4"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
+; and
+
+(define_insn "andqi3"
+ [(set (match_operand:QI 0 "register_operand" "=??r,d")
+ (and:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ and %0,%2
+ andi %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_zn,set_zn")])
+
+(define_insn "andhi3"
+ [(set (match_operand:HI 0 "register_operand" "=??r,d,d,r ,r")
+ (and:HI (match_operand:HI 1 "register_operand" "%0,0,0,0 ,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,s,n,Ca2,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "and %A0,%A2\;and %B0,%B2";
+ else if (which_alternative == 1)
+ return "andi %A0,lo8(%2)\;andi %B0,hi8(%2)";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "2,2,2,4,4")
+ (set_attr "adjust_len" "*,*,out_bitop,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,set_n,clobber,clobber,clobber")])
+
+(define_insn "andpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=??r,d,r ,r")
+ (and:PSI (match_operand:PSI 1 "register_operand" "%0,0,0 ,0")
+ (match_operand:PSI 2 "nonmemory_operand" "r,n,Ca3,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "and %A0,%A2" CR_TAB
+ "and %B0,%B2" CR_TAB
+ "and %C0,%C2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "3,3,6,6")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber,clobber")])
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=??r,d,r ,r")
+ (and:SI (match_operand:SI 1 "register_operand" "%0,0,0 ,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,n,Ca4,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "and %0,%2" CR_TAB
+ "and %B0,%B2" CR_TAB
+ "and %C0,%C2" CR_TAB
+ "and %D0,%D2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "4,4,8,8")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber,clobber")])
+
+(define_peephole2 ; andi
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (and:QI (match_dup 0)
+ (match_operand:QI 1 "const_int_operand" "")))
+ (set (match_dup 0)
+ (and:QI (match_dup 0)
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(set (match_dup 0) (and:QI (match_dup 0) (match_dup 1)))]
+ {
+ operands[1] = GEN_INT (INTVAL (operands[1]) & INTVAL (operands[2]));
+ })
+
+;;|||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||
+;; ior
+
+(define_insn "iorqi3"
+ [(set (match_operand:QI 0 "register_operand" "=??r,d")
+ (ior:QI (match_operand:QI 1 "register_operand" "%0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,i")))]
+ ""
+ "@
+ or %0,%2
+ ori %0,lo8(%2)"
+ [(set_attr "length" "1,1")
+ (set_attr "cc" "set_zn,set_zn")])
+
+(define_insn "iorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=??r,d,d,r ,r")
+ (ior:HI (match_operand:HI 1 "register_operand" "%0,0,0,0 ,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,s,n,Co2,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "or %A0,%A2\;or %B0,%B2";
+ else if (which_alternative == 1)
+ return "ori %A0,lo8(%2)\;ori %B0,hi8(%2)";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "2,2,2,4,4")
+ (set_attr "adjust_len" "*,*,out_bitop,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,set_n,clobber,clobber,clobber")])
+
+(define_insn "iorpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=??r,d,r ,r")
+ (ior:PSI (match_operand:PSI 1 "register_operand" "%0,0,0 ,0")
+ (match_operand:PSI 2 "nonmemory_operand" "r,n,Co3,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "or %A0,%A2" CR_TAB
+ "or %B0,%B2" CR_TAB
+ "or %C0,%C2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "3,3,6,6")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber,clobber")])
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=??r,d,r ,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "%0,0,0 ,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,n,Co4,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "or %0,%2" CR_TAB
+ "or %B0,%B2" CR_TAB
+ "or %C0,%C2" CR_TAB
+ "or %D0,%D2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "4,4,8,8")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber,clobber")])
+
+;;^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+;; xor
+
+(define_insn "xorqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (xor:QI (match_operand:QI 1 "register_operand" "%0")
+ (match_operand:QI 2 "register_operand" "r")))]
+ ""
+ "eor %0,%2"
+ [(set_attr "length" "1")
+ (set_attr "cc" "set_zn")])
+
+(define_insn "xorhi3"
+ [(set (match_operand:HI 0 "register_operand" "=??r,r ,r")
+ (xor:HI (match_operand:HI 1 "register_operand" "%0,0 ,0")
+ (match_operand:HI 2 "nonmemory_operand" "r,Cx2,n")))
+ (clobber (match_scratch:QI 3 "=X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "eor %A0,%A2\;eor %B0,%B2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "2,2,4")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber")])
+
+(define_insn "xorpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=??r,r ,r")
+ (xor:PSI (match_operand:PSI 1 "register_operand" "%0,0 ,0")
+ (match_operand:PSI 2 "nonmemory_operand" "r,Cx3,n")))
+ (clobber (match_scratch:QI 3 "=X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "eor %A0,%A2" CR_TAB
+ "eor %B0,%B2" CR_TAB
+ "eor %C0,%C2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "3,6,6")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=??r,r ,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "%0,0 ,0")
+ (match_operand:SI 2 "nonmemory_operand" "r,Cx4,n")))
+ (clobber (match_scratch:QI 3 "=X,X ,&d"))]
+ ""
+ {
+ if (which_alternative == 0)
+ return "eor %0,%2" CR_TAB
+ "eor %B0,%B2" CR_TAB
+ "eor %C0,%C2" CR_TAB
+ "eor %D0,%D2";
+
+ return avr_out_bitop (insn, operands, NULL);
+ }
+ [(set_attr "length" "4,8,8")
+ (set_attr "adjust_len" "*,out_bitop,out_bitop")
+ (set_attr "cc" "set_n,clobber,clobber")])
+
+;; swap swap swap swap swap swap swap swap swap swap swap swap swap swap swap
+;; swap
+
+(define_expand "rotlqi3"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (rotate:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "const_0_to_7_operand" "")))]
+ ""
+ {
+ if (!CONST_INT_P (operands[2]))
+ FAIL;
+
+ operands[2] = gen_int_mode (INTVAL (operands[2]) & 7, QImode);
+ })
+
+;; Expander used by __builtin_avr_swap
+(define_expand "rotlqi3_4"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (rotate:QI (match_operand:QI 1 "register_operand" "")
+ (const_int 4)))])
+
+(define_insn "*rotlqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r ,r ,r ,r ,r ,r")
+ (rotate:QI (match_operand:QI 1 "register_operand" "0,0,0 ,0 ,0 ,0 ,0 ,0")
+ (match_operand:QI 2 "const_0_to_7_operand" "P,K,C03,C04,C05,C06,C07,L")))]
+ ""
+ "@
+ lsl %0\;adc %0,__zero_reg__
+ lsl %0\;adc %0,__zero_reg__\;lsl %0\;adc %0,__zero_reg__
+ swap %0\;bst %0,0\;ror %0\;bld %0,7
+ swap %0
+ swap %0\;lsl %0\;adc %0,__zero_reg__
+ swap %0\;lsl %0\;adc %0,__zero_reg__\;lsl %0\;adc %0,__zero_reg__
+ bst %0,0\;ror %0\;bld %0,7
+ "
+ [(set_attr "length" "2,4,4,1,3,5,3,0")
+ (set_attr "cc" "set_n,set_n,clobber,none,set_n,set_n,clobber,none")])
+
+;; Split all rotates of HI, SI and PSImode registers where the rotation
+;; is by a whole number of bytes.  The split creates the appropriate moves
+;; and considers all overlap situations.
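+;;
+;; E.g. (a sketch): rotating an SImode value by 8, as in
+;;
+;;     unsigned long rot8 (unsigned long x)
+;;     {
+;;         return (x << 8) | (x >> 24);
+;;     }
+;;
+;; is a pure byte permutation; avr_rotate_bytes emits the moves, using a
+;; scratch register when source and destination bytes overlap.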
+
+;; HImode does not need a scratch.  Use a mode attribute for this constraint.
+
+(define_mode_attr rotx [(SI "&r,&r,X") (PSI "&r,&r,X") (HI "X,X,X")])
+(define_mode_attr rotsmode [(SI "HI") (PSI "QI") (HI "QI")])
+
+;; "rotlhi3"
+;; "rotlpsi3"
+;; "rotlsi3"
+(define_expand "rotl<mode>3"
+ [(parallel [(set (match_operand:HISI 0 "register_operand" "")
+ (rotate:HISI (match_operand:HISI 1 "register_operand" "")
+ (match_operand:VOID 2 "const_int_operand" "")))
+ (clobber (match_dup 3))])]
+ ""
+ {
+ int offset;
+
+ if (!CONST_INT_P (operands[2]))
+ FAIL;
+
+ offset = INTVAL (operands[2]);
+
+ if (0 == offset % 8)
+ {
+ if (AVR_HAVE_MOVW && 0 == offset % 16)
+ operands[3] = gen_rtx_SCRATCH (<rotsmode>mode);
+ else
+ operands[3] = gen_rtx_SCRATCH (QImode);
+ }
+ else if (offset == 1
+ || offset == GET_MODE_BITSIZE (<MODE>mode) - 1)
+ {
+ /* Support rotate left/right by 1.  */
+
+ emit_move_insn (operands[0],
+ gen_rtx_ROTATE (<MODE>mode, operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+ })
+
+(define_insn "*rotlhi2.1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (rotate:HI (match_operand:HI 1 "register_operand" "0")
+ (const_int 1)))]
+ ""
+ "lsl %A0\;rol %B0\;adc %A0,__zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*rotlhi2.15"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (rotate:HI (match_operand:HI 1 "register_operand" "0")
+ (const_int 15)))]
+ ""
+ "bst %A0,0\;ror %B0\;ror %A0\;bld %B0,7"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*rotlpsi2.1"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (rotate:PSI (match_operand:PSI 1 "register_operand" "0")
+ (const_int 1)))]
+ ""
+ "lsl %A0\;rol %B0\;rol %C0\;adc %A0,__zero_reg__"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*rotlpsi2.23"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (rotate:PSI (match_operand:PSI 1 "register_operand" "0")
+ (const_int 23)))]
+ ""
+ "bst %A0,0\;ror %C0\;ror %B0\;ror %A0\;bld %C0,7"
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*rotlsi2.1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 1)))]
+ ""
+ "lsl %A0\;rol %B0\;rol %C0\;rol %D0\;adc %A0,__zero_reg__"
+ [(set_attr "length" "5")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*rotlsi2.31"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (rotate:SI (match_operand:SI 1 "register_operand" "0")
+ (const_int 31)))]
+ ""
+ "bst %A0,0\;ror %D0\;ror %C0\;ror %B0\;ror %A0\;bld %D0,7"
+ [(set_attr "length" "6")
+ (set_attr "cc" "clobber")])
+
+;; Overlapping non-HImode registers often (but not always) need a scratch.
+;; The best we can do is use the early-clobber alternative "#&r" so that
+;; completely non-overlapping operands don't get a scratch, while the "#"
+;; keeps register allocation from preferring non-overlapping operands.
+
+
+;; Split word aligned rotates using scratch that is mode dependent.
+
+;; "*rotwhi"
+;; "*rotwsi"
+(define_insn_and_split "*rotw<mode>"
+ [(set (match_operand:HISI 0 "register_operand" "=r,r,#&r")
+ (rotate:HISI (match_operand:HISI 1 "register_operand" "0,r,r")
+ (match_operand 2 "const_int_operand" "n,n,n")))
+ (clobber (match_scratch:<rotsmode> 3 "=<rotx>"))]
+ "AVR_HAVE_MOVW
+ && CONST_INT_P (operands[2])
+ && GET_MODE_SIZE (<MODE>mode) % 2 == 0
+ && 0 == INTVAL (operands[2]) % 16"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ avr_rotate_bytes (operands);
+ DONE;
+ })
+
+
+;; Split byte aligned rotates using scratch that is always QI mode.
+
+;; "*rotbhi"
+;; "*rotbpsi"
+;; "*rotbsi"
+(define_insn_and_split "*rotb<mode>"
+ [(set (match_operand:HISI 0 "register_operand" "=r,r,#&r")
+ (rotate:HISI (match_operand:HISI 1 "register_operand" "0,r,r")
+ (match_operand 2 "const_int_operand" "n,n,n")))
+ (clobber (match_scratch:QI 3 "=<rotx>"))]
+ "CONST_INT_P (operands[2])
+ && (8 == INTVAL (operands[2]) % 16
+ || ((!AVR_HAVE_MOVW
+ || GET_MODE_SIZE (<MODE>mode) % 2 != 0)
+ && 0 == INTVAL (operands[2]) % 16))"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ {
+ avr_rotate_bytes (operands);
+ DONE;
+ })
+
+
+;;<< << << << << << << << << << << << << << << << << << << << << << << << << <<
+;; arithmetic shift left
+
+;; "ashlqi3"
+;; "ashlqq3" "ashluqq3"
+(define_expand "ashl<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "")
+ (ashift:ALL1 (match_operand:ALL1 1 "register_operand" "")
+ (match_operand:QI 2 "nop_general_operand" "")))])
+
+(define_split ; ashlqi3_const4
+ [(set (match_operand:ALL1 0 "d_register_operand" "")
+ (ashift:ALL1 (match_dup 0)
+ (const_int 4)))]
+ ""
+ [(set (match_dup 1)
+ (rotate:QI (match_dup 1)
+ (const_int 4)))
+ (set (match_dup 1)
+ (and:QI (match_dup 1)
+ (const_int -16)))]
+ {
+ operands[1] = avr_to_int_mode (operands[0]);
+ })
+
+(define_split ; ashlqi3_const5
+ [(set (match_operand:ALL1 0 "d_register_operand" "")
+ (ashift:ALL1 (match_dup 0)
+ (const_int 5)))]
+ ""
+ [(set (match_dup 1) (rotate:QI (match_dup 1) (const_int 4)))
+ (set (match_dup 1) (ashift:QI (match_dup 1) (const_int 1)))
+ (set (match_dup 1) (and:QI (match_dup 1) (const_int -32)))]
+ {
+ operands[1] = avr_to_int_mode (operands[0]);
+ })
+
+(define_split ; ashlqi3_const6
+ [(set (match_operand:ALL1 0 "d_register_operand" "")
+ (ashift:ALL1 (match_dup 0)
+ (const_int 6)))]
+ ""
+ [(set (match_dup 1) (rotate:QI (match_dup 1) (const_int 4)))
+ (set (match_dup 1) (ashift:QI (match_dup 1) (const_int 2)))
+ (set (match_dup 1) (and:QI (match_dup 1) (const_int -64)))]
+ {
+ operands[1] = avr_to_int_mode (operands[0]);
+ })
+
+;; "*ashlqi3"
+;; "*ashlqq3" "*ashluqq3"
+(define_insn "*ashl<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "=r,r,r,r,!d,r,r")
+ (ashift:ALL1 (match_operand:ALL1 1 "register_operand" "0,0,0,0,0 ,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,K,n ,n,Qm")))]
+ ""
+ {
+ return ashlqi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "5,0,1,2,4,6,9")
+ (set_attr "adjust_len" "ashlqi")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
+
+(define_insn "ashl<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashift:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ {
+ return ashlhi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "6,0,2,2,4,10,10")
+ (set_attr "adjust_len" "ashlhi")
+ (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
+
+
+;; Insns like the following are generated when (implicitly) extending
+;; 8-bit shifts like char1 = char2 << char3.  Only the low byte is needed
+;; in that situation.
+
+;; "*ashluqihiqi3"
+;; "*ashlsqihiqi3"
+(define_insn_and_split "*ashl<extend_su>qihiqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (subreg:QI (ashift:HI (any_extend:HI (match_operand:QI 1 "register_operand" "0"))
+ (match_operand:QI 2 "register_operand" "r"))
+ 0))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0)
+ (ashift:QI (match_dup 1)
+ (match_dup 2)))])
+
+;; ??? The combiner does not recognize that it could split the following
+;; insn; presumably because it has no register handy?
+
+;; "*ashluqihiqi3.mem"
+;; "*ashlsqihiqi3.mem"
+(define_insn_and_split "*ashl<extend_su>qihiqi3.mem"
+ [(set (match_operand:QI 0 "memory_operand" "=m")
+ (subreg:QI (ashift:HI (any_extend:HI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:QI 2 "register_operand" "r"))
+ 0))]
+ "!reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (match_dup 3)
+ (ashift:QI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_dup 3))]
+ {
+ operands[3] = gen_reg_rtx (QImode);
+ })
+
+;; Similar.
+
+(define_insn_and_split "*ashlhiqi3"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r")
+ (subreg:QI (ashift:HI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:QI 2 "register_operand" "r")) 0))]
+ "!reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (match_dup 4)
+ (ashift:QI (match_dup 3)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (match_dup 4))]
+ {
+ operands[3] = simplify_gen_subreg (QImode, operands[1], HImode, 0);
+ operands[4] = gen_reg_rtx (QImode);
+ })
+
+;; The high part of a 16-bit shift is unused after the instruction:
+;; no need to compute it; map to an 8-bit shift instead.
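+;;
+;; E.g. (a sketch): in "c = (char) (h << n)" with 16-bit h, the shift is
+;; performed in HImode but only the low byte survives; when
+;; peep2_reg_dead_p confirms the high byte is dead, the peephole below
+;; shifts just the low byte in QImode.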
+
+(define_peephole2
+ [(set (match_operand:HI 0 "register_operand" "")
+ (ashift:HI (match_dup 0)
+ (match_operand:QI 1 "register_operand" "")))]
+ ""
+ [(set (match_dup 2)
+ (ashift:QI (match_dup 2)
+ (match_dup 1)))
+ (clobber (match_dup 3))]
+ {
+ operands[3] = simplify_gen_subreg (QImode, operands[0], HImode, 1);
+
+ if (!peep2_reg_dead_p (1, operands[3]))
+ FAIL;
+
+ operands[2] = simplify_gen_subreg (QImode, operands[0], HImode, 0);
+ })
+
+
+;; "ashlsi3"
+;; "ashlsq3" "ashlusq3"
+;; "ashlsa3" "ashlusa3"
+(define_insn "ashl<mode>3"
+ [(set (match_operand:ALL4 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashift:ALL4 (match_operand:ALL4 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ {
+ return ashlsi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "8,0,4,4,8,10,12")
+ (set_attr "adjust_len" "ashlsi")
+ (set_attr "cc" "clobber,none,set_n,clobber,set_n,clobber,clobber")])
+
+;; Optimize if a scratch register from LD_REGS happens to be available.
+
+(define_peephole2 ; ashlqi3_l_const4
+ [(set (match_operand:ALL1 0 "l_register_operand" "")
+ (ashift:ALL1 (match_dup 0)
+ (const_int 4)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 2) (rotate:QI (match_dup 2) (const_int 4)))
+ (set (match_dup 1) (const_int -16))
+ (set (match_dup 2) (and:QI (match_dup 2) (match_dup 1)))]
+ {
+ operands[2] = avr_to_int_mode (operands[0]);
+ })
+
+(define_peephole2 ; ashlqi3_l_const5
+ [(set (match_operand:ALL1 0 "l_register_operand" "")
+ (ashift:ALL1 (match_dup 0)
+ (const_int 5)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 2) (rotate:QI (match_dup 2) (const_int 4)))
+ (set (match_dup 2) (ashift:QI (match_dup 2) (const_int 1)))
+ (set (match_dup 1) (const_int -32))
+ (set (match_dup 2) (and:QI (match_dup 2) (match_dup 1)))]
+ {
+ operands[2] = avr_to_int_mode (operands[0]);
+ })
+
+(define_peephole2 ; ashlqi3_l_const6
+ [(set (match_operand:ALL1 0 "l_register_operand" "")
+ (ashift:ALL1 (match_dup 0)
+ (const_int 6)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 2) (rotate:QI (match_dup 2) (const_int 4)))
+ (set (match_dup 2) (ashift:QI (match_dup 2) (const_int 2)))
+ (set (match_dup 1) (const_int -64))
+ (set (match_dup 2) (and:QI (match_dup 2) (match_dup 1)))]
+ {
+ operands[2] = avr_to_int_mode (operands[0]);
+ })
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:ALL2 0 "register_operand" "")
+ (ashift:ALL2 (match_operand:ALL2 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (ashift:ALL2 (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 3))])])
+
+;; "*ashlhi3_const"
+;; "*ashlhq3_const" "*ashluhq3_const"
+;; "*ashlha3_const" "*ashluha3_const"
+(define_insn "*ashl<mode>3_const"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r")
+ (ashift:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ {
+ return ashlhi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "0,2,2,4,10")
+ (set_attr "adjust_len" "ashlhi")
+ (set_attr "cc" "none,set_n,clobber,set_n,clobber")])
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:ALL4 0 "register_operand" "")
+ (ashift:ALL4 (match_operand:ALL4 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (ashift:ALL4 (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 3))])])
+
+;; "*ashlsi3_const"
+;; "*ashlsq3_const" "*ashlusq3_const"
+;; "*ashlsa3_const" "*ashlusa3_const"
+(define_insn "*ashl<mode>3_const"
+ [(set (match_operand:ALL4 0 "register_operand" "=r,r,r,r")
+ (ashift:ALL4 (match_operand:ALL4 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ {
+ return ashlsi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "0,4,4,10")
+ (set_attr "adjust_len" "ashlsi")
+ (set_attr "cc" "none,set_n,clobber,clobber")])
+
+(define_expand "ashlpsi3"
+ [(parallel [(set (match_operand:PSI 0 "register_operand" "")
+ (ashift:PSI (match_operand:PSI 1 "register_operand" "")
+ (match_operand:QI 2 "nonmemory_operand" "")))
+ (clobber (scratch:QI))])]
+ ""
+ {
+ if (AVR_HAVE_MUL
+ && CONST_INT_P (operands[2]))
+ {
+ if (IN_RANGE (INTVAL (operands[2]), 3, 6))
+ {
+ rtx xoffset = force_reg (QImode, gen_int_mode (1 << INTVAL (operands[2]), QImode));
+ emit_insn (gen_mulsqipsi3 (operands[0], xoffset, operands[1]));
+ DONE;
+ }
+ else if (optimize_insn_for_speed_p ()
+ && INTVAL (operands[2]) != 16
+ && IN_RANGE (INTVAL (operands[2]), 9, 22))
+ {
+ rtx xoffset = force_reg (PSImode, gen_int_mode (1 << INTVAL (operands[2]), PSImode));
+ emit_insn (gen_mulpsi3 (operands[0], operands[1], xoffset));
+ DONE;
+ }
+ }
+ })
+
+(define_insn "*ashlpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=r,r,r,r")
+ (ashift:PSI (match_operand:PSI 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ ""
+ {
+ return avr_out_ashlpsi3 (insn, operands, NULL);
+ }
+ [(set_attr "adjust_len" "ashlpsi")
+ (set_attr "cc" "clobber")])
+
+;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
+;; arithmetic shift right
+
+;; "ashrqi3"
+;; "ashrqq3" "ashruqq3"
+(define_insn "ashr<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "=r,r,r,r,r ,r ,r")
+ (ashiftrt:ALL1 (match_operand:ALL1 1 "register_operand" "0,0,0,0,0 ,0 ,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,K,C03 C04 C05,C06 C07,Qm")))]
+ ""
+ {
+ return ashrqi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "5,0,1,2,5,4,9")
+ (set_attr "adjust_len" "ashrqi")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,clobber,clobber")])
+
+;; "ashrhi3"
+;; "ashrhq3" "ashruhq3"
+;; "ashrha3" "ashruha3"
+(define_insn "ashr<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ {
+ return ashrhi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "6,0,2,4,4,10,10")
+ (set_attr "adjust_len" "ashrhi")
+ (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
+
+(define_insn "ashrpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=r,r,r,r,r")
+ (ashiftrt:PSI (match_operand:PSI 1 "register_operand" "0,0,0,r,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,P,K,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ ""
+ {
+ return avr_out_ashrpsi3 (insn, operands, NULL);
+ }
+ [(set_attr "adjust_len" "ashrpsi")
+ (set_attr "cc" "clobber")])
+
+;; "ashrsi3"
+;; "ashrsq3" "ashrusq3"
+;; "ashrsa3" "ashrusa3"
+(define_insn "ashr<mode>3"
+ [(set (match_operand:ALL4 0 "register_operand" "=r,r,r,r,r,r,r")
+ (ashiftrt:ALL4 (match_operand:ALL4 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ {
+ return ashrsi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "8,0,4,6,8,10,12")
+ (set_attr "adjust_len" "ashrsi")
+ (set_attr "cc" "clobber,none,clobber,set_n,clobber,clobber,clobber")])
+
+;; Optimize if a scratch register from LD_REGS happens to be available.
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:ALL2 0 "register_operand" "")
+ (ashiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (ashiftrt:ALL2 (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 3))])])
+
+;; "*ashrhi3_const"
+;; "*ashrhq3_const" "*ashruhq3_const"
+;; "*ashrha3_const" "*ashruha3_const"
+(define_insn "*ashr<mode>3_const"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r")
+ (ashiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ {
+ return ashrhi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "0,2,4,4,10")
+ (set_attr "adjust_len" "ashrhi")
+ (set_attr "cc" "none,clobber,set_n,clobber,clobber")])
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:ALL4 0 "register_operand" "")
+ (ashiftrt:ALL4 (match_operand:ALL4 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (ashiftrt:ALL4 (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 3))])])
+
+;; "*ashrsi3_const"
+;; "*ashrsq3_const" "*ashrusq3_const"
+;; "*ashrsa3_const" "*ashrusa3_const"
+(define_insn "*ashr<mode>3_const"
+ [(set (match_operand:ALL4 0 "register_operand" "=r,r,r,r")
+ (ashiftrt:ALL4 (match_operand:ALL4 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ {
+ return ashrsi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "0,4,4,10")
+ (set_attr "adjust_len" "ashrsi")
+ (set_attr "cc" "none,clobber,set_n,clobber")])
+
+;; >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >> >>
+;; logical shift right
+
+;; "lshrqi3"
+;; "lshrqq3 "lshruqq3"
+(define_expand "lshr<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "")
+ (lshiftrt:ALL1 (match_operand:ALL1 1 "register_operand" "")
+ (match_operand:QI 2 "nop_general_operand" "")))])
+
+(define_split ; lshrqi3_const4
+ [(set (match_operand:ALL1 0 "d_register_operand" "")
+ (lshiftrt:ALL1 (match_dup 0)
+ (const_int 4)))]
+ ""
+ [(set (match_dup 1)
+ (rotate:QI (match_dup 1)
+ (const_int 4)))
+ (set (match_dup 1)
+ (and:QI (match_dup 1)
+ (const_int 15)))]
+ {
+ operands[1] = avr_to_int_mode (operands[0]);
+ })
+
+(define_split ; lshrqi3_const5
+ [(set (match_operand:ALL1 0 "d_register_operand" "")
+ (lshiftrt:ALL1 (match_dup 0)
+ (const_int 5)))]
+ ""
+ [(set (match_dup 1) (rotate:QI (match_dup 1) (const_int 4)))
+ (set (match_dup 1) (lshiftrt:QI (match_dup 1) (const_int 1)))
+ (set (match_dup 1) (and:QI (match_dup 1) (const_int 7)))]
+ {
+ operands[1] = avr_to_int_mode (operands[0]);
+ })
+
+(define_split ; lshrqi3_const6
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (lshiftrt:QI (match_dup 0)
+ (const_int 6)))]
+ ""
+ [(set (match_dup 1) (rotate:QI (match_dup 1) (const_int 4)))
+ (set (match_dup 1) (lshiftrt:QI (match_dup 1) (const_int 2)))
+ (set (match_dup 1) (and:QI (match_dup 1) (const_int 3)))]
+ {
+ operands[1] = avr_to_int_mode (operands[0]);
+ })
+
+;; "*lshrqi3"
+;; "*lshrqq3"
+;; "*lshruqq3"
+(define_insn "*lshr<mode>3"
+ [(set (match_operand:ALL1 0 "register_operand" "=r,r,r,r,!d,r,r")
+ (lshiftrt:ALL1 (match_operand:ALL1 1 "register_operand" "0,0,0,0,0 ,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,K,n ,n,Qm")))]
+ ""
+ {
+ return lshrqi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "5,0,1,2,4,6,9")
+ (set_attr "adjust_len" "lshrqi")
+ (set_attr "cc" "clobber,none,set_czn,set_czn,set_czn,set_czn,clobber")])
+
+;; "lshrhi3"
+;; "lshrhq3" "lshruhq3"
+;; "lshrha3" "lshruha3"
+(define_insn "lshr<mode>3"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r,r,r")
+ (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ {
+ return lshrhi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "6,0,2,2,4,10,10")
+ (set_attr "adjust_len" "lshrhi")
+ (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber,clobber")])
+
+(define_insn "lshrpsi3"
+ [(set (match_operand:PSI 0 "register_operand" "=r,r,r,r,r")
+ (lshiftrt:PSI (match_operand:PSI 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "nonmemory_operand" "r,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ ""
+ {
+ return avr_out_lshrpsi3 (insn, operands, NULL);
+ }
+ [(set_attr "adjust_len" "lshrpsi")
+ (set_attr "cc" "clobber")])
+
+;; "lshrsi3"
+;; "lshrsq3" "lshrusq3"
+;; "lshrsa3" "lshrusa3"
+(define_insn "lshr<mode>3"
+ [(set (match_operand:ALL4 0 "register_operand" "=r,r,r,r,r,r,r")
+ (lshiftrt:ALL4 (match_operand:ALL4 1 "register_operand" "0,0,0,r,0,0,0")
+ (match_operand:QI 2 "nop_general_operand" "r,L,P,O,K,n,Qm")))]
+ ""
+ {
+ return lshrsi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "8,0,4,4,8,10,12")
+ (set_attr "adjust_len" "lshrsi")
+ (set_attr "cc" "clobber,none,clobber,clobber,clobber,clobber,clobber")])
+
+;; Optimize if a scratch register from LD_REGS happens to be available.
+
+(define_peephole2 ; lshrqi3_l_const4
+ [(set (match_operand:ALL1 0 "l_register_operand" "")
+ (lshiftrt:ALL1 (match_dup 0)
+ (const_int 4)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 2) (rotate:QI (match_dup 2) (const_int 4)))
+ (set (match_dup 1) (const_int 15))
+ (set (match_dup 2) (and:QI (match_dup 2) (match_dup 1)))]
+ {
+ operands[2] = avr_to_int_mode (operands[0]);
+ })
+
+(define_peephole2 ; lshrqi3_l_const5
+ [(set (match_operand:ALL1 0 "l_register_operand" "")
+ (lshiftrt:ALL1 (match_dup 0)
+ (const_int 5)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 2) (rotate:QI (match_dup 2) (const_int 4)))
+ (set (match_dup 2) (lshiftrt:QI (match_dup 2) (const_int 1)))
+ (set (match_dup 1) (const_int 7))
+ (set (match_dup 2) (and:QI (match_dup 2) (match_dup 1)))]
+ {
+ operands[2] = avr_to_int_mode (operands[0]);
+ })
+
+(define_peephole2 ; lshrqi3_l_const6
+ [(set (match_operand:ALL1 0 "l_register_operand" "")
+ (lshiftrt:ALL1 (match_dup 0)
+ (const_int 6)))
+ (match_scratch:QI 1 "d")]
+ ""
+ [(set (match_dup 2) (rotate:QI (match_dup 2) (const_int 4)))
+ (set (match_dup 2) (lshiftrt:QI (match_dup 2) (const_int 2)))
+ (set (match_dup 1) (const_int 3))
+ (set (match_dup 2) (and:QI (match_dup 2) (match_dup 1)))]
+ {
+ operands[2] = avr_to_int_mode (operands[0]);
+ })
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:ALL2 0 "register_operand" "")
+ (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (lshiftrt:ALL2 (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 3))])])
+
+;; "*lshrhi3_const"
+;; "*lshrhq3_const" "*lshruhq3_const"
+;; "*lshrha3_const" "*lshruha3_const"
+(define_insn "*lshr<mode>3_const"
+ [(set (match_operand:ALL2 0 "register_operand" "=r,r,r,r,r")
+ (lshiftrt:ALL2 (match_operand:ALL2 1 "register_operand" "0,0,r,0,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,K,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,X,&d"))]
+ "reload_completed"
+ {
+ return lshrhi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "0,2,2,4,10")
+ (set_attr "adjust_len" "lshrhi")
+ (set_attr "cc" "none,clobber,clobber,clobber,clobber")])
+
+(define_peephole2
+ [(match_scratch:QI 3 "d")
+ (set (match_operand:ALL4 0 "register_operand" "")
+ (lshiftrt:ALL4 (match_operand:ALL4 1 "register_operand" "")
+ (match_operand:QI 2 "const_int_operand" "")))]
+ ""
+ [(parallel [(set (match_dup 0)
+ (lshiftrt:ALL4 (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 3))])])
+
+;; "*lshrsi3_const"
+;; "*lshrsq3_const" "*lshrusq3_const"
+;; "*lshrsa3_const" "*lshrusa3_const"
+(define_insn "*lshr<mode>3_const"
+ [(set (match_operand:ALL4 0 "register_operand" "=r,r,r,r")
+ (lshiftrt:ALL4 (match_operand:ALL4 1 "register_operand" "0,0,r,0")
+ (match_operand:QI 2 "const_int_operand" "L,P,O,n")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ "reload_completed"
+ {
+ return lshrsi3_out (insn, operands, NULL);
+ }
+ [(set_attr "length" "0,4,4,10")
+ (set_attr "adjust_len" "lshrsi")
+ (set_attr "cc" "none,clobber,clobber,clobber")])
+
+;; abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x) abs(x)
+;; abs
+
+(define_insn "absqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (abs:QI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "sbrc %0,7
+ neg %0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "clobber")])
+
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=d,r")
+ (abs:SF (match_operand:SF 1 "register_operand" "0,0")))]
+ ""
+ "@
+ andi %D0,0x7f
+ clt\;bld %D0,7"
+ [(set_attr "length" "1,2")
+ (set_attr "cc" "set_n,clobber")])
+
+;; 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x 0 - x
+;; neg
+
+(define_insn "negqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (neg:QI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "neg %0"
+ [(set_attr "length" "1")
+ (set_attr "cc" "set_zn")])
+
+(define_insn "*negqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (neg:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "0"))))]
+ ""
+ "clr %B0\;neg %A0\;brge .+2\;com %B0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+(define_insn "neghi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,&r")
+ (neg:HI (match_operand:HI 1 "register_operand" "0,r")))]
+ ""
+ "@
+ neg %B0\;neg %A0\;sbc %B0,__zero_reg__
+ clr %A0\;clr %B0\;sub %A0,%A1\;sbc %B0,%B1"
+ [(set_attr "length" "3,4")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "negpsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=!d,r,&r")
+ (neg:PSI (match_operand:PSI 1 "register_operand" "0,0,r")))]
+ ""
+ "@
+ com %C0\;com %B0\;neg %A0\;sbci %B0,-1\;sbci %C0,-1
+ com %C0\;com %B0\;com %A0\;adc %A0,__zero_reg__\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__
+ clr %A0\;clr %B0\;clr %C0\;sub %A0,%A1\;sbc %B0,%B1\;sbc %C0,%C1"
+ [(set_attr "length" "5,6,6")
+ (set_attr "cc" "set_czn,set_n,set_czn")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=!d,r,&r,&r")
+ (neg:SI (match_operand:SI 1 "register_operand" "0,0,r ,r")))]
+ ""
+ "@
+ com %D0\;com %C0\;com %B0\;neg %A0\;sbci %B0,lo8(-1)\;sbci %C0,lo8(-1)\;sbci %D0,lo8(-1)
+ com %D0\;com %C0\;com %B0\;com %A0\;adc %A0,__zero_reg__\;adc %B0,__zero_reg__\;adc %C0,__zero_reg__\;adc %D0,__zero_reg__
+ clr %A0\;clr %B0\;clr %C0\;clr %D0\;sub %A0,%A1\;sbc %B0,%B1\;sbc %C0,%C1\;sbc %D0,%D1
+ clr %A0\;clr %B0\;movw %C0,%A0\;sub %A0,%A1\;sbc %B0,%B1\;sbc %C0,%C1\;sbc %D0,%D1"
+ [(set_attr "length" "7,8,8,7")
+ (set_attr "isa" "*,*,mov,movw")
+ (set_attr "cc" "set_czn,set_n,set_czn,set_czn")])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=d,r")
+ (neg:SF (match_operand:SF 1 "register_operand" "0,0")))]
+ ""
+ "@
+ subi %D0,0x80
+ bst %D0,7\;com %D0\;bld %D0,7\;com %D0"
+ [(set_attr "length" "1,4")
+ (set_attr "cc" "set_n,set_n")])
+
+;; !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+;; not
+
+(define_insn "one_cmplqi2"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (not:QI (match_operand:QI 1 "register_operand" "0")))]
+ ""
+ "com %0"
+ [(set_attr "length" "1")
+ (set_attr "cc" "set_czn")])
+
+(define_insn "one_cmplhi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (not:HI (match_operand:HI 1 "register_operand" "0")))]
+ ""
+ "com %0
+ com %B0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "set_n")])
+
+(define_insn "one_cmplpsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (not:PSI (match_operand:PSI 1 "register_operand" "0")))]
+ ""
+ "com %0\;com %B0\;com %C0"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_n")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "0")))]
+ ""
+ "com %0
+ com %B0
+ com %C0
+ com %D0"
+ [(set_attr "length" "4")
+ (set_attr "cc" "set_n")])
+
+;; xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x
+;; sign extend
+
+;; We keep the combiner from inserting hard registers into the input of
+;; sign- and zero-extends.  A hard register in the input operand is not
+;; wanted because the 32-bit multiply patterns clobber some hard registers,
+;; and an extend whose hard register overlaps these clobbers won't be
+;; combined into a widening multiplication.  There is no need for combine
+;; to propagate hard registers; register allocation can do that just as
+;; well.
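+;;
+;; E.g. (a sketch): in "(long) a * (long) b" the two extends feed a
+;; widening multiply; had combine copied one of the clobbered hard
+;; registers into an extend, the overlap would block that combination.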
+
+(define_insn "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (sign_extend:HI (match_operand:QI 1 "combine_pseudo_register_operand" "0,*r")))]
+ ""
+ "@
+ clr %B0\;sbrc %0,7\;com %B0
+ mov %A0,%A1\;clr %B0\;sbrc %A0,7\;com %B0"
+ [(set_attr "length" "3,4")
+ (set_attr "cc" "set_n,set_n")])
+
+(define_insn "extendqipsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=r,r")
+ (sign_extend:PSI (match_operand:QI 1 "combine_pseudo_register_operand" "0,*r")))]
+ ""
+ "@
+ clr %B0\;sbrc %A0,7\;com %B0\;mov %C0,%B0
+ mov %A0,%A1\;clr %B0\;sbrc %A0,7\;com %B0\;mov %C0,%B0"
+ [(set_attr "length" "4,5")
+ (set_attr "cc" "set_n,set_n")])
+
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (sign_extend:SI (match_operand:QI 1 "combine_pseudo_register_operand" "0,*r")))]
+ ""
+ "@
+ clr %B0\;sbrc %A0,7\;com %B0\;mov %C0,%B0\;mov %D0,%B0
+ mov %A0,%A1\;clr %B0\;sbrc %A0,7\;com %B0\;mov %C0,%B0\;mov %D0,%B0"
+ [(set_attr "length" "5,6")
+ (set_attr "cc" "set_n,set_n")])
+
+(define_insn "extendhipsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=r,r ,r")
+ (sign_extend:PSI (match_operand:HI 1 "combine_pseudo_register_operand" "0,*r,*r")))]
+ ""
+ "@
+ clr %C0\;sbrc %B0,7\;com %C0
+ mov %A0,%A1\;mov %B0,%B1\;clr %C0\;sbrc %B0,7\;com %C0
+ movw %A0,%A1\;clr %C0\;sbrc %B0,7\;com %C0"
+ [(set_attr "length" "3,5,4")
+ (set_attr "isa" "*,mov,movw")
+ (set_attr "cc" "set_n")])
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r ,r")
+ (sign_extend:SI (match_operand:HI 1 "combine_pseudo_register_operand" "0,*r,*r")))]
+ ""
+ "@
+ clr %C0\;sbrc %B0,7\;com %C0\;mov %D0,%C0
+ mov %A0,%A1\;mov %B0,%B1\;clr %C0\;sbrc %B0,7\;com %C0\;mov %D0,%C0
+ movw %A0,%A1\;clr %C0\;sbrc %B0,7\;com %C0\;mov %D0,%C0"
+ [(set_attr "length" "4,6,5")
+ (set_attr "isa" "*,mov,movw")
+ (set_attr "cc" "set_n")])
+
+(define_insn "extendpsisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:PSI 1 "combine_pseudo_register_operand" "0")))]
+ ""
+ "clr %D0\;sbrc %C0,7\;com %D0"
+ [(set_attr "length" "3")
+ (set_attr "cc" "set_n")])
+
+;; xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x xx<---x
+;; zero extend
+
+(define_insn_and_split "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI (match_operand:QI 1 "combine_pseudo_register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+ {
+ unsigned int low_off = subreg_lowpart_offset (QImode, HImode);
+ unsigned int high_off = subreg_highpart_offset (QImode, HImode);
+
+ operands[2] = simplify_gen_subreg (QImode, operands[0], HImode, low_off);
+ operands[3] = simplify_gen_subreg (QImode, operands[0], HImode, high_off);
+ })
+
+(define_insn_and_split "zero_extendqipsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (zero_extend:PSI (match_operand:QI 1 "combine_pseudo_register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))
+ (set (match_dup 4) (const_int 0))]
+ {
+ operands[2] = simplify_gen_subreg (QImode, operands[0], PSImode, 0);
+ operands[3] = simplify_gen_subreg (QImode, operands[0], PSImode, 1);
+ operands[4] = simplify_gen_subreg (QImode, operands[0], PSImode, 2);
+ })
+
+(define_insn_and_split "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:QI 1 "combine_pseudo_register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (zero_extend:HI (match_dup 1)))
+ (set (match_dup 3) (const_int 0))]
+ {
+ unsigned int low_off = subreg_lowpart_offset (HImode, SImode);
+ unsigned int high_off = subreg_highpart_offset (HImode, SImode);
+
+ operands[2] = simplify_gen_subreg (HImode, operands[0], SImode, low_off);
+ operands[3] = simplify_gen_subreg (HImode, operands[0], SImode, high_off);
+ })
+
+(define_insn_and_split "zero_extendhipsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=r")
+ (zero_extend:PSI (match_operand:HI 1 "combine_pseudo_register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+ {
+ operands[2] = simplify_gen_subreg (HImode, operands[0], PSImode, 0);
+ operands[3] = simplify_gen_subreg (QImode, operands[0], PSImode, 2);
+ })
+
+(define_insn_and_split "n_extendhipsi2"
+ [(set (match_operand:PSI 0 "register_operand" "=r,r,d,r")
+ (lo_sum:PSI (match_operand:QI 1 "const_int_operand" "L,P,n,n")
+ (match_operand:HI 2 "register_operand" "r,r,r,r")))
+ (clobber (match_scratch:QI 3 "=X,X,X,&d"))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 4) (match_dup 2))
+ (set (match_dup 3) (match_dup 6))
+ ; no-op move in the case where no scratch is needed
+ (set (match_dup 5) (match_dup 3))]
+ {
+ operands[4] = simplify_gen_subreg (HImode, operands[0], PSImode, 0);
+ operands[5] = simplify_gen_subreg (QImode, operands[0], PSImode, 2);
+ operands[6] = operands[1];
+
+ if (GET_CODE (operands[3]) == SCRATCH)
+ operands[3] = operands[5];
+ })
+
+(define_insn_and_split "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "combine_pseudo_register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+ {
+ unsigned int low_off = subreg_lowpart_offset (HImode, SImode);
+ unsigned int high_off = subreg_highpart_offset (HImode, SImode);
+
+ operands[2] = simplify_gen_subreg (HImode, operands[0], SImode, low_off);
+ operands[3] = simplify_gen_subreg (HImode, operands[0], SImode, high_off);
+ })
+
+(define_insn_and_split "zero_extendpsisi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:PSI 1 "combine_pseudo_register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+ {
+ operands[2] = simplify_gen_subreg (PSImode, operands[0], SImode, 0);
+ operands[3] = simplify_gen_subreg (QImode, operands[0], SImode, 3);
+ })
+
+(define_insn_and_split "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+ (set (match_dup 3) (const_int 0))]
+ {
+ unsigned int low_off = subreg_lowpart_offset (SImode, DImode);
+ unsigned int high_off = subreg_highpart_offset (SImode, DImode);
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], DImode, low_off);
+ operands[3] = simplify_gen_subreg (SImode, operands[0], DImode, high_off);
+ })
+
+(define_insn_and_split "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (zero_extend:SI (match_dup 1)))
+ (set (match_dup 3) (const_int 0))]
+ {
+ unsigned int low_off = subreg_lowpart_offset (SImode, DImode);
+ unsigned int high_off = subreg_highpart_offset (SImode, DImode);
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], DImode, low_off);
+ operands[3] = simplify_gen_subreg (SImode, operands[0], DImode, high_off);
+ })
+
+(define_insn_and_split "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 3) (const_int 0))]
+ {
+ unsigned int low_off = subreg_lowpart_offset (SImode, DImode);
+ unsigned int high_off = subreg_highpart_offset (SImode, DImode);
+
+ operands[2] = simplify_gen_subreg (SImode, operands[0], DImode, low_off);
+ operands[3] = simplify_gen_subreg (SImode, operands[0], DImode, high_off);
+ })
+
+;;<=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=><=>
+;; compare
+
+; Optimize negated tests into reverse compare if overflow is undefined.
+(define_insn "*negated_tstqi"
+ [(set (cc0)
+ (compare (neg:QI (match_operand:QI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "!flag_wrapv && !flag_trapv && flag_strict_overflow"
+ "cp __zero_reg__,%0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+(define_insn "*reversed_tstqi"
+ [(set (cc0)
+ (compare (const_int 0)
+ (match_operand:QI 0 "register_operand" "r")))]
+ ""
+ "cp __zero_reg__,%0"
+[(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+(define_insn "*negated_tsthi"
+ [(set (cc0)
+ (compare (neg:HI (match_operand:HI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "!flag_wrapv && !flag_trapv && flag_strict_overflow"
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0"
+[(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+;; Leave here the clobber used by the cmphi pattern for simplicity, even
+;; though it is unused, because this pattern is synthesized by avr_reorg.
+(define_insn "*reversed_tsthi"
+ [(set (cc0)
+ (compare (const_int 0)
+ (match_operand:HI 0 "register_operand" "r")))
+ (clobber (match_scratch:QI 1 "=X"))]
+ ""
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0"
+[(set_attr "cc" "compare")
+ (set_attr "length" "2")])
+
+(define_insn "*negated_tstpsi"
+ [(set (cc0)
+ (compare (neg:PSI (match_operand:PSI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "!flag_wrapv && !flag_trapv && flag_strict_overflow"
+ "cp __zero_reg__,%A0\;cpc __zero_reg__,%B0\;cpc __zero_reg__,%C0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "3")])
+
+(define_insn "*reversed_tstpsi"
+ [(set (cc0)
+ (compare (const_int 0)
+ (match_operand:PSI 0 "register_operand" "r")))
+ (clobber (match_scratch:QI 1 "=X"))]
+ ""
+ "cp __zero_reg__,%A0\;cpc __zero_reg__,%B0\;cpc __zero_reg__,%C0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "3")])
+
+(define_insn "*negated_tstsi"
+ [(set (cc0)
+ (compare (neg:SI (match_operand:SI 0 "register_operand" "r"))
+ (const_int 0)))]
+ "!flag_wrapv && !flag_trapv && flag_strict_overflow"
+ "cp __zero_reg__,%A0
+ cpc __zero_reg__,%B0
+ cpc __zero_reg__,%C0
+ cpc __zero_reg__,%D0"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "4")])
+
+;; "*reversed_tstsi"
+;; "*reversed_tstsq" "*reversed_tstusq"
+;; "*reversed_tstsa" "*reversed_tstusa"
+(define_insn "*reversed_tst<mode>"
+ [(set (cc0)
+ (compare (match_operand:ALL4 0 "const0_operand" "Y00")
+ (match_operand:ALL4 1 "register_operand" "r")))
+ (clobber (match_scratch:QI 2 "=X"))]
+ ""
+ "cp __zero_reg__,%A1
+ cpc __zero_reg__,%B1
+ cpc __zero_reg__,%C1
+ cpc __zero_reg__,%D1"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "4")])
+
+
+;; "*cmpqi"
+;; "*cmpqq" "*cmpuqq"
+(define_insn "*cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:ALL1 0 "register_operand" "r ,r,d")
+ (match_operand:ALL1 1 "nonmemory_operand" "Y00,r,i")))]
+ ""
+ "@
+ tst %0
+ cp %0,%1
+ cpi %0,lo8(%1)"
+ [(set_attr "cc" "compare,compare,compare")
+ (set_attr "length" "1,1,1")])
+
+(define_insn "*cmpqi_sign_extend"
+ [(set (cc0)
+ (compare (sign_extend:HI (match_operand:QI 0 "register_operand" "d"))
+ (match_operand:HI 1 "s8_operand" "n")))]
+ ""
+ "cpi %0,lo8(%1)"
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1")])
+
+;; "*cmphi"
+;; "*cmphq" "*cmpuhq"
+;; "*cmpha" "*cmpuha"
+(define_insn "*cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:ALL2 0 "register_operand" "!w ,r ,r,d ,r ,d,r")
+ (match_operand:ALL2 1 "nonmemory_operand" "Y00,Y00,r,s ,s ,M,n Ynn")))
+ (clobber (match_scratch:QI 2 "=X ,X ,X,&d,&d ,X,&d"))]
+ ""
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ case 1:
+ return avr_out_tsthi (insn, operands, NULL);
+
+ case 2:
+ return "cp %A0,%A1\;cpc %B0,%B1";
+
+ case 3:
+ if (<MODE>mode != HImode)
+ break;
+ return reg_unused_after (insn, operands[0])
+ ? "subi %A0,lo8(%1)\;sbci %B0,hi8(%1)"
+ : "ldi %2,hi8(%1)\;cpi %A0,lo8(%1)\;cpc %B0,%2";
+
+ case 4:
+ if (<MODE>mode != HImode)
+ break;
+ return "ldi %2,lo8(%1)\;cp %A0,%2\;ldi %2,hi8(%1)\;cpc %B0,%2";
+ }
+
+ return avr_out_compare (insn, operands, NULL);
+ }
+ [(set_attr "cc" "compare")
+ (set_attr "length" "1,2,2,3,4,2,4")
+ (set_attr "adjust_len" "tsthi,tsthi,*,*,*,compare,compare")])
+
+(define_insn "*cmppsi"
+ [(set (cc0)
+ (compare (match_operand:PSI 0 "register_operand" "r,r,d ,r ,d,r")
+ (match_operand:PSI 1 "nonmemory_operand" "L,r,s ,s ,M,n")))
+ (clobber (match_scratch:QI 2 "=X,X,&d,&d ,X,&d"))]
+ ""
+ {
+ switch (which_alternative)
+ {
+ case 0:
+ return avr_out_tstpsi (insn, operands, NULL);
+
+ case 1:
+ return "cp %A0,%A1\;cpc %B0,%B1\;cpc %C0,%C1";
+
+ case 2:
+ return reg_unused_after (insn, operands[0])
+ ? "subi %A0,lo8(%1)\;sbci %B0,hi8(%1)\;sbci %C0,hh8(%1)"
+ : "cpi %A0,lo8(%1)\;ldi %2,hi8(%1)\;cpc %B0,%2\;ldi %2,hh8(%1)\;cpc %C0,%2";
+
+ case 3:
+ return "ldi %2,lo8(%1)\;cp %A0,%2\;ldi %2,hi8(%1)\;cpc %B0,%2\;ldi %2,hh8(%1)\;cpc %C0,%2";
+ }
+
+ return avr_out_compare (insn, operands, NULL);
+ }
+ [(set_attr "cc" "compare")
+ (set_attr "length" "3,3,5,6,3,7")
+ (set_attr "adjust_len" "tstpsi,*,*,*,compare,compare")])
+
+;; "*cmpsi"
+;; "*cmpsq" "*cmpusq"
+;; "*cmpsa" "*cmpusa"
+(define_insn "*cmp<mode>"
+ [(set (cc0)
+ (compare (match_operand:ALL4 0 "register_operand" "r ,r ,d,r ,r")
+ (match_operand:ALL4 1 "nonmemory_operand" "Y00,r ,M,M ,n Ynn")))
+ (clobber (match_scratch:QI 2 "=X ,X ,X,&d,&d"))]
+ ""
+ {
+ if (0 == which_alternative)
+ return avr_out_tstsi (insn, operands, NULL);
+ else if (1 == which_alternative)
+ return "cp %A0,%A1\;cpc %B0,%B1\;cpc %C0,%C1\;cpc %D0,%D1";
+
+ return avr_out_compare (insn, operands, NULL);
+ }
+ [(set_attr "cc" "compare")
+ (set_attr "length" "4,4,4,5,8")
+ (set_attr "adjust_len" "tstsi,*,compare,compare,compare")])
+
+
+;; ----------------------------------------------------------------------
+;; JUMP INSTRUCTIONS
+;; ----------------------------------------------------------------------
+;; Conditional jump instructions
+
+;; "cbranchqi4"
+;; "cbranchqq4" "cbranchuqq4"
+(define_expand "cbranch<mode>4"
+ [(set (cc0)
+ (compare (match_operand:ALL1 1 "register_operand" "")
+ (match_operand:ALL1 2 "nonmemory_operand" "")))
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))])
+
+;; "cbranchhi4" "cbranchhq4" "cbranchuhq4" "cbranchha4" "cbranchuha4"
+;; "cbranchsi4" "cbranchsq4" "cbranchusq4" "cbranchsa4" "cbranchusa4"
+;; "cbranchpsi4"
+(define_expand "cbranch<mode>4"
+ [(parallel [(set (cc0)
+ (compare (match_operand:ORDERED234 1 "register_operand" "")
+ (match_operand:ORDERED234 2 "nonmemory_operand" "")))
+ (clobber (match_scratch:QI 4 ""))])
+ (set (pc)
+ (if_then_else
+ (match_operator 0 "ordered_comparison_operator" [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))])
+
+
+;; Test a single bit in a QI/HI/SImode register.
+;; Combine will create zero-extract patterns for single-bit tests.
+;; Permit any mode in the source pattern by using VOIDmode.
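+;;
+;; E.g. (a sketch): "if (x & 0x04) f ();" is combined into a zero_extract
+;; of bit 2, and avr_out_sbxx_branch emits it as an SBRC/SBRS skip plus a
+;; branch.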
+
+(define_insn "*sbrx_branch<mode>"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(zero_extract:QIDI
+ (match_operand:VOID 1 "register_operand" "r")
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ {
+ return avr_out_sbxx_branch (insn, operands);
+ }
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Same test based on bitwise AND.  Keep this in case gcc changes patterns
+;; or for old peepholes.
+;; FIXME: A bitwise mask will not work for DImode.
+
+(define_insn "*sbrx_and_branch<mode>"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(and:QISI
+ (match_operand:QISI 1 "register_operand" "r")
+ (match_operand:QISI 2 "single_one_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ {
+ HOST_WIDE_INT bitnumber;
+ bitnumber = exact_log2 (GET_MODE_MASK (<MODE>mode) & INTVAL (operands[2]));
+ operands[2] = GEN_INT (bitnumber);
+ return avr_out_sbxx_branch (insn, operands);
+ }
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Convert sign tests to bit 7/15/31 tests that match the above insns.
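+;;
+;; For instance (a sketch), a signed test like
+;;
+;;     if (x >= 0)         /* x: int8_t */
+;;       g ();
+;;
+;; is rewritten by the peepholes below into a test of bit 7 so that it
+;; can use the single-bit branch patterns above instead of a compare.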
+(define_peephole2
+ [(set (cc0) (compare (match_operand:QI 0 "register_operand" "")
+ (const_int 0)))
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (eq (zero_extract:HI (match_dup 0)
+ (const_int 1)
+ (const_int 7))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))])
+
+(define_peephole2
+ [(set (cc0) (compare (match_operand:QI 0 "register_operand" "")
+ (const_int 0)))
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (ne (zero_extract:HI (match_dup 0)
+ (const_int 1)
+ (const_int 7))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))])
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:HI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:HI 2 ""))])
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (eq (and:HI (match_dup 0) (const_int -32768))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))])
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:HI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:HI 2 ""))])
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (ne (and:HI (match_dup 0) (const_int -32768))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))])
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:SI 2 ""))])
+ (set (pc) (if_then_else (ge (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (eq (and:SI (match_dup 0) (match_dup 2))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "operands[2] = gen_int_mode (-2147483647 - 1, SImode);")
+
+(define_peephole2
+ [(parallel [(set (cc0) (compare (match_operand:SI 0 "register_operand" "")
+ (const_int 0)))
+ (clobber (match_operand:SI 2 ""))])
+ (set (pc) (if_then_else (lt (cc0) (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ [(set (pc) (if_then_else (ne (and:SI (match_dup 0) (match_dup 2))
+ (const_int 0))
+ (label_ref (match_dup 1))
+ (pc)))]
+ "operands[2] = gen_int_mode (-2147483647 - 1, SImode);")
+
+;; ************************************************************************
+;; Implementation of conditional jumps here.
+;; Compare with 0 (test) jumps
+;; ************************************************************************
+
+(define_insn "branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "simple_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
+ [(set_attr "type" "branch")
+ (set_attr "cc" "clobber")])
+
+
+;; Same as above but wrap SET_SRC so that this branch won't be transformed
+;; or optimized in the remainder.
+
+(define_insn "branch_unspec"
+ [(set (pc)
+ (unspec [(if_then_else (match_operator 1 "simple_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc))
+ ] UNSPEC_IDENTITY))]
+ ""
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
+ [(set_attr "type" "branch")
+ (set_attr "cc" "none")])
+
+;; ****************************************************************
+;; AVR does not have the following conditional jumps: LE, LEU, GT, GTU.
+;; Convert them all to proper jumps.
+;; ****************************************************************
+
+(define_insn "difficult_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "difficult_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 0);
+ }
+ [(set_attr "type" "branch1")
+ (set_attr "cc" "clobber")])
+
+;; Reverse branch.
+
+(define_insn "rvbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "simple_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 1);
+ }
+ [(set_attr "type" "branch1")
+ (set_attr "cc" "clobber")])
+
+(define_insn "difficult_rvbranch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "difficult_comparison_operator"
+ [(cc0)
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ {
+ return ret_cond_branch (operands[1], avr_jump_mode (operands[0], insn), 1);
+ }
+ [(set_attr "type" "branch")
+ (set_attr "cc" "clobber")])
+
+;; **************************************************************************
+;; Unconditional and other jump instructions.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ {
+ return AVR_HAVE_JMP_CALL && get_attr_length (insn) != 1
+ ? "jmp %x0"
+ : "rjmp %x0";
+ }
+ [(set (attr "length")
+ (if_then_else (match_operand 0 "symbol_ref_operand" "")
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 1)
+ (const_int 2))
+ (if_then_else (and (ge (minus (pc) (match_dup 0)) (const_int -2047))
+ (le (minus (pc) (match_dup 0)) (const_int 2047)))
+ (const_int 1)
+ (const_int 2))))
+ (set_attr "cc" "none")])
+
+;; call
+
+;; Operand 1 not used on the AVR.
+;; Operand 2 is 1 for tail-call, 0 otherwise.
+(define_expand "call"
+ [(parallel[(call (match_operand:HI 0 "call_insn_operand" "")
+ (match_operand:HI 1 "general_operand" ""))
+ (use (const_int 0))])])
+
+;; Operand 1 not used on the AVR.
+;; Operand 2 is 1 for tail-call, 0 otherwise.
+(define_expand "sibcall"
+ [(parallel[(call (match_operand:HI 0 "call_insn_operand" "")
+ (match_operand:HI 1 "general_operand" ""))
+ (use (const_int 1))])])
+
+;; call value
+
+;; Operand 2 not used on the AVR.
+;; Operand 3 is 1 for tail-call, 0 otherwise.
+(define_expand "call_value"
+ [(parallel[(set (match_operand 0 "register_operand" "")
+ (call (match_operand:HI 1 "call_insn_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (use (const_int 0))])])
+
+;; Operand 2 not used on the AVR.
+;; Operand 3 is 1 for tail-call, 0 otherwise.
+(define_expand "sibcall_value"
+ [(parallel[(set (match_operand 0 "register_operand" "")
+ (call (match_operand:HI 1 "call_insn_operand" "")
+ (match_operand:HI 2 "general_operand" "")))
+ (use (const_int 1))])])
+
+(define_insn "call_insn"
+ [(parallel[(call (mem:HI (match_operand:HI 0 "nonmemory_operand" "z,s,z,s"))
+ (match_operand:HI 1 "general_operand" "X,X,X,X"))
+ (use (match_operand:HI 2 "const_int_operand" "L,L,P,P"))])]
+ ;; Operand 1 not used on the AVR.
+ ;; Operand 2 is 1 for tail-call, 0 otherwise.
+ ""
+ "@
+ %!icall
+ %~call %x0
+ %!ijmp
+ %~jmp %x0"
+ [(set_attr "cc" "clobber")
+ (set_attr "length" "1,*,1,*")
+ (set_attr "adjust_len" "*,call,*,call")])
+
+(define_insn "call_value_insn"
+ [(parallel[(set (match_operand 0 "register_operand" "=r,r,r,r")
+ (call (mem:HI (match_operand:HI 1 "nonmemory_operand" "z,s,z,s"))
+ (match_operand:HI 2 "general_operand" "X,X,X,X")))
+ (use (match_operand:HI 3 "const_int_operand" "L,L,P,P"))])]
+ ;; Operand 2 not used on the AVR.
+ ;; Operand 3 is 1 for tail-call, 0 otherwise.
+ ""
+ "@
+ %!icall
+ %~call %x1
+ %!ijmp
+ %~jmp %x1"
+ [(set_attr "cc" "clobber")
+ (set_attr "length" "1,*,1,*")
+ (set_attr "adjust_len" "*,call,*,call")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+; indirect jump
+
+(define_expand "indirect_jump"
+ [(set (pc)
+ (match_operand:HI 0 "nonmemory_operand" ""))]
+ ""
+ {
+ if (!AVR_HAVE_JMP_CALL && !register_operand (operands[0], HImode))
+ {
+ operands[0] = copy_to_mode_reg (HImode, operands[0]);
+ }
+ })
+
+; indirect jump
+(define_insn "*indirect_jump"
+ [(set (pc)
+ (match_operand:HI 0 "nonmemory_operand" "i,i,!z,*r,z"))]
+ ""
+ "@
+ rjmp %x0
+ jmp %x0
+ ijmp
+ push %A0\;push %B0\;ret
+ eijmp"
+ [(set_attr "length" "1,2,1,3,1")
+ (set_attr "isa" "rjmp,jmp,ijmp,ijmp,eijmp")
+ (set_attr "cc" "none")])
+
+;; table jump
+;; For entries in jump table see avr_output_addr_vec_elt.
+
+;; Table made from
+;; "rjmp .L<n>" instructions for <= 8K devices
+;; ".word gs(.L<n>)" addresses for > 8K devices
+(define_insn "*tablejump"
+ [(set (pc)
+ (unspec:HI [(match_operand:HI 0 "register_operand" "!z,*r,z")]
+ UNSPEC_INDEX_JMP))
+ (use (label_ref (match_operand 1 "" "")))
+ (clobber (match_dup 0))]
+ ""
+ "@
+ ijmp
+ push %A0\;push %B0\;ret
+ jmp __tablejump2__"
+ [(set_attr "length" "1,3,2")
+ (set_attr "isa" "rjmp,rjmp,jmp")
+ (set_attr "cc" "none,none,clobber")])
+
+
+(define_expand "casesi"
+ [(parallel [(set (match_dup 6)
+ (minus:HI (subreg:HI (match_operand:SI 0 "register_operand" "") 0)
+ (match_operand:HI 1 "register_operand" "")))
+ (clobber (scratch:QI))])
+ (parallel [(set (cc0)
+ (compare (match_dup 6)
+ (match_operand:HI 2 "register_operand" "")))
+ (clobber (match_scratch:QI 9 ""))])
+
+ (set (pc)
+ (if_then_else (gtu (cc0)
+ (const_int 0))
+ (label_ref (match_operand 4 "" ""))
+ (pc)))
+
+ (set (match_dup 6)
+ (plus:HI (match_dup 6) (label_ref (match_operand:HI 3 "" ""))))
+
+ (parallel [(set (pc) (unspec:HI [(match_dup 6)] UNSPEC_INDEX_JMP))
+ (use (label_ref (match_dup 3)))
+ (clobber (match_dup 6))])]
+ ""
+ {
+ operands[6] = gen_reg_rtx (HImode);
+ })
+
+
+;; ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+;; This instruction sets the Z flag.
+
+(define_insn "sez"
+ [(set (cc0) (const_int 0))]
+ ""
+ "sez"
+ [(set_attr "length" "1")
+ (set_attr "cc" "compare")])
+
+;; Clear/set/test a single bit in I/O address space.
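+;;
+;; For example (a sketch; the I/O address of PORTB is device-specific,
+;; 0x05 is assumed here for illustration):
+;;
+;;     PORTB |=  (1 << 3);    /* sbi 0x05,3 */
+;;     PORTB &= ~(1 << 3);    /* cbi 0x05,3 */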
+
+(define_insn "*cbi"
+ [(set (mem:QI (match_operand 0 "low_io_address_operand" "n"))
+ (and:QI (mem:QI (match_dup 0))
+ (match_operand:QI 1 "single_zero_operand" "n")))]
+ ""
+ {
+ operands[2] = GEN_INT (exact_log2 (~INTVAL (operands[1]) & 0xff));
+ return "cbi %i0,%2";
+ }
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+(define_insn "*sbi"
+ [(set (mem:QI (match_operand 0 "low_io_address_operand" "n"))
+ (ior:QI (mem:QI (match_dup 0))
+ (match_operand:QI 1 "single_one_operand" "n")))]
+ ""
+ {
+ operands[2] = GEN_INT (exact_log2 (INTVAL (operands[1]) & 0xff));
+ return "sbi %i0,%2";
+ }
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; Lower half of the I/O space - use sbic/sbis directly.
+(define_insn "*sbix_branch"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(zero_extract:QIHI
+ (mem:QI (match_operand 1 "low_io_address_operand" "n"))
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ {
+ return avr_out_sbxx_branch (insn, operands);
+ }
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Tests of bit 7 are pessimized to sign tests, so we need this too...
+(define_insn "*sbix_branch_bit7"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "gelt_operator"
+ [(mem:QI (match_operand 1 "low_io_address_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ {
+ operands[3] = operands[2];
+ operands[2] = GEN_INT (7);
+ return avr_out_sbxx_branch (insn, operands);
+ }
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 2)) (const_int -2046))
+ (le (minus (pc) (match_dup 2)) (const_int 2046)))
+ (const_int 2)
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 2)
+ (const_int 4))))
+ (set_attr "cc" "clobber")])
+
+;; Upper half of the I/O space - read port to __tmp_reg__ and use sbrc/sbrs.
+(define_insn "*sbix_branch_tmp"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "eqne_operator"
+ [(zero_extract:QIHI
+ (mem:QI (match_operand 1 "high_io_address_operand" "n"))
+ (const_int 1)
+ (match_operand 2 "const_int_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ {
+ return avr_out_sbxx_branch (insn, operands);
+ }
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 3)) (const_int -2046))
+ (le (minus (pc) (match_dup 3)) (const_int 2045)))
+ (const_int 3)
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 3)
+ (const_int 5))))
+ (set_attr "cc" "clobber")])
+
+(define_insn "*sbix_branch_tmp_bit7"
+ [(set (pc)
+ (if_then_else
+ (match_operator 0 "gelt_operator"
+ [(mem:QI (match_operand 1 "high_io_address_operand" "n"))
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ {
+ operands[3] = operands[2];
+ operands[2] = GEN_INT (7);
+ return avr_out_sbxx_branch (insn, operands);
+ }
+ [(set (attr "length")
+ (if_then_else (and (ge (minus (pc) (match_dup 2)) (const_int -2046))
+ (le (minus (pc) (match_dup 2)) (const_int 2045)))
+ (const_int 3)
+ (if_then_else (match_test "!AVR_HAVE_JMP_CALL")
+ (const_int 3)
+ (const_int 5))))
+ (set_attr "cc" "clobber")])
+
+;; ************************* Peepholes ********************************
+
+(define_peephole ; "*dec-and-branchsi!=-1.d.clobber"
+ [(parallel [(set (match_operand:SI 0 "d_register_operand" "")
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (clobber (scratch:QI))])
+ (parallel [(set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+ (set (pc)
+ (if_then_else (eqne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ {
+ const char *op;
+ int jump_mode;
+ CC_STATUS_INIT;
+ if (test_hard_reg_class (ADDW_REGS, operands[0]))
+ output_asm_insn ("sbiw %0,1" CR_TAB
+ "sbc %C0,__zero_reg__" CR_TAB
+ "sbc %D0,__zero_reg__", operands);
+ else
+ output_asm_insn ("subi %A0,1" CR_TAB
+ "sbc %B0,__zero_reg__" CR_TAB
+ "sbc %C0,__zero_reg__" CR_TAB
+ "sbc %D0,__zero_reg__", operands);
+
+ jump_mode = avr_jump_mode (operands[2], insn);
+ op = ((EQ == <CODE>) ^ (jump_mode == 1)) ? "brcc" : "brcs";
+ operands[1] = gen_rtx_CONST_STRING (VOIDmode, op);
+
+ switch (jump_mode)
+ {
+ case 1: return "%1 %2";
+ case 2: return "%1 .+2\;rjmp %2";
+ case 3: return "%1 .+4\;jmp %2";
+ }
+
+ gcc_unreachable();
+ return "";
+ })
+
+(define_peephole ; "*dec-and-branchhi!=-1"
+ [(set (match_operand:HI 0 "d_register_operand" "")
+ (plus:HI (match_dup 0)
+ (const_int -1)))
+ (parallel [(set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+ (set (pc)
+ (if_then_else (eqne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ {
+ const char *op;
+ int jump_mode;
+ CC_STATUS_INIT;
+ if (test_hard_reg_class (ADDW_REGS, operands[0]))
+ output_asm_insn ("sbiw %0,1", operands);
+ else
+ output_asm_insn ("subi %A0,1" CR_TAB
+ "sbc %B0,__zero_reg__", operands);
+
+ jump_mode = avr_jump_mode (operands[2], insn);
+ op = ((EQ == <CODE>) ^ (jump_mode == 1)) ? "brcc" : "brcs";
+ operands[1] = gen_rtx_CONST_STRING (VOIDmode, op);
+
+ switch (jump_mode)
+ {
+ case 1: return "%1 %2";
+ case 2: return "%1 .+2\;rjmp %2";
+ case 3: return "%1 .+4\;jmp %2";
+ }
+
+ gcc_unreachable();
+ return "";
+ })
+
+;; Same as above but with clobber flavour of addhi3
+(define_peephole ; "*dec-and-branchhi!=-1.d.clobber"
+ [(parallel [(set (match_operand:HI 0 "d_register_operand" "")
+ (plus:HI (match_dup 0)
+ (const_int -1)))
+ (clobber (scratch:QI))])
+ (parallel [(set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+ (set (pc)
+ (if_then_else (eqne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ {
+ const char *op;
+ int jump_mode;
+ CC_STATUS_INIT;
+ if (test_hard_reg_class (ADDW_REGS, operands[0]))
+ output_asm_insn ("sbiw %0,1", operands);
+ else
+ output_asm_insn ("subi %A0,1" CR_TAB
+ "sbc %B0,__zero_reg__", operands);
+
+ jump_mode = avr_jump_mode (operands[2], insn);
+ op = ((EQ == <CODE>) ^ (jump_mode == 1)) ? "brcc" : "brcs";
+ operands[1] = gen_rtx_CONST_STRING (VOIDmode, op);
+
+ switch (jump_mode)
+ {
+ case 1: return "%1 %2";
+ case 2: return "%1 .+2\;rjmp %2";
+ case 3: return "%1 .+4\;jmp %2";
+ }
+
+ gcc_unreachable();
+ return "";
+ })
+
+;; Same as above but with clobber flavour of addhi3
+(define_peephole ; "*dec-and-branchhi!=-1.l.clobber"
+ [(parallel [(set (match_operand:HI 0 "l_register_operand" "")
+ (plus:HI (match_dup 0)
+ (const_int -1)))
+ (clobber (match_operand:QI 3 "d_register_operand" ""))])
+ (parallel [(set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (clobber (match_operand:QI 1 "d_register_operand" ""))])
+ (set (pc)
+ (if_then_else (eqne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ {
+ const char *op;
+ int jump_mode;
+ CC_STATUS_INIT;
+ output_asm_insn ("ldi %3,1" CR_TAB
+ "sub %A0,%3" CR_TAB
+ "sbc %B0,__zero_reg__", operands);
+
+ jump_mode = avr_jump_mode (operands[2], insn);
+ op = ((EQ == <CODE>) ^ (jump_mode == 1)) ? "brcc" : "brcs";
+ operands[1] = gen_rtx_CONST_STRING (VOIDmode, op);
+
+ switch (jump_mode)
+ {
+ case 1: return "%1 %2";
+ case 2: return "%1 .+2\;rjmp %2";
+ case 3: return "%1 .+4\;jmp %2";
+ }
+
+ gcc_unreachable();
+ return "";
+ })
+
+(define_peephole ; "*dec-and-branchqi!=-1"
+ [(set (match_operand:QI 0 "d_register_operand" "")
+ (plus:QI (match_dup 0)
+ (const_int -1)))
+ (set (cc0)
+ (compare (match_dup 0)
+ (const_int -1)))
+ (set (pc)
+ (if_then_else (eqne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ {
+ const char *op;
+ int jump_mode;
+ CC_STATUS_INIT;
+ cc_status.value1 = operands[0];
+ cc_status.flags |= CC_OVERFLOW_UNUSABLE;
+
+ output_asm_insn ("subi %A0,1", operands);
+
+ jump_mode = avr_jump_mode (operands[1], insn);
+ op = ((EQ == <CODE>) ^ (jump_mode == 1)) ? "brcc" : "brcs";
+ operands[0] = gen_rtx_CONST_STRING (VOIDmode, op);
+
+ switch (jump_mode)
+ {
+ case 1: return "%0 %1";
+ case 2: return "%0 .+2\;rjmp %1";
+ case 3: return "%0 .+4\;jmp %1";
+ }
+
+ gcc_unreachable();
+ return "";
+ })
+
+
+(define_peephole ; "*cpse.eq"
+ [(set (cc0)
+ (compare (match_operand:ALL1 1 "register_operand" "r,r")
+ (match_operand:ALL1 2 "reg_or_0_operand" "r,Y00")))
+ (set (pc)
+ (if_then_else (eq (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "jump_over_one_insn_p (insn, operands[0])"
+ "@
+ cpse %1,%2
+ cpse %1,__zero_reg__")
+
+;; This peephole avoids code like
+;;
+;; TST Rn ; *cmpqi
+;; BREQ .+2 ; branch
+;; RJMP .Lm
+;;
+;; Notice that the peephole is always shorter than cmpqi + branch.
+;; The reason to write it as a peephole is that sequences like
+;;
+;; AND Rm, Rn
+;; BRNE .La
+;;
+;; shall not be superseded.  With a corresponding combine pattern
+;; the latter sequence would be
+;;
+;; AND Rm, Rn
+;; CPSE Rm, __zero_reg__
+;; RJMP .La
+;;
+;; and thus longer and slower, and not easy to roll back.
+
+(define_peephole ; "*cpse.ne"
+ [(set (cc0)
+ (compare (match_operand:ALL1 1 "register_operand" "")
+ (match_operand:ALL1 2 "reg_or_0_operand" "")))
+ (set (pc)
+ (if_then_else (ne (cc0)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ "!AVR_HAVE_JMP_CALL
+ || !avr_current_device->errata_skip"
+ {
+ if (operands[2] == CONST0_RTX (<MODE>mode))
+ operands[2] = zero_reg_rtx;
+
+ return 3 == avr_jump_mode (operands[0], insn)
+ ? "cpse %1,%2\;jmp %0"
+ : "cpse %1,%2\;rjmp %0";
+ })
+
+;;pppppppppppppppppppppppppppppppppppppppppppppppppppp
+;; Prologue/epilogue support instructions
+
+(define_insn "popqi"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (mem:QI (pre_inc:HI (reg:HI REG_SP))))]
+ ""
+ "pop %0"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+;; Enable Interrupts
+(define_expand "enable_interrupt"
+ [(clobber (const_int 0))]
+ ""
+ {
+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (mem) = 1;
+ emit_insn (gen_cli_sei (const1_rtx, mem));
+ DONE;
+ })
+
+;; Disable Interrupts
+(define_expand "disable_interrupt"
+ [(clobber (const_int 0))]
+ ""
+ {
+ rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (mem) = 1;
+ emit_insn (gen_cli_sei (const0_rtx, mem));
+ DONE;
+ })
+
+(define_insn "cli_sei"
+ [(unspec_volatile [(match_operand:QI 0 "const_int_operand" "L,P")]
+ UNSPECV_ENABLE_IRQS)
+ (set (match_operand:BLK 1 "" "")
+ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))]
+ ""
+ "@
+ cli
+ sei"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; Library prologue saves
+(define_insn "call_prologue_saves"
+ [(unspec_volatile:HI [(const_int 0)] UNSPECV_PROLOGUE_SAVES)
+ (match_operand:HI 0 "immediate_operand" "i,i")
+ (set (reg:HI REG_SP)
+ (minus:HI (reg:HI REG_SP)
+ (match_operand:HI 1 "immediate_operand" "i,i")))
+ (use (reg:HI REG_X))
+ (clobber (reg:HI REG_Z))]
+ ""
+ "ldi r30,lo8(gs(1f))
+ ldi r31,hi8(gs(1f))
+ %~jmp __prologue_saves__+((18 - %0) * 2)
+1:"
+ [(set_attr "length" "5,6")
+ (set_attr "cc" "clobber")
+ (set_attr "isa" "rjmp,jmp")])
+
+; epilogue restores using library
+(define_insn "epilogue_restores"
+ [(unspec_volatile:QI [(const_int 0)] UNSPECV_EPILOGUE_RESTORES)
+ (set (reg:HI REG_Y)
+ (plus:HI (reg:HI REG_Y)
+ (match_operand:HI 0 "immediate_operand" "i,i")))
+ (set (reg:HI REG_SP)
+ (plus:HI (reg:HI REG_Y)
+ (match_dup 0)))
+ (clobber (reg:QI REG_Z))]
+ ""
+ "ldi r30, lo8(%0)
+ %~jmp __epilogue_restores__ + ((18 - %0) * 2)"
+ [(set_attr "length" "2,3")
+ (set_attr "cc" "clobber")
+ (set_attr "isa" "rjmp,jmp")])
+
+; return
+(define_insn "return"
+ [(return)]
+ "reload_completed && avr_simple_epilogue ()"
+ "ret"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+(define_insn "return_from_epilogue"
+ [(return)]
+ "reload_completed
+ && cfun->machine
+ && !(cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !cfun->machine->is_naked"
+ "ret"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+(define_insn "return_from_interrupt_epilogue"
+ [(return)]
+ "reload_completed
+ && cfun->machine
+ && (cfun->machine->is_interrupt || cfun->machine->is_signal)
+ && !cfun->machine->is_naked"
+ "reti"
+ [(set_attr "cc" "none")
+ (set_attr "length" "1")])
+
+(define_insn "return_from_naked_epilogue"
+ [(return)]
+ "reload_completed
+ && cfun->machine
+ && cfun->machine->is_naked"
+ ""
+ [(set_attr "cc" "none")
+ (set_attr "length" "0")])
+
+(define_expand "prologue"
+ [(const_int 0)]
+ ""
+ {
+ avr_expand_prologue ();
+ DONE;
+ })
+
+(define_expand "epilogue"
+ [(const_int 0)]
+ ""
+ {
+ avr_expand_epilogue (false /* sibcall_p */);
+ DONE;
+ })
+
+(define_expand "sibcall_epilogue"
+ [(const_int 0)]
+ ""
+ {
+ avr_expand_epilogue (true /* sibcall_p */);
+ DONE;
+ })
+
+;; Some instructions and instruction sequences that are available
+;; via built-ins.
+
+(define_insn "delay_cycles_1"
+ [(unspec_volatile [(match_operand:QI 0 "const_int_operand" "n")
+ (const_int 1)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_operand:BLK 1 "" "")
+ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_scratch:QI 2 "=&d"))]
+ ""
+ "ldi %2,lo8(%0)
+ 1: dec %2
+ brne 1b"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "delay_cycles_2"
+ [(unspec_volatile [(match_operand:HI 0 "const_int_operand" "n")
+ (const_int 2)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_operand:BLK 1 "" "")
+ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_scratch:HI 2 "=&w"))]
+ ""
+ "ldi %A2,lo8(%0)
+ ldi %B2,hi8(%0)
+ 1: sbiw %A2,1
+ brne 1b"
+ [(set_attr "length" "4")
+ (set_attr "cc" "clobber")])
+
+(define_insn "delay_cycles_3"
+ [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "n")
+ (const_int 3)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_operand:BLK 1 "" "")
+ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_scratch:QI 2 "=&d"))
+ (clobber (match_scratch:QI 3 "=&d"))
+ (clobber (match_scratch:QI 4 "=&d"))]
+ ""
+ "ldi %2,lo8(%0)
+ ldi %3,hi8(%0)
+ ldi %4,hlo8(%0)
+ 1: subi %2,1
+ sbci %3,0
+ sbci %4,0
+ brne 1b"
+ [(set_attr "length" "7")
+ (set_attr "cc" "clobber")])
+
+(define_insn "delay_cycles_4"
+ [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "n")
+ (const_int 4)]
+ UNSPECV_DELAY_CYCLES)
+ (set (match_operand:BLK 1 "" "")
+ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))
+ (clobber (match_scratch:QI 2 "=&d"))
+ (clobber (match_scratch:QI 3 "=&d"))
+ (clobber (match_scratch:QI 4 "=&d"))
+ (clobber (match_scratch:QI 5 "=&d"))]
+ ""
+ "ldi %2,lo8(%0)
+ ldi %3,hi8(%0)
+ ldi %4,hlo8(%0)
+ ldi %5,hhi8(%0)
+ 1: subi %2,1
+ sbci %3,0
+ sbci %4,0
+ sbci %5,0
+ brne 1b"
+ [(set_attr "length" "9")
+ (set_attr "cc" "clobber")])
+
+
+;; __builtin_avr_insert_bits
+
+(define_insn "insert_bits"
+ [(set (match_operand:QI 0 "register_operand" "=r ,d ,r")
+ (unspec:QI [(match_operand:SI 1 "const_int_operand" "C0f,Cxf,C0f")
+ (match_operand:QI 2 "register_operand" "r ,r ,r")
+ (match_operand:QI 3 "nonmemory_operand" "n ,0 ,0")]
+ UNSPEC_INSERT_BITS))]
+ ""
+ {
+ return avr_out_insert_bits (operands, NULL);
+ }
+ [(set_attr "adjust_len" "insert_bits")
+ (set_attr "cc" "clobber")])
+
+
+;; __builtin_avr_flash_segment
+
+;; Just a helper for the next "official" expander.
+
+(define_expand "flash_segment1"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (subreg:QI (match_operand:PSI 1 "register_operand" "")
+ 2))
+ (set (cc0)
+ (compare (match_dup 0)
+ (const_int 0)))
+ (set (pc)
+ (if_then_else (ge (cc0)
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (const_int -1))])
+
+(define_expand "flash_segment"
+ [(parallel [(match_operand:QI 0 "register_operand" "")
+ (match_operand:PSI 1 "register_operand" "")])]
+ ""
+ {
+ rtx label = gen_label_rtx ();
+ emit (gen_flash_segment1 (operands[0], operands[1], label));
+ emit_label (label);
+ DONE;
+ })
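+
+;; Usage sketch of the corresponding built-in: it yields the 64 KiB
+;; flash segment of a 24-bit __memx address, or -1 if the address does
+;; not point into flash (names are illustrative):
+;;
+;;     const __memx char *p;
+;;     char seg = __builtin_avr_flash_segment (p);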
+
+;; Actually, it's too late now to work out address spaces known at compile
+;; time.  The best place would be to fold ADDR_SPACE_CONVERT_EXPR in
+;; avr_fold_builtin.  However, avr_addr_space_convert can add some built-in
+;; knowledge for PSTR, so that the ADDR_SPACE_CONVERT_EXPR in the built-in
+;; need not be resolved.
+
+(define_insn_and_split "*split.flash_segment"
+ [(set (match_operand:QI 0 "register_operand" "=d")
+ (subreg:QI (lo_sum:PSI (match_operand:QI 1 "nonmemory_operand" "ri")
+ (match_operand:HI 2 "register_operand" "r"))
+ 2))]
+ ""
+ { gcc_unreachable(); }
+ ""
+ [(set (match_dup 0)
+ (match_dup 1))])
+
+
+;; Parity
+
+;; Postpone expansion of 16-bit parity to libgcc call until after combine for
+;; better 8-bit parity recognition.
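+;;
+;; E.g. (a sketch), with an 8-bit operand
+;;
+;;     uint8_t x;  ... __builtin_parity (x) ...
+;;
+;; combine can match *parityqihi2 and emit a call to __parityqi2
+;; instead of the 16-bit __parityhi2.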
+
+(define_expand "parityhi2"
+ [(parallel [(set (match_operand:HI 0 "register_operand" "")
+ (parity:HI (match_operand:HI 1 "register_operand" "")))
+ (clobber (reg:HI 24))])])
+
+(define_insn_and_split "*parityhi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (parity:HI (match_operand:HI 1 "register_operand" "r")))
+ (clobber (reg:HI 24))]
+ "!reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:HI 24)
+ (match_dup 1))
+ (set (reg:HI 24)
+ (parity:HI (reg:HI 24)))
+ (set (match_dup 0)
+ (reg:HI 24))])
+
+(define_insn_and_split "*parityqihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (parity:HI (match_operand:QI 1 "register_operand" "r")))
+ (clobber (reg:HI 24))]
+ "!reload_completed"
+ { gcc_unreachable(); }
+ "&& 1"
+ [(set (reg:QI 24)
+ (match_dup 1))
+ (set (reg:HI 24)
+ (zero_extend:HI (parity:QI (reg:QI 24))))
+ (set (match_dup 0)
+ (reg:HI 24))])
+
+(define_expand "paritysi2"
+ [(set (reg:SI 22)
+ (match_operand:SI 1 "register_operand" ""))
+ (set (reg:HI 24)
+ (truncate:HI (parity:SI (reg:SI 22))))
+ (set (match_dup 2)
+ (reg:HI 24))
+ (set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_dup 2)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (HImode);
+ })
+
+(define_insn "*parityhi2.libgcc"
+ [(set (reg:HI 24)
+ (parity:HI (reg:HI 24)))]
+ ""
+ "%~call __parityhi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*parityqihi2.libgcc"
+ [(set (reg:HI 24)
+ (zero_extend:HI (parity:QI (reg:QI 24))))]
+ ""
+ "%~call __parityqi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*paritysihi2.libgcc"
+ [(set (reg:HI 24)
+ (truncate:HI (parity:SI (reg:SI 22))))]
+ ""
+ "%~call __paritysi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; Popcount
+
+(define_expand "popcounthi2"
+ [(set (reg:HI 24)
+ (match_operand:HI 1 "register_operand" ""))
+ (set (reg:HI 24)
+ (popcount:HI (reg:HI 24)))
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 24))]
+ ""
+ "")
+
+(define_expand "popcountsi2"
+ [(set (reg:SI 22)
+ (match_operand:SI 1 "register_operand" ""))
+ (set (reg:HI 24)
+ (truncate:HI (popcount:SI (reg:SI 22))))
+ (set (match_dup 2)
+ (reg:HI 24))
+ (set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_dup 2)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (HImode);
+ })
+
+(define_insn "*popcounthi2.libgcc"
+ [(set (reg:HI 24)
+ (popcount:HI (reg:HI 24)))]
+ ""
+ "%~call __popcounthi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*popcountsi2.libgcc"
+ [(set (reg:HI 24)
+ (truncate:HI (popcount:SI (reg:SI 22))))]
+ ""
+ "%~call __popcountsi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*popcountqi2.libgcc"
+ [(set (reg:QI 24)
+ (popcount:QI (reg:QI 24)))]
+ ""
+ "%~call __popcountqi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn_and_split "*popcountqihi2.libgcc"
+ [(set (reg:HI 24)
+ (zero_extend:HI (popcount:QI (reg:QI 24))))]
+ ""
+ "#"
+ ""
+ [(set (reg:QI 24)
+ (popcount:QI (reg:QI 24)))
+ (set (reg:QI 25)
+ (const_int 0))])
+
+;; Count Leading Zeros
+
+(define_expand "clzhi2"
+ [(set (reg:HI 24)
+ (match_operand:HI 1 "register_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (clz:HI (reg:HI 24)))
+ (clobber (reg:QI 26))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 24))])
+
+(define_expand "clzsi2"
+ [(set (reg:SI 22)
+ (match_operand:SI 1 "register_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (truncate:HI (clz:SI (reg:SI 22))))
+ (clobber (reg:QI 26))])
+ (set (match_dup 2)
+ (reg:HI 24))
+ (set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_dup 2)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (HImode);
+ })
+
+(define_insn "*clzhi2.libgcc"
+ [(set (reg:HI 24)
+ (clz:HI (reg:HI 24)))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __clzhi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*clzsihi2.libgcc"
+ [(set (reg:HI 24)
+ (truncate:HI (clz:SI (reg:SI 22))))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __clzsi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; Count Trailing Zeros
+
+(define_expand "ctzhi2"
+ [(set (reg:HI 24)
+ (match_operand:HI 1 "register_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (ctz:HI (reg:HI 24)))
+ (clobber (reg:QI 26))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 24))])
+
+(define_expand "ctzsi2"
+ [(set (reg:SI 22)
+ (match_operand:SI 1 "register_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (truncate:HI (ctz:SI (reg:SI 22))))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 26))])
+ (set (match_dup 2)
+ (reg:HI 24))
+ (set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_dup 2)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (HImode);
+ })
+
+(define_insn "*ctzhi2.libgcc"
+ [(set (reg:HI 24)
+ (ctz:HI (reg:HI 24)))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __ctzhi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*ctzsihi2.libgcc"
+ [(set (reg:HI 24)
+ (truncate:HI (ctz:SI (reg:SI 22))))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __ctzsi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; Find First Set
+
+(define_expand "ffshi2"
+ [(set (reg:HI 24)
+ (match_operand:HI 1 "register_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (ffs:HI (reg:HI 24)))
+ (clobber (reg:QI 26))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 24))])
+
+(define_expand "ffssi2"
+ [(set (reg:SI 22)
+ (match_operand:SI 1 "register_operand" ""))
+ (parallel [(set (reg:HI 24)
+ (truncate:HI (ffs:SI (reg:SI 22))))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 26))])
+ (set (match_dup 2)
+ (reg:HI 24))
+ (set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_dup 2)))]
+ ""
+ {
+ operands[2] = gen_reg_rtx (HImode);
+ })
+
+(define_insn "*ffshi2.libgcc"
+ [(set (reg:HI 24)
+ (ffs:HI (reg:HI 24)))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __ffshi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*ffssihi2.libgcc"
+ [(set (reg:HI 24)
+ (truncate:HI (ffs:SI (reg:SI 22))))
+ (clobber (reg:QI 22))
+ (clobber (reg:QI 26))]
+ ""
+ "%~call __ffssi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; Copysign
+
+(define_insn "copysignsf3"
+ [(set (match_operand:SF 0 "register_operand" "=r")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "0")
+ (match_operand:SF 2 "register_operand" "r")]
+ UNSPEC_COPYSIGN))]
+ ""
+ "bst %D2,7\;bld %D0,7"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Swap bytes (change byte endianness)
+
+(define_expand "bswapsi2"
+ [(set (reg:SI 22)
+ (match_operand:SI 1 "register_operand" ""))
+ (set (reg:SI 22)
+ (bswap:SI (reg:SI 22)))
+ (set (match_operand:SI 0 "register_operand" "")
+ (reg:SI 22))])
+
+(define_insn "*bswapsi2.libgcc"
+ [(set (reg:SI 22)
+ (bswap:SI (reg:SI 22)))]
+ ""
+ "%~call __bswapsi2"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; CPU instructions
+
+;; NOP taking 1 or 2 ticks
+(define_expand "nopv"
+ [(parallel [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "")]
+ UNSPECV_NOP)
+ (set (match_dup 1)
+ (unspec_volatile:BLK [(match_dup 1)]
+ UNSPECV_MEMORY_BARRIER))])]
+ ""
+ {
+ operands[1] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[1]) = 1;
+ })
+
+(define_insn "*nopv"
+ [(unspec_volatile [(match_operand:SI 0 "const_int_operand" "P,K")]
+ UNSPECV_NOP)
+ (set (match_operand:BLK 1 "" "")
+ (unspec_volatile:BLK [(match_dup 1)] UNSPECV_MEMORY_BARRIER))]
+ ""
+ "@
+ nop
+ rjmp ."
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; SLEEP
+(define_expand "sleep"
+ [(parallel [(unspec_volatile [(const_int 0)] UNSPECV_SLEEP)
+ (set (match_dup 0)
+ (unspec_volatile:BLK [(match_dup 0)]
+ UNSPECV_MEMORY_BARRIER))])]
+ ""
+ {
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+ })
+
+(define_insn "*sleep"
+ [(unspec_volatile [(const_int 0)] UNSPECV_SLEEP)
+ (set (match_operand:BLK 0 "" "")
+ (unspec_volatile:BLK [(match_dup 0)] UNSPECV_MEMORY_BARRIER))]
+ ""
+ "sleep"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; WDR
+(define_expand "wdr"
+ [(parallel [(unspec_volatile [(const_int 0)] UNSPECV_WDR)
+ (set (match_dup 0)
+ (unspec_volatile:BLK [(match_dup 0)]
+ UNSPECV_MEMORY_BARRIER))])]
+ ""
+ {
+ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+ MEM_VOLATILE_P (operands[0]) = 1;
+ })
+
+(define_insn "*wdr"
+ [(unspec_volatile [(const_int 0)] UNSPECV_WDR)
+ (set (match_operand:BLK 0 "" "")
+ (unspec_volatile:BLK [(match_dup 0)] UNSPECV_MEMORY_BARRIER))]
+ ""
+ "wdr"
+ [(set_attr "length" "1")
+ (set_attr "cc" "none")])
+
+;; FMUL
+(define_expand "fmul"
+ [(set (reg:QI 24)
+ (match_operand:QI 1 "register_operand" ""))
+ (set (reg:QI 25)
+ (match_operand:QI 2 "register_operand" ""))
+ (parallel [(set (reg:HI 22)
+ (unspec:HI [(reg:QI 24)
+ (reg:QI 25)] UNSPEC_FMUL))
+ (clobber (reg:HI 24))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 22))]
+ ""
+ {
+ if (AVR_HAVE_MUL)
+ {
+ emit_insn (gen_fmul_insn (operand0, operand1, operand2));
+ DONE;
+ }
+ })
+
+(define_insn "fmul_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (unspec:HI [(match_operand:QI 1 "register_operand" "a")
+ (match_operand:QI 2 "register_operand" "a")]
+ UNSPEC_FMUL))]
+ "AVR_HAVE_MUL"
+ "fmul %1,%2
+ movw %0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*fmul.call"
+ [(set (reg:HI 22)
+ (unspec:HI [(reg:QI 24)
+ (reg:QI 25)] UNSPEC_FMUL))
+ (clobber (reg:HI 24))]
+ "!AVR_HAVE_MUL"
+ "%~call __fmul"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; FMULS
+(define_expand "fmuls"
+ [(set (reg:QI 24)
+ (match_operand:QI 1 "register_operand" ""))
+ (set (reg:QI 25)
+ (match_operand:QI 2 "register_operand" ""))
+ (parallel [(set (reg:HI 22)
+ (unspec:HI [(reg:QI 24)
+ (reg:QI 25)] UNSPEC_FMULS))
+ (clobber (reg:HI 24))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 22))]
+ ""
+ {
+ if (AVR_HAVE_MUL)
+ {
+ emit_insn (gen_fmuls_insn (operand0, operand1, operand2));
+ DONE;
+ }
+ })
+
+(define_insn "fmuls_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (unspec:HI [(match_operand:QI 1 "register_operand" "a")
+ (match_operand:QI 2 "register_operand" "a")]
+ UNSPEC_FMULS))]
+ "AVR_HAVE_MUL"
+ "fmuls %1,%2
+ movw %0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*fmuls.call"
+ [(set (reg:HI 22)
+ (unspec:HI [(reg:QI 24)
+ (reg:QI 25)] UNSPEC_FMULS))
+ (clobber (reg:HI 24))]
+ "!AVR_HAVE_MUL"
+ "%~call __fmuls"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+;; FMULSU
+(define_expand "fmulsu"
+ [(set (reg:QI 24)
+ (match_operand:QI 1 "register_operand" ""))
+ (set (reg:QI 25)
+ (match_operand:QI 2 "register_operand" ""))
+ (parallel [(set (reg:HI 22)
+ (unspec:HI [(reg:QI 24)
+ (reg:QI 25)] UNSPEC_FMULSU))
+ (clobber (reg:HI 24))])
+ (set (match_operand:HI 0 "register_operand" "")
+ (reg:HI 22))]
+ ""
+ {
+ if (AVR_HAVE_MUL)
+ {
+ emit_insn (gen_fmulsu_insn (operand0, operand1, operand2));
+ DONE;
+ }
+ })
+
+(define_insn "fmulsu_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (unspec:HI [(match_operand:QI 1 "register_operand" "a")
+ (match_operand:QI 2 "register_operand" "a")]
+ UNSPEC_FMULSU))]
+ "AVR_HAVE_MUL"
+ "fmulsu %1,%2
+ movw %0,r0
+ clr __zero_reg__"
+ [(set_attr "length" "3")
+ (set_attr "cc" "clobber")])
+
+(define_insn "*fmulsu.call"
+ [(set (reg:HI 22)
+ (unspec:HI [(reg:QI 24)
+ (reg:QI 25)] UNSPEC_FMULSU))
+ (clobber (reg:HI 24))]
+ "!AVR_HAVE_MUL"
+ "%~call __fmulsu"
+ [(set_attr "type" "xcall")
+ (set_attr "cc" "clobber")])
+
+
+;; Some combiner patterns dealing with bits.
+;; See PR42210
+
+;; Move bit $3.0 into bit $0.$4
+(define_insn "*movbitqi.1-6.a"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (ior:QI (and:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "single_zero_operand" "n"))
+ (and:QI (ashift:QI (match_operand:QI 3 "register_operand" "r")
+ (match_operand:QI 4 "const_0_to_7_operand" "n"))
+ (match_operand:QI 5 "single_one_operand" "n"))))]
+ "INTVAL(operands[4]) == exact_log2 (~INTVAL(operands[2]) & GET_MODE_MASK (QImode))
+ && INTVAL(operands[4]) == exact_log2 (INTVAL(operands[5]) & GET_MODE_MASK (QImode))"
+ "bst %3,0\;bld %0,%4"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Move bit $3.0 into bit $0.$4
+;; Variation of the above.  Unfortunately, there is no canonical
+;; representation for moving bits around, so what we see here depends on
+;; how the user writes down the bit manipulations.
+(define_insn "*movbitqi.1-6.b"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (ior:QI (and:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "single_zero_operand" "n"))
+ (ashift:QI (and:QI (match_operand:QI 3 "register_operand" "r")
+ (const_int 1))
+ (match_operand:QI 4 "const_0_to_7_operand" "n"))))]
+ "INTVAL(operands[4]) == exact_log2 (~INTVAL(operands[2]) & GET_MODE_MASK (QImode))"
+ "bst %3,0\;bld %0,%4"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Move bit $3.0 into bit $0.0.
+;; For bit 0, the combiner generates a slightly different pattern.
+(define_insn "*movbitqi.0"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (ior:QI (and:QI (match_operand:QI 1 "register_operand" "0")
+ (match_operand:QI 2 "single_zero_operand" "n"))
+ (and:QI (match_operand:QI 3 "register_operand" "r")
+ (const_int 1))))]
+ "0 == exact_log2 (~INTVAL(operands[2]) & GET_MODE_MASK (QImode))"
+ "bst %3,0\;bld %0,0"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; Move bit $2.0 into bit $0.7.
+;; For bit 7, the combiner generates a slightly different pattern.
+(define_insn "*movbitqi.7"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (ior:QI (and:QI (match_operand:QI 1 "register_operand" "0")
+ (const_int 127))
+ (ashift:QI (match_operand:QI 2 "register_operand" "r")
+ (const_int 7))))]
+ ""
+ "bst %2,0\;bld %0,7"
+ [(set_attr "length" "2")
+ (set_attr "cc" "none")])
+
+;; The combiner transforms the above four patterns into ZERO_EXTRACT if it
+;; sees MEM and input/output match.  We provide a special pattern for this
+;; because, in contrast to an IN/BST/BLD/OUT sequence, it needs fewer
+;; registers and the operation on I/O is atomic.
+(define_insn "*insv.io"
+ [(set (zero_extract:QI (mem:QI (match_operand 0 "low_io_address_operand" "n,n,n"))
+ (const_int 1)
+ (match_operand:QI 1 "const_0_to_7_operand" "n,n,n"))
+ (match_operand:QI 2 "nonmemory_operand" "L,P,r"))]
+ ""
+ "@
+ cbi %i0,%1
+ sbi %i0,%1
+ sbrc %2,0\;sbi %i0,%1\;sbrs %2,0\;cbi %i0,%1"
+ [(set_attr "length" "1,1,4")
+ (set_attr "cc" "none")])
+
+(define_insn "*insv.not.io"
+ [(set (zero_extract:QI (mem:QI (match_operand 0 "low_io_address_operand" "n"))
+ (const_int 1)
+ (match_operand:QI 1 "const_0_to_7_operand" "n"))
+ (not:QI (match_operand:QI 2 "register_operand" "r")))]
+ ""
+ "sbrs %2,0\;sbi %i0,%1\;sbrc %2,0\;cbi %i0,%1"
+ [(set_attr "length" "4")
+ (set_attr "cc" "none")])
+
+;; The insv expander.
+;; We only support 1-bit inserts
+(define_expand "insv"
+ [(set (zero_extract:QI (match_operand:QI 0 "register_operand" "")
+ (match_operand:QI 1 "const1_operand" "") ; width
+ (match_operand:QI 2 "const_0_to_7_operand" "")) ; pos
+ (match_operand:QI 3 "nonmemory_operand" ""))]
+ "optimize")
+
+;; Insert bit $2.0 into $0.$1
+(define_insn "*insv.reg"
+ [(set (zero_extract:QI (match_operand:QI 0 "register_operand" "+r,d,d,l,l")
+ (const_int 1)
+ (match_operand:QI 1 "const_0_to_7_operand" "n,n,n,n,n"))
+ (match_operand:QI 2 "nonmemory_operand" "r,L,P,L,P"))]
+ ""
+ "@
+ bst %2,0\;bld %0,%1
+ andi %0,lo8(~(1<<%1))
+ ori %0,lo8(1<<%1)
+ clt\;bld %0,%1
+ set\;bld %0,%1"
+ [(set_attr "length" "2,1,1,2,2")
+ (set_attr "cc" "none,set_zn,set_zn,none,none")])
+
+
+;; Some combine patterns that try to fix bad code when a value is composed
+;; from byte parts like in PR27663.
+;; The patterns give some relief, but the code is still not optimal,
+;; in particular when subreg lowering (-fsplit-wide-types) is turned on.
+;; That switch obfuscates things here and in many other places.
+
+;; "*iorhiqi.byte0" "*iorpsiqi.byte0" "*iorsiqi.byte0"
+;; "*xorhiqi.byte0" "*xorpsiqi.byte0" "*xorsiqi.byte0"
+(define_insn_and_split "*<code_stdname><mode>qi.byte0"
+ [(set (match_operand:HISI 0 "register_operand" "=r")
+ (xior:HISI
+ (zero_extend:HISI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:HISI 2 "register_operand" "0")))]
+ ""
+ "#"
+ "reload_completed"
+ [(set (match_dup 3)
+ (xior:QI (match_dup 3)
+ (match_dup 1)))]
+ {
+ operands[3] = simplify_gen_subreg (QImode, operands[0], <MODE>mode, 0);
+ })
+
+;; "*iorhiqi.byte1-3" "*iorpsiqi.byte1-3" "*iorsiqi.byte1-3"
+;; "*xorhiqi.byte1-3" "*xorpsiqi.byte1-3" "*xorsiqi.byte1-3"
+(define_insn_and_split "*<code_stdname><mode>qi.byte1-3"
+ [(set (match_operand:HISI 0 "register_operand" "=r")
+ (xior:HISI
+ (ashift:HISI (zero_extend:HISI (match_operand:QI 1 "register_operand" "r"))
+ (match_operand:QI 2 "const_8_16_24_operand" "n"))
+ (match_operand:HISI 3 "register_operand" "0")))]
+ "INTVAL(operands[2]) < GET_MODE_BITSIZE (<MODE>mode)"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 4)
+ (xior:QI (match_dup 4)
+ (match_dup 1)))]
+ {
+ int byteno = INTVAL(operands[2]) / BITS_PER_UNIT;
+ operands[4] = simplify_gen_subreg (QImode, operands[0], <MODE>mode, byteno);
+ })
+
+(define_expand "extzv"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (zero_extract:QI (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "const1_operand" "")
+ (match_operand:QI 3 "const_0_to_7_operand" "")))])
+
+(define_insn "*extzv"
+ [(set (match_operand:QI 0 "register_operand" "=*d,*d,*d,*d,r")
+ (zero_extract:QI (match_operand:QI 1 "register_operand" "0,r,0,0,r")
+ (const_int 1)
+ (match_operand:QI 2 "const_0_to_7_operand" "L,L,P,C04,n")))]
+ ""
+ "@
+ andi %0,1
+ mov %0,%1\;andi %0,1
+ lsr %0\;andi %0,1
+ swap %0\;andi %0,1
+ bst %1,%2\;clr %0\;bld %0,0"
+ [(set_attr "length" "1,2,2,2,3")
+ (set_attr "cc" "set_zn,set_zn,set_zn,set_zn,clobber")])
+
+(define_insn_and_split "*extzv.qihi1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extract:HI (match_operand:QI 1 "register_operand" "r")
+ (const_int 1)
+ (match_operand:QI 2 "const_0_to_7_operand" "n")))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 3)
+ (zero_extract:QI (match_dup 1)
+ (const_int 1)
+ (match_dup 2)))
+ (set (match_dup 4)
+ (const_int 0))]
+ {
+ operands[3] = simplify_gen_subreg (QImode, operands[0], HImode, 0);
+ operands[4] = simplify_gen_subreg (QImode, operands[0], HImode, 1);
+ })
+
+(define_insn_and_split "*extzv.qihi2"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (zero_extend:HI
+ (zero_extract:QI (match_operand:QI 1 "register_operand" "r")
+ (const_int 1)
+ (match_operand:QI 2 "const_0_to_7_operand" "n"))))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 3)
+ (zero_extract:QI (match_dup 1)
+ (const_int 1)
+ (match_dup 2)))
+ (set (match_dup 4)
+ (const_int 0))]
+ {
+ operands[3] = simplify_gen_subreg (QImode, operands[0], HImode, 0);
+ operands[4] = simplify_gen_subreg (QImode, operands[0], HImode, 1);
+ })
+
+
+;; Fixed-point instructions
+(include "avr-fixed.md")
+
+;; Operations on 64-bit registers
+(include "avr-dimode.md")
diff --git a/gcc-4.9/gcc/config/avr/avr.opt b/gcc-4.9/gcc/config/avr/avr.opt
new file mode 100644
index 000000000..5be80aa2d
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avr.opt
@@ -0,0 +1,84 @@
+; Options for the ATMEL AVR port of the compiler.
+
+; Copyright (C) 2005-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+mcall-prologues
+Target Report Mask(CALL_PROLOGUES)
+Use subroutines for function prologues and epilogues
+
+mmcu=
+Target RejectNegative Joined Var(avr_mcu_index) Init(0) Enum(avr_mcu)
+-mmcu=MCU Select the target MCU
+
+mdeb
+Target Report Undocumented Mask(ALL_DEBUG)
+
+mlog=
+Target RejectNegative Joined Undocumented Var(avr_log_details)
+
+mint8
+Target Report Mask(INT8)
+Use an 8-bit 'int' type
+
+mno-interrupts
+Target Report RejectNegative Mask(NO_INTERRUPTS)
+Change the stack pointer without disabling interrupts
+
+mbranch-cost=
+Target Report Joined RejectNegative UInteger Var(avr_branch_cost) Init(0)
+Set the branch costs for conditional branch instructions. Reasonable values are small, non-negative integers. The default branch cost is 0.
+
+morder1
+Target Report Undocumented Mask(ORDER_1)
+
+morder2
+Target Report Undocumented Mask(ORDER_2)
+
+mtiny-stack
+Target Report Mask(TINY_STACK)
+Change only the low 8 bits of the stack pointer
+
+mrelax
+Target Report
+Relax branches
+
+mpmem-wrap-around
+Target Report
+Make the linker relaxation machine assume that a program counter wrap-around occurs.
+
+maccumulate-args
+Target Report Mask(ACCUMULATE_OUTGOING_ARGS)
+Accumulate outgoing function arguments and acquire/release the needed stack space for outgoing function arguments in the function prologue/epilogue. Without this option, outgoing arguments are pushed before calling a function and popped afterwards. This option can lead to reduced code size for functions that call many functions that get their arguments on the stack, like, for example, printf.
+
+mstrict-X
+Target Report Var(avr_strict_X) Init(0)
+When accessing RAM, use X as imposed by the hardware, i.e. just use pre-decrement, post-increment and indirect addressing with the X register. Without this option, the compiler may assume that there is an addressing mode X+const similar to Y+const and Z+const and emit instructions to emulate such an addressing mode for X.
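+
+; For example (an illustrative sketch), without -mstrict-X an access
+; like *(X + 2) may be emulated as:  adiw r26,2 / ld r24,X / sbiw r26,2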
+
+;; For rationale behind -msp8 see explanation in avr.h.
+msp8
+Target Report RejectNegative Var(avr_sp8) Init(0)
+The device has no SPH special function register. This option will be overridden by the compiler driver with the correct setting if presence/absence of SPH can be deduced from -mmcu=MCU.
+
+Waddr-space-convert
+Warning C Report Var(avr_warn_addr_space_convert) Init(0)
+Warn if the address space of an address is changed.
+
+mfract-convert-truncate
+Target Report Mask(FRACT_CONV_TRUNC)
+Allow truncation instead of rounding towards 0 for fractional fixed-point types
diff --git a/gcc-4.9/gcc/config/avr/avrlibc.h b/gcc-4.9/gcc/config/avr/avrlibc.h
new file mode 100644
index 000000000..fee685b6a
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/avrlibc.h
@@ -0,0 +1,30 @@
+/* Definitions of target machine for the GNU compiler collection
+ for the Atmel AVR microcontroller if configured for AVR-Libc.
+ Copyright (C) 2012-2014 Free Software Foundation, Inc.
+ Contributed by Georg-Johann Lay (avr@gjlay.de)
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* AVR-Libc implements functions from libgcc.a in libm.a, see PR54461. */
+
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC \
+ "%{!mmcu=at90s1*:%{!mmcu=attiny11:%{!mmcu=attiny12:%{!mmcu=attiny15:%{!mmcu=attiny28: -lgcc -lm }}}}}"
+
+#undef LINK_GCC_C_SEQUENCE_SPEC
+#define LINK_GCC_C_SEQUENCE_SPEC \
+ "--start-group %G %L --end-group"
diff --git a/gcc-4.9/gcc/config/avr/builtins.def b/gcc-4.9/gcc/config/avr/builtins.def
new file mode 100644
index 000000000..affcbaa34
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/builtins.def
@@ -0,0 +1,169 @@
+/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* This file contains the definitions and documentation for the
+ builtins defined in the AVR part of the GNU compiler.
+ Before including this file, define a macro
+
+ DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, LIBNAME)
+
+ NAME: `__builtin_avr_name' will be the user-level name of the builtin.
+ `AVR_BUILTIN_NAME' will be the internal builtin's id.
+ N_ARGS: Number of input arguments. If special treatment is needed,
+ set to -1 and handle it by hand, see avr.c:avr_expand_builtin().
+ TYPE: A tree node describing the prototype of the built-in.
+ ICODE: Name of attached insn or expander. If special treatment in avr.c
+ is needed to expand the built-in, use `nothing'.
+ LIBNAME: Name of the attached implementation in libgcc which is used if
+ the builtin cannot be folded away and there is no insn. */
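+
+/* A typical consumer defines DEF_BUILTIN and then includes this file,
+   for example to enumerate the built-in IDs (an illustrative sketch;
+   avr.c uses a similar technique):
+
+       #define DEF_BUILTIN(NAME, N_ARGS, TYPE, ICODE, LIBNAME) \
+         AVR_BUILTIN_ ## NAME,
+       enum avr_builtin_id {
+       #include "builtins.def"
+         AVR_BUILTIN_COUNT
+       };
+       #undef DEF_BUILTIN  */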
+
+/* Mapped to respective instruction. */
+
+DEF_BUILTIN (NOP, -1, void_ftype_void, nothing, NULL)
+DEF_BUILTIN (SEI, 0, void_ftype_void, enable_interrupt, NULL)
+DEF_BUILTIN (CLI, 0, void_ftype_void, disable_interrupt, NULL)
+DEF_BUILTIN (WDR, 0, void_ftype_void, wdr, NULL)
+DEF_BUILTIN (SLEEP, 0, void_ftype_void, sleep, NULL)
+
+/* Mapped to the respective instruction, but might also be folded away
+   or emitted as a libgcc call if the ISA does not provide the instruction. */
+
+DEF_BUILTIN (SWAP, 1, uchar_ftype_uchar, rotlqi3_4, NULL)
+DEF_BUILTIN (FMUL, 2, uint_ftype_uchar_uchar, fmul, NULL)
+DEF_BUILTIN (FMULS, 2, int_ftype_char_char, fmuls, NULL)
+DEF_BUILTIN (FMULSU, 2, int_ftype_char_uchar, fmulsu, NULL)
+
+/* More complex stuff that cannot be mapped 1:1 to an instruction. */
+
+DEF_BUILTIN (DELAY_CYCLES, -1, void_ftype_ulong, nothing, NULL)
+DEF_BUILTIN (INSERT_BITS, 3, uchar_ftype_ulong_uchar_uchar, insert_bits, NULL)
+DEF_BUILTIN (FLASH_SEGMENT, 1, char_ftype_const_memx_ptr, flash_segment, NULL)
+
+/* ISO/IEC TR 18037 "Embedded C"
+ The following builtins are undocumented and used by stdfix.h. */
+
+/* 7.18a.6 The fixed-point intrinsic functions. */
+
+/* 7.18a.6.2 The fixed-point absolute value functions. */
+
+DEF_BUILTIN (ABSHR, 1, hr_ftype_hr, ssabsqq2, "__ssabs_1")
+DEF_BUILTIN (ABSR, 1, nr_ftype_nr, ssabshq2, "__ssabs_2")
+DEF_BUILTIN (ABSLR, 1, lr_ftype_lr, ssabssq2, "__ssabs_4")
+DEF_BUILTIN (ABSLLR, -1, llr_ftype_llr, nothing, "__ssabsdq2") // GCC extension
+
+DEF_BUILTIN (ABSHK, 1, hk_ftype_hk, ssabsha2, "__ssabs_2")
+DEF_BUILTIN (ABSK, 1, nk_ftype_nk, ssabssa2, "__ssabs_4")
+DEF_BUILTIN (ABSLK, -1, lk_ftype_lk, nothing, "__ssabsda2")
+DEF_BUILTIN (ABSLLK, -1, llk_ftype_llk, nothing, "__ssabsta2") // GCC extension
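+
+/* Via stdfix.h these map to the type-generic functions of TR 18037,
+   e.g. (illustrative):  short _Fract y = abshr (x);  */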
+
+/* 7.18a.6.3 The fixed-point round functions. */
+
+DEF_BUILTIN (ROUNDHR, 2, hr_ftype_hr_int, roundqq3, "__roundhr")
+DEF_BUILTIN (ROUNDR, 2, nr_ftype_nr_int, roundhq3, "__roundr")
+DEF_BUILTIN (ROUNDLR, 2, lr_ftype_lr_int, roundsq3, "__roundlr")
+DEF_BUILTIN (ROUNDLLR, -1, llr_ftype_llr_int, nothing, "__rounddq3") // GCC extension
+
+DEF_BUILTIN (ROUNDUHR, 2, uhr_ftype_uhr_int, rounduqq3, "__rounduhr")
+DEF_BUILTIN (ROUNDUR, 2, unr_ftype_unr_int, rounduhq3, "__roundur")
+DEF_BUILTIN (ROUNDULR, 2, ulr_ftype_ulr_int, roundusq3, "__roundulr")
+DEF_BUILTIN (ROUNDULLR, -1, ullr_ftype_ullr_int, nothing, "__roundudq3") // GCC extension
+
+DEF_BUILTIN (ROUNDHK, 2, hk_ftype_hk_int, roundha3, "__roundhk")
+DEF_BUILTIN (ROUNDK, 2, nk_ftype_nk_int, roundsa3, "__roundk")
+DEF_BUILTIN (ROUNDLK, -1, lk_ftype_lk_int, nothing, "__roundda3")
+DEF_BUILTIN (ROUNDLLK, -1, llk_ftype_llk_int, nothing, "__roundta3") // GCC extension
+
+DEF_BUILTIN (ROUNDUHK, 2, uhk_ftype_uhk_int, rounduha3, "__rounduhk")
+DEF_BUILTIN (ROUNDUK, 2, unk_ftype_unk_int, roundusa3, "__rounduk")
+DEF_BUILTIN (ROUNDULK, -1, ulk_ftype_ulk_int, nothing, "__rounduda3")
+DEF_BUILTIN (ROUNDULLK, -1, ullk_ftype_ullk_int, nothing, "__rounduta3") // GCC extension
+
+/* 7.18a.6.4 The fixed-point bit countls functions. */
+
+DEF_BUILTIN (COUNTLSHR, -1, int_ftype_hr, nothing, "__countlsqi2")
+DEF_BUILTIN (COUNTLSR, -1, int_ftype_nr, nothing, "__countlshi2")
+DEF_BUILTIN (COUNTLSLR, -1, int_ftype_lr, nothing, "__countlssi2")
+DEF_BUILTIN (COUNTLSLLR, -1, int_ftype_llr, nothing, "__countlsdi2") // GCC extension
+
+DEF_BUILTIN (COUNTLSUHR, -1, int_ftype_uhr, nothing, "__countlsuqi2")
+DEF_BUILTIN (COUNTLSUR, -1, int_ftype_unr, nothing, "__countlsuhi2")
+DEF_BUILTIN (COUNTLSULR, -1, int_ftype_ulr, nothing, "__countlsusi2")
+DEF_BUILTIN (COUNTLSULLR, -1, int_ftype_ullr, nothing, "__countlsudi2") // GCC extension
+
+DEF_BUILTIN (COUNTLSHK, -1, int_ftype_hk, nothing, "__countlshi2")
+DEF_BUILTIN (COUNTLSK, -1, int_ftype_nk, nothing, "__countlssi2")
+DEF_BUILTIN (COUNTLSLK, -1, int_ftype_lk, nothing, "__countlsdi2")
+DEF_BUILTIN (COUNTLSLLK, -1, int_ftype_llk, nothing, "__countlsdi2") // GCC extension
+
+DEF_BUILTIN (COUNTLSUHK, -1, int_ftype_uhk, nothing, "__countlsuhi2")
+DEF_BUILTIN (COUNTLSUK, -1, int_ftype_unk, nothing, "__countlsusi2")
+DEF_BUILTIN (COUNTLSULK, -1, int_ftype_ulk, nothing, "__countlsudi2")
+DEF_BUILTIN (COUNTLSULLK, -1, int_ftype_ullk, nothing, "__countlsudi2") // GCC extension
+
+/* 7.18a.6.5 The bitwise fixed-point to integer conversion functions. */
+
+DEF_BUILTIN (BITSHR, -1, inthr_ftype_hr, nothing, "__ret")
+DEF_BUILTIN (BITSR, -1, intnr_ftype_nr, nothing, "__ret")
+DEF_BUILTIN (BITSLR, -1, intlr_ftype_lr, nothing, "__ret")
+DEF_BUILTIN (BITSLLR, -1, intllr_ftype_llr, nothing, "__ret") // GCC extension
+
+DEF_BUILTIN (BITSUHR, -1, intuhr_ftype_uhr, nothing, "__ret")
+DEF_BUILTIN (BITSUR, -1, intunr_ftype_unr, nothing, "__ret")
+DEF_BUILTIN (BITSULR, -1, intulr_ftype_ulr, nothing, "__ret")
+DEF_BUILTIN (BITSULLR, -1, intullr_ftype_ullr, nothing, "__ret") // GCC extension
+
+DEF_BUILTIN (BITSHK, -1, inthk_ftype_hk, nothing, "__ret")
+DEF_BUILTIN (BITSK, -1, intnk_ftype_nk, nothing, "__ret")
+DEF_BUILTIN (BITSLK, -1, intlk_ftype_lk, nothing, "__ret")
+DEF_BUILTIN (BITSLLK, -1, intllk_ftype_llk, nothing, "__ret") // GCC extension
+
+DEF_BUILTIN (BITSUHK, -1, intuhk_ftype_uhk, nothing, "__ret")
+DEF_BUILTIN (BITSUK, -1, intunk_ftype_unk, nothing, "__ret")
+DEF_BUILTIN (BITSULK, -1, intulk_ftype_ulk, nothing, "__ret")
+DEF_BUILTIN (BITSULLK, -1, intullk_ftype_ullk, nothing, "__ret") // GCC extension
+
+
+/* 7.18a.6.6 The bitwise integer to fixed-point conversion functions. */
+
+DEF_BUILTIN ( HRBITS, -1, hr_ftype_inthr, nothing, "__ret")
+DEF_BUILTIN ( RBITS, -1, nr_ftype_intnr, nothing, "__ret")
+DEF_BUILTIN ( LRBITS, -1, lr_ftype_intlr, nothing, "__ret")
+DEF_BUILTIN ( LLRBITS, -1, llr_ftype_intllr, nothing, "__ret") // GCC extension
+
+DEF_BUILTIN ( UHRBITS, -1, uhr_ftype_intuhr, nothing, "__ret")
+DEF_BUILTIN ( URBITS, -1, unr_ftype_intunr, nothing, "__ret")
+DEF_BUILTIN ( ULRBITS, -1, ulr_ftype_intulr, nothing, "__ret")
+DEF_BUILTIN (ULLRBITS, -1, ullr_ftype_intullr, nothing, "__ret") // GCC extension
+
+DEF_BUILTIN ( HKBITS, -1, hk_ftype_inthk, nothing, "__ret")
+DEF_BUILTIN ( KBITS, -1, nk_ftype_intnk, nothing, "__ret")
+DEF_BUILTIN ( LKBITS, -1, lk_ftype_intlk, nothing, "__ret")
+DEF_BUILTIN ( LLKBITS, -1, llk_ftype_intllk, nothing, "__ret") // GCC extension
+
+DEF_BUILTIN ( UHKBITS, -1, uhk_ftype_intuhk, nothing, "__ret")
+DEF_BUILTIN ( UKBITS, -1, unk_ftype_intunk, nothing, "__ret")
+DEF_BUILTIN ( ULKBITS, -1, ulk_ftype_intulk, nothing, "__ret")
+DEF_BUILTIN (ULLKBITS, -1, ullk_ftype_intullk, nothing, "__ret") // GCC extension
+
+/* Overloaded */
+
+/* 7.18a.6.7 Type-generic fixed-point functions. */
+
+DEF_BUILTIN (ABSFX, -1, void_ftype_void /* dummy */, nothing, NULL)
+DEF_BUILTIN (ROUNDFX, -1, void_ftype_void /* dummy */, nothing, NULL)
+DEF_BUILTIN (COUNTLSFX, -1, void_ftype_void /* dummy */, nothing, NULL)
diff --git a/gcc-4.9/gcc/config/avr/constraints.md b/gcc-4.9/gcc/config/avr/constraints.md
new file mode 100644
index 000000000..2f6e4ea1b
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/constraints.md
@@ -0,0 +1,238 @@
+;; Constraint definitions for ATMEL AVR micro controllers.
+;; Copyright (C) 2006-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Register constraints
+
+(define_register_constraint "t" "R0_REG"
+ "Temporary register r0")
+
+(define_register_constraint "b" "BASE_POINTER_REGS"
+ "Base pointer registers (r28--r31)")
+
+(define_register_constraint "e" "POINTER_REGS"
+ "Pointer registers (r26--r31)")
+
+(define_register_constraint "w" "ADDW_REGS"
+ "Registers from r24 to r31. These registers
+ can be used in the @samp{adiw} command.")
+
+(define_register_constraint "d" "LD_REGS"
+ "Registers from r16 to r31.")
+
+(define_register_constraint "l" "NO_LD_REGS"
+ "Registers from r0 to r15.")
+
+(define_register_constraint "a" "SIMPLE_LD_REGS"
+ "Registers from r16 to r23.")
+
+(define_register_constraint "x" "POINTER_X_REGS"
+ "Register pair X (r27:r26).")
+
+(define_register_constraint "y" "POINTER_Y_REGS"
+ "Register pair Y (r29:r28).")
+
+(define_register_constraint "z" "POINTER_Z_REGS"
+ "Register pair Z (r31:r30).")
+
+(define_register_constraint "q" "STACK_REG"
+ "Stack pointer register (SPH:SPL).")
+
+(define_constraint "I"
+ "Integer constant in the range 0 @dots{} 63."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 63")))
+
+(define_constraint "J"
+ "Integer constant in the range -63 @dots{} 0."
+ (and (match_code "const_int")
+ (match_test "ival <= 0 && ival >= -63")))
+
+(define_constraint "K"
+ "Integer constant 2."
+ (and (match_code "const_int")
+ (match_test "ival == 2")))
+
+(define_constraint "L"
+ "Zero."
+ (and (match_code "const_int")
+ (match_test "ival == 0")))
+
+(define_constraint "M"
+ "Integer constant in the range 0 @dots{} 0xff."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 0xff")))
+
+(define_constraint "N"
+ "Constant integer @minus{}1."
+ (and (match_code "const_int")
+ (match_test "ival == -1")))
+
+(define_constraint "O"
+ "Constant integer 8, 16, or 24."
+ (and (match_code "const_int")
+ (match_test "ival == 8 || ival == 16 || ival == 24")))
+
+(define_constraint "P"
+ "Constant integer 1."
+ (and (match_code "const_int")
+ (match_test "ival == 1")))
+
+(define_constraint "G"
+ "Constant float 0."
+ (and (match_code "const_double")
+ (match_test "op == CONST0_RTX (SFmode)")))
+
+(define_memory_constraint "Q"
+ "A memory address based on Y or Z pointer with displacement."
+ (and (match_code "mem")
+ (match_test "extra_constraint_Q (op)")))
+
+(define_constraint "Cm2"
+ "Constant integer @minus{}2."
+ (and (match_code "const_int")
+ (match_test "ival == -2")))
+
+(define_constraint "C03"
+ "Constant integer 3."
+ (and (match_code "const_int")
+ (match_test "ival == 3")))
+
+(define_constraint "C04"
+ "Constant integer 4."
+ (and (match_code "const_int")
+ (match_test "ival == 4")))
+
+(define_constraint "C05"
+ "Constant integer 5."
+ (and (match_code "const_int")
+ (match_test "ival == 5")))
+
+(define_constraint "C06"
+ "Constant integer 6."
+ (and (match_code "const_int")
+ (match_test "ival == 6")))
+
+(define_constraint "C07"
+ "Constant integer 7."
+ (and (match_code "const_int")
+ (match_test "ival == 7")))
+
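+;; In the avr_popcount_each_byte tests below, the last argument is a bit
+;; mask of the allowed per-byte popcounts. E.g. the AND mask
+;; (1<<0) | (1<<7) | (1<<8) accepts constants whose bytes are 0x00, have
+;; exactly one zero bit, or are 0xff; presumably these are the byte values
+;; an AND can implement without needing a scratch register.
+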
+(define_constraint "Ca2"
+ "Constant 2-byte integer that allows AND without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 2, (1<<0) | (1<<7) | (1<<8))")))
+
+(define_constraint "Ca3"
+ "Constant 3-byte integer that allows AND without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 3, (1<<0) | (1<<7) | (1<<8))")))
+
+(define_constraint "Ca4"
+ "Constant 4-byte integer that allows AND without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 4, (1<<0) | (1<<7) | (1<<8))")))
+
+(define_constraint "Co2"
+ "Constant 2-byte integer that allows OR without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 2, (1<<0) | (1<<1) | (1<<8))")))
+
+(define_constraint "Co3"
+ "Constant 3-byte integer that allows OR without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 3, (1<<0) | (1<<1) | (1<<8))")))
+
+(define_constraint "Co4"
+ "Constant 4-byte integer that allows OR without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 4, (1<<0) | (1<<1) | (1<<8))")))
+
+(define_constraint "Cx2"
+ "Constant 2-byte integer that allows XOR without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 2, (1<<0) | (1<<8))")))
+
+(define_constraint "Cx3"
+ "Constant 3-byte integer that allows XOR without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 3, (1<<0) | (1<<8))")))
+
+(define_constraint "Cx4"
+ "Constant 4-byte integer that allows XOR without clobber register."
+ (and (match_code "const_int")
+ (match_test "avr_popcount_each_byte (op, 4, (1<<0) | (1<<8))")))
+
+(define_constraint "Csp"
+ "Integer constant in the range -6 @dots{} 6."
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (ival, -6, 6)")))
+
+(define_constraint "Cxf"
+ "32-bit integer constant where at least one nibble is 0xf."
+ (and (match_code "const_int")
+ (match_test "avr_has_nibble_0xf (op)")))
+
+(define_constraint "C0f"
+ "32-bit integer constant where no nibble equals 0xf."
+ (and (match_code "const_int")
+ (match_test "!avr_has_nibble_0xf (op)")))
+
+;; CONST_FIXED is not covered by the 'n' constraint, so cook our own.
+;; "i" or "s" would match, but because the insns use iterators that cover
+;; INT_MODE, "i" or "s" is not always possible.
+
+(define_constraint "Ynn"
+ "Fixed-point constant known at compile time."
+ (match_code "const_fixed"))
+
+(define_constraint "Y00"
+ "Fixed-point or integer constant with bit representation 0x0"
+ (and (match_code "const_fixed,const_int")
+ (match_test "op == CONST0_RTX (GET_MODE (op))")))
+
+(define_constraint "Y01"
+ "Fixed-point or integer constant with bit representation 0x1"
+ (ior (and (match_code "const_fixed")
+ (match_test "1 == INTVAL (avr_to_int_mode (op))"))
+ (match_test "satisfies_constraint_P (op)")))
+
+(define_constraint "Ym1"
+ "Fixed-point or integer constant with bit representation -0x1"
+ (ior (and (match_code "const_fixed")
+ (match_test "-1 == INTVAL (avr_to_int_mode (op))"))
+ (match_test "satisfies_constraint_N (op)")))
+
+(define_constraint "Y02"
+ "Fixed-point or integer constant with bit representation 0x2"
+ (ior (and (match_code "const_fixed")
+ (match_test "2 == INTVAL (avr_to_int_mode (op))"))
+ (match_test "satisfies_constraint_K (op)")))
+
+(define_constraint "Ym2"
+ "Fixed-point or integer constant with bit representation -0x2"
+ (ior (and (match_code "const_fixed")
+ (match_test "-2 == INTVAL (avr_to_int_mode (op))"))
+ (match_test "satisfies_constraint_Cm2 (op)")))
+
+;; Similar to "IJ" used with ADIW/SBIW, but for CONST_FIXED.
+
+(define_constraint "YIJ"
+ "Fixed-point constant from @minus{}0x003f to 0x003f."
+ (and (match_code "const_fixed")
+ (match_test "IN_RANGE (INTVAL (avr_to_int_mode (op)), -63, 63)")))
diff --git a/gcc-4.9/gcc/config/avr/driver-avr.c b/gcc-4.9/gcc/config/avr/driver-avr.c
new file mode 100644
index 000000000..cb5dd1d1d
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/driver-avr.c
@@ -0,0 +1,150 @@
+/* Subroutines for the gcc driver.
+ Copyright (C) 2009-2014 Free Software Foundation, Inc.
+ Contributed by Anatoly Sokolov <aesok@post.ru>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+
+/* Current architecture. */
+const avr_arch_t *avr_current_arch = NULL;
+
+/* Current device. */
+const avr_mcu_t *avr_current_device = NULL;
+
+/* Initialize avr_current_arch and avr_current_device variables. */
+
+static void
+avr_set_current_device (const char *name)
+{
+ if (NULL != avr_current_arch)
+ return;
+
+ for (avr_current_device = avr_mcu_types; avr_current_device->name;
+ avr_current_device++)
+ {
+ if (strcmp (avr_current_device->name, name) == 0)
+ break;
+ }
+
+ avr_current_arch = &avr_arch_types[avr_current_device->arch];
+}
+
+/* Returns command line parameters to pass to as. */
+
+const char*
+avr_device_to_as (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("-mmcu=", avr_current_arch->arch_name,
+ avr_current_device->errata_skip ? "" : " -mno-skip-bug",
+ NULL);
+}
+
+/* Returns command line parameters to pass to ld. */
+
+const char*
+avr_device_to_ld (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("-m ", avr_current_arch->arch_name, NULL);
+}
+
+/* Returns command line parameters that describe the start of the data section. */
+
+const char *
+avr_device_to_data_start (int argc, const char **argv)
+{
+ unsigned long data_section_start;
+ char data_section_start_str[16];
+
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ if (avr_current_device->data_section_start
+ == avr_current_arch->default_data_section_start)
+ return NULL;
+
+ data_section_start = 0x800000 + avr_current_device->data_section_start;
+
+ snprintf (data_section_start_str, sizeof(data_section_start_str) - 1,
+ "0x%lX", data_section_start);
+
+ return concat ("-Tdata ", data_section_start_str, NULL);
+}
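+
+/* Example with a hypothetical device: if its data_section_start is 0x100
+ and differs from the architecture default, the function returns
+ "-Tdata 0x800100" (0x800000 being the conventional offset that maps
+ SRAM into the AVR linker's address space). */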
+
+/* Returns command line parameters that describe the device startfile. */
+
+const char *
+avr_device_to_startfiles (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("crt", avr_current_device->library_name, ".o%s", NULL);
+}
+
+/* Returns command line parameters that describe the device library. */
+
+const char *
+avr_device_to_devicelib (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ return concat ("-l", avr_current_device->library_name, NULL);
+}
+
+const char*
+avr_device_to_sp8 (int argc, const char **argv)
+{
+ if (0 == argc)
+ return NULL;
+
+ avr_set_current_device (argv[0]);
+
+ /* Leave "avr2" and "avr25" alone. These two architectures are
+ the only ones that mix devices with 8-bit SP and 16-bit SP.
+ -msp8 is set by the multilib machinery. */
+
+ if (avr_current_device->macro == NULL
+ && (avr_current_device->arch == ARCH_AVR2
+ || avr_current_device->arch == ARCH_AVR25))
+ return "";
+
+ return avr_current_device->short_sp
+ ? "-msp8"
+ : "%<msp8";
+}
diff --git a/gcc-4.9/gcc/config/avr/elf.h b/gcc-4.9/gcc/config/avr/elf.h
new file mode 100644
index 000000000..dc163e44e
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/elf.h
@@ -0,0 +1,41 @@
+/* Copyright (C) 2011-2014 Free Software Foundation, Inc.
+ Contributed by Georg-Johann Lay (avr@gjlay.de)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+
+/* Overriding some definitions from elfos.h for AVR. */
+
+#undef PCC_BITFIELD_TYPE_MATTERS
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#undef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT (32768 * 8)
+
+#undef STRING_LIMIT
+#define STRING_LIMIT ((unsigned) 64)
+
+/* Output alignment 2**1 for jump tables. */
+#undef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE, PREFIX, NUM, TABLE) \
+ ASM_OUTPUT_ALIGN (FILE, 1);
+
+/* Be conservative in crtstuff.c. */
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
diff --git a/gcc-4.9/gcc/config/avr/gen-avr-mmcu-texi.c b/gcc-4.9/gcc/config/avr/gen-avr-mmcu-texi.c
new file mode 100644
index 000000000..ea3e6f1ba
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/gen-avr-mmcu-texi.c
@@ -0,0 +1,144 @@
+/* Copyright (C) 2012-2014 Free Software Foundation, Inc.
+ Contributed by Georg-Johann Lay (avr@gjlay.de)
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#define IN_GEN_AVR_MMCU_TEXI
+
+#include "avr-arch.h"
+#include "avr-devices.c"
+
+static const char*
+mcu_name[sizeof avr_mcu_types / sizeof avr_mcu_types[0]];
+
+static int letter (char c)
+{
+ return c >= 'a' && c <= 'z';
+}
+
+static int digit (char c)
+{
+ return c >= '0' && c <= '9';
+}
+
+static int
+comparator (const void *va, const void *vb)
+{
+ const char *a = *(const char* const*) va;
+ const char *b = *(const char* const*) vb;
+
+ while (*a && *b)
+ {
+ /* Make letters smaller than digits so that `atmega16a' follows
+ `atmega16' without `atmega161' etc. between them. */
+
+ if (letter (*a) && digit (*b))
+ return -1;
+
+ if (digit (*a) && letter (*b))
+ return 1;
+
+ if (*a != *b)
+ return *a - *b;
+
+ a++;
+ b++;
+ }
+
+ return *a - *b;
+}
+
+static void
+print_mcus (size_t n_mcus)
+{
+ int duplicate = 0;
+ size_t i;
+
+ if (!n_mcus)
+ return;
+
+ qsort (mcu_name, n_mcus, sizeof (char*), comparator);
+
+ printf ("@*@var{mcu}@tie{}=");
+
+ for (i = 0; i < n_mcus; i++)
+ {
+ printf (" @code{%s}%s", mcu_name[i], i == n_mcus-1 ? ".\n\n" : ",");
+
+ if (i && !strcmp (mcu_name[i], mcu_name[i-1]))
+ {
+ /* Sanity-check: Fail on devices that are present more than once. */
+
+ duplicate = 1;
+ fprintf (stderr, "error: duplicate device: %s\n", mcu_name[i]);
+ }
+ }
+
+ if (duplicate)
+ exit (1);
+}
+
+int main (void)
+{
+ enum avr_arch arch = ARCH_UNKNOWN;
+ size_t i, n_mcus = 0;
+ const avr_mcu_t *mcu;
+
+ printf ("@c Copyright (C) 2012-2014 Free Software Foundation, Inc.\n");
+ printf ("@c This is part of the GCC manual.\n");
+ printf ("@c For copying conditions, see the file "
+ "gcc/doc/include/fdl.texi.\n\n");
+
+ printf ("@c This file is generated automatically using\n");
+ printf ("@c gcc/config/avr/gen-avr-mmcu-texi.c from:\n");
+ printf ("@c gcc/config/avr/avr-arch.h\n");
+ printf ("@c gcc/config/avr/avr-devices.c\n");
+ printf ("@c gcc/config/avr/avr-mcus.def\n\n");
+
+ printf ("@c Please do not edit manually.\n\n");
+
+ printf ("@table @code\n\n");
+
+ for (mcu = avr_mcu_types; mcu->name; mcu++)
+ {
+ if (mcu->macro == NULL)
+ {
+ arch = mcu->arch;
+
+ /* Start a new architecture: Flush the MCUs collected so far. */
+
+ print_mcus (n_mcus);
+ n_mcus = 0;
+
+ for (i = 0; i < sizeof (avr_texinfo) / sizeof (*avr_texinfo); i++)
+ if (arch == avr_texinfo[i].arch)
+ printf ("@item %s\n%s\n", mcu->name, avr_texinfo[i].texinfo);
+ }
+ else if (arch == (enum avr_arch) mcu->arch)
+ {
+ mcu_name[n_mcus++] = mcu->name;
+ }
+ }
+
+ print_mcus (n_mcus);
+ printf ("@end table\n");
+
+ return EXIT_SUCCESS;
+}
diff --git a/gcc-4.9/gcc/config/avr/genmultilib.awk b/gcc-4.9/gcc/config/avr/genmultilib.awk
new file mode 100644
index 000000000..90e5e5cfd
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/genmultilib.awk
@@ -0,0 +1,216 @@
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 3, or (at your option) any later
+# version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+##################################################################
+#
+# Transform Core/Device Information from avr-mcus.def to a
+# Representation that is understood by GCC's multilib Machinery.
+#
+# The Script works as a Filter from STDIN to STDOUT.
+#
+# FORMAT = "Makefile": Generate a Makefile Snippet that sets some
+# MULTILIB_* Variables as needed.
+#
+##################################################################
+
+BEGIN {
+ FS ="[(, \t]+"
+ option[""] = ""
+ tiny_stack[""] = 1
+ comment = 1
+ n_mcu = 0
+ n_cores = 0
+
+ mtiny[0] = ""
+ mtiny[1] = "tiny-stack"
+ option["tiny-stack"] = "msp8"
+}
+
+##################################################################
+# Add some Comments to the generated Files and copy-paste
+# Copyright Notice from above.
+##################################################################
+
+/^#/ {
+ if (!comment)
+ next
+ else if (comment == 1)
+ {
+ if (FORMAT == "Makefile")
+ {
+ print "# Auto-generated Makefile Snip"
+ print "# Generated by : ./gcc/config/avr/genmultilib.awk"
+ print "# Generated from : ./gcc/config/avr/avr-mcus.def"
+ print "# Used by : tmake_file from Makefile and genmultilib"
+ print ""
+ }
+ }
+
+ comment = 2;
+
+ print
+}
+
+/^$/ {
+ # The first empty line stops copy-pasting the GPL comments
+ # from this file to the generated file.
+
+ comment = 0
+}
+
+##################################################################
+# Run over all AVR_MCU Lines and gather Information:
+# cores[] : Enumerates the Cores (avr2, avr25, ...)
+# mcu[] : Enumerates the Devices
+# tiny_stack[]: Maps Core/Device to 0 (2-byte SP) or 1 (1-byte SP)
+# option[] : Maps Core/Device to the mmcu= option to get it
+# toCore[] : Maps Device to its Core
+##################################################################
+
+/^AVR_MCU/ {
+ name = $2
+ gsub ("\"", "", name)
+
+ if ($4 == "NULL")
+ {
+ core = name
+
+ # avr1 is supported for Assembler only: It gets no multilib
+ if (core == "avr1")
+ next
+
+ cores[n_cores] = core
+ n_cores++
+ tiny_stack[core] = 0
+ option[core] = "mmcu=" core
+
+ next
+ }
+
+ # avr1 is supported for Assembler only: Its Devices are ignored
+ if (core == "avr1")
+ next
+
+ tiny_stack[name] = $5
+ mcu[n_mcu] = name
+ n_mcu++
+ option[name] = "mmcu=" name
+ toCore[name] = core
+
+ if (tiny_stack[name] == 1)
+ tiny_stack[core] = 1
+}
+
+##################################################################
+#
+# We gathered all the Information, now build/output the following:
+#
+# awk Variable target Variable FORMAT
+# -----------------------------------------------------------
+# m_options <-> MULTILIB_OPTIONS Makefile
+# m_dirnames <-> MULTILIB_DIRNAMES "
+# m_exceptions <-> MULTILIB_EXCEPTIONS "
+# m_matches <-> MULTILIB_MATCHES "
+#
+##################################################################
+
+END {
+ m_options = "\nMULTILIB_OPTIONS = "
+ m_dirnames = "\nMULTILIB_DIRNAMES ="
+ m_exceptions = "\nMULTILIB_EXCEPTIONS ="
+ m_matches = "\nMULTILIB_MATCHES ="
+
+ ##############################################################
+ # Compose MULTILIB_OPTIONS. This represents the Cross-Product
+ # (avr2, avr25, ...) x msp8
+
+ sep = ""
+ for (c = 0; c < n_cores; c++)
+ {
+ m_options = m_options sep option[cores[c]]
+ sep = "/"
+ }
+
+ # The ... x msp8
+ m_options = m_options " " option[mtiny[1]]
+
+ ##############################################################
+ # Map Device to its multilib
+
+ for (t = 0; t < n_mcu; t++)
+ {
+ core = toCore[mcu[t]]
+
+ line = option[core] ":" option[mcu[t]]
+ gsub ("=", "?", line)
+ gsub (":", "=", line)
+
+ m_matches = m_matches " \\\n\t" line
+ }
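+
+ # For instance, core "avr4" and device "atmega8" give
+ # line = "mmcu=avr4:mmcu=atmega8", which the two gsub() calls rewrite
+ # to the MULTILIB_MATCHES entry "mmcu?avr4=mmcu?atmega8".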
+
+ ####################################################################
+ # Compose MULTILIB_DIRNAMES and MULTILIB_EXCEPTIONS
+
+ n_mtiny = 2
+ for (t = 0; t < n_mtiny; t++)
+ for (c = -1; c < n_cores; c++)
+ {
+ if (c == -1)
+ core = ""
+ else
+ core = cores[c]
+
+ # The Directory Name for this multilib
+
+ if (core != "" && mtiny[t] != "")
+ {
+ mdir = core "/" mtiny[t]
+ mopt = option[core] "/" option[mtiny[t]]
+ }
+ else
+ {
+ mdir = core mtiny[t]
+ mopt = option[core] option[mtiny[t]]
+ }
+
+ if (core != "" && tiny_stack[core] == 0 && mtiny[t] != "")
+ {
+ # There is not a single SP = 8 device for this core:
+ # don't build the respective multilib.
+ m_exceptions = m_exceptions " \\\n\t" mopt
+ continue
+ }
+
+ if (core != "avr2" || mtiny[t] == "")
+ m_dirnames = m_dirnames " " mdir
+ }
+
+ ############################################################
+ # Output that Stuff
+ ############################################################
+
+ if (FORMAT == "Makefile")
+ {
+ # Intended Target: ./gcc/config/avr/t-multilib
+
+ print m_options
+ print m_dirnames
+ print m_exceptions
+ print m_matches
+ }
+}
diff --git a/gcc-4.9/gcc/config/avr/genopt.sh b/gcc-4.9/gcc/config/avr/genopt.sh
new file mode 100755
index 000000000..9838ec25a
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/genopt.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+# Generate avr-tables.opt from the list in avr-mcus.def.
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+cat <<EOF
+; -*- buffer-read-only: t -*-
+; Generated automatically by genopt.sh from avr-mcus.def.
+
+; Copyright (C) 2011-2014 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+Enum
+Name(avr_mcu) Type(int)
+Known MCU names:
+
+EOF
+
+awk -F'[(, ]+' 'BEGIN {
+ value = 0
+}
+/^AVR_MCU/ {
+ name = $2
+ gsub("\"", "", name)
+ print "EnumValue"
+ print "Enum(avr_mcu) String(" name ") Value(" value ")"
+ print ""
+ value++
+}' $1
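+
+# For example, an AVR_MCU ("atmega8", ...) entry at zero-based position N
+# in avr-mcus.def produces:
+#
+# EnumValue
+# Enum(avr_mcu) String(atmega8) Value(N)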
diff --git a/gcc-4.9/gcc/config/avr/predicates.md b/gcc-4.9/gcc/config/avr/predicates.md
new file mode 100644
index 000000000..85612e14a
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/predicates.md
@@ -0,0 +1,275 @@
+;; Predicate definitions for ATMEL AVR micro controllers.
+;; Copyright (C) 2006-2014 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Registers from r0 to r15.
+(define_predicate "l_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) <= 15")))
+
+;; Registers from r16 to r31.
+(define_predicate "d_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) >= 16 && REGNO (op) <= 31")))
+
+(define_predicate "even_register_operand"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) <= 31")
+ (match_test "(REGNO (op) & 1) == 0"))))
+
+(define_predicate "odd_register_operand"
+ (and (match_code "reg")
+ (and (match_test "REGNO (op) <= 31")
+ (match_test "(REGNO (op) & 1) != 0"))))
+
+;; SP register.
+(define_predicate "stack_register_operand"
+ (and (match_code "reg")
+ (match_test "REGNO (op) == REG_SP")))
+
+;; Return true if OP is a valid address for lower half of I/O space.
+(define_predicate "low_io_address_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op) - avr_current_arch->sfr_offset,
+ 0, 0x1f)")))
+
+;; Return true if OP is a valid address for high half of I/O space.
+(define_predicate "high_io_address_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op) - avr_current_arch->sfr_offset,
+ 0x20, 0x3F)")))
+
+;; Return true if OP is a valid address of I/O space.
+(define_predicate "io_address_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op) - avr_current_arch->sfr_offset,
+ 0, 0x40 - GET_MODE_SIZE (mode))")))
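+
+;; E.g. in HImode the test above accepts offsets 0..0x3E only, so that
+;; both bytes of a two-byte access still fall inside the 64-byte I/O window.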
+
+;; Return 1 if OP is a general operand not in flash memory
+(define_predicate "nop_general_operand"
+ (and (match_operand 0 "general_operand")
+ (match_test "!avr_mem_flash_p (op)")))
+
+;; Return 1 if OP is an "ordinary" general operand, i.e. a general
+;; operand whose load is not handled by a libgcc call or ELPM.
+(define_predicate "nox_general_operand"
+ (and (match_operand 0 "general_operand")
+ (not (match_test "avr_load_libgcc_p (op)"))
+ (not (match_test "avr_mem_memx_p (op)"))))
+
+;; Return 1 if OP is a memory operand in one of the __flash* address spaces
+(define_predicate "flash_operand"
+ (and (match_operand 0 "memory_operand")
+ (match_test "Pmode == mode")
+ (ior (match_test "!MEM_P (op)")
+ (match_test "avr_mem_flash_p (op)"))))
+
+;; Return 1 if OP is the zero constant for MODE.
+(define_predicate "const0_operand"
+ (and (match_code "const_int,const_fixed,const_double")
+ (match_test "op == CONST0_RTX (mode)")))
+
+;; Return 1 if OP is the one constant integer for MODE.
+(define_predicate "const1_operand"
+ (and (match_code "const_int")
+ (match_test "op == CONST1_RTX (mode)")))
+
+
+;; Return 1 if OP is constant integer 0..7 for MODE.
+(define_predicate "const_0_to_7_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+
+;; Return 1 if OP is constant integer 2..7 for MODE.
+(define_predicate "const_2_to_7_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 2, 7)")))
+
+;; Return 1 if OP is constant integer 1..6 for MODE.
+(define_predicate "const_1_to_6_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 6)")))
+
+;; Return 1 if OP is constant integer 2..6 for MODE.
+(define_predicate "const_2_to_6_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 2, 6)")))
+
+;; Returns true if OP is either the constant zero or a register.
+(define_predicate "reg_or_0_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const0_operand")))
+
+;; Returns 1 if OP is a SYMBOL_REF.
+(define_predicate "symbol_ref_operand"
+ (match_code "symbol_ref"))
+
+;; Return true if OP is a text segment reference.
+;; This is needed for program memory address expressions.
+(define_predicate "text_segment_operand"
+ (match_code "code_label,label_ref,symbol_ref,plus,const")
+{
+ switch (GET_CODE (op))
+ {
+ case CODE_LABEL:
+ return true;
+ case LABEL_REF :
+ return true;
+ case SYMBOL_REF :
+ return SYMBOL_REF_FUNCTION_P (op);
+ case PLUS :
+ /* Assume canonical format of symbol + constant.
+ Fall through. */
+ case CONST :
+ return text_segment_operand (XEXP (op, 0), VOIDmode);
+ default :
+ return false;
+ }
+})
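+
+;; For example, a program memory address like the hypothetical RTL
+;; (const (plus (symbol_ref "foo") (const_int 2)))
+;; recurses through the CONST and PLUS cases above down to the SYMBOL_REF
+;; and is accepted if the symbol refers to a function.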
+
+;; Return true if OP is a constant that contains only one 1 in its
+;; binary representation.
+(define_predicate "single_one_operand"
+ (and (match_code "const_int")
+ (match_test "exact_log2(INTVAL (op) & GET_MODE_MASK (mode)) >= 0")))
+
+;; Return true if OP is a constant that contains only one 0 in its
+;; binary representation.
+(define_predicate "single_zero_operand"
+ (and (match_code "const_int")
+ (match_test "exact_log2(~INTVAL (op) & GET_MODE_MASK (mode)) >= 0")))
+
+;;
+(define_predicate "avr_sp_immediate_operand"
+ (and (match_code "const_int")
+ (match_test "satisfies_constraint_Csp (op)")))
+
+;; True for EQ & NE
+(define_predicate "eqne_operator"
+ (match_code "eq,ne"))
+
+;; True for GE & LT
+(define_predicate "gelt_operator"
+ (match_code "ge,lt"))
+
+;; True for GT, GTU, LE & LEU
+(define_predicate "difficult_comparison_operator"
+ (match_code "gt,gtu,le,leu"))
+
+;; False for GT, GTU, LE & LEU
+(define_predicate "simple_comparison_operator"
+ (and (match_operand 0 "comparison_operator")
+ (not (match_code "gt,gtu,le,leu"))))
+
+;; Return true if OP is a valid call operand.
+(define_predicate "call_insn_operand"
+ (and (match_code "mem")
+ (ior (match_test "register_operand (XEXP (op, 0), mode)")
+ (match_test "CONSTANT_ADDRESS_P (XEXP (op, 0))"))))
+
+;; For some insns we must ensure that no hard register is inserted
+;; into their operands because the insns are split and the split
+;; involves hard registers. An example are divmod insn that are
+;; split to insns that represent implicit library calls.
+
+;; True for register that is pseudo register.
+(define_predicate "pseudo_register_operand"
+ (and (match_operand 0 "register_operand")
+ (not (and (match_code "reg")
+ (match_test "HARD_REGISTER_P (op)")))))
+
+;; True for operand that is pseudo register or CONST_INT.
+(define_predicate "pseudo_register_or_const_int_operand"
+ (ior (match_operand 0 "const_int_operand")
+ (match_operand 0 "pseudo_register_operand")))
+
+;; We keep combiner from inserting hard registers into the input of sign- and
+;; zero-extends. A hard register in the input operand is not wanted because
+;; 32-bit multiply patterns clobber some hard registers and extends with a
+;; hard register that overlaps these clobbers won't combine to a widening
+;; multiplication. There is no need for combine to propagate or insert
+;; hard registers, register allocation can do it just as well.
+
+;; True for operand that is pseudo register at combine time.
+(define_predicate "combine_pseudo_register_operand"
+ (ior (match_operand 0 "pseudo_register_operand")
+ (and (match_operand 0 "register_operand")
+ (match_test "reload_completed || reload_in_progress"))))
+
+;; Return true if OP is a constant integer that is either
+;; 8 or 16 or 24.
+(define_predicate "const_8_16_24_operand"
+ (and (match_code "const_int")
+ (match_test "8 == INTVAL(op) || 16 == INTVAL(op) || 24 == INTVAL(op)")))
+
+;; Unsigned CONST_INT that fits in 8 bits, i.e. 0..255.
+(define_predicate "u8_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, 255)")))
+
+;; Signed CONST_INT that fits in 8 bits, i.e. -128..127.
+(define_predicate "s8_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -128, 127)")))
+
+;; One-extended CONST_INT that fits in 8 bits, i.e. -256..-1.
+(define_predicate "o8_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -256, -1)")))
+
+;; Signed CONST_INT that fits in 9 bits, i.e. -256..255.
+(define_predicate "s9_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -256, 255)")))
+
+(define_predicate "register_or_s9_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "s9_operand")))
+
+;; Unsigned CONST_INT that fits in 16 bits, i.e. 0..65535.
+(define_predicate "u16_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 0, (1<<16)-1)")))
+
+;; Signed CONST_INT that fits in 16 bits, i.e. -32768..32767.
+(define_predicate "s16_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -(1<<15), (1<<15)-1)")))
+
+;; One-extended CONST_INT that fits in 16 bits, i.e. -65536..-1.
+(define_predicate "o16_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -(1<<16), -1)")))
+
+;; Const int, fixed, or double operand
+(define_predicate "const_operand"
+ (ior (match_code "const_fixed")
+ (match_code "const_double")
+ (match_operand 0 "const_int_operand")))
+
+;; Const int, const fixed, or const double operand
+(define_predicate "nonmemory_or_const_operand"
+ (ior (match_code "const_fixed")
+ (match_code "const_double")
+ (match_operand 0 "nonmemory_operand")))
+
+;; Immediate, const fixed, or const double operand
+(define_predicate "const_or_immediate_operand"
+ (ior (match_code "const_fixed")
+ (match_code "const_double")
+ (match_operand 0 "immediate_operand")))
diff --git a/gcc-4.9/gcc/config/avr/rtems.h b/gcc-4.9/gcc/config/avr/rtems.h
new file mode 100644
index 000000000..473273b99
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/rtems.h
@@ -0,0 +1,27 @@
+/* Definitions for rtems targeting an AVR using ELF.
+ Copyright (C) 2004-2014 Free Software Foundation, Inc.
+ Contributed by Ralf Corsepius (ralf.corsepius@rtems.org).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+/* Specify predefined symbols in preprocessor. */
+
+#define TARGET_OS_CPP_BUILTINS() \
+do { \
+ builtin_define ("__rtems__"); \
+ builtin_assert ("system=rtems"); \
+} while (0)
diff --git a/gcc-4.9/gcc/config/avr/stdfix.h b/gcc-4.9/gcc/config/avr/stdfix.h
new file mode 100644
index 000000000..38d80e4dc
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/stdfix.h
@@ -0,0 +1,236 @@
+/* Copyright (C) 2007-2014 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* ISO/IEC JTC1 SC22 WG14 N1169
+ * Date: 2006-04-04
+ * ISO/IEC TR 18037
+ * Programming languages - C - Extensions to support embedded processors
+ */
+
+#ifndef _AVRGCC_STDFIX_H
+#define _AVRGCC_STDFIX_H
+
+/* 7.18a.1 Introduction. */
+/* 7.18a.3 Precision macros. */
+
+#include <stdfix-gcc.h>
+
+
+#if __SIZEOF_INT__ == 2
+
+typedef signed char int_hr_t;
+typedef unsigned char uint_uhr_t;
+
+typedef short int int_r_t;
+typedef short unsigned int uint_ur_t;
+
+typedef short int int_hk_t;
+typedef short unsigned int uint_uhk_t;
+
+typedef long int int_lr_t;
+typedef long unsigned int uint_ulr_t;
+
+typedef long int int_k_t;
+typedef long unsigned int uint_uk_t;
+
+typedef long long int int_llr_t;
+typedef long long unsigned int uint_ullr_t;
+
+typedef long long int int_lk_t;
+typedef long long unsigned int uint_ulk_t;
+
+typedef long long int int_llk_t;
+typedef long long unsigned int uint_ullk_t;
+
+#elif __SIZEOF_INT__ == 1 /* -mint8 */
+
+typedef signed char int_hr_t;
+typedef unsigned char uint_uhr_t;
+
+typedef long int int_r_t;
+typedef long unsigned int uint_ur_t;
+
+typedef long int int_hk_t;
+typedef long unsigned int uint_uhk_t;
+
+typedef long long int int_lr_t;
+typedef long long unsigned int uint_ulr_t;
+
+typedef long long int int_k_t;
+typedef long long unsigned int uint_uk_t;
+
+#endif /* __SIZEOF_INT__ == 1, 2 */
+
+
+/* 7.18a.6 The fixed-point intrinsic functions. */
+
+
+/* 7.18a.6.2 The fixed-point absolute value functions. */
+
+#define abshr __builtin_avr_abshr
+#define absr __builtin_avr_absr
+#define abslr __builtin_avr_abslr
+
+#define abshk __builtin_avr_abshk
+#define absk __builtin_avr_absk
+
+#if __SIZEOF_INT__ == 2
+
+#define abslk __builtin_avr_abslk
+#define absllr __builtin_avr_absllr /* GCC Extension */
+#define absllk __builtin_avr_absllk /* GCC Extension */
+
+#endif /* sizeof (int) == 2 */
+
+
+/* 7.18a.6.3 The fixed-point round functions. */
+
+/* The Embedded-C paper specifies results only for rounding points
+
+ 0 < RP < FBIT
+
+ As an extension, the following functions work as expected
+ with rounding points
+
+ -IBIT < RP < FBIT
+
+ For example, rounding an accum with a rounding point of -1 will
+ result in an even integer value. */
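+
+/* Illustrative values only: roundk (2.718k, 2) rounds to 2 fractional
+ bits and gives 2.75k; with the extension above, roundk (3.3k, -1)
+ rounds to a multiple of 2 and gives 4.0k. */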
+
+#define roundhr __builtin_avr_roundhr
+#define roundr __builtin_avr_roundr
+#define roundlr __builtin_avr_roundlr
+
+#define rounduhr __builtin_avr_rounduhr
+#define roundur __builtin_avr_roundur
+#define roundulr __builtin_avr_roundulr
+
+#define roundhk __builtin_avr_roundhk
+#define roundk __builtin_avr_roundk
+
+#define rounduhk __builtin_avr_rounduhk
+#define rounduk __builtin_avr_rounduk
+
+#if __SIZEOF_INT__ == 2
+
+#define roundlk __builtin_avr_roundlk
+#define roundulk __builtin_avr_roundulk
+#define roundllr __builtin_avr_roundllr /* GCC Extension */
+#define roundullr __builtin_avr_roundullr /* GCC Extension */
+#define roundllk __builtin_avr_roundllk /* GCC Extension */
+#define roundullk __builtin_avr_roundullk /* GCC Extension */
+
+#endif /* sizeof (int) == 2 */
+
+
+/* 7.18a.6.4 The fixed-point bit countls functions. */
+
+#define countlshr __builtin_avr_countlshr
+#define countlsr __builtin_avr_countlsr
+#define countlslr __builtin_avr_countlslr
+
+#define countlsuhr __builtin_avr_countlsuhr
+#define countlsur __builtin_avr_countlsur
+#define countlsulr __builtin_avr_countlsulr
+
+#define countlshk __builtin_avr_countlshk
+#define countlsk __builtin_avr_countlsk
+
+#define countlsuhk __builtin_avr_countlsuhk
+#define countlsuk __builtin_avr_countlsuk
+
+#if __SIZEOF_INT__ == 2
+
+#define countlslk __builtin_avr_countlslk
+#define countlsulk __builtin_avr_countlsulk
+#define countlsllr __builtin_avr_countlsllr /* GCC Extension */
+#define countlsullr __builtin_avr_countlsullr /* GCC Extension */
+#define countlsllk __builtin_avr_countlsllk /* GCC Extension */
+#define countlsullk __builtin_avr_countlsullk /* GCC Extension */
+
+#endif /* sizeof (int) == 2 */
+
+
+/* 7.18a.6.5 The bitwise fixed-point to integer conversion functions. */
+
+#define bitshr __builtin_avr_bitshr
+#define bitsr __builtin_avr_bitsr
+#define bitslr __builtin_avr_bitslr
+
+#define bitsuhr __builtin_avr_bitsuhr
+#define bitsur __builtin_avr_bitsur
+#define bitsulr __builtin_avr_bitsulr
+
+#define bitshk __builtin_avr_bitshk
+#define bitsk __builtin_avr_bitsk
+
+#define bitsuhk __builtin_avr_bitsuhk
+#define bitsuk __builtin_avr_bitsuk
+
+#if __SIZEOF_INT__ == 2
+
+#define bitslk __builtin_avr_bitslk
+#define bitsulk __builtin_avr_bitsulk
+#define bitsllr __builtin_avr_bitsllr /* GCC Extension */
+#define bitsullr __builtin_avr_bitsullr /* GCC Extension */
+#define bitsllk __builtin_avr_bitsllk /* GCC Extension */
+#define bitsullk __builtin_avr_bitsullk /* GCC Extension */
+
+#endif /* sizeof (int) == 2 */
+
+
+/* 7.18a.6.6 The bitwise integer to fixed-point conversion functions. */
+
+#define hrbits __builtin_avr_hrbits
+#define rbits __builtin_avr_rbits
+#define lrbits __builtin_avr_lrbits
+
+#define uhrbits __builtin_avr_uhrbits
+#define urbits __builtin_avr_urbits
+#define ulrbits __builtin_avr_ulrbits
+
+#define hkbits __builtin_avr_hkbits
+#define kbits __builtin_avr_kbits
+
+#define uhkbits __builtin_avr_uhkbits
+#define ukbits __builtin_avr_ukbits
+
+#if __SIZEOF_INT__ == 2
+
+#define lkbits __builtin_avr_lkbits
+#define ulkbits __builtin_avr_ulkbits
+#define llrbits __builtin_avr_llrbits /* GCC Extension */
+#define ullrbits __builtin_avr_ullrbits /* GCC Extension */
+#define llkbits __builtin_avr_llkbits /* GCC Extension */
+#define ullkbits __builtin_avr_ullkbits /* GCC Extension */
+
+#endif /* sizeof (int) == 2 */
+
+
+/* 7.18a.6.7 Type-generic fixed-point functions. */
+
+#define absfx __builtin_avr_absfx
+#define roundfx __builtin_avr_roundfx
+#define countlsfx __builtin_avr_countlsfx
+
+#endif /* _AVRGCC_STDFIX_H */
diff --git a/gcc-4.9/gcc/config/avr/t-avr b/gcc-4.9/gcc/config/avr/t-avr
new file mode 100644
index 000000000..75120ef1e
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/t-avr
@@ -0,0 +1,83 @@
+# Copyright (C) 2000-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+driver-avr.o: $(srcdir)/config/avr/driver-avr.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+avr-devices.o: $(srcdir)/config/avr/avr-devices.c \
+ $(srcdir)/config/avr/avr-mcus.def \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+avr-c.o: $(srcdir)/config/avr/avr-c.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(C_COMMON_H)
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+avr-log.o: $(srcdir)/config/avr/avr-log.c \
+ $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) $(TREE_H) $(INPUT_H) dumpfile.h
+ $(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+avr.o avr-c.o: $(srcdir)/config/avr/builtins.def
+
+# This overrides stdfix.h from USER_H which we supply and include
+# in our own stdfix.h as stdfix-gcc.h.
+
+EXTRA_HEADERS = $(srcdir)/config/avr/stdfix.h \
+ stdfix-gcc.h
+
+stdfix-gcc.h: $(srcdir)/ginclude/stdfix.h
+ -cp $< $@
+
+# Files and Variables auto-generated from avr-mcus.def
+
+AVR_MCUS = $(srcdir)/config/avr/avr-mcus.def
+
+# Run `avr-mcus' after you changed or added devices in avr-mcus.def
+
+.PHONY: avr-mcus
+
+avr-mcus: $(srcdir)/config/avr/t-multilib \
+ $(srcdir)/config/avr/avr-tables.opt \
+ $(srcdir)/doc/avr-mmcu.texi ; @true
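+
+# i.e. after editing avr-mcus.def, run
+#
+# make avr-mcus
+#
+# to regenerate t-multilib, avr-tables.opt and doc/avr-mmcu.texi in $(srcdir).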
+
+# Make sure that -mmcu= is supported for devices from avr-mcus.def and
+# all -mmcu= values are displayed on the help screen
+$(srcdir)/config/avr/avr-tables.opt: $(srcdir)/config/avr/genopt.sh $(AVR_MCUS)
+ $(SHELL) $< $(AVR_MCUS) > $@
+
+# Make sure that -mmcu= support is in sync with -mmcu= documentation.
+gen-avr-mmcu-texi$(build_exeext): $(srcdir)/config/avr/gen-avr-mmcu-texi.c \
+ $(AVR_MCUS) $(srcdir)/config/avr/avr-devices.c \
+ $(srcdir)/config/avr/avr-arch.h
+ $(CC_FOR_BUILD) $(CFLAGS_FOR_BUILD) $< -o $@
+
+$(srcdir)/doc/avr-mmcu.texi: gen-avr-mmcu-texi$(build_exeext)
+ $(RUN_GEN) ./$< > $@
+
+# Map -mmcu= to the right multilib variant
+# MULTILIB_OPTIONS
+# MULTILIB_DIRNAMES
+# MULTILIB_EXCEPTIONS
+# MULTILIB_MATCHES
+
+s-mlib: $(srcdir)/config/avr/t-multilib
+
+$(srcdir)/config/avr/t-multilib: $(srcdir)/config/avr/genmultilib.awk \
+ $(AVR_MCUS)
+ $(AWK) -f $< -v FORMAT=Makefile $< $(AVR_MCUS) > $@
diff --git a/gcc-4.9/gcc/config/avr/t-multilib b/gcc-4.9/gcc/config/avr/t-multilib
new file mode 100644
index 000000000..301f86496
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/t-multilib
@@ -0,0 +1,269 @@
+# Auto-generated Makefile Snippet
+# Generated by : ./gcc/config/avr/genmultilib.awk
+# Generated from : ./gcc/config/avr/avr-mcus.def
+# Used by : tmake_file from Makefile and genmultilib
+
+# Copyright (C) 2011-2014 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 3, or (at your option) any later
+# version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+MULTILIB_OPTIONS = mmcu=avr2/mmcu=avr25/mmcu=avr3/mmcu=avr31/mmcu=avr35/mmcu=avr4/mmcu=avr5/mmcu=avr51/mmcu=avr6/mmcu=avrxmega2/mmcu=avrxmega4/mmcu=avrxmega5/mmcu=avrxmega6/mmcu=avrxmega7 msp8
+
+MULTILIB_DIRNAMES = avr2 avr25 avr3 avr31 avr35 avr4 avr5 avr51 avr6 avrxmega2 avrxmega4 avrxmega5 avrxmega6 avrxmega7 tiny-stack avr25/tiny-stack
+
+MULTILIB_EXCEPTIONS = \
+ mmcu=avr3/msp8 \
+ mmcu=avr31/msp8 \
+ mmcu=avr35/msp8 \
+ mmcu=avr4/msp8 \
+ mmcu=avr5/msp8 \
+ mmcu=avr51/msp8 \
+ mmcu=avr6/msp8 \
+ mmcu=avrxmega2/msp8 \
+ mmcu=avrxmega4/msp8 \
+ mmcu=avrxmega5/msp8 \
+ mmcu=avrxmega6/msp8 \
+ mmcu=avrxmega7/msp8
+
+MULTILIB_MATCHES = \
+ mmcu?avr2=mmcu?at90s2313 \
+ mmcu?avr2=mmcu?at90s2323 \
+ mmcu?avr2=mmcu?at90s2333 \
+ mmcu?avr2=mmcu?at90s2343 \
+ mmcu?avr2=mmcu?attiny22 \
+ mmcu?avr2=mmcu?attiny26 \
+ mmcu?avr2=mmcu?at90s4414 \
+ mmcu?avr2=mmcu?at90s4433 \
+ mmcu?avr2=mmcu?at90s4434 \
+ mmcu?avr2=mmcu?at90s8515 \
+ mmcu?avr2=mmcu?at90c8534 \
+ mmcu?avr2=mmcu?at90s8535 \
+ mmcu?avr25=mmcu?ata6289 \
+ mmcu?avr25=mmcu?ata5272 \
+ mmcu?avr25=mmcu?attiny13 \
+ mmcu?avr25=mmcu?attiny13a \
+ mmcu?avr25=mmcu?attiny2313 \
+ mmcu?avr25=mmcu?attiny2313a \
+ mmcu?avr25=mmcu?attiny24 \
+ mmcu?avr25=mmcu?attiny24a \
+ mmcu?avr25=mmcu?attiny4313 \
+ mmcu?avr25=mmcu?attiny44 \
+ mmcu?avr25=mmcu?attiny44a \
+ mmcu?avr25=mmcu?attiny84 \
+ mmcu?avr25=mmcu?attiny84a \
+ mmcu?avr25=mmcu?attiny25 \
+ mmcu?avr25=mmcu?attiny45 \
+ mmcu?avr25=mmcu?attiny85 \
+ mmcu?avr25=mmcu?attiny261 \
+ mmcu?avr25=mmcu?attiny261a \
+ mmcu?avr25=mmcu?attiny461 \
+ mmcu?avr25=mmcu?attiny461a \
+ mmcu?avr25=mmcu?attiny861 \
+ mmcu?avr25=mmcu?attiny861a \
+ mmcu?avr25=mmcu?attiny43u \
+ mmcu?avr25=mmcu?attiny87 \
+ mmcu?avr25=mmcu?attiny48 \
+ mmcu?avr25=mmcu?attiny88 \
+ mmcu?avr25=mmcu?at86rf401 \
+ mmcu?avr3=mmcu?at43usb355 \
+ mmcu?avr3=mmcu?at76c711 \
+ mmcu?avr31=mmcu?atmega103 \
+ mmcu?avr31=mmcu?at43usb320 \
+ mmcu?avr35=mmcu?ata5505 \
+ mmcu?avr35=mmcu?at90usb82 \
+ mmcu?avr35=mmcu?at90usb162 \
+ mmcu?avr35=mmcu?atmega8u2 \
+ mmcu?avr35=mmcu?atmega16u2 \
+ mmcu?avr35=mmcu?atmega32u2 \
+ mmcu?avr35=mmcu?attiny167 \
+ mmcu?avr35=mmcu?attiny1634 \
+ mmcu?avr4=mmcu?ata6285 \
+ mmcu?avr4=mmcu?ata6286 \
+ mmcu?avr4=mmcu?atmega8 \
+ mmcu?avr4=mmcu?atmega8a \
+ mmcu?avr4=mmcu?atmega48 \
+ mmcu?avr4=mmcu?atmega48a \
+ mmcu?avr4=mmcu?atmega48p \
+ mmcu?avr4=mmcu?atmega48pa \
+ mmcu?avr4=mmcu?atmega88 \
+ mmcu?avr4=mmcu?atmega88a \
+ mmcu?avr4=mmcu?atmega88p \
+ mmcu?avr4=mmcu?atmega88pa \
+ mmcu?avr4=mmcu?atmega8515 \
+ mmcu?avr4=mmcu?atmega8535 \
+ mmcu?avr4=mmcu?atmega8hva \
+ mmcu?avr4=mmcu?at90pwm1 \
+ mmcu?avr4=mmcu?at90pwm2 \
+ mmcu?avr4=mmcu?at90pwm2b \
+ mmcu?avr4=mmcu?at90pwm3 \
+ mmcu?avr4=mmcu?at90pwm3b \
+ mmcu?avr4=mmcu?at90pwm81 \
+ mmcu?avr5=mmcu?ata5790 \
+ mmcu?avr5=mmcu?ata5790n \
+ mmcu?avr5=mmcu?ata5795 \
+ mmcu?avr5=mmcu?atmega16 \
+ mmcu?avr5=mmcu?atmega16a \
+ mmcu?avr5=mmcu?atmega161 \
+ mmcu?avr5=mmcu?atmega162 \
+ mmcu?avr5=mmcu?atmega163 \
+ mmcu?avr5=mmcu?atmega164a \
+ mmcu?avr5=mmcu?atmega164p \
+ mmcu?avr5=mmcu?atmega164pa \
+ mmcu?avr5=mmcu?atmega165 \
+ mmcu?avr5=mmcu?atmega165a \
+ mmcu?avr5=mmcu?atmega165p \
+ mmcu?avr5=mmcu?atmega165pa \
+ mmcu?avr5=mmcu?atmega168 \
+ mmcu?avr5=mmcu?atmega168a \
+ mmcu?avr5=mmcu?atmega168p \
+ mmcu?avr5=mmcu?atmega168pa \
+ mmcu?avr5=mmcu?atmega169 \
+ mmcu?avr5=mmcu?atmega169a \
+ mmcu?avr5=mmcu?atmega169p \
+ mmcu?avr5=mmcu?atmega169pa \
+ mmcu?avr5=mmcu?atmega16hvb \
+ mmcu?avr5=mmcu?atmega16hvbrevb \
+ mmcu?avr5=mmcu?atmega16m1 \
+ mmcu?avr5=mmcu?atmega16u4 \
+ mmcu?avr5=mmcu?atmega26hvg \
+ mmcu?avr5=mmcu?atmega32a \
+ mmcu?avr5=mmcu?atmega32 \
+ mmcu?avr5=mmcu?atmega323 \
+ mmcu?avr5=mmcu?atmega324a \
+ mmcu?avr5=mmcu?atmega324p \
+ mmcu?avr5=mmcu?atmega324pa \
+ mmcu?avr5=mmcu?atmega325 \
+ mmcu?avr5=mmcu?atmega325a \
+ mmcu?avr5=mmcu?atmega325p \
+ mmcu?avr5=mmcu?atmega3250 \
+ mmcu?avr5=mmcu?atmega3250a \
+ mmcu?avr5=mmcu?atmega3250p \
+ mmcu?avr5=mmcu?atmega3250pa \
+ mmcu?avr5=mmcu?atmega328 \
+ mmcu?avr5=mmcu?atmega328p \
+ mmcu?avr5=mmcu?atmega329 \
+ mmcu?avr5=mmcu?atmega329a \
+ mmcu?avr5=mmcu?atmega329p \
+ mmcu?avr5=mmcu?atmega329pa \
+ mmcu?avr5=mmcu?atmega3290 \
+ mmcu?avr5=mmcu?atmega3290a \
+ mmcu?avr5=mmcu?atmega3290p \
+ mmcu?avr5=mmcu?atmega3290pa \
+ mmcu?avr5=mmcu?atmega32c1 \
+ mmcu?avr5=mmcu?atmega32m1 \
+ mmcu?avr5=mmcu?atmega32u4 \
+ mmcu?avr5=mmcu?atmega32u6 \
+ mmcu?avr5=mmcu?atmega406 \
+ mmcu?avr5=mmcu?atmega64 \
+ mmcu?avr5=mmcu?atmega64a \
+ mmcu?avr5=mmcu?atmega640 \
+ mmcu?avr5=mmcu?atmega644 \
+ mmcu?avr5=mmcu?atmega644a \
+ mmcu?avr5=mmcu?atmega644p \
+ mmcu?avr5=mmcu?atmega644pa \
+ mmcu?avr5=mmcu?atmega645 \
+ mmcu?avr5=mmcu?atmega645a \
+ mmcu?avr5=mmcu?atmega645p \
+ mmcu?avr5=mmcu?atmega6450 \
+ mmcu?avr5=mmcu?atmega6450a \
+ mmcu?avr5=mmcu?atmega6450p \
+ mmcu?avr5=mmcu?atmega649 \
+ mmcu?avr5=mmcu?atmega649a \
+ mmcu?avr5=mmcu?atmega649p \
+ mmcu?avr5=mmcu?atmega6490 \
+ mmcu?avr5=mmcu?atmega16hva \
+ mmcu?avr5=mmcu?atmega16hva2 \
+ mmcu?avr5=mmcu?atmega32hvb \
+ mmcu?avr5=mmcu?atmega6490a \
+ mmcu?avr5=mmcu?atmega6490p \
+ mmcu?avr5=mmcu?atmega64c1 \
+ mmcu?avr5=mmcu?atmega64m1 \
+ mmcu?avr5=mmcu?atmega64hve \
+ mmcu?avr5=mmcu?atmega64rfa2 \
+ mmcu?avr5=mmcu?atmega64rfr2 \
+ mmcu?avr5=mmcu?atmega32hvbrevb \
+ mmcu?avr5=mmcu?atmega48hvf \
+ mmcu?avr5=mmcu?at90can32 \
+ mmcu?avr5=mmcu?at90can64 \
+ mmcu?avr5=mmcu?at90pwm161 \
+ mmcu?avr5=mmcu?at90pwm216 \
+ mmcu?avr5=mmcu?at90pwm316 \
+ mmcu?avr5=mmcu?at90scr100 \
+ mmcu?avr5=mmcu?at90usb646 \
+ mmcu?avr5=mmcu?at90usb647 \
+ mmcu?avr5=mmcu?at94k \
+ mmcu?avr5=mmcu?m3000 \
+ mmcu?avr51=mmcu?atmega128 \
+ mmcu?avr51=mmcu?atmega128a \
+ mmcu?avr51=mmcu?atmega1280 \
+ mmcu?avr51=mmcu?atmega1281 \
+ mmcu?avr51=mmcu?atmega1284 \
+ mmcu?avr51=mmcu?atmega1284p \
+ mmcu?avr51=mmcu?atmega128rfa1 \
+ mmcu?avr51=mmcu?at90can128 \
+ mmcu?avr51=mmcu?at90usb1286 \
+ mmcu?avr51=mmcu?at90usb1287 \
+ mmcu?avr6=mmcu?atmega2560 \
+ mmcu?avr6=mmcu?atmega2561 \
+ mmcu?avrxmega2=mmcu?atxmega16a4 \
+ mmcu?avrxmega2=mmcu?atxmega16d4 \
+ mmcu?avrxmega2=mmcu?atxmega32a4 \
+ mmcu?avrxmega2=mmcu?atxmega32d4 \
+ mmcu?avrxmega2=mmcu?atxmega32x1 \
+ mmcu?avrxmega2=mmcu?atmxt112sl \
+ mmcu?avrxmega2=mmcu?atmxt224 \
+ mmcu?avrxmega2=mmcu?atmxt224e \
+ mmcu?avrxmega2=mmcu?atmxt336s \
+ mmcu?avrxmega2=mmcu?atxmega16a4u \
+ mmcu?avrxmega2=mmcu?atxmega16c4 \
+ mmcu?avrxmega2=mmcu?atxmega32a4u \
+ mmcu?avrxmega2=mmcu?atxmega32c4 \
+ mmcu?avrxmega2=mmcu?atxmega32e5 \
+ mmcu?avrxmega4=mmcu?atxmega64a3 \
+ mmcu?avrxmega4=mmcu?atxmega64d3 \
+ mmcu?avrxmega4=mmcu?atxmega64a3u \
+ mmcu?avrxmega4=mmcu?atxmega64a4u \
+ mmcu?avrxmega4=mmcu?atxmega64b1 \
+ mmcu?avrxmega4=mmcu?atxmega64b3 \
+ mmcu?avrxmega4=mmcu?atxmega64c3 \
+ mmcu?avrxmega4=mmcu?atxmega64d4 \
+ mmcu?avrxmega5=mmcu?atxmega64a1 \
+ mmcu?avrxmega5=mmcu?atxmega64a1u \
+ mmcu?avrxmega6=mmcu?atxmega128a3 \
+ mmcu?avrxmega6=mmcu?atxmega128d3 \
+ mmcu?avrxmega6=mmcu?atxmega192a3 \
+ mmcu?avrxmega6=mmcu?atxmega192d3 \
+ mmcu?avrxmega6=mmcu?atxmega256a3 \
+ mmcu?avrxmega6=mmcu?atxmega256a3b \
+ mmcu?avrxmega6=mmcu?atxmega256a3bu \
+ mmcu?avrxmega6=mmcu?atxmega256d3 \
+ mmcu?avrxmega6=mmcu?atxmega128a3u \
+ mmcu?avrxmega6=mmcu?atxmega128b1 \
+ mmcu?avrxmega6=mmcu?atxmega128b3 \
+ mmcu?avrxmega6=mmcu?atxmega128c3 \
+ mmcu?avrxmega6=mmcu?atxmega128d4 \
+ mmcu?avrxmega6=mmcu?atmxt540s \
+ mmcu?avrxmega6=mmcu?atmxt540sreva \
+ mmcu?avrxmega6=mmcu?atxmega192a3u \
+ mmcu?avrxmega6=mmcu?atxmega192c3 \
+ mmcu?avrxmega6=mmcu?atxmega256a3u \
+ mmcu?avrxmega6=mmcu?atxmega256c3 \
+ mmcu?avrxmega6=mmcu?atxmega384c3 \
+ mmcu?avrxmega6=mmcu?atxmega384d3 \
+ mmcu?avrxmega7=mmcu?atxmega128a1 \
+ mmcu?avrxmega7=mmcu?atxmega128a1u \
+ mmcu?avrxmega7=mmcu?atxmega128a4u
diff --git a/gcc-4.9/gcc/config/avr/t-rtems b/gcc-4.9/gcc/config/avr/t-rtems
new file mode 100644
index 000000000..a3ef8bd80
--- /dev/null
+++ b/gcc-4.9/gcc/config/avr/t-rtems
@@ -0,0 +1,3 @@
+# Multilibs for avr RTEMS targets.
+
+# At the moment, this is just a stub.