aboutsummaryrefslogtreecommitdiffstats
path: root/gcc-4.7/gcc/config
diff options
context:
space:
mode:
authorPavel Chupin <pavel.v.chupin@intel.com>2013-05-30 15:35:10 +0400
committerPavel Chupin <pavel.v.chupin@intel.com>2013-05-30 17:34:03 +0400
commit41eff3d706b202f682f64fcc7773c64abd59ac45 (patch)
tree7ae546411a62a34687d2342880a4213df13a9fe3 /gcc-4.7/gcc/config
parent53ac5cd1ae9fe073c4c14d3e068f778bfc1999d4 (diff)
downloadtoolchain_gcc-41eff3d706b202f682f64fcc7773c64abd59ac45.tar.gz
toolchain_gcc-41eff3d706b202f682f64fcc7773c64abd59ac45.tar.bz2
toolchain_gcc-41eff3d706b202f682f64fcc7773c64abd59ac45.zip
[4.7, 4.8] Release basic tuning for new Silvermont architecture
Support new switches: -march=slm/-mtune=slm This is backport of trunk r199444: 2013-05-30 Yuri Rumyantsev <yuri.s.rumyantsev@intel.com> Igor Zamyatin <igor.zamyatin@intel.com> Silvermont (SLM) architecture pipeline model, tuning and insn selection. * config.gcc: Add slm config options and target. * config/i386/slm.md: New. * config/i386/driver-i386.c (host_detect_local_cpu): Check movbe. * gcc/config/i386/i386-c.c (ix86_target_macros_internal): New case PROCESSOR_SLM. (ix86_target_macros_internal): Likewise. * gcc/config/i386/i386.c (slm_cost): New cost. (m_SLM): New macro flag. (initial_ix86_tune_features): Set m_SLM. (x86_accumulate_outgoing_args): Likewise. (x86_arch_always_fancy_math_387): Likewise. (processor_target_table): Add slm cost. (cpu_names): Add slm cpu name. (x86_option_override_internal): Set SLM ISA. (ix86_issue_rate): New case PROCESSOR_SLM. (ia32_multipass_dfa_lookahead): Likewise. (fold_builtin_cpu): Add slm. * config/i386/i386.h (TARGET_SLM): New target macro. (target_cpu_default): Add TARGET_CPU_DEFAULT_slm. (processor_type): Add PROCESSOR_SLM. * config/i386/i386.md (cpu): Add new value "slm". (slm.md): Include slm.md. * libgcc/config/i386/cpuinfo.c (INTEL_SLM): New enum value. Change-Id: I3ad6f5584e3fd5de52ac608dc699daaad24f2fe4 Signed-off-by: Pavel Chupin <pavel.v.chupin@intel.com>
Diffstat (limited to 'gcc-4.7/gcc/config')
-rw-r--r--gcc-4.7/gcc/config/i386/driver-i386.c10
-rw-r--r--gcc-4.7/gcc/config/i386/i386-c.c7
-rw-r--r--gcc-4.7/gcc/config/i386/i386.c125
-rw-r--r--gcc-4.7/gcc/config/i386/i386.h3
-rw-r--r--gcc-4.7/gcc/config/i386/i386.md3
-rw-r--r--gcc-4.7/gcc/config/i386/slm.md758
6 files changed, 881 insertions, 25 deletions
diff --git a/gcc-4.7/gcc/config/i386/driver-i386.c b/gcc-4.7/gcc/config/i386/driver-i386.c
index 0c006a342..9bd76fb0e 100644
--- a/gcc-4.7/gcc/config/i386/driver-i386.c
+++ b/gcc-4.7/gcc/config/i386/driver-i386.c
@@ -605,8 +605,14 @@ const char *host_detect_local_cpu (int argc, const char **argv)
/* Assume Sandy Bridge. */
cpu = "corei7-avx";
else if (has_sse4_2)
- /* Assume Core i7. */
- cpu = "corei7";
+ {
+ if (has_movbe)
+ /* Assume SLM. */
+ cpu = "slm";
+ else
+ /* Assume Core i7. */
+ cpu = "corei7";
+ }
else if (has_ssse3)
{
if (has_movbe)
diff --git a/gcc-4.7/gcc/config/i386/i386-c.c b/gcc-4.7/gcc/config/i386/i386-c.c
index 8fb3b3187..bf93236b4 100644
--- a/gcc-4.7/gcc/config/i386/i386-c.c
+++ b/gcc-4.7/gcc/config/i386/i386-c.c
@@ -140,6 +140,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
def_or_undef (parse_in, "__atom");
def_or_undef (parse_in, "__atom__");
break;
+ case PROCESSOR_SLM:
+ def_or_undef (parse_in, "__slm");
+ def_or_undef (parse_in, "__slm__");
+ break;
/* use PROCESSOR_max to not set/unset the arch macro. */
case PROCESSOR_max:
break;
@@ -225,6 +229,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
case PROCESSOR_ATOM:
def_or_undef (parse_in, "__tune_atom__");
break;
+ case PROCESSOR_SLM:
+ def_or_undef (parse_in, "__tune_slm__");
+ break;
case PROCESSOR_GENERIC32:
case PROCESSOR_GENERIC64:
break;
diff --git a/gcc-4.7/gcc/config/i386/i386.c b/gcc-4.7/gcc/config/i386/i386.c
index 50fe1e24e..5c2dd46c3 100644
--- a/gcc-4.7/gcc/config/i386/i386.c
+++ b/gcc-4.7/gcc/config/i386/i386.c
@@ -1725,6 +1725,79 @@ struct processor_costs atom_cost = {
1, /* cond_not_taken_branch_cost. */
};
+static const
+struct processor_costs slm_cost = {
+ COSTS_N_INSNS (1), /* cost of an add instruction */
+ COSTS_N_INSNS (1) + 1, /* cost of a lea instruction */
+ COSTS_N_INSNS (1), /* variable shift costs */
+ COSTS_N_INSNS (1), /* constant shift costs */
+ {COSTS_N_INSNS (3), /* cost of starting multiply for QI */
+ COSTS_N_INSNS (4), /* HI */
+ COSTS_N_INSNS (3), /* SI */
+ COSTS_N_INSNS (4), /* DI */
+ COSTS_N_INSNS (2)}, /* other */
+ 0, /* cost of multiply per each bit set */
+ {COSTS_N_INSNS (18), /* cost of a divide/mod for QI */
+ COSTS_N_INSNS (26), /* HI */
+ COSTS_N_INSNS (42), /* SI */
+ COSTS_N_INSNS (74), /* DI */
+ COSTS_N_INSNS (74)}, /* other */
+ COSTS_N_INSNS (1), /* cost of movsx */
+ COSTS_N_INSNS (1), /* cost of movzx */
+ 8, /* "large" insn */
+ 17, /* MOVE_RATIO */
+ 4, /* cost for loading QImode using movzbl */
+ {4, 4, 4}, /* cost of loading integer registers
+ in QImode, HImode and SImode.
+ Relative to reg-reg move (2). */
+ {4, 4, 4}, /* cost of storing integer registers */
+ 4, /* cost of reg,reg fld/fst */
+ {12, 12, 12}, /* cost of loading fp registers
+ in SFmode, DFmode and XFmode */
+ {6, 6, 8}, /* cost of storing fp registers
+ in SFmode, DFmode and XFmode */
+ 2, /* cost of moving MMX register */
+ {8, 8}, /* cost of loading MMX registers
+ in SImode and DImode */
+ {8, 8}, /* cost of storing MMX registers
+ in SImode and DImode */
+ 2, /* cost of moving SSE register */
+ {8, 8, 8}, /* cost of loading SSE registers
+ in SImode, DImode and TImode */
+ {8, 8, 8}, /* cost of storing SSE registers
+ in SImode, DImode and TImode */
+ 5, /* MMX or SSE register to integer */
+ 32, /* size of l1 cache. */
+ 256, /* size of l2 cache. */
+ 64, /* size of prefetch block */
+ 6, /* number of parallel prefetches */
+ 3, /* Branch cost */
+ COSTS_N_INSNS (8), /* cost of FADD and FSUB insns. */
+ COSTS_N_INSNS (8), /* cost of FMUL instruction. */
+ COSTS_N_INSNS (20), /* cost of FDIV instruction. */
+ COSTS_N_INSNS (8), /* cost of FABS instruction. */
+ COSTS_N_INSNS (8), /* cost of FCHS instruction. */
+ COSTS_N_INSNS (40), /* cost of FSQRT instruction. */
+ {{libcall, {{11, loop, false}, {-1, rep_prefix_4_byte, false}}},
+ {libcall, {{32, loop, false}, {64, rep_prefix_4_byte, false},
+ {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
+ {{libcall, {{8, loop, false}, {15, unrolled_loop, false},
+ {2048, rep_prefix_4_byte, false}, {-1, libcall, false}}},
+ {libcall, {{24, loop, false}, {32, unrolled_loop, false},
+ {8192, rep_prefix_8_byte, false}, {-1, libcall, false}}}},
+ 1, /* scalar_stmt_cost. */
+ 1, /* scalar load_cost. */
+ 1, /* scalar_store_cost. */
+ 1, /* vec_stmt_cost. */
+ 1, /* vec_to_scalar_cost. */
+ 1, /* scalar_to_vec_cost. */
+ 1, /* vec_align_load_cost. */
+ 2, /* vec_unalign_load_cost. */
+ 1, /* vec_store_cost. */
+ 3, /* cond_taken_branch_cost. */
+ 1, /* cond_not_taken_branch_cost. */
+};
+
/* Generic64 should produce code tuned for Nocona and K8. */
static const
struct processor_costs generic64_cost = {
@@ -1893,6 +1966,7 @@ const struct processor_costs *ix86_cost = &pentium_cost;
#define m_CORE2I7_64 (m_CORE2_64 | m_COREI7_64)
#define m_CORE2I7 (m_CORE2I7_32 | m_CORE2I7_64)
#define m_ATOM (1<<PROCESSOR_ATOM)
+#define m_SLM (1<<PROCESSOR_SLM)
#define m_GEODE (1<<PROCESSOR_GEODE)
#define m_K6 (1<<PROCESSOR_K6)
@@ -1933,7 +2007,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
m_486 | m_PENT,
/* X86_TUNE_UNROLL_STRLEN */
- m_486 | m_PENT | m_PPRO | m_ATOM | m_CORE2I7 | m_K6 | m_AMD_MULTIPLE | m_GENERIC,
+ m_486 | m_PENT | m_PPRO | m_ATOM | m_SLM | m_CORE2I7 | m_K6 | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_BRANCH_PREDICTION_HINTS: Branch hints were put in P4 based
on simulation result. But after P4 was made, no performance benefit
@@ -1945,11 +2019,11 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
~m_386,
/* X86_TUNE_USE_SAHF */
- m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC,
+ m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_K6_GEODE | m_K8 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_GENERIC,
/* X86_TUNE_MOVX: Enable to zero extend integer registers to avoid
partial dependencies. */
- m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC,
+ m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_PARTIAL_REG_STALL: We probably ought to watch for partial
register stalls on Generic32 compilation setting as well. However
@@ -1968,13 +2042,13 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
m_386 | m_486 | m_K6_GEODE,
/* X86_TUNE_USE_SIMODE_FIOP */
- ~(m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC),
+ ~(m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC),
/* X86_TUNE_USE_MOV0 */
m_K6,
/* X86_TUNE_USE_CLTD */
- ~(m_PENT | m_CORE2I7 | m_ATOM | m_K6 | m_GENERIC),
+ ~(m_PENT | m_ATOM | m_SLM | m_K6),
/* X86_TUNE_USE_XCHGB: Use xchgb %rh,%rl instead of rolw/rorw $8,rx. */
m_PENT4,
@@ -1989,7 +2063,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
~(m_PENT | m_PPRO),
/* X86_TUNE_PROMOTE_QIMODE */
- m_386 | m_486 | m_PENT | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
+ m_386 | m_486 | m_PENT | m_CORE2I7 | m_ATOM | m_SLM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_FAST_PREFIX */
~(m_386 | m_486 | m_PENT),
@@ -2030,10 +2104,10 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
/* X86_TUNE_INTEGER_DFMODE_MOVES: Enable if integer moves are preferred
for DFmode copies */
- ~(m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GEODE | m_AMD_MULTIPLE | m_ATOM | m_GENERIC),
+ ~(m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_GEODE | m_AMD_MULTIPLE | m_GENERIC),
/* X86_TUNE_PARTIAL_REG_DEPENDENCY */
- m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
+ m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_SSE_PARTIAL_REG_DEPENDENCY: In the Generic model we have a
conflict here in between PPro/Pentium4 based chips that thread 128bit
@@ -2044,13 +2118,13 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
shows that disabling this option on P4 brings over 20% SPECfp regression,
while enabling it on K8 brings roughly 2.4% regression that can be partly
masked by careful scheduling of moves. */
- m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMDFAM10 | m_BDVER | m_GENERIC,
+ m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_AMDFAM10 | m_BDVER | m_GENERIC,
/* X86_TUNE_SSE_UNALIGNED_LOAD_OPTIMAL */
- m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER1,
+ m_COREI7 | m_AMDFAM10 | m_BDVER | m_BTVER1 | m_SLM,
/* X86_TUNE_SSE_UNALIGNED_STORE_OPTIMAL */
- m_COREI7 | m_BDVER,
+ m_COREI7 | m_BDVER | m_SLM,
/* X86_TUNE_SSE_PACKED_SINGLE_INSN_OPTIMAL */
m_BDVER ,
@@ -2068,7 +2142,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
m_PPRO | m_P4_NOCONA,
/* X86_TUNE_MEMORY_MISMATCH_STALL */
- m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
+ m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_PROLOGUE_USING_MOVE */
m_PPRO | m_CORE2I7 | m_ATOM | m_ATHLON_K8 | m_GENERIC,
@@ -2090,16 +2164,16 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
/* X86_TUNE_FOUR_JUMP_LIMIT: Some CPU cores are not able to predict more
than 4 branch instructions in the 16 byte window. */
- m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
+ m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_SCHEDULE */
- m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
+ m_PENT | m_PPRO | m_CORE2I7 | m_ATOM | m_SLM | m_K6_GEODE | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_USE_BT */
- m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC,
+ m_CORE2I7 | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC,
/* X86_TUNE_USE_INCDEC */
- ~(m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_GENERIC),
+ ~(m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_GENERIC),
/* X86_TUNE_PAD_RETURNS */
m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC,
@@ -2108,7 +2182,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
m_ATOM,
/* X86_TUNE_EXT_80387_CONSTANTS */
- m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC,
+ m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_K6_GEODE | m_ATHLON_K8 | m_GENERIC,
/* X86_TUNE_SHORTEN_X87_SSE */
~m_K8,
@@ -2156,7 +2230,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
/* X86_TUNE_OPT_AGU: Optimize for Address Generation Unit. This flag
will impact LEA instruction selection. */
- m_ATOM,
+ m_ATOM | m_SLM,
/* X86_TUNE_VECTORIZE_DOUBLE: Enable double precision vector
instructions. */
@@ -2177,7 +2251,7 @@ static unsigned int initial_ix86_tune_features[X86_TUNE_LAST] = {
/* X86_TUNE_REASSOC_FP_TO_PARALLEL: Try to produce parallel computations
during reassociation of fp computation. */
- m_ATOM
+ m_ATOM | m_SLM,
};
/* Feature tests against the various architecture variations. */
@@ -2203,10 +2277,10 @@ static unsigned int initial_ix86_arch_features[X86_ARCH_LAST] = {
};
static const unsigned int x86_accumulate_outgoing_args
- = m_PPRO | m_P4_NOCONA | m_ATOM | m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC;
+ = m_PPRO | m_P4_NOCONA | m_ATOM | m_SLM | m_CORE2I7 | m_AMD_MULTIPLE | m_GENERIC;
static const unsigned int x86_arch_always_fancy_math_387
- = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_AMD_MULTIPLE | m_GENERIC;
+ = m_PENT | m_PPRO | m_P4_NOCONA | m_CORE2I7 | m_ATOM | m_SLM | m_AMD_MULTIPLE | m_GENERIC;
static const unsigned int x86_avx256_split_unaligned_load
= m_COREI7 | m_GENERIC;
@@ -2601,7 +2675,8 @@ static const struct ptt processor_target_table[PROCESSOR_max] =
{&bdver1_cost, 32, 24, 32, 7, 32},
{&bdver2_cost, 32, 24, 32, 7, 32},
{&btver1_cost, 32, 24, 32, 7, 32},
- {&atom_cost, 16, 15, 16, 7, 16}
+ {&atom_cost, 16, 15, 16, 7, 16},
+ {&slm_cost, 16, 15, 16, 7, 16}
};
static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
@@ -2621,6 +2696,7 @@ static const char *const cpu_names[TARGET_CPU_DEFAULT_max] =
"core2",
"corei7",
"atom",
+ "slm",
"geode",
"k6",
"k6-2",
@@ -3016,6 +3092,9 @@ ix86_option_override_internal (bool main_args_p)
{"atom", PROCESSOR_ATOM, CPU_ATOM,
PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
| PTA_SSSE3 | PTA_CX16 | PTA_MOVBE},
+ {"slm", PROCESSOR_SLM, CPU_SLM,
+ PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2 | PTA_SSE3
+ | PTA_SSSE3 | PTA_SSE4_1 | PTA_SSE4_2 | PTA_CX16 | PTA_MOVBE},
{"geode", PROCESSOR_GEODE, CPU_GEODE,
PTA_MMX | PTA_3DNOW | PTA_3DNOW_A | PTA_PREFETCH_SSE},
{"k6", PROCESSOR_K6, CPU_K6, PTA_MMX},
@@ -23727,6 +23806,7 @@ ix86_issue_rate (void)
{
case PROCESSOR_PENTIUM:
case PROCESSOR_ATOM:
+ case PROCESSOR_SLM:
case PROCESSOR_K6:
return 2;
@@ -23992,6 +24072,7 @@ ia32_multipass_dfa_lookahead (void)
case PROCESSOR_COREI7_32:
case PROCESSOR_COREI7_64:
case PROCESSOR_ATOM:
+ case PROCESSOR_SLM:
/* Generally, we want haifa-sched:max_issue() to look ahead as far
as many instructions can be executed on a cycle, i.e.,
issue_rate. I wonder why tuning for many CPUs does not do this. */
diff --git a/gcc-4.7/gcc/config/i386/i386.h b/gcc-4.7/gcc/config/i386/i386.h
index bca5eb0e8..e01cc4797 100644
--- a/gcc-4.7/gcc/config/i386/i386.h
+++ b/gcc-4.7/gcc/config/i386/i386.h
@@ -248,6 +248,7 @@ extern const struct processor_costs ix86_size_cost;
#define TARGET_BDVER2 (ix86_tune == PROCESSOR_BDVER2)
#define TARGET_BTVER1 (ix86_tune == PROCESSOR_BTVER1)
#define TARGET_ATOM (ix86_tune == PROCESSOR_ATOM)
+#define TARGET_SLM (ix86_tune == PROCESSOR_SLM)
/* Feature tests against the various tunings. */
enum ix86_tune_indices {
@@ -593,6 +594,7 @@ enum target_cpu_default
TARGET_CPU_DEFAULT_core2,
TARGET_CPU_DEFAULT_corei7,
TARGET_CPU_DEFAULT_atom,
+ TARGET_CPU_DEFAULT_slm,
TARGET_CPU_DEFAULT_geode,
TARGET_CPU_DEFAULT_k6,
@@ -2072,6 +2074,7 @@ enum processor_type
PROCESSOR_BDVER2,
PROCESSOR_BTVER1,
PROCESSOR_ATOM,
+ PROCESSOR_SLM,
PROCESSOR_max
};
diff --git a/gcc-4.7/gcc/config/i386/i386.md b/gcc-4.7/gcc/config/i386/i386.md
index 73aa21747..1b630da8e 100644
--- a/gcc-4.7/gcc/config/i386/i386.md
+++ b/gcc-4.7/gcc/config/i386/i386.md
@@ -302,7 +302,7 @@
;; Processor type.
(define_attr "cpu" "none,pentium,pentiumpro,geode,k6,athlon,k8,core2,corei7,
- atom,generic64,amdfam10,bdver1,bdver2,btver1"
+ atom,slm,generic64,amdfam10,bdver1,bdver2,btver1"
(const (symbol_ref "ix86_schedule")))
;; A basic instruction type. Refinements due to arguments to be
@@ -922,6 +922,7 @@
(include "bdver1.md")
(include "geode.md")
(include "atom.md")
+(include "slm.md")
(include "core2.md")
diff --git a/gcc-4.7/gcc/config/i386/slm.md b/gcc-4.7/gcc/config/i386/slm.md
new file mode 100644
index 000000000..3ac919e37
--- /dev/null
+++ b/gcc-4.7/gcc/config/i386/slm.md
@@ -0,0 +1,758 @@
+;; Silvermont(SLM) Scheduling
+;; Copyright (C) 2009, 2010 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+;;
+;; Silvermont has 2 out-of-order IEC, 2 in-order FEC and 1 in-order MEC.
+
+
+(define_automaton "slm")
+
+;; EU: Execution Unit
+;; Silvermont EUs are connected by port 0 or port 1.
+
+;; SLM has two ports: port 0 and port 1 connecting to all execution units
+(define_cpu_unit "slm-port-0,slm-port-1" "slm")
+
+(define_cpu_unit "slm-ieu-0, slm-ieu-1,
+ slm-imul, slm-feu-0, slm-feu-1"
+ "slm")
+
+(define_reservation "slm-all-ieu" "(slm-ieu-0 + slm-ieu-1 + slm-imul)")
+(define_reservation "slm-all-feu" "(slm-feu-0 + slm-feu-1)")
+(define_reservation "slm-all-eu" "(slm-all-ieu + slm-all-feu)")
+(define_reservation "slm-fp-0" "(slm-port-0 + slm-feu-0)")
+
+;; Some EUs have duplicated copies and can be accessed via either
+;; port 0 or port 1
+;; (define_reservation "slm-port-either" "(slm-port-0 | slm-port-1)"
+(define_reservation "slm-port-dual" "(slm-port-0 + slm-port-1)")
+
+;;; fmul insn can have 4 or 5 cycles latency
+(define_reservation "slm-fmul-5c"
+ "(slm-port-0 + slm-feu-0), slm-feu-0, nothing*3")
+(define_reservation "slm-fmul-4c" "(slm-port-0 + slm-feu-0), nothing*3")
+
+;;; fadd can have 3 cycles latency depending on instruction forms
+(define_reservation "slm-fadd-3c" "(slm-port-1 + slm-feu-1), nothing*2")
+(define_reservation "slm-fadd-4c"
+ "(slm-port-1 + slm-feu-1), slm-feu-1, nothing*2")
+
+;;; imul insn has 3 cycles latency for SI operands
+(define_reservation "slm-imul-32"
+ "(slm-port-1 + slm-imul), nothing*2")
+(define_reservation "slm-imul-mem-32"
+ "(slm-port-1 + slm-imul + slm-port-0), nothing*2")
+;;; imul has 4 cycles latency for DI operands with 1/2 tput
+(define_reservation "slm-imul-64"
+ "(slm-port-1 + slm-imul), slm-imul, nothing*2")
+
+;;; dual-execution instructions can have 1,2,4,5 cycles latency depending on
+;;; instruction forms
+(define_reservation "slm-dual-1c" "(slm-port-dual + slm-all-eu)")
+(define_reservation "slm-dual-2c"
+ "(slm-port-dual + slm-all-eu, nothing)")
+
+;;; Most of simple ALU instructions have 1 cycle latency. Some of them
+;;; issue in port 0, some in port 1 and some in either port.
+(define_reservation "slm-simple-0" "(slm-port-0 + slm-ieu-0)")
+(define_reservation "slm-simple-1" "(slm-port-1 + slm-ieu-1)")
+(define_reservation "slm-simple-either" "(slm-simple-0 | slm-simple-1)")
+
+;;; Complex macro-instruction has variants of latency, and uses both ports.
+(define_reservation "slm-complex" "(slm-port-dual + slm-all-eu)")
+
+(define_insn_reservation "slm_other" 9
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "other")
+ (eq_attr "atom_unit" "!jeu")))
+ "slm-complex, slm-all-eu*8")
+
+;; return has type "other" with atom_unit "jeu"
+(define_insn_reservation "slm_other_2" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "other")
+ (eq_attr "atom_unit" "jeu")))
+ "slm-dual-1c")
+
+(define_insn_reservation "slm_multi" 9
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "multi"))
+ "slm-complex, slm-all-eu*8")
+
+;; Normal alu insns without carry
+(define_insn_reservation "slm_alu" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu")
+ (and (eq_attr "memory" "none")
+ (eq_attr "use_carry" "0"))))
+ "slm-simple-either")
+
+;; Normal alu insns without carry, but use MEC.
+(define_insn_reservation "slm_alu_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu")
+ (and (eq_attr "memory" "!none")
+ (eq_attr "use_carry" "0"))))
+ "slm-simple-either")
+
+;; Alu insn consuming CF, such as add/sbb
+(define_insn_reservation "slm_alu_carry" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu")
+ (and (eq_attr "memory" "none")
+ (eq_attr "use_carry" "1"))))
+ "slm-simple-either, nothing")
+
+;; Alu insn consuming CF, such as add/sbb
+(define_insn_reservation "slm_alu_carry_mem" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu")
+ (and (eq_attr "memory" "!none")
+ (eq_attr "use_carry" "1"))))
+ "slm-simple-either, nothing")
+
+(define_insn_reservation "slm_alu1" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu1")
+ (eq_attr "memory" "none") (eq_attr "prefix_0f" "0")))
+ "slm-simple-either")
+
+;; bsf and bsr insn
+(define_insn_reservation "slm_alu1_1" 10
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu1")
+ (eq_attr "memory" "none") (eq_attr "prefix_0f" "1")))
+ "slm-simple-1, slm-ieu-1*9")
+
+(define_insn_reservation "slm_alu1_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "alu1")
+ (eq_attr "memory" "!none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_negnot" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "negnot")
+ (eq_attr "memory" "none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_negnot_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "negnot")
+ (eq_attr "memory" "!none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_imov" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_imov_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imov")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0")
+
+;; 16<-16, 32<-32
+(define_insn_reservation "slm_imovx" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imovx")
+ (and (eq_attr "memory" "none")
+ (ior (and (match_operand:HI 0 "register_operand")
+ (match_operand:HI 1 "general_operand"))
+ (and (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "general_operand"))))))
+ "slm-simple-either")
+
+;; 16<-16, 32<-32, mem
+(define_insn_reservation "slm_imovx_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imovx")
+ (and (eq_attr "memory" "!none")
+ (ior (and (match_operand:HI 0 "register_operand")
+ (match_operand:HI 1 "general_operand"))
+ (and (match_operand:SI 0 "register_operand")
+ (match_operand:SI 1 "general_operand"))))))
+ "slm-simple-either")
+
+;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8
+(define_insn_reservation "slm_imovx_2" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imovx")
+ (and (eq_attr "memory" "none")
+ (ior (match_operand:QI 0 "register_operand")
+ (ior (and (match_operand:SI 0 "register_operand")
+ (not (match_operand:SI 1 "general_operand")))
+ (match_operand:DI 0 "register_operand"))))))
+ "slm-simple-either")
+
+;; 32<-16, 32<-8, 64<-16, 64<-8, 64<-32, 8<-8, mem
+(define_insn_reservation "slm_imovx_2_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imovx")
+ (and (eq_attr "memory" "!none")
+ (ior (match_operand:QI 0 "register_operand")
+ (ior (and (match_operand:SI 0 "register_operand")
+ (not (match_operand:SI 1 "general_operand")))
+ (match_operand:DI 0 "register_operand"))))))
+ "slm-simple-0")
+
+;; 16<-8
+(define_insn_reservation "slm_imovx_3" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imovx")
+ (and (match_operand:HI 0 "register_operand")
+ (match_operand:QI 1 "general_operand"))))
+ "slm-simple-0, nothing*2")
+
+(define_insn_reservation "slm_lea" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "lea")
+ (eq_attr "mode" "!HI")))
+ "slm-simple-either")
+
+;; lea 16bit address is complex insn
+(define_insn_reservation "slm_lea_2" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "lea")
+ (eq_attr "mode" "HI")))
+ "slm-complex, slm-all-eu")
+
+(define_insn_reservation "slm_incdec" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "incdec")
+ (eq_attr "memory" "none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_incdec_mem" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "incdec")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0, nothing*2")
+
+;; simple shift instruction use SHIFT eu, none memory
+(define_insn_reservation "slm_ishift" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ishift")
+ (and (eq_attr "memory" "none") (eq_attr "prefix_0f" "0"))))
+ "slm-simple-0")
+
+;; simple shift instruction use SHIFT eu, memory
+(define_insn_reservation "slm_ishift_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ishift")
+ (and (eq_attr "memory" "!none") (eq_attr "prefix_0f" "0"))))
+ "slm-simple-0")
+
+;; DF shift (prefixed with 0f) is complex insn with latency of 4 cycles
+(define_insn_reservation "slm_ishift_3" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ishift")
+ (eq_attr "prefix_0f" "1")))
+ "slm-complex, slm-all-eu*3")
+
+(define_insn_reservation "slm_ishift1" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ishift1")
+ (eq_attr "memory" "none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_ishift1_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ishift1")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_rotate" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "rotate")
+ (eq_attr "memory" "none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_rotate_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "rotate")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_rotate1" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "rotate1")
+ (eq_attr "memory" "none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_rotate1_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "rotate1")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_imul" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imul")
+ (and (eq_attr "memory" "none") (eq_attr "mode" "SI"))))
+ "slm-imul-32")
+
+(define_insn_reservation "slm_imul_mem" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imul")
+ (and (eq_attr "memory" "!none") (eq_attr "mode" "SI"))))
+ "slm-imul-mem-32")
+
+;; latency set to 4 as common 64x64 imul with 1/2 tput
+(define_insn_reservation "slm_imul_3" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "imul")
+ (eq_attr "mode" "!SI")))
+ "slm-imul-64")
+
+(define_insn_reservation "slm_idiv" 33
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "idiv"))
+ "slm-complex, slm-all-eu*16, nothing*16")
+
+(define_insn_reservation "slm_icmp" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "icmp")
+ (eq_attr "memory" "none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_icmp_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "icmp")
+ (eq_attr "memory" "!none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_test" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "test")
+ (eq_attr "memory" "none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_test_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "test")
+ (eq_attr "memory" "!none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_ibr" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ibr")
+ (eq_attr "memory" "!load")))
+ "slm-simple-1")
+
+;; complex if jump target is from address
+(define_insn_reservation "slm_ibr_2" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ibr")
+ (eq_attr "memory" "load")))
+ "slm-complex, slm-all-eu")
+
+(define_insn_reservation "slm_setcc" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "setcc")
+ (eq_attr "memory" "!store")))
+ "slm-simple-either")
+
+;; 2 cycles complex if target is in memory
+(define_insn_reservation "slm_setcc_2" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "setcc")
+ (eq_attr "memory" "store")))
+ "slm-complex, slm-all-eu")
+
+(define_insn_reservation "slm_icmov" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "icmov")
+ (eq_attr "memory" "none")))
+ "slm-simple-either, nothing")
+
+(define_insn_reservation "slm_icmov_mem" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "icmov")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0, nothing")
+
+;; UCODE if segreg, ignored
+(define_insn_reservation "slm_push" 2
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "push"))
+ "slm-dual-2c")
+
+;; pop r64 is 1 cycle. UCODE if segreg, ignored
+(define_insn_reservation "slm_pop" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "pop")
+ (eq_attr "mode" "DI")))
+ "slm-dual-1c")
+
+;; pop non-r64 is 2 cycles. UCODE if segreg, ignored
+(define_insn_reservation "slm_pop_2" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "pop")
+ (eq_attr "mode" "!DI")))
+ "slm-dual-2c")
+
+;; UCODE if segreg, ignored
+(define_insn_reservation "slm_call" 1
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "call"))
+ "slm-dual-1c")
+
+(define_insn_reservation "slm_callv" 1
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "callv"))
+ "slm-dual-1c")
+
+(define_insn_reservation "slm_leave" 3
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "leave"))
+ "slm-complex, slm-all-eu*2")
+
+(define_insn_reservation "slm_str" 3
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "str"))
+ "slm-complex, slm-all-eu*2")
+
+(define_insn_reservation "slm_sselog" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sselog")
+ (eq_attr "memory" "none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_sselog_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sselog")
+ (eq_attr "memory" "!none")))
+ "slm-simple-either")
+
+(define_insn_reservation "slm_sselog1" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sselog1")
+ (eq_attr "memory" "none")))
+ "slm-simple-0")
+
+(define_insn_reservation "slm_sselog1_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sselog1")
+ (eq_attr "memory" "!none")))
+ "slm-simple-0")
+
+;; not pmad, not psad
+(define_insn_reservation "slm_sseiadd" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseiadd")
+ (and (not (match_operand:V2DI 0 "register_operand"))
+ (and (eq_attr "atom_unit" "!simul")
+ (eq_attr "atom_unit" "!complex")))))
+ "slm-simple-either")
+
+;; pmad, psad and 64
+(define_insn_reservation "slm_sseiadd_2" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseiadd")
+ (and (not (match_operand:V2DI 0 "register_operand"))
+ (and (eq_attr "atom_unit" "simul" )
+ (eq_attr "mode" "DI")))))
+ "slm-fmul-4c")
+
+;; pmad, psad and 128
+(define_insn_reservation "slm_sseiadd_3" 5
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseiadd")
+ (and (not (match_operand:V2DI 0 "register_operand"))
+ (and (eq_attr "atom_unit" "simul" )
+ (eq_attr "mode" "TI")))))
+ "slm-fmul-5c")
+
+;; if paddq(64 bit op), phadd/phsub
+(define_insn_reservation "slm_sseiadd_4" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseiadd")
+ (ior (match_operand:V2DI 0 "register_operand")
+ (eq_attr "atom_unit" "complex"))))
+ "slm-fadd-4c")
+
+;; SSE integer shift with an immediate count (and not the sishuf
+;; subclass): 1 cycle, either simple port.
+(define_insn_reservation "slm_sseishft" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseishft")
+ (and (eq_attr "atom_unit" "!sishuf")
+ (match_operand 2 "immediate_operand"))))
+ "slm-simple-either")
+
+;; palignr / psrldq style ops: type sseishft1, or sseishft with the
+;; "sishuf" unit and an immediate count.  Restricted to simple port 0.
+(define_insn_reservation "slm_sseishft_2" 1
+ (and (eq_attr "cpu" "slm")
+ (ior (eq_attr "type" "sseishft1")
+ (and (eq_attr "type" "sseishft")
+ (and (eq_attr "atom_unit" "sishuf")
+ (match_operand 2 "immediate_operand")))))
+ "slm-simple-0")
+
+;; Shift count in a register or memory (non-immediate): microcoded,
+;; 2-cycle latency on the complex port.
+(define_insn_reservation "slm_sseishft_3" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseishft")
+ (not (match_operand 2 "immediate_operand"))))
+ "slm-complex, slm-all-eu")
+
+;; SSE integer multiply: 5 cycles on the FP-multiply pipe.
+(define_insn_reservation "slm_sseimul" 5
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "sseimul"))
+ "slm-fmul-5c")
+
+;; Scalar rcpss / rsqrtss (rcp attribute, SF mode): 4 cycles on the
+;; FP-multiply pipe.
+(define_insn_reservation "slm_sse" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sse")
+ (and (eq_attr "atom_sse_attr" "rcp") (eq_attr "mode" "SF"))))
+ "slm-fmul-4c")
+
+;; movshdup / movsldup (movdup attribute): single cycle on simple
+;; port 0.  (Original note suggested these could instead be typed
+;; sseishft.)
+(define_insn_reservation "slm_sse_2" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sse")
+ (eq_attr "atom_sse_attr" "movdup")))
+ "slm-simple-0")
+
+;; lfence: single cycle, either simple port.
+(define_insn_reservation "slm_sse_3" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sse")
+ (eq_attr "atom_sse_attr" "lfence")))
+ "slm-simple-either")
+
+;; sfence / mfence / clflush (fence attribute) and prefetch: single
+;; cycle on simple port 0.
+(define_insn_reservation "slm_sse_4" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sse")
+ (ior (eq_attr "atom_sse_attr" "fence")
+ (eq_attr "atom_sse_attr" "prefetch"))))
+ "slm-simple-0")
+
+;; sqrt, ldmxcsr (mxcsr attribute), and packed rcp in V4SF mode
+;; (rcpps -- the original comment also lists "rsqrtss", presumably
+;; meaning rsqrtps here; the condition only selects V4SF rcp ops):
+;; microcoded, 9-cycle latency, execution units busy for 7 cycles.
+(define_insn_reservation "slm_sse_5" 9
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sse")
+ (ior (ior (eq_attr "atom_sse_attr" "sqrt")
+ (eq_attr "atom_sse_attr" "mxcsr"))
+ (and (eq_attr "atom_sse_attr" "rcp")
+ (eq_attr "mode" "V4SF")))))
+ "slm-complex, slm-all-eu*7, nothing")
+
+;; xmm -> xmm register move: 1 cycle, either simple port.
+(define_insn_reservation "slm_ssemov" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemov")
+ (and (match_operand 0 "register_operand" "xy")
+ (match_operand 1 "register_operand" "xy"))))
+ "slm-simple-either")
+
+;; GPR -> xmm move: 1 cycle on simple port 0.
+(define_insn_reservation "slm_ssemov_2" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemov")
+ (and (match_operand 0 "register_operand" "xy")
+ (match_operand 1 "register_operand" "r"))))
+ "slm-simple-0")
+
+;; xmm -> GPR move: 3-cycle latency, simple port 0.
+(define_insn_reservation "slm_ssemov_3" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemov")
+ (and (match_operand 0 "register_operand" "r")
+ (match_operand 1 "register_operand" "xy"))))
+ "slm-simple-0, nothing*2")
+
+;; Aligned SSE move to/from memory (movu attribute 0): 1 cycle.
+(define_insn_reservation "slm_ssemov_4" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemov")
+ (and (eq_attr "movu" "0") (eq_attr "memory" "!none"))))
+ "slm-simple-0")
+
+;; Unaligned SSE move (movu attribute 1): 2 cycles.  NOTE(review):
+;; the "ior" also covers any memory ssemov, overlapping slm_ssemov_4
+;; above; this mirrors the equivalent atom.md pattern, where the
+;; earlier-defined reservation is presumed to take precedence -- verify
+;; against genattrtab behavior.
+(define_insn_reservation "slm_ssemov_5" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemov")
+ (ior (eq_attr "movu" "1") (eq_attr "memory" "!none"))))
+ "slm-simple-0, nothing")
+
+;; Simple SSE FP add, no memory operand (not V2DF mode, not the
+;; "complex" subclass): 3 cycles on the FP-add pipe.
+(define_insn_reservation "slm_sseadd" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseadd")
+ (and (eq_attr "memory" "none")
+ (and (eq_attr "mode" "!V2DF")
+ (eq_attr "atom_unit" "!complex")))))
+ "slm-fadd-3c")
+
+;; Same simple add with a memory operand; identical reservation.
+(define_insn_reservation "slm_sseadd_mem" 3
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseadd")
+ (and (eq_attr "memory" "!none")
+ (and (eq_attr "mode" "!V2DF")
+ (eq_attr "atom_unit" "!complex")))))
+ "slm-fadd-3c")
+
+;; maxps/minps, the *pd (V2DF) forms, and horizontal hadd/hsub
+;; ("complex" unit): 4 cycles on the FP-add pipe.
+(define_insn_reservation "slm_sseadd_3" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseadd")
+ (ior (eq_attr "mode" "V2DF") (eq_attr "atom_unit" "complex"))))
+ "slm-fadd-4c")
+
+;; SSE FP multiply, non-SF modes (mulsd/mulps/mulpd; dppd/dpps are not
+;; of this type): 5 cycles on the FP-multiply pipe.
+(define_insn_reservation "slm_ssemul" 5
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemul")
+ (eq_attr "mode" "!SF")))
+ "slm-fmul-5c")
+
+;; mulss (SF mode) is one cycle faster: 4 cycles on the FP-multiply
+;; pipe.  dppd/dpps again excluded by type.
+(define_insn_reservation "slm_ssemul_2" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssemul")
+ (eq_attr "mode" "SF")))
+ "slm-fmul-4c")
+
+;; SSE FP compare (cmpps/cmpss...): 1 cycle, either simple port.
+(define_insn_reservation "slm_ssecmp" 1
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "ssecmp"))
+ "slm-simple-either")
+
+;; comiss/ucomiss family (sets EFLAGS): 1 cycle, simple port 0 only.
+(define_insn_reservation "slm_ssecomi" 1
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "ssecomi"))
+ "slm-simple-0")
+
+;; cvtpi2ps / cvtps2pi / cvttps2pi, register-to-register (matched by the
+;; V2SI<->V4SF operand pairing): 5-cycle latency through the FP port.
+(define_insn_reservation "slm_ssecvt" 5
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssecvt")
+ (ior (and (match_operand:V2SI 0 "register_operand")
+ (match_operand:V4SF 1 "register_operand"))
+ (and (match_operand:V4SF 0 "register_operand")
+ (match_operand:V2SI 1 "register_operand")))))
+ "slm-fp-0, slm-feu-0, nothing*3")
+
+;; cvtpi2ps / cvtps2pi / cvttps2pi with a memory source: same 5-cycle
+;; reservation as the register form above.
+(define_insn_reservation "slm_ssecvt_mem" 5
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssecvt")
+ (ior (and (match_operand:V2SI 0 "register_operand")
+ (match_operand:V4SF 1 "memory_operand"))
+ (and (match_operand:V4SF 0 "register_operand")
+ (match_operand:V2SI 1 "memory_operand")))))
+ "slm-fp-0, slm-feu-0, nothing*3")
+
+;; cvtpd2pi / cvtpi2pd, register-to-register (V2SI<->V2DF pairing):
+;; 2-cycle latency.
+(define_insn_reservation "slm_ssecvt_1" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssecvt")
+ (ior (and (match_operand:V2DF 0 "register_operand")
+ (match_operand:V2SI 1 "register_operand"))
+ (and (match_operand:V2SI 0 "register_operand")
+ (match_operand:V2DF 1 "register_operand")))))
+ "slm-fp-0, slm-feu-0")
+
+;; cvtpd2pi / cvtpi2pd with a memory source: same 2-cycle reservation.
+(define_insn_reservation "slm_ssecvt_1_mem" 2
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssecvt")
+ (ior (and (match_operand:V2DF 0 "register_operand")
+ (match_operand:V2SI 1 "memory_operand"))
+ (and (match_operand:V2SI 0 "register_operand")
+ (match_operand:V2DF 1 "memory_operand")))))
+ "slm-fp-0, slm-feu-0")
+
+;; All remaining ssecvt forms (everything not matched by the
+;; V2SI<->V4SF pairings above): 4 cycles, an average figure for
+;; cvtss2sd.
+(define_insn_reservation "slm_ssecvt_3" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "ssecvt")
+ (not (ior (and (match_operand:V2SI 0 "register_operand")
+ (match_operand:V4SF 1 "nonimmediate_operand"))
+ (and (match_operand:V4SF 0 "register_operand")
+ (match_operand:V2SI 1 "nonimmediate_operand"))))))
+ "slm-fp-0, nothing*3")
+
+;; cvtsi2sd (SI source into V2DF destination): 1 cycle.  NOTE(review):
+;; the original comment says "memory", but the predicate is
+;; nonimmediate_operand, so register sources match here too -- confirm
+;; this intent against atom.md, which uses memory_operand.
+(define_insn_reservation "slm_sseicvt" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseicvt")
+ (and (match_operand:V2DF 0 "register_operand")
+ (match_operand:SI 1 "nonimmediate_operand"))))
+ "slm-fp-0")
+
+;; All other int<->FP conversions (e.g. cvtsd2si): 4 cycles, stated as
+;; an 8-cycle average in the original note.
+(define_insn_reservation "slm_sseicvt_2" 4
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "sseicvt")
+ (not (and (match_operand:V2DF 0 "register_operand")
+ (match_operand:SI 1 "memory_operand")))))
+ "slm-fp-0, nothing*3")
+
+;; SSE FP divide: long-latency (13 cycles), keeps the FP execution
+;; unit busy for 10 cycles (not fully pipelined).
+(define_insn_reservation "slm_ssediv" 13
+ (and (eq_attr "cpu" "slm")
+ (eq_attr "type" "ssediv"))
+ "slm-fp-0, slm-feu-0*10, nothing*2")
+
+;; x87 fmov, no memory operand: 1 cycle, either simple port.
+(define_insn_reservation "slm_fmov" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "none")))
+ "slm-simple-either")
+
+;; x87 fmov with a memory operand: same 1-cycle reservation (the
+;; memory split exists so the bypasses below can name this case).
+(define_insn_reservation "slm_fmov_mem" 1
+ (and (eq_attr "cpu" "slm")
+ (and (eq_attr "type" "fmov")
+ (eq_attr "memory" "!none")))
+ "slm-simple-either")
+
+;; Bypasses: override the default producer latency for specific
+;; producer/consumer reservation pairs.
+
+;; cmp/test to jcc is fused: no extra stall, so no bypass is needed.
+
+;; A flag producer feeding cmov or adc/sbb stalls one extra cycle
+;; (effective latency 2 instead of the producers' default 1).
+(define_bypass 2 "slm_icmp, slm_test, slm_alu, slm_alu_carry,
+ slm_alu1, slm_negnot, slm_incdec, slm_ishift,
+ slm_ishift1, slm_rotate, slm_rotate1"
+ "slm_icmov, slm_alu_carry")
+
+;; lea feeding a shift/rotate *source* operand stalls one cycle.  The
+;; guard excludes the case where the dependency is through the shift
+;; count (handled by the next bypass).
+(define_bypass 2 "slm_lea"
+ "slm_ishift, slm_ishift1, slm_rotate, slm_rotate1"
+ "!ix86_dep_by_shift_count")
+
+;; A non-lea ALU/move producer feeding a shift/rotate *count* operand
+;; stalls one cycle; ix86_dep_by_shift_count guards that the dependency
+;; really is through the count.
+(define_bypass 2 "slm_alu_carry,
+ slm_alu,slm_alu1,slm_negnot,slm_imov,slm_imovx,
+ slm_incdec,slm_ishift,slm_ishift1,slm_rotate,
+ slm_rotate1, slm_setcc, slm_icmov, slm_pop,
+ slm_alu_mem, slm_alu_carry_mem, slm_alu1_mem,
+ slm_imovx_mem, slm_imovx_2_mem,
+ slm_imov_mem, slm_icmov_mem, slm_fmov_mem"
+ "slm_ishift, slm_ishift1, slm_rotate, slm_rotate1,
+ slm_ishift_mem, slm_ishift1_mem,
+ slm_rotate_mem, slm_rotate1_mem"
+ "ix86_dep_by_shift_count")