author     Duncan Sands <baldrick@free.fr>    2008-09-30 10:00:30 +0000
committer  Duncan Sands <baldrick@free.fr>    2008-09-30 10:00:30 +0000
commit     46292be362c1b0bb0aecf737cd4dbb81fdd38586 (patch)
tree       4369c730990f493744a82d575670b0ddcf041092 /lib/CodeGen
parent     d41474d2c09a9fd0d1251554b920a783839b2787 (diff)
download   external_llvm-46292be362c1b0bb0aecf737cd4dbb81fdd38586.tar.gz
           external_llvm-46292be362c1b0bb0aecf737cd4dbb81fdd38586.tar.bz2
           external_llvm-46292be362c1b0bb0aecf737cd4dbb81fdd38586.zip
Revert commit 56835 since it breaks the build.
"If a re-materializable instruction has a register operand, the spiller will change the register operand's spill weight to HUGE_VAL to avoid it being spilled. However, if the operand is already in the queue ready to be spilled, avoid re-materializing it". git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@56837 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/LiveIntervalAnalysis.cpp      20
-rw-r--r--  lib/CodeGen/RegAllocLinearScan.cpp         5
-rw-r--r--  lib/CodeGen/SimpleRegisterCoalescing.cpp   3
3 files changed, 8 insertions, 20 deletions
diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index 4ed456bf11..bc66321b4a 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -819,7 +819,6 @@ bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI,
- SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
if (DisableReMat)
return false;
@@ -856,8 +855,8 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
// If the instruction accesses memory and the memory could be non-constant,
// assume the instruction is not rematerializable.
- for (std::list<MachineMemOperand>::const_iterator
- I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
+ for (std::list<MachineMemOperand>::const_iterator I = MI->memoperands_begin(),
+ E = MI->memoperands_end(); I != E; ++I) {
const MachineMemOperand &MMO = *I;
if (MMO.isVolatile() || MMO.isStore())
return false;
@@ -925,21 +924,13 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
return false;
}
-
- // If a register operand of the re-materialized instruction is going to
- // be spilled next, then it's not legal to re-materialize this instruction.
- for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
- if (ImpUse == SpillIs[i]->reg)
- return false;
}
return true;
}
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
-bool LiveIntervals::isReMaterializable(const LiveInterval &li,
- SmallVectorImpl<LiveInterval*> &SpillIs,
- bool &isLoad) {
+bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
isLoad = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
@@ -953,7 +944,7 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
bool DefIsLoad = false;
if (!ReMatDefMI ||
- !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
+ !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
return false;
isLoad |= DefIsLoad;
}
@@ -1737,7 +1728,6 @@ addIntervalsForSpillsFast(const LiveInterval &li,
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
- SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
float &SSWeight) {
@@ -1841,7 +1831,7 @@ addIntervalsForSpills(const LiveInterval &li,
MachineInstr *ReMatDefMI = (DefIdx == ~0u)
? 0 : getInstructionFromIndex(DefIdx);
bool dummy;
- if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
+ if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
// Remember how to remat the def of this val#.
ReMatOrigDefs[VN] = ReMatDefMI;
// Original def may be modified so we have to make a copy here.
diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp
index 7291e12bbe..d834031ae6 100644
--- a/lib/CodeGen/RegAllocLinearScan.cpp
+++ b/lib/CodeGen/RegAllocLinearScan.cpp
@@ -879,9 +879,8 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
if (cur->weight != HUGE_VALF && cur->weight <= minWeight) {
DOUT << "\t\t\tspilling(c): " << *cur << '\n';
float SSWeight;
- SmallVector<LiveInterval*, 8> spillIs;
std::vector<LiveInterval*> added =
- li_->addIntervalsForSpills(*cur, spillIs, loopInfo, *vrm_, SSWeight);
+ li_->addIntervalsForSpills(*cur, loopInfo, *vrm_, SSWeight);
addStackInterval(cur, ls_, li_, SSWeight, *vrm_);
if (added.empty())
return; // Early exit if all spills were folded.
@@ -932,7 +931,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
earliestStart = std::min(earliestStart, sli->beginNumber());
float SSWeight;
std::vector<LiveInterval*> newIs =
- li_->addIntervalsForSpills(*sli, spillIs, loopInfo, *vrm_, SSWeight);
+ li_->addIntervalsForSpills(*sli, loopInfo, *vrm_, SSWeight);
addStackInterval(sli, ls_, li_, SSWeight, *vrm_);
std::copy(newIs.begin(), newIs.end(), std::back_inserter(added));
spilled.insert(sli->reg);
diff --git a/lib/CodeGen/SimpleRegisterCoalescing.cpp b/lib/CodeGen/SimpleRegisterCoalescing.cpp
index 86065f069f..bdc37b16b6 100644
--- a/lib/CodeGen/SimpleRegisterCoalescing.cpp
+++ b/lib/CodeGen/SimpleRegisterCoalescing.cpp
@@ -2361,8 +2361,7 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
LI.weight = HUGE_VALF;
else {
bool isLoad = false;
- SmallVector<LiveInterval*, 4> SpillIs;
- if (li_->isReMaterializable(LI, SpillIs, isLoad)) {
+ if (li_->isReMaterializable(LI, isLoad)) {
// If all of the definitions of the interval are re-materializable,
// it is a preferred candidate for spilling. If non of the defs are
// loads, then it's potentially very cheap to re-materialize.