Diffstat (limited to 'include/llvm/CodeGen')
-rw-r--r--  include/llvm/CodeGen/AsmPrinter.h                   |  6
-rw-r--r--  include/llvm/CodeGen/FastISel.h                     | 22
-rw-r--r--  include/llvm/CodeGen/GCs.h                          |  6
-rw-r--r--  include/llvm/CodeGen/ISDOpcodes.h                   |  8
-rw-r--r--  include/llvm/CodeGen/LinkAllAsmWriterComponents.h   |  1
-rw-r--r--  include/llvm/CodeGen/LinkAllCodegenComponents.h     |  1
-rw-r--r--  include/llvm/CodeGen/LiveInterval.h                 |  9
-rw-r--r--  include/llvm/CodeGen/LiveIntervalAnalysis.h         |  3
-rw-r--r--  include/llvm/CodeGen/LiveRangeEdit.h                |  2
-rw-r--r--  include/llvm/CodeGen/LiveVariables.h                |  4
-rw-r--r--  include/llvm/CodeGen/MachineBasicBlock.h            |  5
-rw-r--r--  include/llvm/CodeGen/MachineBranchProbabilityInfo.h |  1
-rw-r--r--  include/llvm/CodeGen/MachineFunction.h              |  4
-rw-r--r--  include/llvm/CodeGen/MachineMemOperand.h            | 30
-rw-r--r--  include/llvm/CodeGen/MachineModuleInfo.h            |  2
-rw-r--r--  include/llvm/CodeGen/MachineRegisterInfo.h          |  6
-rw-r--r--  include/llvm/CodeGen/MachineScheduler.h             |  9
-rw-r--r--  include/llvm/CodeGen/MachineTraceMetrics.h          | 33
-rw-r--r--  include/llvm/CodeGen/Passes.h                       | 67
-rw-r--r--  include/llvm/CodeGen/RegAllocPBQP.h                 | 19
-rw-r--r--  include/llvm/CodeGen/RegisterScavenging.h           | 79
-rw-r--r--  include/llvm/CodeGen/ScheduleDAG.h                  | 30
-rw-r--r--  include/llvm/CodeGen/ScheduleDAGInstrs.h            |  7
-rw-r--r--  include/llvm/CodeGen/SelectionDAG.h                 | 31
-rw-r--r--  include/llvm/CodeGen/SelectionDAGISel.h             |  3
-rw-r--r--  include/llvm/CodeGen/SelectionDAGNodes.h            | 18
-rw-r--r--  include/llvm/CodeGen/SlotIndexes.h                  | 49
-rw-r--r--  include/llvm/CodeGen/TargetLoweringObjectFileImpl.h |  6
-rw-r--r--  include/llvm/CodeGen/ValueTypes.td                  |  4
29 files changed, 329 insertions, 136 deletions
diff --git a/include/llvm/CodeGen/AsmPrinter.h b/include/llvm/CodeGen/AsmPrinter.h
index e0a6e3f402..c2fd6ce367 100644
--- a/include/llvm/CodeGen/AsmPrinter.h
+++ b/include/llvm/CodeGen/AsmPrinter.h
@@ -25,6 +25,7 @@ namespace llvm {
class BlockAddress;
class GCStrategy;
class Constant;
+ class ConstantArray;
class GCMetadataPrinter;
class GlobalValue;
class GlobalVariable;
@@ -134,6 +135,9 @@ namespace llvm {
/// getDataLayout - Return information about data layout.
const DataLayout &getDataLayout() const;
+ /// getTargetTriple - Return the target triple string.
+ StringRef getTargetTriple() const;
+
/// getCurrentSection() - Return the current section we are emitting to.
const MCSection *getCurrentSection() const;
@@ -480,7 +484,7 @@ namespace llvm {
void EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB,
unsigned uid) const;
- void EmitLLVMUsedList(const Constant *List);
+ void EmitLLVMUsedList(const ConstantArray *InitList);
void EmitXXStructorList(const Constant *List, bool isCtor);
GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy *C);
};
diff --git a/include/llvm/CodeGen/FastISel.h b/include/llvm/CodeGen/FastISel.h
index 705db7e643..471e9bfc40 100644
--- a/include/llvm/CodeGen/FastISel.h
+++ b/include/llvm/CodeGen/FastISel.h
@@ -123,12 +123,28 @@ public:
/// index value.
std::pair<unsigned, bool> getRegForGEPIndex(const Value *V);
- /// TryToFoldLoad - The specified machine instr operand is a vreg, and that
+ /// \brief We're checking to see if we can fold \p LI into \p FoldInst.
+ /// Note that we could have a sequence where multiple LLVM IR instructions
+ /// are folded into the same machineinstr. For example we could have:
+ /// A: x = load i32 *P
+ /// B: y = icmp A, 42
+ /// C: br y, ...
+ ///
+ /// In this scenario, \p LI is "A", and \p FoldInst is "C". We know
+ /// about "B" (and any other folded instructions) because it is between
+ /// A and C.
+ ///
+ /// If we succeed folding, return true.
+ ///
+ bool tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst);
+
+ /// \brief The specified machine instr operand is a vreg, and that
/// vreg is being provided by the specified load instruction. If possible,
/// try to fold the load as an operand to the instruction, returning true if
/// possible.
- virtual bool TryToFoldLoad(MachineInstr * /*MI*/, unsigned /*OpNo*/,
- const LoadInst * /*LI*/) {
+ /// This method should be implemented by targets.
+ virtual bool tryToFoldLoadIntoMI(MachineInstr * /*MI*/, unsigned /*OpNo*/,
+ const LoadInst * /*LI*/) {
return false;
}
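Editor's sketch (not part of the patch) of how selection code might drive the new hook above: locate the single-use load feeding the instruction being selected and hand both to FastISel::tryToFoldLoad, which in turn defers to the target's tryToFoldLoadIntoMI override. The helper name and the single-use check are illustrative assumptions.

    #include "llvm/CodeGen/FastISel.h"
    #include "llvm/IR/Instructions.h"

    using namespace llvm;

    // Hypothetical helper: try to fold a load operand of FoldInst into the
    // machine instruction that will be emitted for it.
    static bool foldSingleUseLoad(FastISel &FastIS, const Instruction *FoldInst) {
      for (unsigned i = 0, e = FoldInst->getNumOperands(); i != e; ++i)
        if (const LoadInst *LI = dyn_cast<LoadInst>(FoldInst->getOperand(i)))
          if (LI->hasOneUse())
            // Delegates to the target's tryToFoldLoadIntoMI implementation.
            return FastIS.tryToFoldLoad(LI, FoldInst);
      return false;
    }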
diff --git a/include/llvm/CodeGen/GCs.h b/include/llvm/CodeGen/GCs.h
index c407b61674..456d2dcb51 100644
--- a/include/llvm/CodeGen/GCs.h
+++ b/include/llvm/CodeGen/GCs.h
@@ -26,6 +26,12 @@ namespace llvm {
/// Creates an ocaml-compatible metadata printer.
void linkOcamlGCPrinter();
+
+ /// Creates an erlang-compatible garbage collector.
+ void linkErlangGC();
+
+ /// Creates an erlang-compatible metadata printer.
+ void linkErlangGCPrinter();
/// Creates a shadow stack garbage collector. This collector requires no code
/// generator support.
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index 442729b5d7..0fd211b4a8 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -602,14 +602,6 @@ namespace ISD {
/// specifier.
PREFETCH,
- /// OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
- /// store-store, device)
- /// This corresponds to the memory.barrier intrinsic.
- /// it takes an input chain, 4 operands to specify the type of barrier, an
- /// operand specifying if the barrier applies to device and uncached memory
- /// and produces an output chain.
- MEMBARRIER,
-
/// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
/// This corresponds to the fence instruction. It takes an input chain, and
/// two integer constants: an AtomicOrdering and a SynchronizationScope.
diff --git a/include/llvm/CodeGen/LinkAllAsmWriterComponents.h b/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
index 4b941730a2..722b0de6f7 100644
--- a/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
+++ b/include/llvm/CodeGen/LinkAllAsmWriterComponents.h
@@ -30,6 +30,7 @@ namespace {
return;
llvm::linkOcamlGCPrinter();
+ llvm::linkErlangGCPrinter();
}
} ForceAsmWriterLinking; // Force link by creating a global definition.
diff --git a/include/llvm/CodeGen/LinkAllCodegenComponents.h b/include/llvm/CodeGen/LinkAllCodegenComponents.h
index 2145eb80bd..916c0f233e 100644
--- a/include/llvm/CodeGen/LinkAllCodegenComponents.h
+++ b/include/llvm/CodeGen/LinkAllCodegenComponents.h
@@ -37,6 +37,7 @@ namespace {
(void) llvm::createDefaultPBQPRegisterAllocator();
llvm::linkOcamlGC();
+ llvm::linkErlangGC();
llvm::linkShadowStackGC();
(void) llvm::createBURRListDAGScheduler(NULL, llvm::CodeGenOpt::Default);
diff --git a/include/llvm/CodeGen/LiveInterval.h b/include/llvm/CodeGen/LiveInterval.h
index 244be9c501..cb09a49666 100644
--- a/include/llvm/CodeGen/LiveInterval.h
+++ b/include/llvm/CodeGen/LiveInterval.h
@@ -399,6 +399,15 @@ namespace llvm {
return r != end() && r->containsRange(Start, End);
}
+ /// True iff this live range is a single segment that lies between the
+ /// specified boundaries, exclusively. Vregs live across a backedge are not
+ /// considered local. The boundaries are expected to lie within an extended
+ /// basic block, so vregs that are not live out should contain no holes.
+ bool isLocal(SlotIndex Start, SlotIndex End) const {
+ return beginIndex() > Start.getBaseIndex() &&
+ endIndex() < End.getBoundaryIndex();
+ }
+
/// removeRange - Remove the specified range from this interval. Note that
/// the range must be a single LiveRange in its entirety.
void removeRange(SlotIndex Start, SlotIndex End,
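A hedged usage sketch for the new LiveInterval::isLocal predicate above; the helper and the notion of a "region" are illustrative, not from the patch. The point is that a pass working within one scheduling region can treat a vreg specially only when its entire live range sits strictly inside that region.

    #include "llvm/CodeGen/LiveInterval.h"
    #include "llvm/CodeGen/LiveIntervalAnalysis.h"

    using namespace llvm;

    // Hypothetical query: is VReg live only inside [RegionBegin, RegionEnd)?
    static bool isRegionLocal(LiveIntervals &LIS, unsigned VReg,
                              SlotIndex RegionBegin, SlotIndex RegionEnd) {
      // Rejects vregs that are live across a backedge or out of the region.
      return LIS.getInterval(VReg).isLocal(RegionBegin, RegionEnd);
    }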
diff --git a/include/llvm/CodeGen/LiveIntervalAnalysis.h b/include/llvm/CodeGen/LiveIntervalAnalysis.h
index 4632368157..7d72f37255 100644
--- a/include/llvm/CodeGen/LiveIntervalAnalysis.h
+++ b/include/llvm/CodeGen/LiveIntervalAnalysis.h
@@ -20,9 +20,7 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/IndexedMap.h"
-#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
@@ -36,6 +34,7 @@
namespace llvm {
class AliasAnalysis;
+ class BitVector;
class LiveRangeCalc;
class LiveVariables;
class MachineDominatorTree;
diff --git a/include/llvm/CodeGen/LiveRangeEdit.h b/include/llvm/CodeGen/LiveRangeEdit.h
index def7b00ce7..8a32a3c11a 100644
--- a/include/llvm/CodeGen/LiveRangeEdit.h
+++ b/include/llvm/CodeGen/LiveRangeEdit.h
@@ -83,7 +83,7 @@ private:
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
- SlotIndex UseIdx);
+ SlotIndex UseIdx) const;
/// foldAsLoad - If LI has a single use and a single def that can be folded as
/// a load, eliminate the register by folding the def into the use.
diff --git a/include/llvm/CodeGen/LiveVariables.h b/include/llvm/CodeGen/LiveVariables.h
index 3c5ed9297e..6628fd278e 100644
--- a/include/llvm/CodeGen/LiveVariables.h
+++ b/include/llvm/CodeGen/LiveVariables.h
@@ -29,21 +29,19 @@
#ifndef LLVM_CODEGEN_LIVEVARIABLES_H
#define LLVM_CODEGEN_LIVEVARIABLES_H
-#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
-#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/Target/TargetRegisterInfo.h"
namespace llvm {
+class MachineBasicBlock;
class MachineRegisterInfo;
-class TargetRegisterInfo;
class LiveVariables : public MachineFunctionPass {
public:
diff --git a/include/llvm/CodeGen/MachineBasicBlock.h b/include/llvm/CodeGen/MachineBasicBlock.h
index 492a3ff49f..0f2f8746b3 100644
--- a/include/llvm/CodeGen/MachineBasicBlock.h
+++ b/include/llvm/CodeGen/MachineBasicBlock.h
@@ -71,7 +71,6 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
std::vector<MachineBasicBlock *> Predecessors;
std::vector<MachineBasicBlock *> Successors;
-
/// Weights - Keep track of the weights to the successors. This vector
/// has the same order as Successors, or it is empty if we don't use it
/// (disable optimization).
@@ -96,6 +95,10 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
/// target of an indirect branch.
bool AddressTaken;
+ /// \brief since getSymbol is a relatively heavy-weight operation, the symbol
+ /// is only computed once and is cached.
+ mutable MCSymbol *CachedMCSymbol;
+
// Intrusive list support
MachineBasicBlock() {}
diff --git a/include/llvm/CodeGen/MachineBranchProbabilityInfo.h b/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
index 1c9bdd954c..98dd03b45c 100644
--- a/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
+++ b/include/llvm/CodeGen/MachineBranchProbabilityInfo.h
@@ -1,4 +1,3 @@
-
//==- MachineBranchProbabilityInfo.h - Machine Branch Probability Analysis -==//
//
// The LLVM Compiler Infrastructure
diff --git a/include/llvm/CodeGen/MachineFunction.h b/include/llvm/CodeGen/MachineFunction.h
index 82c4cd6598..c886e256e0 100644
--- a/include/llvm/CodeGen/MachineFunction.h
+++ b/include/llvm/CodeGen/MachineFunction.h
@@ -352,8 +352,8 @@ public:
// Internal functions used to automatically number MachineBasicBlocks
//
- /// getNextMBBNumber - Returns the next unique number to be assigned
- /// to a MachineBasicBlock in this MachineFunction.
+ /// \brief Adds the MBB to the internal numbering. Returns the unique number
+ /// assigned to the MBB.
///
unsigned addToMBBNumbering(MachineBasicBlock *MBB) {
MBBNumbering.push_back(MBB);
diff --git a/include/llvm/CodeGen/MachineMemOperand.h b/include/llvm/CodeGen/MachineMemOperand.h
index ddb127120f..00a55b57f3 100644
--- a/include/llvm/CodeGen/MachineMemOperand.h
+++ b/include/llvm/CodeGen/MachineMemOperand.h
@@ -34,22 +34,22 @@ struct MachinePointerInfo {
/// If this is null, then the access is to a pointer in the default address
/// space.
const Value *V;
-
+
/// Offset - This is an offset from the base Value*.
int64_t Offset;
-
+
explicit MachinePointerInfo(const Value *v = 0, int64_t offset = 0)
: V(v), Offset(offset) {}
-
+
MachinePointerInfo getWithOffset(int64_t O) const {
if (V == 0) return MachinePointerInfo(0, 0);
return MachinePointerInfo(V, Offset+O);
}
-
+
/// getAddrSpace - Return the LLVM IR address space number that this pointer
/// points into.
unsigned getAddrSpace() const;
-
+
/// getConstantPool - Return a MachinePointerInfo record that refers to the
/// constant pool.
static MachinePointerInfo getConstantPool();
@@ -57,20 +57,20 @@ struct MachinePointerInfo {
/// getFixedStack - Return a MachinePointerInfo record that refers to the
/// the specified FrameIndex.
static MachinePointerInfo getFixedStack(int FI, int64_t offset = 0);
-
+
/// getJumpTable - Return a MachinePointerInfo record that refers to a
/// jump table entry.
static MachinePointerInfo getJumpTable();
-
+
/// getGOT - Return a MachinePointerInfo record that refers to a
/// GOT entry.
static MachinePointerInfo getGOT();
-
+
/// getStack - stack pointer relative access.
static MachinePointerInfo getStack(int64_t Offset);
};
-
-
+
+
//===----------------------------------------------------------------------===//
/// MachineMemOperand - A description of a memory reference used in the backend.
/// Instead of holding a StoreInst or LoadInst, this class holds the address
@@ -99,8 +99,11 @@ public:
MONonTemporal = 8,
/// The memory access is invariant.
MOInvariant = 16,
+ // Target hints allow target passes to annotate memory operations.
+ MOTargetStartBit = 5,
+ MOTargetNumBits = 3,
// This is the number of bits we need to represent flags.
- MOMaxBits = 5
+ MOMaxBits = 8
};
/// MachineMemOperand - Construct an MachineMemOperand object with the
@@ -110,7 +113,7 @@ public:
const MDNode *Ranges = 0);
const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }
-
+
/// getValue - Return the base address of the memory access. This may either
/// be a normal LLVM IR Value, or one of the special values used in CodeGen.
/// Special values are those obtained via
@@ -123,6 +126,9 @@ public:
/// getFlags - Return the raw flags of the source value, \see MemOperandFlags.
unsigned int getFlags() const { return Flags & ((1 << MOMaxBits) - 1); }
+ /// Bitwise OR the current flags with the given flags.
+ void setFlags(unsigned f) { Flags |= (f & ((1 << MOMaxBits) - 1)); }
+
/// getOffset - For normal values, this is a byte offset added to the base
/// address. For PseudoSourceValue::FPRel values, this is the FrameIndex
/// number.
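A sketch of the flag layout introduced above, using a hypothetical target hint: the generic MO* flags occupy bits 0-4, bits 5-7 (MOTargetStartBit/MOTargetNumBits) are left to target passes, and setFlags() ORs new bits in without disturbing the existing ones.

    #include <cassert>
    #include "llvm/CodeGen/MachineMemOperand.h"

    using namespace llvm;

    // Hypothetical target-pass helpers: stash a 3-bit hint in the target bits.
    static void setTargetHint(MachineMemOperand *MMO, unsigned Hint) {
      assert(Hint < (1u << MachineMemOperand::MOTargetNumBits) &&
             "hint does not fit in the target flag bits");
      MMO->setFlags(Hint << MachineMemOperand::MOTargetStartBit);
    }

    static unsigned getTargetHint(const MachineMemOperand *MMO) {
      unsigned Mask = (1u << MachineMemOperand::MOTargetNumBits) - 1;
      return (MMO->getFlags() >> MachineMemOperand::MOTargetStartBit) & Mask;
    }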
diff --git a/include/llvm/CodeGen/MachineModuleInfo.h b/include/llvm/CodeGen/MachineModuleInfo.h
index 1db6252ef4..a3acec8095 100644
--- a/include/llvm/CodeGen/MachineModuleInfo.h
+++ b/include/llvm/CodeGen/MachineModuleInfo.h
@@ -295,7 +295,7 @@ public:
/// isUsedFunction - Return true if the functions in the llvm.used list. This
/// does not return true for things in llvm.compiler.used unless they are also
/// in llvm.used.
- bool isUsedFunction(const Function *F) {
+ bool isUsedFunction(const Function *F) const {
return UsedFunctions.count(F);
}
diff --git a/include/llvm/CodeGen/MachineRegisterInfo.h b/include/llvm/CodeGen/MachineRegisterInfo.h
index 4b43cc1095..24ba7bb1ac 100644
--- a/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -157,6 +157,12 @@ public:
// Strictly for use by MachineInstr.cpp.
void moveOperands(MachineOperand *Dst, MachineOperand *Src, unsigned NumOps);
+ /// Verify the sanity of the use list for Reg.
+ void verifyUseList(unsigned Reg) const;
+
+ /// Verify the use list of all registers.
+ void verifyUseLists() const;
+
/// reg_begin/reg_end - Provide iteration support to walk over all definitions
/// and uses of a register within the MachineFunction that corresponds to this
/// MachineRegisterInfo object.
diff --git a/include/llvm/CodeGen/MachineScheduler.h b/include/llvm/CodeGen/MachineScheduler.h
index 57febe7746..769e4b42a5 100644
--- a/include/llvm/CodeGen/MachineScheduler.h
+++ b/include/llvm/CodeGen/MachineScheduler.h
@@ -274,6 +274,10 @@ public:
Mutations.push_back(Mutation);
}
+ /// \brief True if an edge can be added from PredSU to SuccSU without creating
+ /// a cycle.
+ bool canAddEdge(SUnit *SuccSU, SUnit *PredSU);
+
/// \brief Add a DAG edge to the given SU with the given predecessor
/// dependence data.
///
@@ -297,6 +301,10 @@ public:
/// reorderable instructions.
virtual void schedule();
+ /// Change the position of an instruction within the basic block and update
+ /// live ranges and region boundary iterators.
+ void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
+
/// Get current register pressure for the top scheduled instructions.
const IntervalPressure &getTopPressure() const { return TopPressure; }
const RegPressureTracker &getTopRPTracker() const { return TopRPTracker; }
@@ -362,7 +370,6 @@ protected:
void updateScheduledPressure(const std::vector<unsigned> &NewMaxPressure);
- void moveInstruction(MachineInstr *MI, MachineBasicBlock::iterator InsertPos);
bool checkSchedLimit();
void findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
diff --git a/include/llvm/CodeGen/MachineTraceMetrics.h b/include/llvm/CodeGen/MachineTraceMetrics.h
index eaaa70a67d..4e087fc62d 100644
--- a/include/llvm/CodeGen/MachineTraceMetrics.h
+++ b/include/llvm/CodeGen/MachineTraceMetrics.h
@@ -107,6 +107,13 @@ public:
/// Get the fixed resource information about MBB. Compute it on demand.
const FixedBlockInfo *getResources(const MachineBasicBlock*);
+ /// Get the scaled number of cycles used per processor resource in MBB.
+ /// This is an array with SchedModel.getNumProcResourceKinds() entries.
+ /// The getResources() function above must have been called first.
+ ///
+ /// These numbers have already been scaled by SchedModel.getResourceFactor().
+ ArrayRef<unsigned> getProcResourceCycles(unsigned MBBNum) const;
+
/// A virtual register or regunit required by a basic block or its trace
/// successors.
struct LiveInReg {
@@ -253,9 +260,13 @@ public:
/// independent, exposing the maximum instruction-level parallelism.
///
/// Any blocks in Extrablocks are included as if they were part of the
- /// trace.
+ /// trace. Likewise, extra resources required by the specified scheduling
+ /// classes are included. For the caller to account for extra machine
+ /// instructions, it must first resolve each instruction's scheduling class.
unsigned getResourceLength(ArrayRef<const MachineBasicBlock*> Extrablocks =
- ArrayRef<const MachineBasicBlock*>()) const;
+ ArrayRef<const MachineBasicBlock*>(),
+ ArrayRef<const MCSchedClassDesc*> ExtraInstrs =
+ ArrayRef<const MCSchedClassDesc*>()) const;
/// Return the length of the (data dependency) critical path through the
/// trace.
@@ -284,6 +295,8 @@ public:
class Ensemble {
SmallVector<TraceBlockInfo, 4> BlockInfo;
DenseMap<const MachineInstr*, InstrCycles> Cycles;
+ SmallVector<unsigned, 0> ProcResourceDepths;
+ SmallVector<unsigned, 0> ProcResourceHeights;
friend class Trace;
void computeTrace(const MachineBasicBlock*);
@@ -303,6 +316,8 @@ public:
const MachineLoop *getLoopFor(const MachineBasicBlock*) const;
const TraceBlockInfo *getDepthResources(const MachineBasicBlock*) const;
const TraceBlockInfo *getHeightResources(const MachineBasicBlock*) const;
+ ArrayRef<unsigned> getProcResourceDepths(unsigned MBBNum) const;
+ ArrayRef<unsigned> getProcResourceHeights(unsigned MBBNum) const;
public:
virtual ~Ensemble();
@@ -343,8 +358,22 @@ private:
// One entry per basic block, indexed by block number.
SmallVector<FixedBlockInfo, 4> BlockInfo;
+ // Cycles consumed on each processor resource per block.
+ // The number of processor resource kinds is constant for a given subtarget,
+ // but it is not known at compile time. The number of cycles consumed by
+ // block B on processor resource R is at ProcResourceCycles[B*Kinds + R]
+ // where Kinds = SchedModel.getNumProcResourceKinds().
+ SmallVector<unsigned, 0> ProcResourceCycles;
+
// One ensemble per strategy.
Ensemble* Ensembles[TS_NumStrategies];
+
+ // Convert scaled resource usage to a cycle count that can be compared with
+ // latencies.
+ unsigned getCycles(unsigned Scaled) {
+ unsigned Factor = SchedModel.getLatencyFactor();
+ return (Scaled + Factor - 1) / Factor;
+ }
};
inline raw_ostream &operator<<(raw_ostream &OS,
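A standalone sketch (plain C++, not LLVM code) of the flattened per-block resource table described in the comments above: the cycle count for block B and processor resource R lives at index B*Kinds+R, and the private getCycles() helper rounds scaled usage up to whole cycles.

    #include <cassert>
    #include <vector>

    struct ProcResourceTable {
      unsigned Kinds;               // SchedModel.getNumProcResourceKinds()
      unsigned LatencyFactor;       // SchedModel.getLatencyFactor()
      std::vector<unsigned> Cycles; // NumBlocks * Kinds entries, pre-scaled

      unsigned &at(unsigned Block, unsigned Resource) {
        assert(Resource < Kinds && "unknown processor resource kind");
        return Cycles[Block * Kinds + Resource];
      }

      // Same rounding as MachineTraceMetrics::getCycles above.
      unsigned toCycles(unsigned Scaled) const {
        return (Scaled + LatencyFactor - 1) / LatencyFactor;
      }
    };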
diff --git a/include/llvm/CodeGen/Passes.h b/include/llvm/CodeGen/Passes.h
index 4d559b5799..b02f63e70b 100644
--- a/include/llvm/CodeGen/Passes.h
+++ b/include/llvm/CodeGen/Passes.h
@@ -35,6 +35,48 @@ namespace llvm {
class PassConfigImpl;
+/// Discriminated union of Pass ID types.
+///
+/// The PassConfig API prefers dealing with IDs because they are safer and more
+/// efficient. IDs decouple configuration from instantiation. This way, when a
+/// pass is overridden, it isn't unnecessarily instantiated. It is also unsafe to
+/// refer to a Pass pointer after adding it to a pass manager, which deletes
+/// redundant pass instances.
+///
+/// However, it is convenient to directly instantiate target passes with
+/// non-default ctors. These often don't have a registered PassInfo. Rather than
+/// force all target passes to implement the pass registry boilerplate, allow
+/// the PassConfig API to handle either type.
+///
+/// AnalysisID is sadly char*, so PointerIntPair won't work.
+class IdentifyingPassPtr {
+ union {
+ AnalysisID ID;
+ Pass *P;
+ };
+ bool IsInstance;
+public:
+ IdentifyingPassPtr() : P(0), IsInstance(false) {}
+ IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr), IsInstance(false) {}
+ IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
+
+ bool isValid() const { return P; }
+ bool isInstance() const { return IsInstance; }
+
+ AnalysisID getID() const {
+ assert(!IsInstance && "Not a Pass ID");
+ return ID;
+ }
+ Pass *getInstance() const {
+ assert(IsInstance && "Not a Pass Instance");
+ return P;
+ }
+};
+
+template <> struct isPodLike<IdentifyingPassPtr> {
+ static const bool value = true;
+};
+
/// Target-Independent Code Generator Pass Configuration Options.
///
/// This is an ImmutablePass solely for the purpose of exposing CodeGen options
@@ -117,20 +159,22 @@ public:
/// Allow the target to override a specific pass without overriding the pass
/// pipeline. When passes are added to the standard pipeline at the
/// point where StandardID is expected, add TargetID in its place.
- void substitutePass(AnalysisID StandardID, AnalysisID TargetID);
+ void substitutePass(AnalysisID StandardID, IdentifyingPassPtr TargetID);
/// Insert InsertedPassID pass after TargetPassID pass.
- void insertPass(AnalysisID TargetPassID, AnalysisID InsertedPassID);
+ void insertPass(AnalysisID TargetPassID, IdentifyingPassPtr InsertedPassID);
/// Allow the target to enable a specific standard pass by default.
void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
/// Allow the target to disable a specific standard pass by default.
- void disablePass(AnalysisID PassID) { substitutePass(PassID, 0); }
+ void disablePass(AnalysisID PassID) {
+ substitutePass(PassID, IdentifyingPassPtr());
+ }
/// Return the pass substituted for StandardID by the target.
/// If no substitution exists, return StandardID.
- AnalysisID getPassSubstitution(AnalysisID StandardID) const;
+ IdentifyingPassPtr getPassSubstitution(AnalysisID StandardID) const;
/// Return true if the optimized regalloc pipeline is enabled.
bool getOptimizeRegAlloc() const;
@@ -222,17 +266,6 @@ protected:
return false;
}
- /// addFinalizeRegAlloc - This method may be implemented by targets that want
- /// to run passes within the regalloc pipeline, immediately after the register
- /// allocation pass itself. These passes run as soon as virtual regisiters
- /// have been rewritten to physical registers but before and other postRA
- /// optimization happens. Targets that have marked instructions for bundling
- /// must have finalized those bundles by the time these passes have run,
- /// because subsequent passes are not guaranteed to be bundle-aware.
- virtual bool addFinalizeRegAlloc() {
- return false;
- }
-
/// addPostRegAlloc - This method may be implemented by targets that want to
/// run passes after register allocation pass pipeline but before
/// prolog-epilog insertion. This should return true if -print-machineinstrs
@@ -444,10 +477,6 @@ namespace llvm {
/// information.
extern char &MachineBlockPlacementStatsID;
- /// Code Placement - This pass optimize code placement and aligns loop
- /// headers to target specific alignment boundary.
- extern char &CodePlacementOptID;
-
/// GCLowering Pass - Performs target-independent LLVM IR transformations for
/// highly portable strategies.
///
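A hedged sketch of what the IdentifyingPassPtr change above enables in a target's TargetPassConfig subclass: standard passes can now be substituted either by registered ID (nothing is instantiated unless the pass actually runs) or by a directly constructed instance (useful for non-default constructors). The MyTarget names and factory are hypothetical.

    #include "llvm/CodeGen/Passes.h"

    using namespace llvm;

    extern char MyTargetPostRASchedID;                // hypothetical pass ID
    FunctionPass *createMyTargetSchedPass(bool Opt);  // hypothetical factory

    namespace {
    class MyTargetPassConfig : public TargetPassConfig {
    public:
      MyTargetPassConfig(TargetMachine *TM, PassManagerBase &PM)
          : TargetPassConfig(TM, PM) {
        // Substitute by ID: configuration stays decoupled from instantiation.
        substitutePass(&PostRASchedulerID, &MyTargetPostRASchedID);
        // Insert by instance: the non-default ctor argument is preserved.
        insertPass(&MachineSchedulerID, createMyTargetSchedPass(true));
      }
    };
    } // end anonymous namespace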
diff --git a/include/llvm/CodeGen/RegAllocPBQP.h b/include/llvm/CodeGen/RegAllocPBQP.h
index b617c14558..8b8e3d90f7 100644
--- a/include/llvm/CodeGen/RegAllocPBQP.h
+++ b/include/llvm/CodeGen/RegAllocPBQP.h
@@ -29,6 +29,7 @@ namespace llvm {
class MachineFunction;
class MachineLoopInfo;
class TargetRegisterInfo;
+ template<class T> class OwningPtr;
/// This class wraps up a PBQP instance representing a register allocation
/// problem, plus the structures necessary to map back from the PBQP solution
@@ -123,11 +124,9 @@ namespace llvm {
/// Build a PBQP instance to represent the register allocation problem for
/// the given MachineFunction.
- virtual std::auto_ptr<PBQPRAProblem> build(
- MachineFunction *mf,
- const LiveIntervals *lis,
- const MachineLoopInfo *loopInfo,
- const RegSet &vregs);
+ virtual PBQPRAProblem *build(MachineFunction *mf, const LiveIntervals *lis,
+ const MachineLoopInfo *loopInfo,
+ const RegSet &vregs);
private:
void addSpillCosts(PBQP::Vector &costVec, PBQP::PBQPNum spillCost);
@@ -144,11 +143,9 @@ namespace llvm {
/// Build a PBQP instance to represent the register allocation problem for
/// the given MachineFunction.
- virtual std::auto_ptr<PBQPRAProblem> build(
- MachineFunction *mf,
- const LiveIntervals *lis,
- const MachineLoopInfo *loopInfo,
- const RegSet &vregs);
+ virtual PBQPRAProblem *build(MachineFunction *mf, const LiveIntervals *lis,
+ const MachineLoopInfo *loopInfo,
+ const RegSet &vregs);
private:
@@ -161,7 +158,7 @@ namespace llvm {
PBQP::PBQPNum benefit);
};
- FunctionPass* createPBQPRegisterAllocator(std::auto_ptr<PBQPBuilder> builder,
+ FunctionPass* createPBQPRegisterAllocator(OwningPtr<PBQPBuilder> &builder,
char *customPassID=0);
}
diff --git a/include/llvm/CodeGen/RegisterScavenging.h b/include/llvm/CodeGen/RegisterScavenging.h
index 01199205b5..95bf29167c 100644
--- a/include/llvm/CodeGen/RegisterScavenging.h
+++ b/include/llvm/CodeGen/RegisterScavenging.h
@@ -40,21 +40,23 @@ class RegScavenger {
/// registers.
bool Tracking;
- /// ScavengingFrameIndex - Special spill slot used for scavenging a register
- /// post register allocation.
- int ScavengingFrameIndex;
+ /// Information on scavenged registers (held in a spill slot).
+ struct ScavengedInfo {
+ ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(NULL) {}
- /// ScavengedReg - If none zero, the specific register is currently being
- /// scavenged. That is, it is spilled to the special scavenging stack slot.
- unsigned ScavengedReg;
+ /// A spill slot used for scavenging a register post register allocation.
+ int FrameIndex;
- /// ScavengedRC - Register class of the scavenged register.
- ///
- const TargetRegisterClass *ScavengedRC;
+ /// If non-zero, the specific register is currently being
+ /// scavenged. That is, it is spilled to this scavenging stack slot.
+ unsigned Reg;
+
+ /// The instruction that restores the scavenged register from stack.
+ const MachineInstr *Restore;
+ };
- /// ScavengeRestore - Instruction that restores the scavenged register from
- /// stack.
- const MachineInstr *ScavengeRestore;
+ /// A vector of information on scavenged registers.
+ SmallVector<ScavengedInfo, 2> Scavenged;
/// CalleeSavedrRegs - A bitvector of callee saved registers for the target.
///
@@ -71,8 +73,7 @@ class RegScavenger {
public:
RegScavenger()
- : MBB(NULL), NumPhysRegs(0), Tracking(false),
- ScavengingFrameIndex(-1), ScavengedReg(0), ScavengedRC(NULL) {}
+ : MBB(NULL), NumPhysRegs(0), Tracking(false) {}
/// enterBasicBlock - Start tracking liveness from the begin of the specific
/// basic block.
@@ -92,9 +93,25 @@ public:
while (MBBI != I) forward();
}
+ /// Invert the behavior of forward() on the current instruction (undo the
+ /// changes to the available registers made by forward()).
+ void unprocess();
+
+ /// Unprocess instructions until you reach the provided iterator.
+ void unprocess(MachineBasicBlock::iterator I) {
+ while (MBBI != I) unprocess();
+ }
+
/// skipTo - Move the internal MBB iterator but do not update register states.
- ///
- void skipTo(MachineBasicBlock::iterator I) { MBBI = I; }
+ void skipTo(MachineBasicBlock::iterator I) {
+ if (I == MachineBasicBlock::iterator(NULL))
+ Tracking = false;
+ MBBI = I;
+ }
+
+ MachineBasicBlock::iterator getCurrentPosition() const {
+ return MBBI;
+ }
/// getRegsUsed - return all registers currently in use in used.
void getRegsUsed(BitVector &used, bool includeReserved);
@@ -107,10 +124,28 @@ public:
/// Return 0 if none is found.
unsigned FindUnusedReg(const TargetRegisterClass *RegClass) const;
- /// setScavengingFrameIndex / getScavengingFrameIndex - accessor and setter of
- /// ScavengingFrameIndex.
- void setScavengingFrameIndex(int FI) { ScavengingFrameIndex = FI; }
- int getScavengingFrameIndex() const { return ScavengingFrameIndex; }
+ /// Add a scavenging frame index.
+ void addScavengingFrameIndex(int FI) {
+ Scavenged.push_back(ScavengedInfo(FI));
+ }
+
+ /// Query whether a frame index is a scavenging frame index.
+ bool isScavengingFrameIndex(int FI) const {
+ for (SmallVector<ScavengedInfo, 2>::const_iterator I = Scavenged.begin(),
+ IE = Scavenged.end(); I != IE; ++I)
+ if (I->FrameIndex == FI)
+ return true;
+
+ return false;
+ }
+
+ /// Get an array of scavenging frame indices.
+ void getScavengingFrameIndices(SmallVectorImpl<int> &A) const {
+ for (SmallVector<ScavengedInfo, 2>::const_iterator I = Scavenged.begin(),
+ IE = Scavenged.end(); I != IE; ++I)
+ if (I->FrameIndex >= 0)
+ A.push_back(I->FrameIndex);
+ }
/// scavengeRegister - Make a register of the specific register class
/// available and do the appropriate bookkeeping. SPAdj is the stack
@@ -149,6 +184,10 @@ private:
RegsAvailable |= Regs;
}
+ /// Processes the current instruction and fill the KillRegs and DefRegs bit
+ /// vectors.
+ void determineKillsAndDefs();
+
/// Add Reg and all its sub-registers to BV.
void addRegWithSubRegs(BitVector &BV, unsigned Reg);
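A hedged sketch of how frame lowering might use the new multi-slot interface above (the helper and slot parameters are illustrative): each emergency spill slot is registered with addScavengingFrameIndex, and other spill-slot placement code can use isScavengingFrameIndex or getScavengingFrameIndices to avoid reusing them.

    #include "llvm/CodeGen/MachineFrameInfo.h"
    #include "llvm/CodeGen/RegisterScavenging.h"

    using namespace llvm;

    // Hypothetical helper: create NumSlots emergency slots for the scavenger.
    static void addEmergencySlots(MachineFrameInfo &MFI, RegScavenger &RS,
                                  unsigned NumSlots, uint64_t Size,
                                  unsigned Align) {
      for (unsigned i = 0; i != NumSlots; ++i)
        RS.addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
    }

    static bool isReservedForScavenging(const RegScavenger &RS, int FI) {
      // Such slots must not be reused for ordinary spills.
      return RS.isScavengingFrameIndex(FI);
    }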
diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h
index 8c959da696..7cff27e172 100644
--- a/include/llvm/CodeGen/ScheduleDAG.h
+++ b/include/llvm/CodeGen/ScheduleDAG.h
@@ -302,6 +302,7 @@ namespace llvm {
bool isCallOp : 1; // Is a function call operand.
bool isTwoAddress : 1; // Is a two-address instruction.
bool isCommutable : 1; // Is a commutable instruction.
+ bool hasPhysRegUses : 1; // Has physreg uses.
bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
bool isPending : 1; // True once pending.
@@ -331,10 +332,10 @@ namespace llvm {
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
- isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
- hasPhysRegClobbers(false), isPending(false), isAvailable(false),
- isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
- isCloned(false), SchedulingPref(Sched::None),
+ isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
+ hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
+ isAvailable(false), isScheduled(false), isScheduleHigh(false),
+ isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
@@ -345,10 +346,10 @@ namespace llvm {
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
- isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
- hasPhysRegClobbers(false), isPending(false), isAvailable(false),
- isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
- isCloned(false), SchedulingPref(Sched::None),
+ isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
+ hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
+ isAvailable(false), isScheduled(false), isScheduleHigh(false),
+ isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
@@ -358,10 +359,10 @@ namespace llvm {
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
- isTwoAddress(false), isCommutable(false), hasPhysRegDefs(false),
- hasPhysRegClobbers(false), isPending(false), isAvailable(false),
- isScheduled(false), isScheduleHigh(false), isScheduleLow(false),
- isCloned(false), SchedulingPref(Sched::None),
+ isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
+ hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
+ isAvailable(false), isScheduled(false), isScheduleHigh(false),
+ isScheduleLow(false), isCloned(false), SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
@@ -726,9 +727,8 @@ namespace llvm {
/// IsReachable - Checks if SU is reachable from TargetSU.
bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
- /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
- /// will create a cycle.
- bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
+ /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
+ bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
/// AddPred - Updates the topological ordering to accommodate an edge
/// to be added from SUnit X to SUnit Y.
diff --git a/include/llvm/CodeGen/ScheduleDAGInstrs.h b/include/llvm/CodeGen/ScheduleDAGInstrs.h
index 2219520ca1..990cac6348 100644
--- a/include/llvm/CodeGen/ScheduleDAGInstrs.h
+++ b/include/llvm/CodeGen/ScheduleDAGInstrs.h
@@ -105,6 +105,10 @@ namespace llvm {
MachineBasicBlock::iterator RegionEnd;
/// The index in BB of RegionEnd.
+ ///
+ /// This is the instruction number from the top of the current block, not
+ /// the SlotIndex. It is only used by the AntiDepBreaker and should be
+ /// removed once that client is obsolete.
unsigned EndIndex;
/// After calling BuildSchedGraph, each machine instruction in the current
@@ -146,6 +150,9 @@ namespace llvm {
virtual ~ScheduleDAGInstrs() {}
+ /// \brief Expose LiveIntervals for use in DAG mutators and such.
+ LiveIntervals *getLIS() const { return LIS; }
+
/// \brief Get the machine model for instruction scheduling.
const TargetSchedModel *getSchedModel() const { return &SchedModel; }
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index e5adf67249..8c064bf2d4 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -810,31 +810,32 @@ public:
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
- SDValue Op1, SDValue Op2, SDValue Op3);
+ SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
- SDValue Op1);
- MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
- EVT VT2, SDValue Op1, SDValue Op2);
- MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
- EVT VT2, SDValue Op1, SDValue Op2, SDValue Op3);
+ SDValue Op1);
+ MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
+ SDValue Op1, SDValue Op2);
+ MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
+ SDValue Op1, SDValue Op2, SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
- EVT VT3, SDValue Op1, SDValue Op2);
+ EVT VT3, SDValue Op1, SDValue Op2);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
- EVT VT3, SDValue Op1, SDValue Op2, SDValue Op3);
+ EVT VT3, SDValue Op1, SDValue Op2,
+ SDValue Op3);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
- EVT VT3, const SDValue *Ops, unsigned NumOps);
+ EVT VT3, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2,
- EVT VT3, EVT VT4, const SDValue *Ops, unsigned NumOps);
+ EVT VT3, EVT VT4, ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl,
- ArrayRef<EVT> ResultTys, const SDValue *Ops,
- unsigned NumOps);
+ ArrayRef<EVT> ResultTys,
+ ArrayRef<SDValue> Ops);
MachineSDNode *getMachineNode(unsigned Opcode, DebugLoc dl, SDVTList VTs,
- const SDValue *Ops, unsigned NumOps);
+ ArrayRef<SDValue> Ops);
/// getTargetExtractSubreg - A convenience function for creating
/// TargetInstrInfo::EXTRACT_SUBREG nodes.
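A hedged caller-side sketch of the ArrayRef migration above (the wrapper is illustrative): operand lists now convert implicitly from SmallVector or C arrays, so the separate NumOps count disappears at every getMachineNode call site.

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/CodeGen/SelectionDAG.h"

    using namespace llvm;

    static MachineSDNode *emitNode(SelectionDAG &DAG, unsigned Opcode,
                                   DebugLoc DL, EVT VT,
                                   const SmallVectorImpl<SDValue> &Ops) {
      // Before: DAG.getMachineNode(Opcode, DL, VT, &Ops[0], Ops.size());
      // After:  the container converts implicitly to ArrayRef<SDValue>.
      return DAG.getMachineNode(Opcode, DL, VT, Ops);
    }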
diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h
index 5f503deff1..a4721db685 100644
--- a/include/llvm/CodeGen/SelectionDAGISel.h
+++ b/include/llvm/CodeGen/SelectionDAGISel.h
@@ -259,9 +259,6 @@ private:
void SelectBasicBlock(BasicBlock::const_iterator Begin,
BasicBlock::const_iterator End,
bool &HadTailCall);
-
- bool TryToFoldFastISelLoad(const LoadInst *LI, const Instruction *FoldInst,
- FastISel *FastIS);
void FinishBasicBlock();
void CodeGenAndEmitDAG();
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 05f3b1494f..fef567f56b 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -196,14 +196,14 @@ template <> struct isPodLike<SDValue> { static const bool value = true; };
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDValue> {
typedef SDNode* SimpleType;
- static SimpleType getSimplifiedValue(const SDValue &Val) {
- return static_cast<SimpleType>(Val.getNode());
+ static SimpleType getSimplifiedValue(SDValue &Val) {
+ return Val.getNode();
}
};
template<> struct simplify_type<const SDValue> {
- typedef SDNode* SimpleType;
+ typedef /*const*/ SDNode* SimpleType;
static SimpleType getSimplifiedValue(const SDValue &Val) {
- return static_cast<SimpleType>(Val.getNode());
+ return Val.getNode();
}
};
@@ -295,14 +295,8 @@ private:
/// SDValues as if they were SDNode*'s.
template<> struct simplify_type<SDUse> {
typedef SDNode* SimpleType;
- static SimpleType getSimplifiedValue(const SDUse &Val) {
- return static_cast<SimpleType>(Val.getNode());
- }
-};
-template<> struct simplify_type<const SDUse> {
- typedef SDNode* SimpleType;
- static SimpleType getSimplifiedValue(const SDUse &Val) {
- return static_cast<SimpleType>(Val.getNode());
+ static SimpleType getSimplifiedValue(SDUse &Val) {
+ return Val.getNode();
}
};
diff --git a/include/llvm/CodeGen/SlotIndexes.h b/include/llvm/CodeGen/SlotIndexes.h
index a277080466..26d0433f3e 100644
--- a/include/llvm/CodeGen/SlotIndexes.h
+++ b/include/llvm/CodeGen/SlotIndexes.h
@@ -53,6 +53,20 @@ namespace llvm {
this->index = index;
}
+#ifdef EXPENSIVE_CHECKS
+ // When EXPENSIVE_CHECKS is defined, "erased" index list entries will
+ // actually be moved to a "graveyard" list, and have their pointers
+ // poisoned, so that dangling SlotIndex access can be reliably detected.
+ void setPoison() {
+ intptr_t tmp = reinterpret_cast<intptr_t>(mi);
+ assert(((tmp & 0x1) == 0x0) && "Pointer already poisoned?");
+ tmp |= 0x1;
+ mi = reinterpret_cast<MachineInstr*>(tmp);
+ }
+
+ bool isPoisoned() const { return (reinterpret_cast<intptr_t>(mi) & 0x1) == 0x1; }
+#endif // EXPENSIVE_CHECKS
+
};
template <>
@@ -109,6 +123,10 @@ namespace llvm {
IndexListEntry* listEntry() const {
assert(isValid() && "Attempt to compare reserved index.");
+#ifdef EXPENSIVE_CHECKS
+ assert(!lie.getPointer()->isPoisoned() &&
+ "Attempt to access deleted list-entry.");
+#endif // EXPENSIVE_CHECKS
return lie.getPointer();
}
@@ -282,7 +300,6 @@ namespace llvm {
template <> struct isPodLike<SlotIndex> { static const bool value = true; };
-
inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
li.print(os);
return os;
@@ -313,6 +330,10 @@ namespace llvm {
typedef ilist<IndexListEntry> IndexList;
IndexList indexList;
+#ifdef EXPENSIVE_CHECKS
+ IndexList graveyardList;
+#endif // EXPENSIVE_CHECKS
+
MachineFunction *mf;
typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
@@ -643,6 +664,32 @@ namespace llvm {
std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());
}
+ /// \brief Free the resources that were required to maintain a SlotIndex.
+ ///
+ /// Once an index is no longer needed (for instance because the instruction
+ /// at that index has been moved), the resources required to maintain the
+ /// index can be relinquished to reduce memory use and improve renumbering
+ /// performance. Any remaining SlotIndex objects that point to the same
+ /// index are left 'dangling' (much the same as a dangling pointer to a
+ /// freed object) and should not be accessed, except to destruct them.
+ ///
+ /// Like dangling pointers, access to dangling SlotIndexes can cause
+ /// painful-to-track-down bugs, especially if the memory for the index
+ /// previously pointed to has been re-used. To detect dangling SlotIndex
+ /// bugs, build with EXPENSIVE_CHECKS=1. This will cause "erased" indexes to
+ /// be retained in a graveyard instead of being freed. Operations on indexes
+ /// in the graveyard will trigger an assertion.
+ void eraseIndex(SlotIndex index) {
+ IndexListEntry *entry = index.listEntry();
+#ifdef EXPENSIVE_CHECKS
+ indexList.remove(entry);
+ graveyardList.push_back(entry);
+ entry->setPoison();
+#else
+ indexList.erase(entry);
+#endif
+ }
+
};
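A standalone sketch (plain C++, not LLVM code) of the pointer-poisoning trick that setPoison()/isPoisoned() rely on above: MachineInstr pointers are at least two-byte aligned, so the low bit of a live entry is always clear and can be set to mark an erased, graveyarded entry.

    #include <cassert>
    #include <cstdint>

    struct IndexEntry {
      void *MI; // stands in for the IndexListEntry's MachineInstr pointer

      void setPoison() {
        intptr_t Tmp = reinterpret_cast<intptr_t>(MI);
        assert((Tmp & 0x1) == 0 && "Pointer already poisoned?");
        MI = reinterpret_cast<void *>(Tmp | 0x1);
      }

      bool isPoisoned() const {
        return (reinterpret_cast<intptr_t>(MI) & 0x1) == 0x1;
      }
    };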
diff --git a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index e7098e48bf..5b22c9c685 100644
--- a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -128,6 +128,12 @@ public:
virtual const MCSection *
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
Mangler *Mang, const TargetMachine &TM) const;
+
+ /// emitModuleFlags - Emit Obj-C garbage collection and linker options. Only
+ /// linker option emission is implemented for COFF.
+ virtual void emitModuleFlags(MCStreamer &Streamer,
+ ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
+ Mangler *Mang, const TargetMachine &TM) const;
};
} // end namespace llvm
diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td
index 76df6ac8e6..da26985949 100644
--- a/include/llvm/CodeGen/ValueTypes.td
+++ b/include/llvm/CodeGen/ValueTypes.td
@@ -44,13 +44,13 @@ def v4i8 : ValueType<32 , 20>; // 4 x i8 vector value
def v8i8 : ValueType<64 , 21>; // 8 x i8 vector value
def v16i8 : ValueType<128, 22>; // 16 x i8 vector value
def v32i8 : ValueType<256, 23>; // 32 x i8 vector value
-def v64i8 : ValueType<256, 24>; // 64 x i8 vector value
+def v64i8 : ValueType<512, 24>; // 64 x i8 vector value
def v1i16 : ValueType<16 , 25>; // 1 x i16 vector value
def v2i16 : ValueType<32 , 26>; // 2 x i16 vector value
def v4i16 : ValueType<64 , 27>; // 4 x i16 vector value
def v8i16 : ValueType<128, 28>; // 8 x i16 vector value
def v16i16 : ValueType<256, 29>; // 16 x i16 vector value
-def v32i16 : ValueType<256, 30>; // 32 x i16 vector value
+def v32i16 : ValueType<512, 30>; // 32 x i16 vector value
def v1i32 : ValueType<32 , 31>; // 1 x i32 vector value
def v2i32 : ValueType<64 , 32>; // 2 x i32 vector value
def v4i32 : ValueType<128, 33>; // 4 x i32 vector value
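For reference, the two ValueTypes.td corrections above simply restore the invariant that a vector type's bit width is its element count times the element width: v64i8 is 64 × 8 = 512 bits and v32i16 is 32 × 16 = 512 bits, so the previous 256-bit entries were wrong.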