author     Stephen Hines <srhines@google.com>    2014-04-23 16:57:46 -0700
committer  Stephen Hines <srhines@google.com>    2014-04-24 15:53:16 -0700
commit     36b56886974eae4f9c5ebc96befd3e7bfe5de338 (patch)
tree       e6cfb69fbbd937f450eeb83bfb83b9da3b01275a /include/llvm/Support/Allocator.h
parent     69a8640022b04415ae9fac62f8ab090601d8f889 (diff)
Update to LLVM 3.5a.
Change-Id: Ifadecab779f128e62e430c2b4f6ddd84953ed617
Diffstat (limited to 'include/llvm/Support/Allocator.h')
-rw-r--r--  include/llvm/Support/Allocator.h | 332
1 file changed, 218 insertions(+), 114 deletions(-)
diff --git a/include/llvm/Support/Allocator.h b/include/llvm/Support/Allocator.h
index 397f50fbe3..06413225a9 100644
--- a/include/llvm/Support/Allocator.h
+++ b/include/llvm/Support/Allocator.h
@@ -17,14 +17,19 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Memory.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdlib>
namespace llvm {
-template <typename T> struct ReferenceAdder { typedef T& result; };
-template <typename T> struct ReferenceAdder<T&> { typedef T result; };
+template <typename T> struct ReferenceAdder {
+ typedef T &result;
+};
+template <typename T> struct ReferenceAdder<T &> {
+ typedef T result;
+};
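As a quick illustration of what this trait computes (a sketch, not part of the patch; the static_asserts assume a C++11-capable compiler):

    #include "llvm/Support/Allocator.h"
    #include <type_traits>

    // ReferenceAdder<T>::result adds a reference to a non-reference type,
    // while the T& specialization strips the reference instead.
    static_assert(std::is_same<llvm::ReferenceAdder<int>::result, int &>::value,
                  "non-reference types gain a reference");
    static_assert(std::is_same<llvm::ReferenceAdder<int &>::result, int>::value,
                  "reference types lose theirs");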
class MallocAllocator {
public:
@@ -35,15 +40,15 @@ public:
void *Allocate(size_t Size, size_t /*Alignment*/) { return malloc(Size); }
- template <typename T>
- T *Allocate() { return static_cast<T*>(malloc(sizeof(T))); }
+ template <typename T> T *Allocate() {
+ return static_cast<T *>(malloc(sizeof(T)));
+ }
- template <typename T>
- T *Allocate(size_t Num) {
- return static_cast<T*>(malloc(sizeof(T)*Num));
+ template <typename T> T *Allocate(size_t Num) {
+ return static_cast<T *>(malloc(sizeof(T) * Num));
}
- void Deallocate(const void *Ptr) { free(const_cast<void*>(Ptr)); }
+ void Deallocate(const void *Ptr) { free(const_cast<void *>(Ptr)); }
void PrintStats() const {}
};
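For reference, a minimal usage sketch of the interface above; Point is a hypothetical example type, and note that Allocate() hands back uninitialized storage:

    #include "llvm/Support/Allocator.h"

    struct Point { int X, Y; };  // hypothetical example type

    void useMallocAllocator() {
      llvm::MallocAllocator A;
      Point *One  = A.Allocate<Point>();    // storage for one Point
      Point *Many = A.Allocate<Point>(16);  // storage for sixteen Points
      One->X = One->Y = 0;                  // no constructor ran; initialize by hand
      A.Deallocate(One);
      A.Deallocate(Many);
    }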
@@ -77,128 +82,224 @@ class MallocSlabAllocator : public SlabAllocator {
MallocAllocator Allocator;
public:
- MallocSlabAllocator() : Allocator() { }
+ MallocSlabAllocator() : Allocator() {}
virtual ~MallocSlabAllocator();
- virtual MemSlab *Allocate(size_t Size) LLVM_OVERRIDE;
- virtual void Deallocate(MemSlab *Slab) LLVM_OVERRIDE;
+ MemSlab *Allocate(size_t Size) override;
+ void Deallocate(MemSlab *Slab) override;
};
-/// BumpPtrAllocator - This allocator is useful for containers that need
-/// very simple memory allocation strategies. In particular, this just keeps
-/// allocating memory, and never deletes it until the entire block is dead. This
-/// makes allocation speedy, but must only be used when the trade-off is ok.
-class BumpPtrAllocator {
- BumpPtrAllocator(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
- void operator=(const BumpPtrAllocator &) LLVM_DELETED_FUNCTION;
-
- /// SlabSize - Allocate data into slabs of this size unless we get an
- /// allocation above SizeThreshold.
- size_t SlabSize;
-
- /// SizeThreshold - For any allocation larger than this threshold, we should
- /// allocate a separate slab.
- size_t SizeThreshold;
-
- /// \brief the default allocator used if one is not provided
- MallocSlabAllocator DefaultSlabAllocator;
+/// \brief Non-templated base class for the \c BumpPtrAllocatorImpl template.
+class BumpPtrAllocatorBase {
+public:
+ void Deallocate(const void * /*Ptr*/) {}
+ void PrintStats() const;
- /// Allocator - The underlying allocator we use to get slabs of memory. This
- /// defaults to MallocSlabAllocator, which wraps malloc, but it could be
- /// changed to use a custom allocator.
- SlabAllocator &Allocator;
+ /// \brief Returns the total physical memory allocated by this allocator.
+ size_t getTotalMemory() const;
- /// CurSlab - The slab that we are currently allocating into.
- ///
+protected:
+ /// \brief The slab that we are currently allocating into.
MemSlab *CurSlab;
- /// CurPtr - The current pointer into the current slab. This points to the
- /// next free byte in the slab.
- char *CurPtr;
-
- /// End - The end of the current slab.
+ /// \brief How many bytes we've allocated.
///
- char *End;
-
- /// BytesAllocated - This field tracks how many bytes we've allocated, so
- /// that we can compute how much space was wasted.
+ /// Used so that we can compute how much space was wasted.
size_t BytesAllocated;
- /// AlignPtr - Align Ptr to Alignment bytes, rounding up. Alignment should
- /// be a power of two. This method rounds up, so AlignPtr(7, 4) == 8 and
- /// AlignPtr(8, 4) == 8.
- static char *AlignPtr(char *Ptr, size_t Alignment);
-
- /// StartNewSlab - Allocate a new slab and move the bump pointers over into
- /// the new slab. Modifies CurPtr and End.
- void StartNewSlab();
+ BumpPtrAllocatorBase() : CurSlab(0), BytesAllocated(0) {}
+};
- /// DeallocateSlabs - Deallocate all memory slabs after and including this
- /// one.
- void DeallocateSlabs(MemSlab *Slab);
+/// \brief Allocate memory in an ever growing pool, as if by bump-pointer.
+///
+/// This isn't strictly a bump-pointer allocator as it uses backing slabs of
+/// memory rather than relying on a boundless contiguous heap. However, it has
+/// bump-pointer semantics in that it is a monotonically growing pool of memory
+/// where every allocation is found by merely allocating the next N bytes in
+/// the slab, or the next N bytes in the next slab.
+///
+/// Note that this also has a threshold for forcing allocations above a certain
+/// size into their own slab.
+template <size_t SlabSize = 4096, size_t SizeThreshold = SlabSize>
+class BumpPtrAllocatorImpl : public BumpPtrAllocatorBase {
+ BumpPtrAllocatorImpl(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
+ void operator=(const BumpPtrAllocatorImpl &) LLVM_DELETED_FUNCTION;
- template<typename T> friend class SpecificBumpPtrAllocator;
public:
- BumpPtrAllocator(size_t size = 4096, size_t threshold = 4096);
- BumpPtrAllocator(size_t size, size_t threshold, SlabAllocator &allocator);
- ~BumpPtrAllocator();
-
- /// Reset - Deallocate all but the current slab and reset the current pointer
+ static_assert(SizeThreshold <= SlabSize,
+ "The SizeThreshold must be at most the SlabSize to ensure "
+ "that objects larger than a slab go into their own memory "
+ "allocation.");
+
+ BumpPtrAllocatorImpl()
+ : Allocator(DefaultSlabAllocator), NumSlabs(0) {}
+ BumpPtrAllocatorImpl(SlabAllocator &Allocator)
+ : Allocator(Allocator), NumSlabs(0) {}
+ ~BumpPtrAllocatorImpl() { DeallocateSlabs(CurSlab); }
+
+ /// \brief Deallocate all but the current slab and reset the current pointer
/// to the beginning of it, freeing all memory allocated so far.
- void Reset();
+ void Reset() {
+ if (!CurSlab)
+ return;
+ DeallocateSlabs(CurSlab->NextPtr);
+ CurSlab->NextPtr = 0;
+ CurPtr = (char *)(CurSlab + 1);
+ End = ((char *)CurSlab) + CurSlab->Size;
+ BytesAllocated = 0;
+ }
- /// Allocate - Allocate space at the specified alignment.
- ///
- void *Allocate(size_t Size, size_t Alignment);
+ /// \brief Allocate space at the specified alignment.
+ void *Allocate(size_t Size, size_t Alignment) {
+ if (!CurSlab) // Start a new slab if we haven't allocated one already.
+ StartNewSlab();
+
+ // Keep track of how many bytes we've allocated.
+ BytesAllocated += Size;
+
+ // 0-byte alignment means 1-byte alignment.
+ if (Alignment == 0)
+ Alignment = 1;
+
+ // Allocate the aligned space, going forwards from CurPtr.
+ char *Ptr = alignPtr(CurPtr, Alignment);
+
+ // Check if we can hold it.
+ if (Ptr + Size <= End) {
+ CurPtr = Ptr + Size;
+ // Update the allocation point of this memory block in MemorySanitizer.
+ // Without this, MemorySanitizer messages for values originated from here
+ // will point to the allocation of the entire slab.
+ __msan_allocated_memory(Ptr, Size);
+ return Ptr;
+ }
- /// Allocate space, but do not construct, one object.
- ///
- template <typename T>
- T *Allocate() {
- return static_cast<T*>(Allocate(sizeof(T),AlignOf<T>::Alignment));
+ // If Size is really big, allocate a separate slab for it.
+ size_t PaddedSize = Size + sizeof(MemSlab) + Alignment - 1;
+ if (PaddedSize > SizeThreshold) {
+ ++NumSlabs;
+ MemSlab *NewSlab = Allocator.Allocate(PaddedSize);
+
+ // Put the new slab after the current slab, since we are not allocating
+ // into it.
+ NewSlab->NextPtr = CurSlab->NextPtr;
+ CurSlab->NextPtr = NewSlab;
+
+ Ptr = alignPtr((char *)(NewSlab + 1), Alignment);
+ assert((uintptr_t)Ptr + Size <= (uintptr_t)NewSlab + NewSlab->Size);
+ __msan_allocated_memory(Ptr, Size);
+ return Ptr;
+ }
+
+ // Otherwise, start a new slab and try again.
+ StartNewSlab();
+ Ptr = alignPtr(CurPtr, Alignment);
+ CurPtr = Ptr + Size;
+ assert(CurPtr <= End && "Unable to allocate memory!");
+ __msan_allocated_memory(Ptr, Size);
+ return Ptr;
}
- /// Allocate space for an array of objects. This does not construct the
- /// objects though.
- template <typename T>
- T *Allocate(size_t Num) {
- return static_cast<T*>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+ /// \brief Allocate space for one object without constructing it.
+ template <typename T> T *Allocate() {
+ return static_cast<T *>(Allocate(sizeof(T), AlignOf<T>::Alignment));
}
- /// Allocate space for a specific count of elements and with a specified
- /// alignment.
- template <typename T>
- T *Allocate(size_t Num, size_t Alignment) {
+ /// \brief Allocate space for an array of objects without constructing them.
+ template <typename T> T *Allocate(size_t Num) {
+ return static_cast<T *>(Allocate(Num * sizeof(T), AlignOf<T>::Alignment));
+ }
+
+ /// \brief Allocate space for an array of objects with the specified alignment
+ /// and without constructing them.
+ template <typename T> T *Allocate(size_t Num, size_t Alignment) {
// Round EltSize up to the specified alignment.
- size_t EltSize = (sizeof(T)+Alignment-1)&(-Alignment);
- return static_cast<T*>(Allocate(Num * EltSize, Alignment));
+ size_t EltSize = (sizeof(T) + Alignment - 1) & (-Alignment);
+ return static_cast<T *>(Allocate(Num * EltSize, Alignment));
}
- void Deallocate(const void * /*Ptr*/) {}
+ size_t GetNumSlabs() const { return NumSlabs; }
- unsigned GetNumSlabs() const;
+private:
+ /// \brief The default allocator used if one is not provided.
+ MallocSlabAllocator DefaultSlabAllocator;
- void PrintStats() const;
-
- /// Compute the total physical memory allocated by this allocator.
- size_t getTotalMemory() const;
+ /// \brief The underlying allocator we use to get slabs of memory.
+ ///
+ /// This defaults to MallocSlabAllocator, which wraps malloc, but it could be
+ /// changed to use a custom allocator.
+ SlabAllocator &Allocator;
+
+ /// \brief The current pointer into the current slab.
+ ///
+ /// This points to the next free byte in the slab.
+ char *CurPtr;
+
+ /// \brief The end of the current slab.
+ char *End;
+
+ /// \brief How many slabs we've allocated.
+ ///
+ /// Used to scale the size of each slab and reduce the number of allocations
+ /// for extremely heavy memory use scenarios.
+ size_t NumSlabs;
+
+ /// \brief Allocate a new slab and move the bump pointers over into the new
+ /// slab, modifying CurPtr and End.
+ void StartNewSlab() {
+ ++NumSlabs;
+ // Scale the actual allocated slab size based on the number of slabs
+ // allocated. Every 128 slabs allocated, we double the allocated size to
+ // reduce allocation frequency, but saturate at multiplying the slab size by
+ // 2^30.
+ // FIXME: Currently, this count includes special slabs for objects above the
+ // size threshold. That will be fixed in a subsequent commit to make the
+ // growth even more predictable.
+ size_t AllocatedSlabSize =
+ SlabSize * ((size_t)1 << std::min<size_t>(30, NumSlabs / 128));
+
+ MemSlab *NewSlab = Allocator.Allocate(AllocatedSlabSize);
+ NewSlab->NextPtr = CurSlab;
+ CurSlab = NewSlab;
+ CurPtr = (char *)(CurSlab + 1);
+ End = ((char *)CurSlab) + CurSlab->Size;
+ }
+
+ /// \brief Deallocate all memory slabs after and including this one.
+ void DeallocateSlabs(MemSlab *Slab) {
+ while (Slab) {
+ MemSlab *NextSlab = Slab->NextPtr;
+#ifndef NDEBUG
+ // Poison the memory so stale pointers crash sooner. Note we must
+ // preserve the Size and NextPtr fields at the beginning.
+ sys::Memory::setRangeWritable(Slab + 1, Slab->Size - sizeof(MemSlab));
+ memset(Slab + 1, 0xCD, Slab->Size - sizeof(MemSlab));
+#endif
+ Allocator.Deallocate(Slab);
+ Slab = NextSlab;
+ --NumSlabs;
+ }
+ }
+
+ template <typename T> friend class SpecificBumpPtrAllocator;
};
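A worked check of the growth formula in StartNewSlab() above, assuming the default SlabSize of 4096 (the ranges reflect NumSlabs being incremented before the size is computed):

    // AllocatedSlabSize = SlabSize * ((size_t)1 << min(30, NumSlabs / 128))
    //   NumSlabs    1 .. 127  ->  4096 << 0  =   4 KiB
    //   NumSlabs  128 .. 255  ->  4096 << 1  =   8 KiB
    //   NumSlabs  256 .. 383  ->  4096 << 2  =  16 KiB
    //   ...
    //   NumSlabs >= 3840      ->  4096 << 30 (the min() caps the shift at 30)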
-/// SpecificBumpPtrAllocator - Same as BumpPtrAllocator but allows only
-/// elements of one type to be allocated. This allows calling the destructor
-/// in DestroyAll() and when the allocator is destroyed.
-template <typename T>
-class SpecificBumpPtrAllocator {
+/// \brief The standard BumpPtrAllocator which just uses the default template
+/// parameters.
+typedef BumpPtrAllocatorImpl<> BumpPtrAllocator;
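With the size and threshold moved into template parameters, callers that previously passed them to the constructor now either use this typedef or instantiate BumpPtrAllocatorImpl with explicit arguments. A usage sketch against the new interface (Node is a hypothetical example type):

    #include "llvm/Support/Allocator.h"
    #include <new>

    struct Node { int Key; Node *Next; };  // hypothetical example type

    void useBumpPtr() {
      llvm::BumpPtrAllocator BPA;  // BumpPtrAllocatorImpl<4096, 4096>

      // Storage is uninitialized; construct in place. The operator new
      // overload at the bottom of this header makes the placement form work.
      Node *N = new (BPA) Node();
      (void)N;

      // A request whose padded size exceeds SizeThreshold is routed to a
      // dedicated slab linked in behind the current one.
      char *Big = BPA.Allocate<char>(8192);
      (void)Big;

      // Keeps only the current slab and rewinds the bump pointer.
      BPA.Reset();
    }

Note that Deallocate() is inherited from BumpPtrAllocatorBase and is a no-op: memory is only returned by Reset() or the destructor, and no destructors are ever run on allocated objects.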
+
+/// \brief A BumpPtrAllocator that allows only elements of a specific type to be
+/// allocated.
+///
+/// This allows calling the destructor in DestroyAll() and when the allocator is
+/// destroyed.
+template <typename T> class SpecificBumpPtrAllocator {
BumpPtrAllocator Allocator;
+
public:
- SpecificBumpPtrAllocator(size_t size = 4096, size_t threshold = 4096)
- : Allocator(size, threshold) {}
- SpecificBumpPtrAllocator(size_t size, size_t threshold,
- SlabAllocator &allocator)
- : Allocator(size, threshold, allocator) {}
-
- ~SpecificBumpPtrAllocator() {
- DestroyAll();
- }
+ SpecificBumpPtrAllocator() : Allocator() {}
+ SpecificBumpPtrAllocator(SlabAllocator &allocator) : Allocator(allocator) {}
+
+ ~SpecificBumpPtrAllocator() { DestroyAll(); }
/// Call the destructor of each allocated object and deallocate all but the
/// current slab and reset the current pointer to the beginning of it, freeing
@@ -206,27 +307,28 @@ public:
void DestroyAll() {
MemSlab *Slab = Allocator.CurSlab;
while (Slab) {
- char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr :
- (char *)Slab + Slab->Size;
- for (char *Ptr = (char*)(Slab+1); Ptr < End; Ptr += sizeof(T)) {
- Ptr = Allocator.AlignPtr(Ptr, alignOf<T>());
+ char *End = Slab == Allocator.CurSlab ? Allocator.CurPtr
+ : (char *)Slab + Slab->Size;
+ for (char *Ptr = (char *)(Slab + 1); Ptr < End; Ptr += sizeof(T)) {
+ Ptr = alignPtr(Ptr, alignOf<T>());
if (Ptr + sizeof(T) <= End)
- reinterpret_cast<T*>(Ptr)->~T();
+ reinterpret_cast<T *>(Ptr)->~T();
}
Slab = Slab->NextPtr;
}
Allocator.Reset();
}
- /// Allocate space for a specific count of elements.
- T *Allocate(size_t num = 1) {
- return Allocator.Allocate<T>(num);
- }
+ /// \brief Allocate space for an array of objects without constructing them.
+ T *Allocate(size_t num = 1) { return Allocator.Allocate<T>(num); }
};
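The payoff over the plain BumpPtrAllocator is visible in DestroyAll() above: because every element has the same type, the allocator can walk its slabs and invoke ~T(). A sketch, with Tracked as a hypothetical example type:

    #include "llvm/Support/Allocator.h"
    #include <new>

    struct Tracked {  // hypothetical example type
      int Id;
      Tracked(int Id) : Id(Id) {}
      ~Tracked() { /* e.g. release a resource keyed by Id */ }
    };

    void useSpecificBumpPtr() {
      llvm::SpecificBumpPtrAllocator<Tracked> SBPA;
      new (SBPA.Allocate()) Tracked(1);  // Allocate() defaults to one element
      new (SBPA.Allocate()) Tracked(2);
      SBPA.DestroyAll();  // runs ~Tracked() for both, then resets the pool
    }  // the allocator's own destructor would also have called DestroyAll()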
} // end namespace llvm
-inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
+template <size_t SlabSize, size_t SizeThreshold>
+void *
+operator new(size_t Size,
+ llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &Allocator) {
struct S {
char c;
union {
@@ -236,10 +338,12 @@ inline void *operator new(size_t Size, llvm::BumpPtrAllocator &Allocator) {
void *P;
} x;
};
- return Allocator.Allocate(Size, std::min((size_t)llvm::NextPowerOf2(Size),
- offsetof(S, x)));
+ return Allocator.Allocate(
+ Size, std::min((size_t)llvm::NextPowerOf2(Size), offsetof(S, x)));
}
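The alignment passed to Allocate() here comes from a layout probe rather than a named constant: offsetof(S, x) is where the compiler places the union after a single char, i.e. the strictest alignment requirement among the union's members, and the std::min with NextPowerOf2(Size) keeps tiny allocations from being over-aligned. A standalone sketch of the same computation; the probe's members and the numbers in the comments are illustrative assumptions for a typical 64-bit target, and nextPow2 mimics llvm::NextPowerOf2's strictly-greater-than semantics:

    #include <algorithm>
    #include <cstddef>

    // Illustrative probe in the same spirit as the header's struct S: the
    // char forces X to the union's natural alignment boundary.
    struct Probe {
      char C;
      union {
        long double LD;  // stand-in members; the header's union is similar
        void *P;
      } X;
    };

    // Smallest power of two strictly greater than V, as llvm::NextPowerOf2
    // behaves for the sizes used here.
    inline std::size_t nextPow2(std::size_t V) {
      std::size_t R = 1;
      while (R <= V)
        R <<= 1;
      return R;
    }

    inline std::size_t chosenAlignment(std::size_t Size) {
      return std::min(nextPow2(Size), offsetof(Probe, X));
    }
    // On a typical x86-64 target offsetof(Probe, X) == 16, so:
    //   chosenAlignment(1)  == 2   (tiny objects stay nearly unaligned)
    //   chosenAlignment(6)  == 8
    //   chosenAlignment(64) == 16  (capped by the probe)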
-inline void operator delete(void *, llvm::BumpPtrAllocator &) {}
+template <size_t SlabSize, size_t SizeThreshold>
+void operator delete(void *,
+ llvm::BumpPtrAllocatorImpl<SlabSize, SizeThreshold> &) {}
#endif // LLVM_SUPPORT_ALLOCATOR_H