author     Mathieu Chartier <mathieuc@google.com>  2015-03-12 10:06:33 -0700
committer  Mathieu Chartier <mathieuc@google.com>  2015-03-12 10:59:33 -0700
commit     c6201fa2ec66a218c4d0320fbcddd2fbb65cfa4a (patch)
tree       d7e2742906c340d61a7bf24aab6e0b343e0580d2 /runtime/base
parent     4cfe74cb50b73f5f4b6dd32aabed55d044afe348 (diff)
download   art-c6201fa2ec66a218c4d0320fbcddd2fbb65cfa4a.tar.gz
           art-c6201fa2ec66a218c4d0320fbcddd2fbb65cfa4a.tar.bz2
           art-c6201fa2ec66a218c4d0320fbcddd2fbb65cfa4a.zip
Add way to select arena type at runtime
We now use MemMap for JIT, and malloc for everything else. This should help
fix the allegedly regressed compile times.

Change-Id: I6a6552738933f9d7ee3bd23f45e310818b19b70d
Diffstat (limited to 'runtime/base')
-rw-r--r--  runtime/base/arena_allocator.cc  71
-rw-r--r--  runtime/base/arena_allocator.h   34
2 files changed, 57 insertions, 48 deletions
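
For context, the sketch below (not part of the patch) shows how a caller might pick the arena backing at pool-construction time. Only ArenaPool, AllocArena, FreeArenaChain, TrimMaps and Arena::kDefaultSize come from this change; the function name, the include path, and the assumption that a runtime Thread is attached (the pool lock uses Thread::Current()) are illustrative.

#include "base/arena_allocator.h"

namespace art {

// Hypothetical caller; the actual JIT wiring is not part of this diff.
void ArenaPoolUsageSketch() {
  // JIT case: mem-mapped arenas, so the pages of free arenas can later be
  // returned to the kernel.
  ArenaPool jit_pool(/* use_malloc */ false);
  Arena* arena = jit_pool.AllocArena(Arena::kDefaultSize);
  // ... allocate through an ArenaAllocator backed by jit_pool ...
  jit_pool.FreeArenaChain(arena);
  jit_pool.TrimMaps();  // Madvises the free arenas; a no-op for malloc pools.

  // Everything else: the default malloc-backed arenas, cheaper to create.
  ArenaPool compiler_pool;  // use_malloc defaults to true.
  Arena* other = compiler_pool.AllocArena(Arena::kDefaultSize);
  compiler_pool.FreeArenaChain(other);
}

}  // namespace art
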
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index 70d138d4b9..e37aca1031 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -26,10 +26,6 @@
namespace art {
-// Memmap is a bit slower than malloc to allocate, but this is mitigated by the arena pool which
-// only allocates few arenas and recycles them afterwards.
-static constexpr bool kUseMemMap = true;
-static constexpr bool kUseMemSet = true && kUseMemMap;
static constexpr size_t kValgrindRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
@@ -124,33 +120,30 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
-Arena::Arena(size_t size)
- : bytes_allocated_(0),
- map_(nullptr),
- next_(nullptr) {
- if (kUseMemMap) {
- std::string error_msg;
- map_ = MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
- false, &error_msg);
- CHECK(map_ != nullptr) << error_msg;
- memory_ = map_->Begin();
- size_ = map_->Size();
- } else {
- memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
- size_ = size;
- }
+Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
}
-Arena::~Arena() {
- if (kUseMemMap) {
- delete map_;
- } else {
- free(reinterpret_cast<void*>(memory_));
- }
+MallocArena::MallocArena(size_t size) {
+ memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ size_ = size;
+}
+
+MallocArena::~MallocArena() {
+ free(reinterpret_cast<void*>(memory_));
+}
+
+MemMapArena::MemMapArena(size_t size) {
+ std::string error_msg;
+ map_.reset(
+ MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
+ false, &error_msg));
+ CHECK(map_.get() != nullptr) << error_msg;
+ memory_ = map_->Begin();
+ size_ = map_->Size();
}
-void Arena::Release() {
- if (kUseMemMap && bytes_allocated_ > 0) {
+void MemMapArena::Release() {
+ if (bytes_allocated_ > 0) {
map_->MadviseDontNeedAndZero();
bytes_allocated_ = 0;
}
@@ -158,19 +151,14 @@ void Arena::Release() {
void Arena::Reset() {
if (bytes_allocated_ > 0) {
- if (kUseMemSet || !kUseMemMap) {
- memset(Begin(), 0, bytes_allocated_);
- } else {
- map_->MadviseDontNeedAndZero();
- }
+ memset(Begin(), 0, bytes_allocated_);
bytes_allocated_ = 0;
}
}
-ArenaPool::ArenaPool()
- : lock_("Arena pool lock"),
- free_arenas_(nullptr) {
- if (kUseMemMap) {
+ArenaPool::ArenaPool(bool use_malloc)
+ : use_malloc_(use_malloc), lock_("Arena pool lock"), free_arenas_(nullptr) {
+ if (!use_malloc) {
MemMap::Init();
}
}
@@ -194,16 +182,19 @@ Arena* ArenaPool::AllocArena(size_t size) {
}
}
if (ret == nullptr) {
- ret = new Arena(size);
+ ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) : new MemMapArena(size);
}
ret->Reset();
return ret;
}
void ArenaPool::TrimMaps() {
- MutexLock lock(Thread::Current(), lock_);
- for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
- arena->Release();
+ if (!use_malloc_) {
+ // Doesn't work for malloc.
+ MutexLock lock(Thread::Current(), lock_);
+ for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ arena->Release();
+ }
}
}
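
As an aside, the madvise behavior that MemMapArena::Release() gets from MemMap::MadviseDontNeedAndZero() can be illustrated with plain POSIX calls (standalone sketch, Linux semantics, not ART code): the physical pages of an anonymous mapping are handed back to the kernel and read as zero afterwards, while the mapping itself stays valid, which is why Release() only has to clear bytes_allocated_.

#include <sys/mman.h>

#include <cassert>
#include <cstddef>
#include <cstring>

int main() {
  const size_t kSize = 1 << 20;  // 1 MiB anonymous mapping.
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(mem != MAP_FAILED);

  memset(mem, 0xAB, kSize);            // Dirty the pages.
  madvise(mem, kSize, MADV_DONTNEED);  // Give the pages back to the kernel.

  // The mapping is still usable; anonymous pages now read back as zero.
  assert(static_cast<unsigned char*>(mem)[0] == 0);
  munmap(mem, kSize);
  return 0;
}
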
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 04ca3eabb8..cc7b856e84 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -116,12 +116,12 @@ typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorS
class Arena {
public:
static constexpr size_t kDefaultSize = 128 * KB;
- explicit Arena(size_t size = kDefaultSize);
- ~Arena();
+ Arena();
+ virtual ~Arena() { }
// Reset is for pre-use and uses memset for performance.
void Reset();
// Release is used in between uses and uses madvise for memory usage.
- void Release();
+ virtual void Release() { }
uint8_t* Begin() {
return memory_;
}
@@ -142,32 +142,50 @@ class Arena {
return bytes_allocated_;
}
- private:
+ protected:
size_t bytes_allocated_;
uint8_t* memory_;
size_t size_;
- MemMap* map_;
Arena* next_;
friend class ArenaPool;
friend class ArenaAllocator;
friend class ArenaStack;
friend class ScopedArenaAllocator;
template <bool kCount> friend class ArenaAllocatorStatsImpl;
+
+ private:
DISALLOW_COPY_AND_ASSIGN(Arena);
};
+class MallocArena FINAL : public Arena {
+ public:
+ explicit MallocArena(size_t size = Arena::kDefaultSize);
+ virtual ~MallocArena();
+};
+
+class MemMapArena FINAL : public Arena {
+ public:
+ explicit MemMapArena(size_t size = Arena::kDefaultSize);
+ virtual ~MemMapArena() { }
+ void Release() OVERRIDE;
+
+ private:
+ std::unique_ptr<MemMap> map_;
+};
+
class ArenaPool {
public:
- ArenaPool();
+ explicit ArenaPool(bool use_malloc = true);
~ArenaPool();
Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
- // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works if
- // kUseMemMap is true.
+ // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
+ // if use_malloc is false.
void TrimMaps() LOCKS_EXCLUDED(lock_);
private:
+ const bool use_malloc_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ArenaPool);