 compiler/Android.mk                        |   1
 compiler/dex/bit_vector_block_iterator.h   |   2
 compiler/dex/compiler_ir.h                 |   2
 compiler/dex/frontend.cc                   |  10
 compiler/dex/local_value_numbering.h       |  60
 compiler/dex/local_value_numbering_test.cc |  13
 compiler/dex/mir_dataflow.cc               |  26
 compiler/dex/mir_graph.cc                  |  18
 compiler/dex/mir_graph.h                   |   2
 compiler/dex/mir_optimization.cc           |  14
 compiler/dex/quick/arm/call_arm.cc         |  10
 compiler/dex/quick/arm/target_arm.cc       |   6
 compiler/dex/quick/codegen_util.cc         |   4
 compiler/dex/quick/local_optimizations.cc  |   4
 compiler/dex/quick/mips/call_mips.cc       |  10
 compiler/dex/quick/mips/target_mips.cc     |   6
 compiler/dex/quick/mir_to_lir-inl.h        |   2
 compiler/dex/quick/mir_to_lir.cc           |   2
 compiler/dex/quick/mir_to_lir.h            |   4
 compiler/dex/quick/ralloc_util.cc          |   6
 compiler/dex/quick/x86/call_x86.cc         |   6
 compiler/dex/quick/x86/target_x86.cc       |   6
 compiler/dex/ssa_transformation.cc         |  14
 compiler/dex/vreg_analysis.cc              |   2
 compiler/utils/allocation.h                |   2
 compiler/utils/arena_allocator.cc          | 143
 compiler/utils/arena_allocator.h           | 120
 compiler/utils/arena_bit_vector.cc         |   4
 compiler/utils/arena_bit_vector.h          |   2
 compiler/utils/debug_stack.h               | 138
 compiler/utils/growable_array.h            |   6
 compiler/utils/scoped_arena_allocator.cc   | 126
 compiler/utils/scoped_arena_allocator.h    | 244
 runtime/safe_map.h                         |  15
 34 files changed, 838 insertions(+), 192 deletions(-)
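
The patch replaces the nested ArenaAllocator::kAlloc* enum with free-standing ArenaAllocKind values, moves allocation statistics into ArenaAllocatorStats/MemStats, and adds an ArenaStack backing a new ScopedArenaAllocator for short-lived allocations (used by LocalValueNumbering). A minimal usage sketch, inferred from the APIs introduced below; it is not part of the patch, and the function name and sizes are placeholders:

    // Sketch only: exercising the ArenaStack / ScopedArenaAllocator API added in this change.
    #include <set>
    #include "UniquePtr.h"
    #include "utils/arena_allocator.h"
    #include "utils/scoped_arena_allocator.h"

    namespace art {

    void UseScopedArena(ArenaPool* pool) {
      ArenaStack arena_stack(pool);
      // Create() pushes a new allocation scope onto the stack of arenas.
      UniquePtr<ScopedArenaAllocator> allocator(ScopedArenaAllocator::Create(&arena_stack));
      allocator->Alloc(16u, kArenaAllocMisc);  // raw allocation, tagged for MemStats
      // STL containers can draw from the same scope through the adapter:
      std::set<uint16_t, std::less<uint16_t>, ScopedArenaAllocatorAdapter<uint16_t> >
          names(std::less<uint16_t>(), allocator->Adapter());
      names.insert(1u);
      // Destroying 'allocator' releases its scope back to arena_stack for reuse;
      // arena_stack.GetPeakStats() can then report peak usage (see kDebugShowMemoryUsage).
    }

    }  // namespace art
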
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 499f23f6a5..691cee03b8 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -86,6 +86,7 @@ LIBART_COMPILER_SRC_FILES := \
utils/mips/managed_register_mips.cc \
utils/x86/assembler_x86.cc \
utils/x86/managed_register_x86.cc \
+ utils/scoped_arena_allocator.cc \
buffered_output_stream.cc \
compiler_backend.cc \
elf_fixup.cc \
diff --git a/compiler/dex/bit_vector_block_iterator.h b/compiler/dex/bit_vector_block_iterator.h
index 0821e9e238..0f1c2b6756 100644
--- a/compiler/dex/bit_vector_block_iterator.h
+++ b/compiler/dex/bit_vector_block_iterator.h
@@ -44,7 +44,7 @@ class BitVectorBlockIterator {
BasicBlock* Next();
void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, ArenaAllocator::kAllocGrowableArray);
+ return arena->Alloc(size, kArenaAllocGrowableArray);
};
void operator delete(void* p) {} // Nop.
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index ee880417ac..c71f0473f1 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -25,6 +25,7 @@
#include "driver/compiler_driver.h"
#include "driver/dex_compilation_unit.h"
#include "safe_map.h"
+#include "utils/scoped_arena_allocator.h"
#include "base/timing_logger.h"
#include "utils/arena_allocator.h"
@@ -82,6 +83,7 @@ struct CompilationUnit {
// TODO: move memory management to mir_graph, or just switch to using standard containers.
ArenaAllocator arena;
+ ArenaStack arena_stack; // Arenas for ScopedArenaAllocator.
UniquePtr<MIRGraph> mir_graph; // MIR container.
UniquePtr<Backend> cg; // Target-specific codegen.
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index b55b4715eb..1c2d16f6ca 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -98,6 +98,7 @@ CompilationUnit::CompilationUnit(ArenaPool* pool)
num_regs(0),
compiler_flip_match(false),
arena(pool),
+ arena_stack(pool),
mir_graph(NULL),
cg(NULL),
timings("QuickCompiler", true, false) {
@@ -247,9 +248,12 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
}
if (cu.enable_debug & (1 << kDebugShowMemoryUsage)) {
- if (cu.arena.BytesAllocated() > (5 * 1024 *1024)) {
- MemStats mem_stats(cu.arena);
- LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats);
+ if (cu.arena.BytesAllocated() > (1 * 1024 *1024) ||
+ cu.arena_stack.PeakBytesAllocated() > 256 * 1024) {
+ MemStats mem_stats(cu.arena.GetMemStats());
+ MemStats peak_stats(cu.arena_stack.GetPeakStats());
+ LOG(INFO) << PrettyMethod(method_idx, dex_file) << " " << Dumpable<MemStats>(mem_stats)
+ << Dumpable<MemStats>(peak_stats);
}
}
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 348bedcc75..535b613ba1 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -18,6 +18,8 @@
#define ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
#include "compiler_internals.h"
+#include "UniquePtr.h"
+#include "utils/scoped_arena_allocator.h"
#define NO_VALUE 0xffff
#define ARRAY_REF 0xfffe
@@ -73,28 +75,26 @@ class LocalValueNumbering {
};
// Key is s_reg, value is value name.
- typedef SafeMap<uint16_t, uint16_t> SregValueMap;
+ typedef SafeMap<uint16_t, uint16_t, std::less<uint16_t>,
+ ScopedArenaAllocatorAdapter<std::pair<uint16_t, uint16_t> > > SregValueMap;
// Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
- typedef SafeMap<uint64_t, uint16_t> ValueMap;
+ typedef SafeMap<uint64_t, uint16_t, std::less<uint64_t>,
+ ScopedArenaAllocatorAdapter<std::pair<uint64_t, uint16_t> > > ValueMap;
// Key represents a memory address, value is generation.
- typedef SafeMap<MemoryVersionKey, uint16_t, MemoryVersionKeyComparator> MemoryVersionMap;
+ typedef SafeMap<MemoryVersionKey, uint16_t, MemoryVersionKeyComparator,
+ ScopedArenaAllocatorAdapter<std::pair<MemoryVersionKey, uint16_t> > > MemoryVersionMap;
// Maps field key to field id for resolved fields.
- typedef SafeMap<FieldReference, uint32_t, FieldReferenceComparator> FieldIndexMap;
+ typedef SafeMap<FieldReference, uint32_t, FieldReferenceComparator,
+ ScopedArenaAllocatorAdapter<std::pair<FieldReference, uint16_t> > > FieldIndexMap;
+ // A set of value names.
+ typedef std::set<uint16_t, std::less<uint16_t>,
+ ScopedArenaAllocatorAdapter<uint16_t> > ValueNameSet;
public:
- explicit LocalValueNumbering(CompilationUnit* cu)
- : cu_(cu),
- sreg_value_map_(),
- sreg_wide_value_map_(),
- value_map_(),
- next_memory_version_(1u),
- global_memory_version_(0u),
- memory_version_map_(),
- field_index_map_(),
- non_aliasing_refs_(),
- null_checked_() {
- std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
- std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+ static LocalValueNumbering* Create(CompilationUnit* cu) {
+ UniquePtr<ScopedArenaAllocator> allocator(ScopedArenaAllocator::Create(&cu->arena_stack));
+ void* addr = allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
+ return new(addr) LocalValueNumbering(cu, allocator.release());
}
static uint64_t BuildKey(uint16_t op, uint16_t operand1, uint16_t operand2, uint16_t modifier) {
@@ -167,7 +167,26 @@ class LocalValueNumbering {
uint16_t GetValueNumber(MIR* mir);
+ // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
+ static void operator delete(void* ptr) { UNUSED(ptr); }
+
private:
+ LocalValueNumbering(CompilationUnit* cu, ScopedArenaAllocator* allocator)
+ : cu_(cu),
+ allocator_(allocator),
+ sreg_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ sreg_wide_value_map_(std::less<uint16_t>(), allocator->Adapter()),
+ value_map_(std::less<uint64_t>(), allocator->Adapter()),
+ next_memory_version_(1u),
+ global_memory_version_(0u),
+ memory_version_map_(MemoryVersionKeyComparator(), allocator->Adapter()),
+ field_index_map_(FieldReferenceComparator(), allocator->Adapter()),
+ non_aliasing_refs_(std::less<uint16_t>(), allocator->Adapter()),
+ null_checked_(std::less<uint16_t>(), allocator->Adapter()) {
+ std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
+ std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+ }
+
uint16_t GetFieldId(const DexFile* dex_file, uint16_t field_idx);
void AdvanceGlobalMemory();
uint16_t GetMemoryVersion(uint16_t base, uint16_t field, uint16_t type);
@@ -179,6 +198,7 @@ class LocalValueNumbering {
void HandlePutObject(MIR* mir);
CompilationUnit* const cu_;
+ UniquePtr<ScopedArenaAllocator> allocator_;
SregValueMap sreg_value_map_;
SregValueMap sreg_wide_value_map_;
ValueMap value_map_;
@@ -189,8 +209,10 @@ class LocalValueNumbering {
MemoryVersionMap memory_version_map_;
FieldIndexMap field_index_map_;
// Value names of references to objects that cannot be reached through a different value name.
- std::set<uint16_t> non_aliasing_refs_;
- std::set<uint16_t> null_checked_;
+ ValueNameSet non_aliasing_refs_;
+ ValueNameSet null_checked_;
+
+ DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
};
} // namespace art
diff --git a/compiler/dex/local_value_numbering_test.cc b/compiler/dex/local_value_numbering_test.cc
index 4599612db6..ebac871b2d 100644
--- a/compiler/dex/local_value_numbering_test.cc
+++ b/compiler/dex/local_value_numbering_test.cc
@@ -120,7 +120,7 @@ class LocalValueNumberingTest : public testing::Test {
void DoPrepareMIRs(const MIRDef* defs, size_t count) {
mir_count_ = count;
- mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, ArenaAllocator::kAllocMIR));
+ mirs_ = reinterpret_cast<MIR*>(cu_.arena.Alloc(sizeof(MIR) * count, kArenaAllocMIR));
ssa_reps_.resize(count);
for (size_t i = 0u; i != count; ++i) {
const MIRDef* def = &defs[i];
@@ -162,11 +162,16 @@ class LocalValueNumberingTest : public testing::Test {
void PerformLVN() {
value_names_.resize(mir_count_);
for (size_t i = 0; i != mir_count_; ++i) {
- value_names_[i] = lvn_.GetValueNumber(&mirs_[i]);
+ value_names_[i] = lvn_->GetValueNumber(&mirs_[i]);
}
}
- LocalValueNumberingTest() : pool_(), cu_(&pool_), mir_count_(0u), mirs_(nullptr), lvn_(&cu_) {
+ LocalValueNumberingTest()
+ : pool_(),
+ cu_(&pool_),
+ mir_count_(0u),
+ mirs_(nullptr),
+ lvn_(LocalValueNumbering::Create(&cu_)) {
cu_.mir_graph.reset(new MIRGraph(&cu_, &cu_.arena));
}
@@ -176,7 +181,7 @@ class LocalValueNumberingTest : public testing::Test {
MIR* mirs_;
std::vector<SSARepresentation> ssa_reps_;
std::vector<uint16_t> value_names_;
- LocalValueNumbering lvn_;
+ UniquePtr<LocalValueNumbering> lvn_;
};
TEST_F(LocalValueNumberingTest, TestIGetIGetInvokeIGet) {
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index 96804503fe..c3954fe3d7 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -955,10 +955,10 @@ void MIRGraph::DataFlowSSAFormat35C(MIR* mir) {
mir->ssa_rep->num_uses = num_uses;
mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
// NOTE: will be filled in during type & size inference pass
mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
for (i = 0; i < num_uses; i++) {
HandleSSAUse(mir->ssa_rep->uses, d_insn->arg[i], i);
@@ -973,10 +973,10 @@ void MIRGraph::DataFlowSSAFormat3RC(MIR* mir) {
mir->ssa_rep->num_uses = num_uses;
mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
// NOTE: will be filled in during type & size inference pass
mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
for (i = 0; i < num_uses; i++) {
HandleSSAUse(mir->ssa_rep->uses, d_insn->vC+i, i);
@@ -992,7 +992,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
mir->ssa_rep =
static_cast<struct SSARepresentation *>(arena_->Alloc(sizeof(SSARepresentation),
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
uint64_t df_attributes = oat_data_flow_attributes_[mir->dalvikInsn.opcode];
@@ -1042,9 +1042,9 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
if (num_uses) {
mir->ssa_rep->num_uses = num_uses;
mir->ssa_rep->uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
mir->ssa_rep->fp_use = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
}
int num_defs = 0;
@@ -1059,9 +1059,9 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
if (num_defs) {
mir->ssa_rep->num_defs = num_defs;
mir->ssa_rep->defs = static_cast<int*>(arena_->Alloc(sizeof(int) * num_defs,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
mir->ssa_rep->fp_def = static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_defs,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
}
DecodedInstruction *d_insn = &mir->dalvikInsn;
@@ -1110,7 +1110,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
*/
bb->data_flow_info->vreg_to_ssa_map =
static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
memcpy(bb->data_flow_info->vreg_to_ssa_map, vreg_to_ssa_map_,
sizeof(int) * cu_->num_dalvik_registers);
@@ -1147,11 +1147,11 @@ void MIRGraph::CompilerInitializeSSAConversion() {
*/
vreg_to_ssa_map_ =
static_cast<int*>(arena_->Alloc(sizeof(int) * num_dalvik_reg,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
/* Keep track of the higest def for each dalvik reg */
ssa_last_defs_ =
static_cast<int*>(arena_->Alloc(sizeof(int) * num_dalvik_reg,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
for (unsigned int i = 0; i < num_dalvik_reg; i++) {
vreg_to_ssa_map_[i] = i;
@@ -1175,7 +1175,7 @@ void MIRGraph::CompilerInitializeSSAConversion() {
bb->block_type == kExitBlock) {
bb->data_flow_info =
static_cast<BasicBlockDataFlow*>(arena_->Alloc(sizeof(BasicBlockDataFlow),
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
}
}
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index 46e854fb2b..868730fc37 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -411,7 +411,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
/* create */ true, /* immed_pred_block_p */ &cur_block);
SuccessorBlockInfo *successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
- ArenaAllocator::kAllocSuccessor));
+ kArenaAllocSuccessor));
successor_block_info->block = case_block->id;
successor_block_info->key =
(insn->dalvikInsn.opcode == Instruction::PACKED_SWITCH) ?
@@ -459,7 +459,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
catches_.insert(catch_block->start_offset);
}
SuccessorBlockInfo *successor_block_info = reinterpret_cast<SuccessorBlockInfo*>
- (arena_->Alloc(sizeof(SuccessorBlockInfo), ArenaAllocator::kAllocSuccessor));
+ (arena_->Alloc(sizeof(SuccessorBlockInfo), kArenaAllocSuccessor));
successor_block_info->block = catch_block->id;
successor_block_info->key = iterator.GetHandlerTypeIndex();
cur_block->successor_blocks->Insert(successor_block_info);
@@ -518,7 +518,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
new_block->start_offset = insn->offset;
cur_block->fall_through = new_block->id;
new_block->predecessors->Insert(cur_block->id);
- MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
+ MIR* new_insn = static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
*new_insn = *insn;
insn->dalvikInsn.opcode =
static_cast<Instruction::Code>(kMirOpCheck);
@@ -602,7 +602,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
/* Parse all instructions and put them into containing basic blocks */
while (code_ptr < code_end) {
- MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocMIR));
+ MIR *insn = static_cast<MIR *>(arena_->Alloc(sizeof(MIR), kArenaAllocMIR));
insn->offset = current_offset_;
insn->m_unit_index = current_method_;
int width = ParseInsn(code_ptr, &insn->dalvikInsn);
@@ -1042,7 +1042,7 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
str.append("]--optimized away");
}
int length = str.length() + 1;
- ret = static_cast<char*>(arena_->Alloc(length, ArenaAllocator::kAllocDFInfo));
+ ret = static_cast<char*>(arena_->Alloc(length, kArenaAllocDFInfo));
strncpy(ret, str.c_str(), length);
return ret;
}
@@ -1157,7 +1157,7 @@ void MIRGraph::DumpMIRGraph() {
CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
bool is_range) {
CallInfo* info = static_cast<CallInfo*>(arena_->Alloc(sizeof(CallInfo),
- ArenaAllocator::kAllocMisc));
+ kArenaAllocMisc));
MIR* move_result_mir = FindMoveResult(bb, mir);
if (move_result_mir == NULL) {
info->result.location = kLocInvalid;
@@ -1167,7 +1167,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
}
info->num_arg_words = mir->ssa_rep->num_uses;
info->args = (info->num_arg_words == 0) ? NULL : static_cast<RegLocation*>
- (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, ArenaAllocator::kAllocMisc));
+ (arena_->Alloc(sizeof(RegLocation) * info->num_arg_words, kArenaAllocMisc));
for (int i = 0; i < info->num_arg_words; i++) {
info->args[i] = GetRawSrc(mir, i);
}
@@ -1182,7 +1182,7 @@ CallInfo* MIRGraph::NewMemCallInfo(BasicBlock* bb, MIR* mir, InvokeType type,
// Allocate a new basic block.
BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
BasicBlock* bb = static_cast<BasicBlock*>(arena_->Alloc(sizeof(BasicBlock),
- ArenaAllocator::kAllocBB));
+ kArenaAllocBB));
bb->block_type = block_type;
bb->id = block_id;
// TUNING: better estimate of the exit block predecessors?
@@ -1196,7 +1196,7 @@ BasicBlock* MIRGraph::NewMemBB(BBType block_type, int block_id) {
void MIRGraph::InitializeConstantPropagation() {
is_constant_v_ = new (arena_) ArenaBitVector(arena_, GetNumSSARegs(), false);
- constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(), ArenaAllocator::kAllocDFInfo));
+ constant_values_ = static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumSSARegs(), kArenaAllocDFInfo));
}
void MIRGraph::InitializeMethodUses() {
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 1eb9ef9bef..85d6d894b0 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -457,7 +457,7 @@ class MIRGraph {
void EnableOpcodeCounting() {
opcode_count_ = static_cast<int*>(arena_->Alloc(kNumPackedOpcodes * sizeof(int),
- ArenaAllocator::kAllocMisc));
+ kArenaAllocMisc));
}
void ShowOpcodeStats();
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 243452e968..03fc091e4d 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -245,7 +245,7 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
}
CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
// Create the type of temp requested. Special temps need special handling because
// they have a specific virtual register assignment.
@@ -313,7 +313,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
bool use_lvn = bb->use_lvn;
UniquePtr<LocalValueNumbering> local_valnum;
if (use_lvn) {
- local_valnum.reset(new LocalValueNumbering(cu_));
+ local_valnum.reset(LocalValueNumbering::Create(cu_));
}
while (bb != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
@@ -479,7 +479,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
DCHECK_EQ(SelectKind(if_true), kSelectMove);
DCHECK_EQ(SelectKind(if_false), kSelectMove);
int* src_ssa =
- static_cast<int*>(arena_->Alloc(sizeof(int) * 3, ArenaAllocator::kAllocDFInfo));
+ static_cast<int*>(arena_->Alloc(sizeof(int) * 3, kArenaAllocDFInfo));
src_ssa[0] = mir->ssa_rep->uses[0];
src_ssa[1] = if_true->ssa_rep->uses[0];
src_ssa[2] = if_false->ssa_rep->uses[0];
@@ -488,14 +488,14 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
mir->ssa_rep->num_defs = 1;
mir->ssa_rep->defs =
- static_cast<int*>(arena_->Alloc(sizeof(int) * 1, ArenaAllocator::kAllocDFInfo));
+ static_cast<int*>(arena_->Alloc(sizeof(int) * 1, kArenaAllocDFInfo));
mir->ssa_rep->fp_def =
- static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, ArenaAllocator::kAllocDFInfo));
+ static_cast<bool*>(arena_->Alloc(sizeof(bool) * 1, kArenaAllocDFInfo));
mir->ssa_rep->fp_def[0] = if_true->ssa_rep->fp_def[0];
// Match type of uses to def.
mir->ssa_rep->fp_use =
static_cast<bool*>(arena_->Alloc(sizeof(bool) * mir->ssa_rep->num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
mir->ssa_rep->fp_use[i] = mir->ssa_rep->fp_def[0];
}
@@ -878,7 +878,7 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
void MIRGraph::DumpCheckStats() {
Checkstats* stats =
- static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), ArenaAllocator::kAllocDFInfo));
+ static_cast<Checkstats*>(arena_->Alloc(sizeof(Checkstats), kArenaAllocDFInfo));
checkstats_ = stats;
AllNodesIterator iter(this);
for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index f426055068..0fce5bbb3d 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -50,12 +50,12 @@ void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ kArenaAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -99,12 +99,12 @@ void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
}
// Add the table to the list - we'll process it later
SwitchTable *tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint32_t size = table[1];
tab_rec->targets =
- static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
+ static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -152,7 +152,7 @@ void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
FillArrayData *tab_rec =
- static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
+ static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint16_t width = tab_rec->table[1];
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index ab1a053489..01d669b90c 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -554,13 +554,13 @@ void ArmMir2Lir::CompilerInitializeRegAlloc() {
int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
reg_pool_->num_core_regs = num_regs;
reg_pool_->core_regs = reinterpret_cast<RegisterInfo*>
- (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), ArenaAllocator::kAllocRegAlloc));
+ (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), kArenaAllocRegAlloc));
reg_pool_->num_fp_regs = num_fp_regs;
reg_pool_->FPRegs = static_cast<RegisterInfo*>
- (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), ArenaAllocator::kAllocRegAlloc));
+ (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), kArenaAllocRegAlloc));
CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
// Keep special registers from being allocated
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 14469b61c3..34d3834682 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -358,7 +358,7 @@ LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
/* Add the constant to the literal pool */
if (constant_list_p) {
- LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocData));
+ LIR* new_value = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocData));
new_value->operands[0] = value;
new_value->next = *constant_list_p;
*constant_list_p = new_value;
@@ -829,7 +829,7 @@ LIR* Mir2Lir::InsertCaseLabel(DexOffset vaddr, int keyVal) {
LIR* res = boundary_lir;
if (cu_->verbose) {
// Only pay the expense if we're pretty-printing.
- LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
+ LIR* new_label = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
new_label->dalvik_offset = vaddr;
new_label->opcode = kPseudoCaseLabel;
new_label->operands[0] = keyVal;
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 7a2dce13dc..6df91e674a 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -248,7 +248,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
/* Only sink store instructions */
if (sink_distance && !is_this_lir_load) {
LIR* new_store_lir =
- static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
+ static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
*new_store_lir = *this_lir;
/*
* Stop point found - insert *before* the check_lir
@@ -445,7 +445,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
if (slot >= 0) {
LIR* cur_lir = prev_inst_list[slot];
LIR* new_load_lir =
- static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
+ static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
*new_load_lir = *this_lir;
/*
* Insertion is guaranteed to succeed since check_lir
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 88f46fd59a..234299e472 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -68,12 +68,12 @@ void MipsMir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset,
}
// Add the table to the list - we'll process it later
SwitchTable* tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int elements = table[1];
tab_rec->targets =
- static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), ArenaAllocator::kAllocLIR));
+ static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), kArenaAllocLIR));
switch_tables_.Insert(tab_rec);
// The table is composed of 8-byte key/disp pairs
@@ -146,12 +146,12 @@ void MipsMir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
}
// Add the table to the list - we'll process it later
SwitchTable* tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ kArenaAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -226,7 +226,7 @@ void MipsMir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
// Add the table to the list - we'll process it later
FillArrayData* tab_rec =
reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
- ArenaAllocator::kAllocData));
+ kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint16_t width = tab_rec->table[1];
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index 85c250da0f..4f495ee0fc 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -467,13 +467,13 @@ void MipsMir2Lir::CompilerInitializeRegAlloc() {
int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
reg_pool_->num_core_regs = num_regs;
reg_pool_->core_regs = static_cast<RegisterInfo*>
- (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), ArenaAllocator::kAllocRegAlloc));
+ (arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs), kArenaAllocRegAlloc));
reg_pool_->num_fp_regs = num_fp_regs;
reg_pool_->FPRegs = static_cast<RegisterInfo*>
- (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), ArenaAllocator::kAllocRegAlloc));
+ (arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs), kArenaAllocRegAlloc));
CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
// Keep special registers from being allocated
diff --git a/compiler/dex/quick/mir_to_lir-inl.h b/compiler/dex/quick/mir_to_lir-inl.h
index c2d12f6481..8b1f81d47f 100644
--- a/compiler/dex/quick/mir_to_lir-inl.h
+++ b/compiler/dex/quick/mir_to_lir-inl.h
@@ -45,7 +45,7 @@ inline void Mir2Lir::ClobberBody(RegisterInfo* p) {
inline LIR* Mir2Lir::RawLIR(DexOffset dalvik_offset, int opcode, int op0,
int op1, int op2, int op3, int op4, LIR* target) {
- LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), ArenaAllocator::kAllocLIR));
+ LIR* insn = static_cast<LIR*>(arena_->Alloc(sizeof(LIR), kArenaAllocLIR));
insn->dalvik_offset = dalvik_offset;
insn->opcode = opcode;
insn->operands[0] = op0;
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index d9b241e864..40ed5ef535 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -1066,7 +1066,7 @@ void Mir2Lir::MethodMIR2LIR() {
// Hold the labels of each block.
block_label_list_ =
static_cast<LIR*>(arena_->Alloc(sizeof(LIR) * mir_graph_->GetNumBlocks(),
- ArenaAllocator::kAllocLIR));
+ kArenaAllocLIR));
PreOrderDfsIterator iter(mir_graph_);
BasicBlock* curr_bb = iter.Next();
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 9e0e29995e..6955577670 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -311,7 +311,7 @@ class Mir2Lir : public Backend {
virtual void Compile() = 0;
static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(size, ArenaAllocator::kAllocData);
+ return arena->Alloc(size, kArenaAllocData);
}
protected:
@@ -363,7 +363,7 @@ class Mir2Lir : public Backend {
// strdup(), but allocates from the arena.
char* ArenaStrdup(const char* str) {
size_t len = strlen(str) + 1;
- char* res = reinterpret_cast<char*>(arena_->Alloc(len, ArenaAllocator::kAllocMisc));
+ char* res = reinterpret_cast<char*>(arena_->Alloc(len, kArenaAllocMisc));
if (res != NULL) {
strncpy(res, str, len);
}
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 3a8942e46e..3cb6fd01c1 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -907,7 +907,7 @@ void Mir2Lir::DoPromotion() {
const int promotion_threshold = 1;
// Allocate the promotion map - one entry for each Dalvik vReg or compiler temp
promotion_map_ = static_cast<PromotionMap*>
- (arena_->Alloc(num_regs * sizeof(promotion_map_[0]), ArenaAllocator::kAllocRegAlloc));
+ (arena_->Alloc(num_regs * sizeof(promotion_map_[0]), kArenaAllocRegAlloc));
// Allow target code to add any special registers
AdjustSpillMask();
@@ -925,10 +925,10 @@ void Mir2Lir::DoPromotion() {
*/
RefCounts *core_regs =
static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * num_regs,
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
RefCounts *FpRegs =
static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs * 2,
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
// Set ssa names for original Dalvik registers
for (int i = 0; i < dalvik_regs; i++) {
core_regs[i].s_reg = FpRegs[i].s_reg = i;
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index c92d2bb730..577f216f5e 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -69,12 +69,12 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
}
// Add the table to the list - we'll process it later
SwitchTable* tab_rec =
- static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
+ static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
int size = table[1];
tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
- ArenaAllocator::kAllocLIR));
+ kArenaAllocLIR));
switch_tables_.Insert(tab_rec);
// Get the switch value
@@ -134,7 +134,7 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
// Add the table to the list - we'll process it later
FillArrayData* tab_rec =
- static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
+ static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
tab_rec->table = table;
tab_rec->vaddr = current_dalvik_offset_;
uint16_t width = tab_rec->table[1];
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 78a216923f..083fccb2b4 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -457,15 +457,15 @@ void X86Mir2Lir::CompilerInitializeRegAlloc() {
int num_fp_regs = sizeof(FpRegs)/sizeof(*FpRegs);
int num_fp_temps = sizeof(fp_temps)/sizeof(*fp_temps);
reg_pool_ = static_cast<RegisterPool*>(arena_->Alloc(sizeof(*reg_pool_),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
reg_pool_->num_core_regs = num_regs;
reg_pool_->core_regs =
static_cast<RegisterInfo*>(arena_->Alloc(num_regs * sizeof(*reg_pool_->core_regs),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
reg_pool_->num_fp_regs = num_fp_regs;
reg_pool_->FPRegs =
static_cast<RegisterInfo *>(arena_->Alloc(num_fp_regs * sizeof(*reg_pool_->FPRegs),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
CompilerInitPool(reg_pool_->core_regs, core_regs, reg_pool_->num_core_regs);
CompilerInitPool(reg_pool_->FPRegs, FpRegs, reg_pool_->num_fp_regs);
// Keep special registers from being allocated
diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc
index 4e258ef7c7..8091528809 100644
--- a/compiler/dex/ssa_transformation.cc
+++ b/compiler/dex/ssa_transformation.cc
@@ -144,7 +144,7 @@ void MIRGraph::ComputeDefBlockMatrix() {
/* Allocate num_dalvik_registers bit vector pointers */
def_block_matrix_ = static_cast<ArenaBitVector**>
(arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
int i;
/* Initialize num_register vectors with num_blocks bits each */
@@ -384,7 +384,7 @@ void MIRGraph::ComputeDominators() {
/* Initalize & Clear i_dom_list */
if (i_dom_list_ == NULL) {
i_dom_list_ = static_cast<int*>(arena_->Alloc(sizeof(int) * num_reachable_blocks,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
}
for (int i = 0; i < num_reachable_blocks; i++) {
i_dom_list_[i] = NOTVISITED;
@@ -565,7 +565,7 @@ void MIRGraph::InsertPhiNodes() {
continue;
}
MIR *phi =
- static_cast<MIR*>(arena_->Alloc(sizeof(MIR), ArenaAllocator::kAllocDFInfo));
+ static_cast<MIR*>(arena_->Alloc(sizeof(MIR), kArenaAllocDFInfo));
phi->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpPhi);
phi->dalvikInsn.vA = dalvik_reg;
phi->offset = phi_bb->start_offset;
@@ -593,13 +593,13 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) {
size_t num_uses = bb->predecessors->Size();
mir->ssa_rep->num_uses = num_uses;
int* uses = static_cast<int*>(arena_->Alloc(sizeof(int) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
mir->ssa_rep->uses = uses;
mir->ssa_rep->fp_use =
- static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, ArenaAllocator::kAllocDFInfo));
+ static_cast<bool*>(arena_->Alloc(sizeof(bool) * num_uses, kArenaAllocDFInfo));
BasicBlockId* incoming =
static_cast<BasicBlockId*>(arena_->Alloc(sizeof(BasicBlockId) * num_uses,
- ArenaAllocator::kAllocDFInfo));
+ kArenaAllocDFInfo));
mir->meta.phi_incoming = incoming;
int idx = 0;
while (true) {
@@ -629,7 +629,7 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
/* Save SSA map snapshot */
int* saved_ssa_map =
- static_cast<int*>(arena_->Alloc(map_size, ArenaAllocator::kAllocDalvikToSSAMap));
+ static_cast<int*>(arena_->Alloc(map_size, kArenaAllocDalvikToSSAMap));
memcpy(saved_ssa_map, vreg_to_ssa_map_, map_size);
if (block->fall_through != NullBasicBlockId) {
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index 4d2c05166b..876973625d 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -410,7 +410,7 @@ void MIRGraph::InitRegLocations() {
/* Allocate the location map */
int max_regs = GetNumSSARegs() + GetMaxPossibleCompilerTemps();
RegLocation* loc = static_cast<RegLocation*>(arena_->Alloc(max_regs * sizeof(*loc),
- ArenaAllocator::kAllocRegAlloc));
+ kArenaAllocRegAlloc));
for (int i = 0; i < GetNumSSARegs(); i++) {
loc[i] = fresh_loc;
loc[i].s_reg_low = i;
diff --git a/compiler/utils/allocation.h b/compiler/utils/allocation.h
index 07cd39788e..b0947cac68 100644
--- a/compiler/utils/allocation.h
+++ b/compiler/utils/allocation.h
@@ -26,7 +26,7 @@ class ArenaObject {
public:
// Allocate a new ArenaObject of 'size' bytes in the Arena.
void* operator new(size_t size, ArenaAllocator* allocator) {
- return allocator->Alloc(size, ArenaAllocator::kAllocMisc);
+ return allocator->Alloc(size, kArenaAllocMisc);
}
void operator delete(void*, size_t) {
diff --git a/compiler/utils/arena_allocator.cc b/compiler/utils/arena_allocator.cc
index 00c3c578df..365b094e95 100644
--- a/compiler/utils/arena_allocator.cc
+++ b/compiler/utils/arena_allocator.cc
@@ -14,6 +14,9 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <numeric>
+
#include "arena_allocator.h"
#include "base/logging.h"
#include "base/mutex.h"
@@ -28,7 +31,7 @@ static constexpr bool kUseMemSet = true && kUseMemMap;
static constexpr size_t kValgrindRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
-static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
+static const char* alloc_names[kNumArenaAllocKinds] = {
"Misc ",
"BasicBlock ",
"LIR ",
@@ -42,8 +45,69 @@ static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
"RegAlloc ",
"Data ",
"Preds ",
+ "STL ",
};
+template <bool kCount>
+ArenaAllocatorStatsImpl<kCount>::ArenaAllocatorStatsImpl()
+ : num_allocations_(0u) {
+ std::fill_n(alloc_stats_, arraysize(alloc_stats_), 0u);
+}
+
+template <bool kCount>
+void ArenaAllocatorStatsImpl<kCount>::Copy(const ArenaAllocatorStatsImpl& other) {
+ num_allocations_ = other.num_allocations_;
+ std::copy(other.alloc_stats_, other.alloc_stats_ + arraysize(alloc_stats_), alloc_stats_);
+}
+
+template <bool kCount>
+void ArenaAllocatorStatsImpl<kCount>::RecordAlloc(size_t bytes, ArenaAllocKind kind) {
+ alloc_stats_[kind] += bytes;
+ ++num_allocations_;
+}
+
+template <bool kCount>
+size_t ArenaAllocatorStatsImpl<kCount>::NumAllocations() const {
+ return num_allocations_;
+}
+
+template <bool kCount>
+size_t ArenaAllocatorStatsImpl<kCount>::BytesAllocated() const {
+ const size_t init = 0u; // Initial value of the correct type.
+ return std::accumulate(alloc_stats_, alloc_stats_ + arraysize(alloc_stats_), init);
+}
+
+template <bool kCount>
+void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
+ ssize_t lost_bytes_adjustment) const {
+ size_t malloc_bytes = 0u;
+ size_t lost_bytes = 0u;
+ size_t num_arenas = 0u;
+ for (const Arena* arena = first; arena != nullptr; arena = arena->next_) {
+ malloc_bytes += arena->Size();
+ lost_bytes += arena->RemainingSpace();
+ ++num_arenas;
+ }
+ // The lost_bytes_adjustment is used to make up for the fact that the current arena
+ // may not have the bytes_allocated_ updated correctly.
+ lost_bytes += lost_bytes_adjustment;
+ const size_t bytes_allocated = BytesAllocated();
+ os << " MEM: used: " << bytes_allocated << ", allocated: " << malloc_bytes
+ << ", lost: " << lost_bytes << "\n";
+ size_t num_allocations = ArenaAllocatorStats::NumAllocations();
+ if (num_allocations != 0) {
+ os << "Number of arenas allocated: " << num_arenas << ", Number of allocations: "
+ << num_allocations << ", avg size: " << bytes_allocated / num_allocations << "\n";
+ }
+ os << "===== Allocation by kind\n";
+ for (int i = 0; i < kNumArenaAllocKinds; i++) {
+ os << alloc_names[i] << std::setw(10) << alloc_stats_[i] << "\n";
+ }
+}
+
+// Explicitly instantiate the used implementation.
+template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
+
Arena::Arena(size_t size)
: bytes_allocated_(0),
map_(nullptr),
@@ -110,24 +174,26 @@ Arena* ArenaPool::AllocArena(size_t size) {
return ret;
}
-void ArenaPool::FreeArena(Arena* arena) {
- Thread* self = Thread::Current();
+void ArenaPool::FreeArenaChain(Arena* first) {
if (UNLIKELY(RUNNING_ON_VALGRIND > 0)) {
- VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+ for (Arena* arena = first; arena != nullptr; arena = arena->next_) {
+ VALGRIND_MAKE_MEM_UNDEFINED(arena->memory_, arena->bytes_allocated_);
+ }
}
- {
+ if (first != nullptr) {
+ Arena* last = first;
+ while (last->next_ != nullptr) {
+ last = last->next_;
+ }
+ Thread* self = Thread::Current();
MutexLock lock(self, lock_);
- arena->next_ = free_arenas_;
- free_arenas_ = arena;
+ last->next_ = free_arenas_;
+ free_arenas_ = first;
}
}
size_t ArenaAllocator::BytesAllocated() const {
- size_t total = 0;
- for (int i = 0; i < kNumAllocKinds; i++) {
- total += alloc_stats_[i];
- }
- return total;
+ return ArenaAllocatorStats::BytesAllocated();
}
ArenaAllocator::ArenaAllocator(ArenaPool* pool)
@@ -136,9 +202,7 @@ ArenaAllocator::ArenaAllocator(ArenaPool* pool)
end_(nullptr),
ptr_(nullptr),
arena_head_(nullptr),
- num_allocations_(0),
running_on_valgrind_(RUNNING_ON_VALGRIND > 0) {
- memset(&alloc_stats_[0], 0, sizeof(alloc_stats_));
}
void ArenaAllocator::UpdateBytesAllocated() {
@@ -158,10 +222,7 @@ void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
return nullptr;
}
}
- if (kCountAllocations) {
- alloc_stats_[kind] += rounded_bytes;
- ++num_allocations_;
- }
+ ArenaAllocatorStats::RecordAlloc(rounded_bytes, kind);
uint8_t* ret = ptr_;
ptr_ += rounded_bytes;
// Check that the memory is already zeroed out.
@@ -175,11 +236,7 @@ void* ArenaAllocator::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
ArenaAllocator::~ArenaAllocator() {
// Reclaim all the arenas by giving them back to the thread pool.
UpdateBytesAllocated();
- while (arena_head_ != nullptr) {
- Arena* arena = arena_head_;
- arena_head_ = arena_head_->next_;
- pool_->FreeArena(arena);
- }
+ pool_->FreeArenaChain(arena_head_);
}
void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
@@ -192,30 +249,24 @@ void ArenaAllocator::ObtainNewArenaForAllocation(size_t allocation_size) {
end_ = new_arena->End();
}
+MemStats::MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
+ ssize_t lost_bytes_adjustment)
+ : name_(name),
+ stats_(stats),
+ first_arena_(first_arena),
+ lost_bytes_adjustment_(lost_bytes_adjustment) {
+}
+
+void MemStats::Dump(std::ostream& os) const {
+ os << name_ << " stats:\n";
+ stats_->Dump(os, first_arena_, lost_bytes_adjustment_);
+}
+
// Dump memory usage stats.
-void ArenaAllocator::DumpMemStats(std::ostream& os) const {
- size_t malloc_bytes = 0;
- // Start out with how many lost bytes we have in the arena we are currently allocating into.
- size_t lost_bytes(end_ - ptr_);
- size_t num_arenas = 0;
- for (Arena* arena = arena_head_; arena != nullptr; arena = arena->next_) {
- malloc_bytes += arena->Size();
- if (arena != arena_head_) {
- lost_bytes += arena->RemainingSpace();
- }
- ++num_arenas;
- }
- const size_t bytes_allocated = BytesAllocated();
- os << " MEM: used: " << bytes_allocated << ", allocated: " << malloc_bytes
- << ", lost: " << lost_bytes << "\n";
- if (num_allocations_ != 0) {
- os << "Number of arenas allocated: " << num_arenas << ", Number of allocations: "
- << num_allocations_ << ", avg size: " << bytes_allocated / num_allocations_ << "\n";
- }
- os << "===== Allocation by kind\n";
- for (int i = 0; i < kNumAllocKinds; i++) {
- os << alloc_names[i] << std::setw(10) << alloc_stats_[i] << "\n";
- }
+MemStats ArenaAllocator::GetMemStats() const {
+ ssize_t lost_bytes_adjustment =
+ (arena_head_ == nullptr) ? 0 : (end_ - ptr_) - arena_head_->RemainingSpace();
+ return MemStats("ArenaAllocator", this, arena_head_, lost_bytes_adjustment);
}
} // namespace art
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index 56cedfefd5..a6b74f77f5 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <stddef.h>
+#include "base/macros.h"
#include "base/mutex.h"
#include "mem_map.h"
@@ -28,6 +29,70 @@ namespace art {
class Arena;
class ArenaPool;
class ArenaAllocator;
+class ArenaStack;
+class ScopedArenaAllocator;
+class MemStats;
+
+static constexpr bool kArenaAllocatorCountAllocations = false;
+
+// Type of allocation for memory tuning.
+enum ArenaAllocKind {
+ kArenaAllocMisc,
+ kArenaAllocBB,
+ kArenaAllocLIR,
+ kArenaAllocMIR,
+ kArenaAllocDFInfo,
+ kArenaAllocGrowableArray,
+ kArenaAllocGrowableBitMap,
+ kArenaAllocDalvikToSSAMap,
+ kArenaAllocDebugInfo,
+ kArenaAllocSuccessor,
+ kArenaAllocRegAlloc,
+ kArenaAllocData,
+ kArenaAllocPredecessors,
+ kArenaAllocSTL,
+ kNumArenaAllocKinds
+};
+
+template <bool kCount>
+class ArenaAllocatorStatsImpl;
+
+template <>
+class ArenaAllocatorStatsImpl<false> {
+ public:
+ ArenaAllocatorStatsImpl() = default;
+ ArenaAllocatorStatsImpl(const ArenaAllocatorStatsImpl& other) = default;
+ ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
+
+ void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
+ void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes); UNUSED(kind); }
+ size_t NumAllocations() const { return 0u; }
+ size_t BytesAllocated() const { return 0u; }
+ void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
+ UNUSED(os); UNUSED(first); UNUSED(lost_bytes_adjustment);
+ }
+};
+
+template <bool kCount>
+class ArenaAllocatorStatsImpl {
+ public:
+ ArenaAllocatorStatsImpl();
+ ArenaAllocatorStatsImpl(const ArenaAllocatorStatsImpl& other) = default;
+ ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
+
+ void Copy(const ArenaAllocatorStatsImpl& other);
+ void RecordAlloc(size_t bytes, ArenaAllocKind kind);
+ size_t NumAllocations() const;
+ size_t BytesAllocated() const;
+ void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const;
+
+ private:
+ size_t num_allocations_;
+ // TODO: Use std::array<size_t, kNumArenaAllocKinds> from C++11 when we upgrade the STL.
+ size_t alloc_stats_[kNumArenaAllocKinds]; // Bytes used by various allocation kinds.
+};
+
+typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorStats;
class Arena {
public:
@@ -59,6 +124,9 @@ class Arena {
Arena* next_;
friend class ArenaPool;
friend class ArenaAllocator;
+ friend class ArenaStack;
+ friend class ScopedArenaAllocator;
+ template <bool kCount> friend class ArenaAllocatorStatsImpl;
DISALLOW_COPY_AND_ASSIGN(Arena);
};
@@ -67,7 +135,7 @@ class ArenaPool {
ArenaPool();
~ArenaPool();
Arena* AllocArena(size_t size);
- void FreeArena(Arena* arena);
+ void FreeArenaChain(Arena* first);
private:
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
@@ -75,28 +143,8 @@ class ArenaPool {
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
};
-class ArenaAllocator {
+class ArenaAllocator : private ArenaAllocatorStats {
public:
- // Type of allocation for memory tuning.
- enum ArenaAllocKind {
- kAllocMisc,
- kAllocBB,
- kAllocLIR,
- kAllocMIR,
- kAllocDFInfo,
- kAllocGrowableArray,
- kAllocGrowableBitMap,
- kAllocDalvikToSSAMap,
- kAllocDebugInfo,
- kAllocSuccessor,
- kAllocRegAlloc,
- kAllocData,
- kAllocPredecessors,
- kNumAllocKinds
- };
-
- static constexpr bool kCountAllocations = false;
-
explicit ArenaAllocator(ArenaPool* pool);
~ArenaAllocator();
@@ -113,10 +161,7 @@ class ArenaAllocator {
return nullptr;
}
}
- if (kCountAllocations) {
- alloc_stats_[kind] += bytes;
- ++num_allocations_;
- }
+ ArenaAllocatorStats::RecordAlloc(bytes, kind);
uint8_t* ret = ptr_;
ptr_ += bytes;
return ret;
@@ -125,7 +170,7 @@ class ArenaAllocator {
void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
void ObtainNewArenaForAllocation(size_t allocation_size);
size_t BytesAllocated() const;
- void DumpMemStats(std::ostream& os) const;
+ MemStats GetMemStats() const;
private:
void UpdateBytesAllocated();
@@ -135,21 +180,22 @@ class ArenaAllocator {
uint8_t* end_;
uint8_t* ptr_;
Arena* arena_head_;
- size_t num_allocations_;
- size_t alloc_stats_[kNumAllocKinds]; // Bytes used by various allocation kinds.
bool running_on_valgrind_;
DISALLOW_COPY_AND_ASSIGN(ArenaAllocator);
}; // ArenaAllocator
-struct MemStats {
- public:
- void Dump(std::ostream& os) const {
- arena_.DumpMemStats(os);
- }
- explicit MemStats(const ArenaAllocator &arena) : arena_(arena) {}
- private:
- const ArenaAllocator &arena_;
+class MemStats {
+ public:
+ MemStats(const char* name, const ArenaAllocatorStats* stats, const Arena* first_arena,
+ ssize_t lost_bytes_adjustment = 0);
+ void Dump(std::ostream& os) const;
+
+ private:
+ const char* const name_;
+ const ArenaAllocatorStats* const stats_;
+ const Arena* const first_arena_;
+ const ssize_t lost_bytes_adjustment_;
}; // MemStats
} // namespace art
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index 220ff14baa..eff9778612 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -25,13 +25,13 @@ class ArenaBitVectorAllocator : public Allocator {
~ArenaBitVectorAllocator() {}
virtual void* Alloc(size_t size) {
- return arena_->Alloc(size, ArenaAllocator::kAllocGrowableBitMap);
+ return arena_->Alloc(size, kArenaAllocGrowableBitMap);
}
virtual void Free(void*) {} // Nop.
static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVectorAllocator), ArenaAllocator::kAllocGrowableBitMap);
+ return arena->Alloc(sizeof(ArenaBitVectorAllocator), kArenaAllocGrowableBitMap);
}
static void operator delete(void* p) {} // Nop.
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index 6c1461727a..1a3d6a3e34 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -55,7 +55,7 @@ class ArenaBitVector : public BitVector {
~ArenaBitVector() {}
static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), ArenaAllocator::kAllocGrowableBitMap);
+ return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
}
static void operator delete(void* p) {} // Nop.
diff --git a/compiler/utils/debug_stack.h b/compiler/utils/debug_stack.h
new file mode 100644
index 0000000000..2e02b438b9
--- /dev/null
+++ b/compiler/utils/debug_stack.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DEBUG_STACK_H_
+#define ART_COMPILER_UTILS_DEBUG_STACK_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "globals.h"
+
+namespace art {
+
+// Helper classes for reference counting to enforce construction/destruction order and
+// usage of the top element of a stack in debug mode with no overhead in release mode.
+
+// Reference counter. No references allowed in destructor or in explicitly called CheckNoRefs().
+template <bool kIsDebug>
+class DebugStackRefCounterImpl;
+// Reference. Allows an explicit check that it's the top reference.
+template <bool kIsDebug>
+class DebugStackReferenceImpl;
+// Indirect top reference. Checks that the reference is the top reference when used.
+template <bool kIsDebug>
+class DebugStackIndirectTopRefImpl;
+
+typedef DebugStackRefCounterImpl<kIsDebugBuild> DebugStackRefCounter;
+typedef DebugStackReferenceImpl<kIsDebugBuild> DebugStackReference;
+typedef DebugStackIndirectTopRefImpl<kIsDebugBuild> DebugStackIndirectTopRef;
+
+// Non-debug mode specializations. This should be optimized away.
+
+template <>
+class DebugStackRefCounterImpl<false> {
+ public:
+ size_t IncrementRefCount() { return 0u; }
+ void DecrementRefCount() { }
+ size_t GetRefCount() const { return 0u; }
+ void CheckNoRefs() const { }
+};
+
+template <>
+class DebugStackReferenceImpl<false> {
+ public:
+ explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<false>* counter) { UNUSED(counter); }
+ DebugStackReferenceImpl(const DebugStackReferenceImpl& other) = default;
+ DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) = default;
+ void CheckTop() { }
+};
+
+template <>
+class DebugStackIndirectTopRefImpl<false> {
+ public:
+ explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<false>* ref) { UNUSED(ref); }
+ DebugStackIndirectTopRefImpl(const DebugStackIndirectTopRefImpl& other) = default;
+ DebugStackIndirectTopRefImpl& operator=(const DebugStackIndirectTopRefImpl& other) = default;
+ void CheckTop() { }
+};
+
+// Debug mode versions.
+
+template <bool kIsDebug>
+class DebugStackRefCounterImpl {
+ public:
+ DebugStackRefCounterImpl() : ref_count_(0u) { }
+ ~DebugStackRefCounterImpl() { CheckNoRefs(); }
+ size_t IncrementRefCount() { return ++ref_count_; }
+ void DecrementRefCount() { --ref_count_; }
+ size_t GetRefCount() const { return ref_count_; }
+ void CheckNoRefs() const { CHECK_EQ(ref_count_, 0u); }
+
+ private:
+ size_t ref_count_;
+};
+
+template <bool kIsDebug>
+class DebugStackReferenceImpl {
+ public:
+ explicit DebugStackReferenceImpl(DebugStackRefCounterImpl<kIsDebug>* counter)
+ : counter_(counter), ref_count_(counter->IncrementRefCount()) {
+ }
+ DebugStackReferenceImpl(const DebugStackReferenceImpl& other)
+ : counter_(other.counter_), ref_count_(counter_->IncrementRefCount()) {
+ }
+ DebugStackReferenceImpl& operator=(const DebugStackReferenceImpl& other) {
+ CHECK(counter_ == other.counter_);
+ return *this;
+ }
+ ~DebugStackReferenceImpl() { counter_->DecrementRefCount(); }
+ void CheckTop() { CHECK_EQ(counter_->GetRefCount(), ref_count_); }
+
+ private:
+ DebugStackRefCounterImpl<true>* counter_;
+ size_t ref_count_;
+};
+
+template <bool kIsDebug>
+class DebugStackIndirectTopRefImpl {
+ public:
+ explicit DebugStackIndirectTopRefImpl(DebugStackReferenceImpl<kIsDebug>* ref)
+ : ref_(ref) {
+ CheckTop();
+ }
+ DebugStackIndirectTopRefImpl(const DebugStackIndirectTopRefImpl& other)
+ : ref_(other.ref_) {
+ CheckTop();
+ }
+ DebugStackIndirectTopRefImpl& operator=(const DebugStackIndirectTopRefImpl& other) {
+ CHECK(ref_ == other.ref_);
+ CheckTop();
+ return *this;
+ }
+ ~DebugStackIndirectTopRefImpl() {
+ CheckTop();
+ }
+ void CheckTop() {
+ ref_->CheckTop();
+ }
+
+ private:
+ DebugStackReferenceImpl<kIsDebug>* ref_;
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_DEBUG_STACK_H_
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 82b6a607e7..a7d1f0e5a5 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -75,7 +75,7 @@ class GrowableArray {
num_used_(0),
kind_(kind) {
elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
- ArenaAllocator::kAllocGrowableArray));
+ kArenaAllocGrowableArray));
};
@@ -89,7 +89,7 @@ class GrowableArray {
target_length = new_length;
}
T* new_array = static_cast<T*>(arena_->Alloc(sizeof(T) * target_length,
- ArenaAllocator::kAllocGrowableArray));
+ kArenaAllocGrowableArray));
memcpy(new_array, elem_list_, sizeof(T) * num_allocated_);
num_allocated_ = target_length;
elem_list_ = new_array;
@@ -181,7 +181,7 @@ class GrowableArray {
T* GetRawStorage() const { return elem_list_; }
static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(GrowableArray<T>), ArenaAllocator::kAllocGrowableArray);
+ return arena->Alloc(sizeof(GrowableArray<T>), kArenaAllocGrowableArray);
};
static void operator delete(void* p) {} // Nop.
diff --git a/compiler/utils/scoped_arena_allocator.cc b/compiler/utils/scoped_arena_allocator.cc
new file mode 100644
index 0000000000..ee3b07ebe9
--- /dev/null
+++ b/compiler/utils/scoped_arena_allocator.cc
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "scoped_arena_allocator.h"
+
+#include "utils/arena_allocator.h"
+#include <memcheck/memcheck.h>
+
+namespace art {
+
+static constexpr size_t kValgrindRedZoneBytes = 8;
+
+ArenaStack::ArenaStack(ArenaPool* arena_pool)
+ : DebugStackRefCounter(),
+ stats_and_pool_(arena_pool),
+ bottom_arena_(nullptr),
+ top_arena_(nullptr),
+ top_ptr_(nullptr),
+ top_end_(nullptr),
+ running_on_valgrind_(RUNNING_ON_VALGRIND > 0) {
+}
+
+ArenaStack::~ArenaStack() {
+ stats_and_pool_.pool->FreeArenaChain(bottom_arena_);
+}
+
+MemStats ArenaStack::GetPeakStats() const {
+ DebugStackRefCounter::CheckNoRefs();
+ return MemStats("ArenaStack peak", static_cast<const TaggedStats<Peak>*>(&stats_and_pool_),
+ bottom_arena_);
+}
+
+uint8_t* ArenaStack::AllocateFromNextArena(size_t rounded_bytes) {
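+  // Reuse the next arena in the chain if it is large enough, otherwise splice in a
+  // freshly allocated one; any arenas beyond the new top stay linked for later reuse.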
+ UpdateBytesAllocated();
+ size_t allocation_size = std::max(Arena::kDefaultSize, rounded_bytes);
+ if (UNLIKELY(top_arena_ == nullptr)) {
+ top_arena_ = bottom_arena_ = stats_and_pool_.pool->AllocArena(allocation_size);
+ top_arena_->next_ = nullptr;
+ } else if (top_arena_->next_ != nullptr && top_arena_->next_->Size() >= allocation_size) {
+ top_arena_ = top_arena_->next_;
+ } else {
+ Arena* tail = top_arena_->next_;
+ top_arena_->next_ = stats_and_pool_.pool->AllocArena(allocation_size);
+ top_arena_ = top_arena_->next_;
+ top_arena_->next_ = tail;
+ }
+ top_end_ = top_arena_->End();
+ // top_ptr_ shall be updated by ScopedArenaAllocator.
+ return top_arena_->Begin();
+}
+
+void ArenaStack::UpdatePeakStatsAndRestore(const ArenaAllocatorStats& restore_stats) {
+ if (PeakStats()->BytesAllocated() < CurrentStats()->BytesAllocated()) {
+ PeakStats()->Copy(*CurrentStats());
+ }
+ CurrentStats()->Copy(restore_stats);
+}
+
+void ArenaStack::UpdateBytesAllocated() {
+ if (top_arena_ != nullptr) {
+ // Update how many bytes we have allocated into the arena so that the arena pool knows how
+ // much memory to zero out. Though ScopedArenaAllocator doesn't guarantee the memory is
+ // zero-initialized, the Arena may be reused by ArenaAllocator which does guarantee this.
+ size_t allocated = static_cast<size_t>(top_ptr_ - top_arena_->Begin());
+ if (top_arena_->bytes_allocated_ < allocated) {
+ top_arena_->bytes_allocated_ = allocated;
+ }
+ }
+}
+
+void* ArenaStack::AllocValgrind(size_t bytes, ArenaAllocKind kind) {
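+  // Reserve a red zone behind the caller-visible bytes and round up to 4-byte
+  // granularity; the red zone is marked inaccessible below so Valgrind can flag overruns.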
+ size_t rounded_bytes = (bytes + kValgrindRedZoneBytes + 3) & ~3;
+ uint8_t* ptr = top_ptr_;
+ if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
+ ptr = AllocateFromNextArena(rounded_bytes);
+ }
+ CurrentStats()->RecordAlloc(bytes, kind);
+ top_ptr_ = ptr + rounded_bytes;
+ VALGRIND_MAKE_MEM_NOACCESS(ptr + bytes, rounded_bytes - bytes);
+ return ptr;
+}
+
+ScopedArenaAllocator::ScopedArenaAllocator(ArenaStack* arena_stack)
+ : DebugStackReference(arena_stack),
+ DebugStackRefCounter(),
+ ArenaAllocatorStats(*arena_stack->CurrentStats()),
+ arena_stack_(arena_stack),
+ mark_arena_(arena_stack->top_arena_),
+ mark_ptr_(arena_stack->top_ptr_),
+ mark_end_(arena_stack->top_end_) {
+}
+
+ScopedArenaAllocator::~ScopedArenaAllocator() {
+ Reset();
+}
+
+void ScopedArenaAllocator::Reset() {
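+  // Rewind the arena stack to the state captured at construction. If no arena existed
+  // back then (mark_arena_ == nullptr) but one does now, rewind to the start of the
+  // bottom arena and remember it as the new mark.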
+ DebugStackReference::CheckTop();
+ DebugStackRefCounter::CheckNoRefs();
+ arena_stack_->UpdatePeakStatsAndRestore(*this);
+ arena_stack_->UpdateBytesAllocated();
+ if (LIKELY(mark_arena_ != nullptr)) {
+ arena_stack_->top_arena_ = mark_arena_;
+ arena_stack_->top_ptr_ = mark_ptr_;
+ arena_stack_->top_end_ = mark_end_;
+ } else if (arena_stack_->bottom_arena_ != nullptr) {
+ mark_arena_ = arena_stack_->top_arena_ = arena_stack_->bottom_arena_;
+ mark_ptr_ = arena_stack_->top_ptr_ = mark_arena_->Begin();
+ mark_end_ = arena_stack_->top_end_ = mark_arena_->End();
+ }
+}
+
+} // namespace art
diff --git a/compiler/utils/scoped_arena_allocator.h b/compiler/utils/scoped_arena_allocator.h
new file mode 100644
index 0000000000..24a8afea6e
--- /dev/null
+++ b/compiler/utils/scoped_arena_allocator.h
@@ -0,0 +1,244 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
+#define ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "utils/arena_allocator.h"
+#include "utils/debug_stack.h"
+#include "globals.h"
+
+namespace art {
+
+class ArenaStack;
+class ScopedArenaAllocator;
+
+template <typename T>
+class ScopedArenaAllocatorAdapter;
+
+// Holds a list of Arenas for use by ScopedArenaAllocator stack.
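+// A ScopedArenaAllocator nests on top of the current top of this stack; destroying
+// (or Reset()ing) it rewinds the stack to the position recorded when it was created,
+// so allocators must be destroyed in strict LIFO order (enforced in debug builds).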
+class ArenaStack : private DebugStackRefCounter {
+ public:
+ explicit ArenaStack(ArenaPool* arena_pool);
+ ~ArenaStack();
+
+ size_t PeakBytesAllocated() {
+ return PeakStats()->BytesAllocated();
+ }
+
+ MemStats GetPeakStats() const;
+
+ private:
+ struct Peak;
+ struct Current;
+ template <typename Tag> struct TaggedStats : ArenaAllocatorStats { };
+ struct StatsAndPool : TaggedStats<Peak>, TaggedStats<Current> {
+ explicit StatsAndPool(ArenaPool* arena_pool) : pool(arena_pool) { }
+ ArenaPool* const pool;
+ };
+
+ ArenaAllocatorStats* PeakStats() {
+ return static_cast<TaggedStats<Peak>*>(&stats_and_pool_);
+ }
+
+ ArenaAllocatorStats* CurrentStats() {
+ return static_cast<TaggedStats<Current>*>(&stats_and_pool_);
+ }
+
+ // Private - access via ScopedArenaAllocator or ScopedArenaAllocatorAdapter.
+ void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
+ if (UNLIKELY(running_on_valgrind_)) {
+ return AllocValgrind(bytes, kind);
+ }
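+    // Round up to the 4-byte allocation granularity used throughout the arena stack.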
+ size_t rounded_bytes = (bytes + 3) & ~3;
+ uint8_t* ptr = top_ptr_;
+ if (UNLIKELY(static_cast<size_t>(top_end_ - ptr) < rounded_bytes)) {
+ ptr = AllocateFromNextArena(rounded_bytes);
+ }
+ CurrentStats()->RecordAlloc(bytes, kind);
+ top_ptr_ = ptr + rounded_bytes;
+ return ptr;
+ }
+
+ uint8_t* AllocateFromNextArena(size_t rounded_bytes);
+ void UpdatePeakStatsAndRestore(const ArenaAllocatorStats& restore_stats);
+ void UpdateBytesAllocated();
+ void* AllocValgrind(size_t bytes, ArenaAllocKind kind);
+
+ StatsAndPool stats_and_pool_;
+ Arena* bottom_arena_;
+ Arena* top_arena_;
+ uint8_t* top_ptr_;
+ uint8_t* top_end_;
+
+ const bool running_on_valgrind_;
+
+ friend class ScopedArenaAllocator;
+ template <typename T>
+ friend class ScopedArenaAllocatorAdapter;
+
+ DISALLOW_COPY_AND_ASSIGN(ArenaStack);
+};
+
+class ScopedArenaAllocator
+ : private DebugStackReference, private DebugStackRefCounter, private ArenaAllocatorStats {
+ public:
+ // Create a ScopedArenaAllocator directly on the ArenaStack when the scope of
+ // the allocator is not exactly a C++ block scope. For example, an optimization
+ // pass can create the scoped allocator in Start() and destroy it in End().
+ static ScopedArenaAllocator* Create(ArenaStack* arena_stack) {
+ void* addr = arena_stack->Alloc(sizeof(ScopedArenaAllocator), kArenaAllocMisc);
+ ScopedArenaAllocator* allocator = new(addr) ScopedArenaAllocator(arena_stack);
+ allocator->mark_ptr_ = reinterpret_cast<uint8_t*>(addr);
+ return allocator;
+ }
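+  // Illustrative use of Create() (names are hypothetical, not part of this patch):
+  //
+  //   void MyPass::Start() { allocator_ = ScopedArenaAllocator::Create(&cu_->arena_stack); }
+  //   void MyPass::End() { delete allocator_; }  // Runs Reset(); operator delete is a no-op.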
+
+ explicit ScopedArenaAllocator(ArenaStack* arena_stack);
+ ~ScopedArenaAllocator();
+
+ void Reset();
+
+ void* Alloc(size_t bytes, ArenaAllocKind kind) ALWAYS_INLINE {
+ DebugStackReference::CheckTop();
+ return arena_stack_->Alloc(bytes, kind);
+ }
+
+  // ScopedArenaAllocatorAdapter is still incomplete here; Adapter() is defined below, after the adapter.
+ ScopedArenaAllocatorAdapter<void> Adapter();
+
+ // Allow a delete-expression to destroy but not deallocate allocators created by Create().
+ static void operator delete(void* ptr) { UNUSED(ptr); }
+
+ private:
+ ArenaStack* const arena_stack_;
+ Arena* mark_arena_;
+ uint8_t* mark_ptr_;
+ uint8_t* mark_end_;
+
+ template <typename T>
+ friend class ScopedArenaAllocatorAdapter;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedArenaAllocator);
+};
+
+template <>
+class ScopedArenaAllocatorAdapter<void>
+ : private DebugStackReference, private DebugStackIndirectTopRef {
+ public:
+ typedef void value_type;
+ typedef void* pointer;
+ typedef const void* const_pointer;
+
+ template <typename U>
+ struct rebind {
+ typedef ScopedArenaAllocatorAdapter<U> other;
+ };
+
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
+ : DebugStackReference(arena_allocator),
+ DebugStackIndirectTopRef(arena_allocator),
+ arena_stack_(arena_allocator->arena_stack_) {
+ }
+ template <typename U>
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+ : DebugStackReference(other),
+ DebugStackIndirectTopRef(other),
+ arena_stack_(other.arena_stack_) {
+ }
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+ ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+ ~ScopedArenaAllocatorAdapter() = default;
+
+ private:
+ ArenaStack* arena_stack_;
+
+ template <typename U>
+ friend class ScopedArenaAllocatorAdapter;
+};
+
+// Adapter for use of ScopedArenaAllocator in STL containers.
+template <typename T>
+class ScopedArenaAllocatorAdapter : private DebugStackReference, private DebugStackIndirectTopRef {
+ public:
+ typedef T value_type;
+ typedef T* pointer;
+ typedef T& reference;
+ typedef const T* const_pointer;
+ typedef const T& const_reference;
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+
+ template <typename U>
+ struct rebind {
+ typedef ScopedArenaAllocatorAdapter<U> other;
+ };
+
+ explicit ScopedArenaAllocatorAdapter(ScopedArenaAllocator* arena_allocator)
+ : DebugStackReference(arena_allocator),
+ DebugStackIndirectTopRef(arena_allocator),
+ arena_stack_(arena_allocator->arena_stack_) {
+ }
+ template <typename U>
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter<U>& other)
+ : DebugStackReference(other),
+ DebugStackIndirectTopRef(other),
+ arena_stack_(other.arena_stack_) {
+ }
+ ScopedArenaAllocatorAdapter(const ScopedArenaAllocatorAdapter& other) = default;
+ ScopedArenaAllocatorAdapter& operator=(const ScopedArenaAllocatorAdapter& other) = default;
+ ~ScopedArenaAllocatorAdapter() = default;
+
+ size_type max_size() const {
+ return static_cast<size_type>(-1) / sizeof(T);
+ }
+
+ pointer address(reference x) const { return &x; }
+ const_pointer address(const_reference x) const { return &x; }
+
+ pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ DCHECK_LE(n, max_size());
+ DebugStackIndirectTopRef::CheckTop();
+ return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T), kArenaAllocSTL));
+ }
+ void deallocate(pointer p, size_type n) {
+ DebugStackIndirectTopRef::CheckTop();
+ }
+
+ void construct(pointer p, const_reference val) {
+ DebugStackIndirectTopRef::CheckTop();
+ new (static_cast<void*>(p)) value_type(val);
+ }
+ void destroy(pointer p) {
+ DebugStackIndirectTopRef::CheckTop();
+ p->~value_type();
+ }
+
+ private:
+ ArenaStack* arena_stack_;
+
+ template <typename U>
+ friend class ScopedArenaAllocatorAdapter;
+};
+
+inline ScopedArenaAllocatorAdapter<void> ScopedArenaAllocator::Adapter() {
+ return ScopedArenaAllocatorAdapter<void>(this);
+}
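+
+// Illustrative use with an STL container (a sketch, not part of this patch):
+//
+//   ScopedArenaAllocator allocator(&cu->arena_stack);
+//   std::vector<int, ScopedArenaAllocatorAdapter<int>> worklist(allocator.Adapter());
+//   worklist.push_back(42);  // Backing storage comes from the arena stack.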
+
+} // namespace art
+
+#endif // ART_COMPILER_UTILS_SCOPED_ARENA_ALLOCATOR_H_
diff --git a/runtime/safe_map.h b/runtime/safe_map.h
index 89da927cc2..393bf92ba2 100644
--- a/runtime/safe_map.h
+++ b/runtime/safe_map.h
@@ -33,10 +33,17 @@ class SafeMap {
typedef SafeMap<K, V, Comparator, Allocator> Self;
public:
- typedef typename ::std::map<K, V, Comparator>::iterator iterator;
- typedef typename ::std::map<K, V, Comparator>::const_iterator const_iterator;
- typedef typename ::std::map<K, V, Comparator>::size_type size_type;
- typedef typename ::std::map<K, V, Comparator>::value_type value_type;
+ typedef typename ::std::map<K, V, Comparator, Allocator>::key_compare key_compare;
+ typedef typename ::std::map<K, V, Comparator, Allocator>::allocator_type allocator_type;
+ typedef typename ::std::map<K, V, Comparator, Allocator>::iterator iterator;
+ typedef typename ::std::map<K, V, Comparator, Allocator>::const_iterator const_iterator;
+ typedef typename ::std::map<K, V, Comparator, Allocator>::size_type size_type;
+ typedef typename ::std::map<K, V, Comparator, Allocator>::value_type value_type;
+
+ SafeMap() = default;
+ explicit SafeMap(const key_compare& cmp, const allocator_type& allocator = allocator_type())
+ : map_(cmp, allocator) {
+ }
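+
+  // Illustrative use with a custom allocator (a sketch, not part of this change), e.g.
+  // the compiler's ScopedArenaAllocatorAdapter:
+  //
+  //   SafeMap<int, int, std::less<int>, ScopedArenaAllocatorAdapter<std::pair<const int, int>>>
+  //       map(std::less<int>(), allocator.Adapter());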
Self& operator=(const Self& rhs) {
map_ = rhs.map_;