author    Calin Juravle <calin@google.com>  2014-12-15 16:05:52 +0000
committer Gerrit Code Review <noreply-gerritcodereview@google.com>  2014-12-15 16:05:53 +0000
commit    37cbe23bf1d183b63ddae1ca5e4fe9a84d3f2061 (patch)
tree      e934fa1ef20c2294d087a82fe1e8c85db6911c1d /compiler/optimizing/optimizing_compiler.cc
parent    4de14bbf154daeacafaa60b12a0b855adb0f3705 (diff)
parent    48c2b03965830c73cdddeae8aea8030f08430137 (diff)
Merge "Add more compilation stats to optimizing"
Diffstat (limited to 'compiler/optimizing/optimizing_compiler.cc')
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 49
1 file changed, 27 insertions(+), 22 deletions(-)
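
The patch replaces the compiler's three hand-rolled atomic counters with a single OptimizingCompilerStats object: each decision point in Compile() records a MethodCompilationStat value, and the aggregated totals are printed once from the OptimizingCompiler destructor via Log(). The stats helper itself lives outside this file and is not part of this diff; what follows is only a minimal sketch of what such a helper could look like, assuming one atomic counter per stat value. The enumerator names are taken from the diff below; kLastStat and the output format are assumptions, not ART's actual code.

#include <atomic>
#include <cstddef>
#include <iostream>

// Stat values recorded by the patch below; kLastStat is a hypothetical
// sentinel used only to size the counter array.
enum class MethodCompilationStat : size_t {
  kAttemptCompilation,
  kCompiledBaseline,
  kCompiledOptimized,
  kNotOptimizedDisabled,
  kNotOptimizedTryCatch,
  kNotOptimizedRegisterAllocator,
  kNotCompiledUnsupportedIsa,
  kNotCompiledPathological,
  kNotCompiledNoCodegen,
  kNotCompiledCannotBuildSSA,
  kLastStat
};

class OptimizingCompilerStats {
 public:
  // RecordStat() may be called concurrently from compiler threads, hence
  // the atomic counters (the fields it replaces were mutable
  // AtomicIntegers for the same reason).
  void RecordStat(MethodCompilationStat stat) {
    counters_[static_cast<size_t>(stat)]++;
  }

  // Called once from the OptimizingCompiler destructor. The output format
  // here is illustrative only.
  void Log() const {
    size_t attempts =
        counters_[static_cast<size_t>(MethodCompilationStat::kAttemptCompilation)];
    if (attempts == 0) {
      std::cout << "Did not compile any method.\n";
      return;
    }
    std::cout << "Attempted compilation of " << attempts << " methods.\n";
    for (size_t i = 0; i < static_cast<size_t>(MethodCompilationStat::kLastStat); ++i) {
      std::cout << "  stat #" << i << ": " << counters_[i] << " ("
                << (counters_[i] * 100 / attempts) << "%)\n";
    }
  }

 private:
  // Value-initialized to zero; one counter per stat value.
  std::atomic<size_t> counters_[static_cast<size_t>(MethodCompilationStat::kLastStat)]{};
};
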
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 89a0cf9117..d47217fa3f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -121,9 +121,8 @@ class OptimizingCompiler FINAL : public Compiler {
// Whether we should run any optimization or register allocation. If false, will
// just run the code generation after the graph was built.
const bool run_optimizations_;
- mutable AtomicInteger total_compiled_methods_;
- mutable AtomicInteger unoptimized_compiled_methods_;
- mutable AtomicInteger optimized_compiled_methods_;
+
+ mutable OptimizingCompilerStats compilation_stats_;
std::unique_ptr<std::ostream> visualizer_output_;
@@ -136,24 +135,14 @@ OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
: Compiler(driver, kMaximumCompilationTimeBeforeWarning),
run_optimizations_(
driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime),
- total_compiled_methods_(0),
- unoptimized_compiled_methods_(0),
- optimized_compiled_methods_(0) {
+ compilation_stats_() {
if (kIsVisualizerEnabled) {
visualizer_output_.reset(new std::ofstream("art.cfg"));
}
}
OptimizingCompiler::~OptimizingCompiler() {
- if (total_compiled_methods_ == 0) {
- LOG(INFO) << "Did not compile any method.";
- } else {
- size_t unoptimized_percent = (unoptimized_compiled_methods_ * 100 / total_compiled_methods_);
- size_t optimized_percent = (optimized_compiled_methods_ * 100 / total_compiled_methods_);
- LOG(INFO) << "Compiled " << total_compiled_methods_ << " methods: "
- << unoptimized_percent << "% (" << unoptimized_compiled_methods_ << ") unoptimized, "
- << optimized_percent << "% (" << optimized_compiled_methods_ << ") optimized.";
- }
+ compilation_stats_.Log();
}
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
@@ -257,7 +246,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
jobject class_loader,
const DexFile& dex_file) const {
UNUSED(invoke_type);
- total_compiled_methods_++;
+ compilation_stats_.RecordStat(MethodCompilationStat::kAttemptCompilation);
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
// overflow checks) assumes thumb2.
@@ -267,10 +256,12 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledUnsupportedIsa);
return nullptr;
}
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledPathological);
return nullptr;
}
@@ -287,7 +278,10 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
ArenaPool pool;
ArenaAllocator arena(&pool);
- HGraphBuilder builder(&arena, &dex_compilation_unit, &dex_file, GetCompilerDriver());
+ HGraphBuilder builder(&arena,
+ &dex_compilation_unit,
+ &dex_file,
+ GetCompilerDriver(),
+ &compilation_stats_);
HGraph* graph = builder.BuildGraph(*code_item);
if (graph == nullptr) {
@@ -298,6 +292,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
CodeGenerator* codegen = CodeGenerator::Create(&arena, graph, instruction_set);
if (codegen == nullptr) {
CHECK(!shouldCompile) << "Could not find code generator for optimizing compiler";
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
@@ -307,13 +302,13 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
CodeVectorAllocator allocator;
- if (run_optimizations_
- && CanOptimize(*code_item)
- && RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set)) {
+ bool can_optimize = CanOptimize(*code_item);
+ bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);
+ if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << PrettyMethod(method_idx, dex_file);
- optimized_compiled_methods_++;
if (!TryBuildingSsa(graph, dex_compilation_unit, visualizer)) {
// We could not transform the graph to SSA; bail out.
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledCannotBuildSSA);
return nullptr;
}
RunOptimizations(graph, visualizer);
@@ -332,6 +327,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
std::vector<uint8_t> stack_map;
codegen->BuildStackMaps(&stack_map);
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledOptimized);
return new CompiledMethod(GetCompilerDriver(),
instruction_set,
allocator.GetMemory(),
@@ -344,7 +340,15 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
UNREACHABLE();
} else {
VLOG(compiler) << "Compile baseline " << PrettyMethod(method_idx, dex_file);
- unoptimized_compiled_methods_++;
+
+ if (!run_optimizations_) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedDisabled);
+ } else if (!can_optimize) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
+ } else if (!can_allocate_registers) {
+ compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
+ }
+
codegen->CompileBaseline(&allocator);
std::vector<uint8_t> mapping_table;
@@ -357,6 +361,7 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
std::vector<uint8_t> gc_map;
codegen->BuildNativeGCMap(&gc_map, dex_compilation_unit);
+ compilation_stats_.RecordStat(MethodCompilationStat::kCompiledBaseline);
return new CompiledMethod(GetCompilerDriver(),
instruction_set,
allocator.GetMemory(),
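
Note how the last hunks fit together: CanOptimize() and CanAllocateRegistersFor() are hoisted out of the if-condition into named booleans so that the baseline branch can record why optimization was skipped. Consolidated from the hunks above into one place (a sketch reusing the diff's own names; the else-if ladder means only the first blocking reason is recorded per method):

bool can_optimize = CanOptimize(*code_item);
bool can_allocate_registers =
    RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);

if (run_optimizations_ && can_optimize && can_allocate_registers) {
  // Optimized path: ends in kCompiledOptimized, or bails out with a
  // kNotCompiled* stat (e.g. kNotCompiledCannotBuildSSA).
} else {
  // Baseline path: record the first reason optimization was ruled out.
  if (!run_optimizations_) {
    compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedDisabled);
  } else if (!can_optimize) {
    compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedTryCatch);
  } else if (!can_allocate_registers) {
    compilation_stats_.RecordStat(MethodCompilationStat::kNotOptimizedRegisterAllocator);
  }
  // ... CompileBaseline(), then kCompiledBaseline.
}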