author     David Srbecky <dsrbecky@google.com>    2015-04-07 20:32:43 +0100
committer  David Srbecky <dsrbecky@google.com>    2015-04-09 16:47:50 +0100
commit     c6b4dd8980350aaf250f0185f73e9c42ec17cd57 (patch)
tree       ef8d73e37abc04aecb430072a8bc463c73398fee
parent     dd97393aca1a3ff2abec4dc4f78d7724300971bc (diff)
Implement CFI for Optimizing.
CFI is necessary for stack unwinding in gdb, lldb, and libunwind.

Change-Id: I1a3480e3a4a99f48bf7e6e63c4e83a80cfee40a2
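For orientation (not part of the commit): the CFI bytes this patch teaches the backends to emit are plain DWARF call-frame opcodes. Below is a minimal, standalone C++ sketch in the spirit of the dwarf::DebugFrameOpCodeWriter changes further down; the class name, the small-delta-only AdvancePC, and the hard-coded -4 data alignment factor are illustrative simplifications, not ART code. It reproduces the first bytes of the annotated x86 expected output in optimizing_cfi_test_expected.inc (0x41 0x0E 0x08 0x86 0x02: advance_loc 1; def_cfa_offset 8; r6 at cfa-8).

#include <cstdint>
#include <cstdio>
#include <vector>

namespace cfi_sketch {

// Standard DWARF call-frame opcodes (subset).
constexpr uint8_t DW_CFA_advance_loc    = 0x40;  // low 6 bits carry the PC delta
constexpr uint8_t DW_CFA_offset         = 0x80;  // low 6 bits carry the register
constexpr uint8_t DW_CFA_def_cfa_offset = 0x0e;

class CfiWriter {
 public:
  explicit CfiWriter(bool enabled) : enabled_(enabled) {}

  // CFA = SP + offset at native-code offset `pc`.  No-op when disabled,
  // mirroring the enabled_ guard this patch adds to DebugFrameOpCodeWriter.
  void DefCFAOffset(int pc, int offset) {
    if (enabled_ && offset != current_cfa_offset_) {
      AdvancePC(pc);
      opcodes_.push_back(DW_CFA_def_cfa_offset);
      PushUleb128(static_cast<uint32_t>(offset));
    }
    current_cfa_offset_ = offset;  // tracked even when disabled
  }

  // Register `reg` was spilled to SP + offset (like cfi().RelOffset() below).
  void RelOffset(int pc, int reg, int offset) {
    if (enabled_) {
      AdvancePC(pc);
      // Factored offset; the real writer falls back to the *_extended_sf
      // forms when this would be negative.
      int factored = (offset - current_cfa_offset_) / kDataAlignmentFactor;
      opcodes_.push_back(DW_CFA_offset | (reg & 0x3f));
      PushUleb128(static_cast<uint32_t>(factored));
    }
  }

  const std::vector<uint8_t>& data() const { return opcodes_; }

 private:
  static constexpr int kDataAlignmentFactor = -4;  // common 32-bit convention

  void AdvancePC(int absolute_pc) {
    int delta = absolute_pc - current_pc_;  // code alignment factor is 1
    if (delta != 0) {
      opcodes_.push_back(DW_CFA_advance_loc | (delta & 0x3f));  // small deltas only
    }
    current_pc_ = absolute_pc;
  }

  void PushUleb128(uint32_t value) {
    do {
      uint8_t byte = value & 0x7f;
      value >>= 7;
      opcodes_.push_back(value != 0 ? (byte | 0x80) : byte);
    } while (value != 0);
  }

  bool enabled_;
  std::vector<uint8_t> opcodes_;
  int current_pc_ = 0;
  int current_cfa_offset_ = 0;
};

}  // namespace cfi_sketch

int main() {
  // Models the start of the x86 prologue from the expected output: "push esi"
  // is one byte long, grows the frame to 8 and saves esi (DWARF reg 6) at SP+0.
  cfi_sketch::CfiWriter cfi(/*enabled=*/true);
  cfi.DefCFAOffset(/*pc=*/1, /*offset=*/8);
  cfi.RelOffset(/*pc=*/1, /*reg=*/6, /*offset=*/0);
  for (uint8_t b : cfi.data()) {
    printf("0x%02X ", static_cast<unsigned>(b));  // prints: 0x41 0x0E 0x08 0x86 0x02
  }
  printf("\n");
  return 0;
}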
-rw-r--r--  build/Android.gtest.mk                                 |   1
-rw-r--r--  compiler/compiled_method.cc                            |  33
-rw-r--r--  compiler/compiled_method.h                             |  21
-rw-r--r--  compiler/dex/quick/codegen_util.cc                     |   2
-rw-r--r--  compiler/dex/quick/lazy_debug_frame_opcode_writer.cc   |   2
-rw-r--r--  compiler/dex/quick/lazy_debug_frame_opcode_writer.h    |  67
-rw-r--r--  compiler/dwarf/debug_frame_opcode_writer.h             | 307
-rw-r--r--  compiler/elf_writer_quick.cc                           |   2
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                     |  21
-rw-r--r--  compiler/optimizing/code_generator.cc                  |  19
-rw-r--r--  compiler/optimizing/code_generator.h                   |   5
-rw-r--r--  compiler/optimizing/code_generator_arm.cc              |  31
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc            |  61
-rw-r--r--  compiler/optimizing/code_generator_arm64.h             |   2
-rw-r--r--  compiler/optimizing/code_generator_x86.cc              |  23
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc           |  36
-rw-r--r--  compiler/optimizing/optimizing_cfi_test.cc             | 108
-rw-r--r--  compiler/optimizing/optimizing_cfi_test_expected.inc   | 141
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc             |  26
19 files changed, 624 insertions, 284 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index 62a11c24e9..d9d09bcc63 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -214,6 +214,7 @@ COMPILER_GTEST_COMMON_SRC_FILES := \
compiler/optimizing/live_interval_test.cc \
compiler/optimizing/live_ranges_test.cc \
compiler/optimizing/nodes_test.cc \
+ compiler/optimizing/optimizing_cfi_test.cc \
compiler/optimizing/parallel_move_test.cc \
compiler/optimizing/pretty_printer_test.cc \
compiler/optimizing/register_allocator_test.cc \
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index eeed877444..4f7a970fdd 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -188,39 +188,6 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod(
return ret;
}
-CompiledMethod* CompiledMethod::SwapAllocCompiledMethodStackMap(
- CompilerDriver* driver,
- InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
- const ArrayRef<const uint8_t>& stack_map) {
- SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
- CompiledMethod* ret = alloc.allocate(1);
- alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
- fp_spill_mask, nullptr, ArrayRef<const uint8_t>(), stack_map,
- ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
- ArrayRef<const LinkerPatch>());
- return ret;
-}
-
-CompiledMethod* CompiledMethod::SwapAllocCompiledMethodCFI(
- CompilerDriver* driver,
- InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
- const ArrayRef<const uint8_t>& cfi_info) {
- SwapAllocator<CompiledMethod> alloc(driver->GetSwapSpaceAllocator());
- CompiledMethod* ret = alloc.allocate(1);
- alloc.construct(ret, driver, instruction_set, quick_code, frame_size_in_bytes, core_spill_mask,
- fp_spill_mask, nullptr, ArrayRef<const uint8_t>(),
- ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(),
- cfi_info, ArrayRef<const LinkerPatch>());
- return ret;
-}
void CompiledMethod::ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m) {
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 506b47b68a..480d021db0 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -320,7 +320,7 @@ class CompiledMethod FINAL : public CompiledCode {
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
- const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>());
+ const ArrayRef<const LinkerPatch>& patches);
virtual ~CompiledMethod();
@@ -336,24 +336,7 @@ class CompiledMethod FINAL : public CompiledCode {
const ArrayRef<const uint8_t>& vmap_table,
const ArrayRef<const uint8_t>& native_gc_map,
const ArrayRef<const uint8_t>& cfi_info,
- const ArrayRef<const LinkerPatch>& patches = ArrayRef<const LinkerPatch>());
-
- static CompiledMethod* SwapAllocCompiledMethodStackMap(
- CompilerDriver* driver,
- InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
- const ArrayRef<const uint8_t>& stack_map);
-
- static CompiledMethod* SwapAllocCompiledMethodCFI(CompilerDriver* driver,
- InstructionSet instruction_set,
- const ArrayRef<const uint8_t>& quick_code,
- const size_t frame_size_in_bytes,
- const uint32_t core_spill_mask,
- const uint32_t fp_spill_mask,
- const ArrayRef<const uint8_t>& cfi_info);
+ const ArrayRef<const LinkerPatch>& patches);
static void ReleaseSwapAllocatedCompiledMethod(CompilerDriver* driver, CompiledMethod* m);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index ff5f735255..9c9205fbb9 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -1073,7 +1073,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
pc_rel_temp_(nullptr),
dex_cache_arrays_min_offset_(std::numeric_limits<uint32_t>::max()),
cfi_(&last_lir_insn_,
- cu->compiler_driver->GetCompilerOptions().GetGenerateGDBInformation(),
+ cu->compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols(),
arena),
in_to_reg_storage_mapping_(arena) {
switch_tables_.reserve(4);
diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc b/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc
index 03cf4bef8b..5cfb0ff557 100644
--- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc
+++ b/compiler/dex/quick/lazy_debug_frame_opcode_writer.cc
@@ -21,7 +21,7 @@ namespace art {
namespace dwarf {
const ArenaVector<uint8_t>* LazyDebugFrameOpCodeWriter::Patch(size_t code_size) {
- if (!enable_writes_) {
+ if (!this->enabled_) {
DCHECK(this->data()->empty());
return this->data();
}
diff --git a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
index d71a87d567..94ffd7f957 100644
--- a/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
+++ b/compiler/dex/quick/lazy_debug_frame_opcode_writer.h
@@ -28,7 +28,7 @@ namespace dwarf {
// When we are generating the CFI code, we do not know the instruction offsets,
// so this class stores the LIR references and patches the instruction stream later.
class LazyDebugFrameOpCodeWriter FINAL
- : private DebugFrameOpCodeWriter<ArenaAllocatorAdapter<uint8_t>> {
+ : public DebugFrameOpCodeWriter<ArenaAllocatorAdapter<uint8_t>> {
typedef DebugFrameOpCodeWriter<ArenaAllocatorAdapter<uint8_t>> Base;
public:
// This method is implicitly called by the opcode writers.
@@ -38,72 +38,12 @@ class LazyDebugFrameOpCodeWriter FINAL
advances_.push_back({this->data()->size(), *last_lir_insn_});
}
- // The register was unspilled.
- void Restore(Reg reg) {
- if (enable_writes_) {
- Base::Restore(reg);
- }
- }
-
- // Custom alias - unspill many registers based on bitmask.
- void RestoreMany(Reg reg_base, uint32_t reg_mask) {
- if (enable_writes_) {
- Base::RestoreMany(reg_base, reg_mask);
- }
- }
-
- // Remember the state of register spills.
- void RememberState() {
- if (enable_writes_) {
- Base::RememberState();
- }
- }
-
- // Restore the state of register spills.
- void RestoreState() {
- if (enable_writes_) {
- Base::RestoreState();
- }
- }
-
- // Set the frame pointer (CFA) to (stack_pointer + offset).
- void DefCFAOffset(int offset) {
- if (enable_writes_) {
- Base::DefCFAOffset(offset);
- }
- this->current_cfa_offset_ = offset;
- }
-
- // The stack size was increased by given delta.
- void AdjustCFAOffset(int delta) {
- DefCFAOffset(this->current_cfa_offset_ + delta);
- }
-
- // The register was spilled to (stack_pointer + offset).
- void RelOffset(Reg reg, int offset) {
- if (enable_writes_) {
- Base::RelOffset(reg, offset);
- }
- }
-
- // Custom alias - spill many registers based on bitmask.
- void RelOffsetForMany(Reg reg_base, int offset, uint32_t reg_mask, int reg_size) {
- if (enable_writes_) {
- Base::RelOffsetForMany(reg_base, offset, reg_mask, reg_size);
- }
- }
-
- using Base::GetCurrentCFAOffset;
- using Base::SetCurrentCFAOffset;
- using Base::GetCurrentPC;
-
const ArenaVector<uint8_t>* Patch(size_t code_size);
explicit LazyDebugFrameOpCodeWriter(LIR** last_lir_insn, bool enable_writes,
ArenaAllocator* allocator)
- : Base(allocator->Adapter()),
+ : Base(enable_writes, allocator->Adapter()),
last_lir_insn_(last_lir_insn),
- enable_writes_(enable_writes),
advances_(allocator->Adapter()),
patched_(false) {
}
@@ -114,8 +54,9 @@ class LazyDebugFrameOpCodeWriter FINAL
LIR* last_lir_insn;
} Advance;
+ using Base::data; // Hidden. Use Patch method instead.
+
LIR** last_lir_insn_;
- bool enable_writes_;
ArenaVector<Advance> advances_;
bool patched_;
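Aside (not part of the commit): the class comment above describes the lazy scheme, where opcode bytes are buffered while native-code offsets are still unknown and Patch() splices the PC advances in afterwards, using the recorded (stream position, LIR) pairs in advances_. A standalone sketch of that idea follows; the types, the label indirection, and the single-byte ULEB128 shortcut are illustrative, not ART's implementation.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

namespace lazy_cfi_sketch {

constexpr uint8_t DW_CFA_advance_loc    = 0x40;
constexpr uint8_t DW_CFA_def_cfa_offset = 0x0e;

struct PendingAdvance {
  size_t stream_pos;  // where in opcodes_ an advance must be inserted
  size_t label;       // index into the final code-offset table
};

class LazyCfiWriter {
 public:
  // Record a def_cfa_offset next to the instruction identified by `label`,
  // without knowing yet where that instruction ends up in the code stream.
  void DefCFAOffsetAt(size_t label, uint8_t offset) {
    advances_.push_back({opcodes_.size(), label});
    opcodes_.push_back(DW_CFA_def_cfa_offset);
    opcodes_.push_back(offset);  // single-byte ULEB128 for offsets < 128
  }

  // label_offsets[i] is the final native-code offset of label i.
  std::vector<uint8_t> Patch(const std::vector<int>& label_offsets) const {
    std::vector<uint8_t> out;
    size_t next = 0;
    int pc = 0;
    for (size_t i = 0; i < opcodes_.size(); i++) {
      while (next < advances_.size() && advances_[next].stream_pos == i) {
        int target = label_offsets[advances_[next].label];
        if (target != pc) {
          out.push_back(DW_CFA_advance_loc | ((target - pc) & 0x3f));  // small deltas
          pc = target;
        }
        next++;
      }
      out.push_back(opcodes_[i]);
    }
    return out;
  }

 private:
  std::vector<uint8_t> opcodes_;
  std::vector<PendingAdvance> advances_;
};

}  // namespace lazy_cfi_sketch

int main() {
  lazy_cfi_sketch::LazyCfiWriter cfi;
  cfi.DefCFAOffsetAt(/*label=*/0, /*offset=*/16);  // emitted alongside some LIR
  cfi.DefCFAOffsetAt(/*label=*/1, /*offset=*/0);
  // After assembly, the two instructions landed at code offsets 4 and 20.
  for (uint8_t b : cfi.Patch({4, 20})) {
    printf("0x%02X ", static_cast<unsigned>(b));  // 0x44 0x0E 0x10 0x50 0x0E 0x00
  }
  printf("\n");
  return 0;
}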
diff --git a/compiler/dwarf/debug_frame_opcode_writer.h b/compiler/dwarf/debug_frame_opcode_writer.h
index 85186bbc22..d0d182106f 100644
--- a/compiler/dwarf/debug_frame_opcode_writer.h
+++ b/compiler/dwarf/debug_frame_opcode_writer.h
@@ -20,6 +20,7 @@
#include "dwarf.h"
#include "register.h"
#include "writer.h"
+#include "utils.h"
namespace art {
namespace dwarf {
@@ -41,45 +42,51 @@ class DebugFrameOpCodeWriter : private Writer<Allocator> {
static constexpr int kCodeAlignmentFactor = 1;
// Explicitly advance the program counter to the given location.
- void AdvancePC(int absolute_pc) {
+ void ALWAYS_INLINE AdvancePC(int absolute_pc) {
DCHECK_GE(absolute_pc, current_pc_);
- int delta = FactorCodeOffset(absolute_pc - current_pc_);
- if (delta != 0) {
- if (delta <= 0x3F) {
- this->PushUint8(DW_CFA_advance_loc | delta);
- } else if (delta <= UINT8_MAX) {
- this->PushUint8(DW_CFA_advance_loc1);
- this->PushUint8(delta);
- } else if (delta <= UINT16_MAX) {
- this->PushUint8(DW_CFA_advance_loc2);
- this->PushUint16(delta);
- } else {
- this->PushUint8(DW_CFA_advance_loc4);
- this->PushUint32(delta);
+ if (UNLIKELY(enabled_)) {
+ int delta = FactorCodeOffset(absolute_pc - current_pc_);
+ if (delta != 0) {
+ if (delta <= 0x3F) {
+ this->PushUint8(DW_CFA_advance_loc | delta);
+ } else if (delta <= UINT8_MAX) {
+ this->PushUint8(DW_CFA_advance_loc1);
+ this->PushUint8(delta);
+ } else if (delta <= UINT16_MAX) {
+ this->PushUint8(DW_CFA_advance_loc2);
+ this->PushUint16(delta);
+ } else {
+ this->PushUint8(DW_CFA_advance_loc4);
+ this->PushUint32(delta);
+ }
}
+ current_pc_ = absolute_pc;
}
- current_pc_ = absolute_pc;
}
// Override this method to automatically advance the PC before each opcode.
virtual void ImplicitlyAdvancePC() { }
// Common alias in assemblers - spill relative to current stack pointer.
- void RelOffset(Reg reg, int offset) {
+ void ALWAYS_INLINE RelOffset(Reg reg, int offset) {
Offset(reg, offset - current_cfa_offset_);
}
// Common alias in assemblers - increase stack frame size.
- void AdjustCFAOffset(int delta) {
+ void ALWAYS_INLINE AdjustCFAOffset(int delta) {
DefCFAOffset(current_cfa_offset_ + delta);
}
// Custom alias - spill many registers based on bitmask.
- void RelOffsetForMany(Reg reg_base, int offset, uint32_t reg_mask,
- int reg_size) {
+ void ALWAYS_INLINE RelOffsetForMany(Reg reg_base, int offset,
+ uint32_t reg_mask, int reg_size) {
DCHECK(reg_size == 4 || reg_size == 8);
- for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
- if ((reg_mask & 1) != 0u) {
+ if (UNLIKELY(enabled_)) {
+ for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
+ // Skip zero bits and go to the set bit.
+ int num_zeros = CTZ(reg_mask);
+ i += num_zeros;
+ reg_mask >>= num_zeros;
RelOffset(Reg(reg_base.num() + i), offset);
offset += reg_size;
}
@@ -87,175 +94,214 @@ class DebugFrameOpCodeWriter : private Writer<Allocator> {
}
// Custom alias - unspill many registers based on bitmask.
- void RestoreMany(Reg reg_base, uint32_t reg_mask) {
- for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
- if ((reg_mask & 1) != 0u) {
+ void ALWAYS_INLINE RestoreMany(Reg reg_base, uint32_t reg_mask) {
+ if (UNLIKELY(enabled_)) {
+ for (int i = 0; reg_mask != 0u; reg_mask >>= 1, i++) {
+ // Skip zero bits and go to the set bit.
+ int num_zeros = CTZ(reg_mask);
+ i += num_zeros;
+ reg_mask >>= num_zeros;
Restore(Reg(reg_base.num() + i));
}
}
}
- void Nop() {
- this->PushUint8(DW_CFA_nop);
+ void ALWAYS_INLINE Nop() {
+ if (UNLIKELY(enabled_)) {
+ this->PushUint8(DW_CFA_nop);
+ }
}
- void Offset(Reg reg, int offset) {
- ImplicitlyAdvancePC();
- int factored_offset = FactorDataOffset(offset); // May change sign.
- if (factored_offset >= 0) {
- if (0 <= reg.num() && reg.num() <= 0x3F) {
- this->PushUint8(DW_CFA_offset | reg.num());
- this->PushUleb128(factored_offset);
+ void ALWAYS_INLINE Offset(Reg reg, int offset) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ int factored_offset = FactorDataOffset(offset); // May change sign.
+ if (factored_offset >= 0) {
+ if (0 <= reg.num() && reg.num() <= 0x3F) {
+ this->PushUint8(DW_CFA_offset | reg.num());
+ this->PushUleb128(factored_offset);
+ } else {
+ this->PushUint8(DW_CFA_offset_extended);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(factored_offset);
+ }
} else {
- this->PushUint8(DW_CFA_offset_extended);
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_offset_extended_sf);
this->PushUleb128(reg.num());
- this->PushUleb128(factored_offset);
+ this->PushSleb128(factored_offset);
}
- } else {
- uses_dwarf3_features_ = true;
- this->PushUint8(DW_CFA_offset_extended_sf);
- this->PushUleb128(reg.num());
- this->PushSleb128(factored_offset);
}
}
- void Restore(Reg reg) {
- ImplicitlyAdvancePC();
- if (0 <= reg.num() && reg.num() <= 0x3F) {
- this->PushUint8(DW_CFA_restore | reg.num());
- } else {
- this->PushUint8(DW_CFA_restore_extended);
- this->PushUleb128(reg.num());
+ void ALWAYS_INLINE Restore(Reg reg) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ if (0 <= reg.num() && reg.num() <= 0x3F) {
+ this->PushUint8(DW_CFA_restore | reg.num());
+ } else {
+ this->PushUint8(DW_CFA_restore_extended);
+ this->PushUleb128(reg.num());
+ }
}
}
- void Undefined(Reg reg) {
- ImplicitlyAdvancePC();
- this->PushUint8(DW_CFA_undefined);
- this->PushUleb128(reg.num());
+ void ALWAYS_INLINE Undefined(Reg reg) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_undefined);
+ this->PushUleb128(reg.num());
+ }
}
- void SameValue(Reg reg) {
- ImplicitlyAdvancePC();
- this->PushUint8(DW_CFA_same_value);
- this->PushUleb128(reg.num());
+ void ALWAYS_INLINE SameValue(Reg reg) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_same_value);
+ this->PushUleb128(reg.num());
+ }
}
// The previous value of "reg" is stored in register "new_reg".
- void Register(Reg reg, Reg new_reg) {
- ImplicitlyAdvancePC();
- this->PushUint8(DW_CFA_register);
- this->PushUleb128(reg.num());
- this->PushUleb128(new_reg.num());
- }
-
- void RememberState() {
- ImplicitlyAdvancePC();
- this->PushUint8(DW_CFA_remember_state);
- }
-
- void RestoreState() {
- ImplicitlyAdvancePC();
- this->PushUint8(DW_CFA_restore_state);
+ void ALWAYS_INLINE Register(Reg reg, Reg new_reg) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_register);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(new_reg.num());
+ }
}
- void DefCFA(Reg reg, int offset) {
- ImplicitlyAdvancePC();
- if (offset >= 0) {
- this->PushUint8(DW_CFA_def_cfa);
- this->PushUleb128(reg.num());
- this->PushUleb128(offset); // Non-factored.
- } else {
- uses_dwarf3_features_ = true;
- this->PushUint8(DW_CFA_def_cfa_sf);
- this->PushUleb128(reg.num());
- this->PushSleb128(FactorDataOffset(offset));
+ void ALWAYS_INLINE RememberState() {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_remember_state);
}
- current_cfa_offset_ = offset;
}
- void DefCFARegister(Reg reg) {
- ImplicitlyAdvancePC();
- this->PushUint8(DW_CFA_def_cfa_register);
- this->PushUleb128(reg.num());
+ void ALWAYS_INLINE RestoreState() {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_restore_state);
+ }
}
- void DefCFAOffset(int offset) {
- if (current_cfa_offset_ != offset) {
+ void ALWAYS_INLINE DefCFA(Reg reg, int offset) {
+ if (UNLIKELY(enabled_)) {
ImplicitlyAdvancePC();
if (offset >= 0) {
- this->PushUint8(DW_CFA_def_cfa_offset);
+ this->PushUint8(DW_CFA_def_cfa);
+ this->PushUleb128(reg.num());
this->PushUleb128(offset); // Non-factored.
} else {
uses_dwarf3_features_ = true;
- this->PushUint8(DW_CFA_def_cfa_offset_sf);
+ this->PushUint8(DW_CFA_def_cfa_sf);
+ this->PushUleb128(reg.num());
this->PushSleb128(FactorDataOffset(offset));
}
- current_cfa_offset_ = offset;
}
+ current_cfa_offset_ = offset;
}
- void ValOffset(Reg reg, int offset) {
- ImplicitlyAdvancePC();
- uses_dwarf3_features_ = true;
- int factored_offset = FactorDataOffset(offset); // May change sign.
- if (factored_offset >= 0) {
- this->PushUint8(DW_CFA_val_offset);
- this->PushUleb128(reg.num());
- this->PushUleb128(factored_offset);
- } else {
- this->PushUint8(DW_CFA_val_offset_sf);
+ void ALWAYS_INLINE DefCFARegister(Reg reg) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ this->PushUint8(DW_CFA_def_cfa_register);
this->PushUleb128(reg.num());
- this->PushSleb128(factored_offset);
}
}
- void DefCFAExpression(void* expr, int expr_size) {
- ImplicitlyAdvancePC();
- uses_dwarf3_features_ = true;
- this->PushUint8(DW_CFA_def_cfa_expression);
- this->PushUleb128(expr_size);
- this->PushData(expr, expr_size);
+ void ALWAYS_INLINE DefCFAOffset(int offset) {
+ if (UNLIKELY(enabled_)) {
+ if (current_cfa_offset_ != offset) {
+ ImplicitlyAdvancePC();
+ if (offset >= 0) {
+ this->PushUint8(DW_CFA_def_cfa_offset);
+ this->PushUleb128(offset); // Non-factored.
+ } else {
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_def_cfa_offset_sf);
+ this->PushSleb128(FactorDataOffset(offset));
+ }
+ }
+ }
+ // Unconditional so that the user can still get and check the value.
+ current_cfa_offset_ = offset;
}
- void Expression(Reg reg, void* expr, int expr_size) {
- ImplicitlyAdvancePC();
- uses_dwarf3_features_ = true;
- this->PushUint8(DW_CFA_expression);
- this->PushUleb128(reg.num());
- this->PushUleb128(expr_size);
- this->PushData(expr, expr_size);
+ void ALWAYS_INLINE ValOffset(Reg reg, int offset) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ int factored_offset = FactorDataOffset(offset); // May change sign.
+ if (factored_offset >= 0) {
+ this->PushUint8(DW_CFA_val_offset);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(factored_offset);
+ } else {
+ this->PushUint8(DW_CFA_val_offset_sf);
+ this->PushUleb128(reg.num());
+ this->PushSleb128(factored_offset);
+ }
+ }
}
- void ValExpression(Reg reg, void* expr, int expr_size) {
- ImplicitlyAdvancePC();
- uses_dwarf3_features_ = true;
- this->PushUint8(DW_CFA_val_expression);
- this->PushUleb128(reg.num());
- this->PushUleb128(expr_size);
- this->PushData(expr, expr_size);
+ void ALWAYS_INLINE DefCFAExpression(void * expr, int expr_size) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_def_cfa_expression);
+ this->PushUleb128(expr_size);
+ this->PushData(expr, expr_size);
+ }
}
- int GetCurrentPC() const {
- return current_pc_;
+ void ALWAYS_INLINE Expression(Reg reg, void * expr, int expr_size) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_expression);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(expr_size);
+ this->PushData(expr, expr_size);
+ }
}
- int GetCurrentCFAOffset() const {
- return current_cfa_offset_;
+ void ALWAYS_INLINE ValExpression(Reg reg, void * expr, int expr_size) {
+ if (UNLIKELY(enabled_)) {
+ ImplicitlyAdvancePC();
+ uses_dwarf3_features_ = true;
+ this->PushUint8(DW_CFA_val_expression);
+ this->PushUleb128(reg.num());
+ this->PushUleb128(expr_size);
+ this->PushData(expr, expr_size);
+ }
}
- void SetCurrentCFAOffset(int offset) {
- current_cfa_offset_ = offset;
- }
+ bool IsEnabled() const { return enabled_; }
+
+ void SetEnabled(bool value) { enabled_ = value; }
+
+ int GetCurrentPC() const { return current_pc_; }
+
+ int GetCurrentCFAOffset() const { return current_cfa_offset_; }
+
+ void SetCurrentCFAOffset(int offset) { current_cfa_offset_ = offset; }
using Writer<Allocator>::data;
- DebugFrameOpCodeWriter(const Allocator& alloc = Allocator())
+ DebugFrameOpCodeWriter(bool enabled = true,
+ const Allocator& alloc = Allocator())
: Writer<Allocator>(&opcodes_),
+ enabled_(enabled),
opcodes_(alloc),
current_cfa_offset_(0),
current_pc_(0),
uses_dwarf3_features_(false) {
+ if (enabled) {
+ // Best guess based on a couple of observed outputs.
+ opcodes_.reserve(16);
+ }
}
virtual ~DebugFrameOpCodeWriter() { }
@@ -271,6 +317,7 @@ class DebugFrameOpCodeWriter : private Writer<Allocator> {
return offset / kCodeAlignmentFactor;
}
+ bool enabled_; // If disabled all writes are no-ops.
std::vector<uint8_t, Allocator> opcodes_;
int current_cfa_offset_;
int current_pc_;
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 354c71ec12..193cbe281e 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -679,7 +679,7 @@ static void WriteDebugSymbols(const CompilerDriver* compiler_driver,
}
if (!method_info.empty() &&
- compiler_driver->GetCompilerOptions().GetGenerateGDBInformation()) {
+ compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
ElfRawSectionBuilder<Elf_Word, Elf_Sword, Elf_Shdr> debug_info(".debug_info",
SHT_PROGBITS,
0, nullptr, 0, 1, 0);
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index 49b7cd1aa5..8a14038074 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -28,6 +28,7 @@
#include "compiled_method.h"
#include "dex_file-inl.h"
#include "driver/compiler_driver.h"
+#include "driver/compiler_options.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "jni_env_ext.h"
#include "mirror/art_method.h"
@@ -93,6 +94,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// Assembler that holds generated instructions
std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
+ jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetIncludeDebugSymbols());
// Offsets into data structures
// TODO: if cross compiling these offsets are for the host not the target
@@ -435,13 +437,18 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
MemoryRegion code(&managed_code[0], managed_code.size());
__ FinalizeInstructions(code);
- return CompiledMethod::SwapAllocCompiledMethodCFI(driver,
- instruction_set,
- ArrayRef<const uint8_t>(managed_code),
- frame_size,
- main_jni_conv->CoreSpillMask(),
- main_jni_conv->FpSpillMask(),
- ArrayRef<const uint8_t>(*jni_asm->cfi().data()));
+ return CompiledMethod::SwapAllocCompiledMethod(driver,
+ instruction_set,
+ ArrayRef<const uint8_t>(managed_code),
+ frame_size,
+ main_jni_conv->CoreSpillMask(),
+ main_jni_conv->FpSpillMask(),
+ nullptr, // src_mapping_table.
+ ArrayRef<const uint8_t>(), // mapping_table.
+ ArrayRef<const uint8_t>(), // vmap_table.
+ ArrayRef<const uint8_t>(), // native_gc_map.
+ ArrayRef<const uint8_t>(*jni_asm->cfi().data()),
+ ArrayRef<const LinkerPatch>());
}
// Copy a single parameter from the managed to the JNI calling convention
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index da28dc7ecb..8736374306 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -82,6 +82,7 @@ void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline)
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
GenerateFrameEntry();
+ DCHECK_EQ(GetAssembler()->cfi().GetCurrentCFAOffset(), static_cast<int>(frame_size_));
for (size_t e = block_order_->Size(); current_block_index_ < e; ++current_block_index_) {
HBasicBlock* block = block_order_->Get(current_block_index_);
// Don't generate code for an empty block. Its predecessors will branch to its successor
@@ -415,7 +416,16 @@ void CodeGenerator::BuildNativeGCMap(
}
}
-void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, DefaultSrcMap* src_map) const {
+void CodeGenerator::BuildSourceMap(DefaultSrcMap* src_map) const {
+ for (size_t i = 0; i < pc_infos_.Size(); i++) {
+ struct PcInfo pc_info = pc_infos_.Get(i);
+ uint32_t pc2dex_offset = pc_info.native_pc;
+ int32_t pc2dex_dalvik_offset = pc_info.dex_pc;
+ src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
+ }
+}
+
+void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
uint32_t pc2dex_data_size = 0u;
uint32_t pc2dex_entries = pc_infos_.Size();
uint32_t pc2dex_offset = 0u;
@@ -425,19 +435,12 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, DefaultSrcMap*
uint32_t dex2pc_offset = 0u;
int32_t dex2pc_dalvik_offset = 0;
- if (src_map != nullptr) {
- src_map->reserve(pc2dex_entries);
- }
-
for (size_t i = 0; i < pc2dex_entries; i++) {
struct PcInfo pc_info = pc_infos_.Get(i);
pc2dex_data_size += UnsignedLeb128Size(pc_info.native_pc - pc2dex_offset);
pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
pc2dex_offset = pc_info.native_pc;
pc2dex_dalvik_offset = pc_info.dex_pc;
- if (src_map != nullptr) {
- src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
- }
}
// Walk over the blocks and find which ones correspond to catch block entries.
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 07ca6b1ccf..b888aca264 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -205,7 +205,8 @@ class CodeGenerator {
slow_paths_.Add(slow_path);
}
- void BuildMappingTable(std::vector<uint8_t>* vector, DefaultSrcMap* src_map) const;
+ void BuildSourceMap(DefaultSrcMap* src_map) const;
+ void BuildMappingTable(std::vector<uint8_t>* vector) const;
void BuildVMapTable(std::vector<uint8_t>* vector) const;
void BuildNativeGCMap(
std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
@@ -425,6 +426,8 @@ class CodeGenerator {
StackMapStream stack_map_stream_;
+ friend class OptimizingCFITest;
+
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index cfc798a34e..a799a519c0 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -513,6 +513,14 @@ void CodeGeneratorARM::ComputeSpillMask() {
}
}
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::ArmCore(static_cast<int>(reg));
+}
+
+static dwarf::Reg DWARFReg(SRegister reg) {
+ return dwarf::Reg::ArmFp(static_cast<int>(reg));
+}
+
void CodeGeneratorARM::GenerateFrameEntry() {
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kArm);
@@ -531,12 +539,19 @@ void CodeGeneratorARM::GenerateFrameEntry() {
// PC is in the list of callee-save to mimic Quick, but we need to push
// LR at entry instead.
- __ PushList((core_spill_mask_ & (~(1 << PC))) | 1 << LR);
+ uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
+ __ PushList(push_mask);
+ __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
+ __ cfi().RelOffsetForMany(DWARFReg(Register(0)), 0, push_mask, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
+ __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(fpu_spill_mask_));
+ __ cfi().RelOffsetForMany(DWARFReg(SRegister(0)), 0, fpu_spill_mask_, kArmWordSize);
}
- __ AddConstant(SP, -(GetFrameSize() - FrameEntrySpillSize()));
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ AddConstant(SP, -adjust);
+ __ cfi().AdjustCFAOffset(adjust);
__ StoreToOffset(kStoreWord, R0, SP, 0);
}
@@ -545,10 +560,14 @@ void CodeGeneratorARM::GenerateFrameExit() {
__ bx(LR);
return;
}
- __ AddConstant(SP, GetFrameSize() - FrameEntrySpillSize());
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ AddConstant(SP, adjust);
+ __ cfi().AdjustCFAOffset(-adjust);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpops(start_register, POPCOUNT(fpu_spill_mask_));
+ __ cfi().AdjustCFAOffset(-kArmPointerSize * POPCOUNT(fpu_spill_mask_));
+ __ cfi().RestoreMany(DWARFReg(SRegister(0)), fpu_spill_mask_);
}
__ PopList(core_spill_mask_);
}
@@ -1190,7 +1209,10 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
UNUSED(ret);
+ __ cfi().RememberState();
codegen_->GenerateFrameExit();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM::VisitReturn(HReturn* ret) {
@@ -1201,7 +1223,10 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
UNUSED(ret);
+ __ cfi().RememberState();
codegen_->GenerateFrameExit();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 439e85ca6c..12eb0c78ea 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -465,20 +465,67 @@ void CodeGeneratorARM64::GenerateFrameEntry() {
// ... : reserved frame space.
// sp[0] : current method.
__ Str(kArtMethodRegister, MemOperand(sp, -frame_size, PreIndex));
- __ PokeCPURegList(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
- __ PokeCPURegList(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
+ GetAssembler()->cfi().AdjustCFAOffset(frame_size);
+ SpillRegisters(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
+ SpillRegisters(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
}
}
void CodeGeneratorARM64::GenerateFrameExit() {
if (!HasEmptyFrame()) {
int frame_size = GetFrameSize();
- __ PeekCPURegList(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
- __ PeekCPURegList(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
+ UnspillRegisters(GetFramePreservedFPRegisters(), frame_size - FrameEntrySpillSize());
+ UnspillRegisters(GetFramePreservedCoreRegisters(), frame_size - GetCoreSpillSize());
__ Drop(frame_size);
+ GetAssembler()->cfi().AdjustCFAOffset(-frame_size);
}
}
+static inline dwarf::Reg DWARFReg(CPURegister reg) {
+ if (reg.IsFPRegister()) {
+ return dwarf::Reg::Arm64Fp(reg.code());
+ } else {
+ DCHECK_LT(reg.code(), 31u); // X0 - X30.
+ return dwarf::Reg::Arm64Core(reg.code());
+ }
+}
+
+void CodeGeneratorARM64::SpillRegisters(vixl::CPURegList registers, int offset) {
+ int size = registers.RegisterSizeInBytes();
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ __ Stp(dst0, dst1, MemOperand(__ StackPointer(), offset));
+ GetAssembler()->cfi().RelOffset(DWARFReg(dst0), offset);
+ GetAssembler()->cfi().RelOffset(DWARFReg(dst1), offset + size);
+ offset += 2 * size;
+ }
+ if (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ __ Str(dst0, MemOperand(__ StackPointer(), offset));
+ GetAssembler()->cfi().RelOffset(DWARFReg(dst0), offset);
+ }
+ DCHECK(registers.IsEmpty());
+}
+
+void CodeGeneratorARM64::UnspillRegisters(vixl::CPURegList registers, int offset) {
+ int size = registers.RegisterSizeInBytes();
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ __ Ldp(dst0, dst1, MemOperand(__ StackPointer(), offset));
+ GetAssembler()->cfi().Restore(DWARFReg(dst0));
+ GetAssembler()->cfi().Restore(DWARFReg(dst1));
+ offset += 2 * size;
+ }
+ if (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ __ Ldr(dst0, MemOperand(__ StackPointer(), offset));
+ GetAssembler()->cfi().Restore(DWARFReg(dst0));
+ }
+ DCHECK(registers.IsEmpty());
+}
+
void CodeGeneratorARM64::Bind(HBasicBlock* block) {
__ Bind(GetLabelOf(block));
}
@@ -2403,8 +2450,11 @@ void LocationsBuilderARM64::VisitReturn(HReturn* instruction) {
void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction) {
UNUSED(instruction);
+ GetAssembler()->cfi().RememberState();
codegen_->GenerateFrameExit();
__ Ret();
+ GetAssembler()->cfi().RestoreState();
+ GetAssembler()->cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
@@ -2413,8 +2463,11 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
UNUSED(instruction);
+ GetAssembler()->cfi().RememberState();
codegen_->GenerateFrameExit();
__ Ret();
+ GetAssembler()->cfi().RestoreState();
+ GetAssembler()->cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderARM64::VisitShl(HShl* shl) {
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 7edb129880..9430e31037 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -227,6 +227,8 @@ class CodeGeneratorARM64 : public CodeGenerator {
void GenerateFrameEntry() OVERRIDE;
void GenerateFrameExit() OVERRIDE;
+ void SpillRegisters(vixl::CPURegList registers, int offset);
+ void UnspillRegisters(vixl::CPURegList registers, int offset);
vixl::CPURegList GetFramePreservedCoreRegisters() const {
return vixl::CPURegList(vixl::CPURegister::kRegister, vixl::kXRegSize,
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 92b62e2c84..8fbc64ea5e 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -459,7 +459,12 @@ InstructionCodeGeneratorX86::InstructionCodeGeneratorX86(HGraph* graph, CodeGene
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86Core(static_cast<int>(reg));
+}
+
void CodeGeneratorX86::GenerateFrameEntry() {
+ __ cfi().SetCurrentCFAOffset(kX86WordSize); // return address
__ Bind(&frame_entry_label_);
bool skip_overflow_check =
IsLeafMethod() && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86);
@@ -478,10 +483,14 @@ void CodeGeneratorX86::GenerateFrameEntry() {
Register reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
__ pushl(reg);
+ __ cfi().AdjustCFAOffset(kX86WordSize);
+ __ cfi().RelOffset(DWARFReg(reg), 0);
}
}
- __ subl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ subl(ESP, Immediate(adjust));
+ __ cfi().AdjustCFAOffset(adjust);
__ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
}
@@ -490,12 +499,16 @@ void CodeGeneratorX86::GenerateFrameExit() {
return;
}
- __ addl(ESP, Immediate(GetFrameSize() - FrameEntrySpillSize()));
+ int adjust = GetFrameSize() - FrameEntrySpillSize();
+ __ addl(ESP, Immediate(adjust));
+ __ cfi().AdjustCFAOffset(-adjust);
for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
Register reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
__ popl(reg);
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kX86WordSize));
+ __ cfi().Restore(DWARFReg(reg));
}
}
}
@@ -1102,8 +1115,11 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
UNUSED(ret);
+ __ cfi().RememberState();
codegen_->GenerateFrameExit();
__ ret();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderX86::VisitReturn(HReturn* ret) {
@@ -1161,8 +1177,11 @@ void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
}
}
+ __ cfi().RememberState();
codegen_->GenerateFrameExit();
__ ret();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index cdbc7780a8..b5e038fb05 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -481,7 +481,15 @@ void CodeGeneratorX86_64::SetupBlockedRegisters(bool is_baseline) const {
}
}
+static dwarf::Reg DWARFReg(Register reg) {
+ return dwarf::Reg::X86_64Core(static_cast<int>(reg));
+}
+static dwarf::Reg DWARFReg(FloatRegister reg) {
+ return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
+}
+
void CodeGeneratorX86_64::GenerateFrameEntry() {
+ __ cfi().SetCurrentCFAOffset(kX86_64WordSize); // return address
__ Bind(&frame_entry_label_);
bool skip_overflow_check = IsLeafMethod()
&& !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
@@ -501,17 +509,22 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
Register reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
__ pushq(CpuRegister(reg));
+ __ cfi().AdjustCFAOffset(kX86_64WordSize);
+ __ cfi().RelOffset(DWARFReg(reg), 0);
}
}
- __ subq(CpuRegister(RSP), Immediate(GetFrameSize() - GetCoreSpillSize()));
+ int adjust = GetFrameSize() - GetCoreSpillSize();
+ __ subq(CpuRegister(RSP), Immediate(adjust));
+ __ cfi().AdjustCFAOffset(adjust);
uint32_t xmm_spill_location = GetFpuSpillStart();
size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize();
for (int i = arraysize(kFpuCalleeSaves) - 1; i >= 0; --i) {
if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
- __ movsd(Address(CpuRegister(RSP), xmm_spill_location + (xmm_spill_slot_size * i)),
- XmmRegister(kFpuCalleeSaves[i]));
+ int offset = xmm_spill_location + (xmm_spill_slot_size * i);
+ __ movsd(Address(CpuRegister(RSP), offset), XmmRegister(kFpuCalleeSaves[i]));
+ __ cfi().RelOffset(DWARFReg(kFpuCalleeSaves[i]), offset);
}
}
@@ -526,17 +539,22 @@ void CodeGeneratorX86_64::GenerateFrameExit() {
size_t xmm_spill_slot_size = GetFloatingPointSpillSlotSize();
for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
if (allocated_registers_.ContainsFloatingPointRegister(kFpuCalleeSaves[i])) {
- __ movsd(XmmRegister(kFpuCalleeSaves[i]),
- Address(CpuRegister(RSP), xmm_spill_location + (xmm_spill_slot_size * i)));
+ int offset = xmm_spill_location + (xmm_spill_slot_size * i);
+ __ movsd(XmmRegister(kFpuCalleeSaves[i]), Address(CpuRegister(RSP), offset));
+ __ cfi().Restore(DWARFReg(kFpuCalleeSaves[i]));
}
}
- __ addq(CpuRegister(RSP), Immediate(GetFrameSize() - GetCoreSpillSize()));
+ int adjust = GetFrameSize() - GetCoreSpillSize();
+ __ addq(CpuRegister(RSP), Immediate(adjust));
+ __ cfi().AdjustCFAOffset(-adjust);
for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
Register reg = kCoreCalleeSaves[i];
if (allocated_registers_.ContainsCoreRegister(reg)) {
__ popq(CpuRegister(reg));
+ __ cfi().AdjustCFAOffset(-static_cast<int>(kX86_64WordSize));
+ __ cfi().Restore(DWARFReg(reg));
}
}
}
@@ -1123,8 +1141,11 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
UNUSED(ret);
+ __ cfi().RememberState();
codegen_->GenerateFrameExit();
__ ret();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
@@ -1175,8 +1196,11 @@ void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) {
LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
}
}
+ __ cfi().RememberState();
codegen_->GenerateFrameExit();
__ ret();
+ __ cfi().RestoreState();
+ __ cfi().DefCFAOffset(codegen_->GetFrameSize());
}
Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type) {
diff --git a/compiler/optimizing/optimizing_cfi_test.cc b/compiler/optimizing/optimizing_cfi_test.cc
new file mode 100644
index 0000000000..8602255cc5
--- /dev/null
+++ b/compiler/optimizing/optimizing_cfi_test.cc
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <memory>
+#include <vector>
+
+#include "arch/instruction_set.h"
+#include "cfi_test.h"
+#include "gtest/gtest.h"
+#include "optimizing/code_generator.h"
+#include "utils/assembler.h"
+
+#include "optimizing/optimizing_cfi_test_expected.inc"
+
+namespace art {
+
+// Run the tests only on host.
+#ifndef HAVE_ANDROID_OS
+
+class OptimizingCFITest : public CFITest {
+ public:
+ // Enable this flag to generate the expected outputs.
+ static constexpr bool kGenerateExpected = false;
+
+ void TestImpl(InstructionSet isa, const char* isa_str,
+ const std::vector<uint8_t>& expected_asm,
+ const std::vector<uint8_t>& expected_cfi) {
+ // Setup simple context.
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ CompilerOptions opts;
+ std::unique_ptr<const InstructionSetFeatures> isa_features;
+ std::string error;
+ isa_features.reset(InstructionSetFeatures::FromVariant(isa, "default", &error));
+ HGraph graph(&allocator);
+ // Generate simple frame with some spills.
+ auto code_gen = CodeGenerator::Create(&graph, isa, *isa_features.get(), opts);
+ const int frame_size = 64;
+ int core_reg = 0;
+ int fp_reg = 0;
+ for (int i = 0; i < 2; i++) { // Two registers of each kind.
+ for (; core_reg < 32; core_reg++) {
+ if (code_gen->IsCoreCalleeSaveRegister(core_reg)) {
+ auto location = Location::RegisterLocation(core_reg);
+ code_gen->AddAllocatedRegister(location);
+ core_reg++;
+ break;
+ }
+ }
+ for (; fp_reg < 32; fp_reg++) {
+ if (code_gen->IsFloatingPointCalleeSaveRegister(fp_reg)) {
+ auto location = Location::FpuRegisterLocation(fp_reg);
+ code_gen->AddAllocatedRegister(location);
+ fp_reg++;
+ break;
+ }
+ }
+ }
+ code_gen->ComputeSpillMask();
+ code_gen->SetFrameSize(frame_size);
+ code_gen->GenerateFrameEntry();
+ code_gen->GetInstructionVisitor()->VisitReturnVoid(new (&allocator) HReturnVoid());
+ // Get the outputs.
+ Assembler* opt_asm = code_gen->GetAssembler();
+ std::vector<uint8_t> actual_asm(opt_asm->CodeSize());
+ MemoryRegion code(&actual_asm[0], actual_asm.size());
+ opt_asm->FinalizeInstructions(code);
+ const std::vector<uint8_t>& actual_cfi = *(opt_asm->cfi().data());
+
+ if (kGenerateExpected) {
+ GenerateExpected(stdout, isa, isa_str, actual_asm, actual_cfi);
+ } else {
+ EXPECT_EQ(expected_asm, actual_asm);
+ EXPECT_EQ(expected_cfi, actual_cfi);
+ }
+ }
+};
+
+#define TEST_ISA(isa) \
+ TEST_F(OptimizingCFITest, isa) { \
+ std::vector<uint8_t> expected_asm(expected_asm_##isa, \
+ expected_asm_##isa + arraysize(expected_asm_##isa)); \
+ std::vector<uint8_t> expected_cfi(expected_cfi_##isa, \
+ expected_cfi_##isa + arraysize(expected_cfi_##isa)); \
+ TestImpl(isa, #isa, expected_asm, expected_cfi); \
+ }
+
+TEST_ISA(kThumb2)
+TEST_ISA(kArm64)
+TEST_ISA(kX86)
+TEST_ISA(kX86_64)
+
+#endif // HAVE_ANDROID_OS
+
+} // namespace art
diff --git a/compiler/optimizing/optimizing_cfi_test_expected.inc b/compiler/optimizing/optimizing_cfi_test_expected.inc
new file mode 100644
index 0000000000..2125f6eb01
--- /dev/null
+++ b/compiler/optimizing/optimizing_cfi_test_expected.inc
@@ -0,0 +1,141 @@
+static constexpr uint8_t expected_asm_kThumb2[] = {
+ 0x60, 0xB5, 0x2D, 0xED, 0x02, 0x8A, 0x8B, 0xB0, 0x00, 0x90, 0x0B, 0xB0,
+ 0xBD, 0xEC, 0x02, 0x8A, 0x60, 0xBD,
+};
+static constexpr uint8_t expected_cfi_kThumb2[] = {
+ 0x42, 0x0E, 0x0C, 0x85, 0x03, 0x86, 0x02, 0x8E, 0x01, 0x44, 0x0E, 0x14,
+ 0x05, 0x50, 0x05, 0x05, 0x51, 0x04, 0x42, 0x0E, 0x40, 0x42, 0x0A, 0x42,
+ 0x0E, 0x14, 0x44, 0x0E, 0x0C, 0x06, 0x50, 0x06, 0x51, 0x42, 0x0B, 0x0E,
+ 0x40,
+};
+// 0x00000000: push {r5, r6, lr}
+// 0x00000002: .cfi_def_cfa_offset: 12
+// 0x00000002: .cfi_offset: r5 at cfa-12
+// 0x00000002: .cfi_offset: r6 at cfa-8
+// 0x00000002: .cfi_offset: r14 at cfa-4
+// 0x00000002: vpush.f32 {s16-s17}
+// 0x00000006: .cfi_def_cfa_offset: 20
+// 0x00000006: .cfi_offset_extended: r80 at cfa-20
+// 0x00000006: .cfi_offset_extended: r81 at cfa-16
+// 0x00000006: sub sp, sp, #44
+// 0x00000008: .cfi_def_cfa_offset: 64
+// 0x00000008: str r0, [sp, #0]
+// 0x0000000a: .cfi_remember_state
+// 0x0000000a: add sp, sp, #44
+// 0x0000000c: .cfi_def_cfa_offset: 20
+// 0x0000000c: vpop.f32 {s16-s17}
+// 0x00000010: .cfi_def_cfa_offset: 12
+// 0x00000010: .cfi_restore_extended: r80
+// 0x00000010: .cfi_restore_extended: r81
+// 0x00000010: pop {r5, r6, pc}
+// 0x00000012: .cfi_restore_state
+// 0x00000012: .cfi_def_cfa_offset: 64
+
+static constexpr uint8_t expected_asm_kArm64[] = {
+ 0xE0, 0x0F, 0x1C, 0xB8, 0xF3, 0xD3, 0x02, 0xA9, 0xFE, 0x1F, 0x00, 0xF9,
+ 0xE8, 0xA7, 0x01, 0x6D, 0xE8, 0xA7, 0x41, 0x6D, 0xF3, 0xD3, 0x42, 0xA9,
+ 0xFE, 0x1F, 0x40, 0xF9, 0xFF, 0x03, 0x01, 0x91, 0xC0, 0x03, 0x5F, 0xD6,
+};
+static constexpr uint8_t expected_cfi_kArm64[] = {
+ 0x44, 0x0E, 0x40, 0x44, 0x93, 0x06, 0x94, 0x04, 0x44, 0x9E, 0x02, 0x44,
+ 0x05, 0x48, 0x0A, 0x05, 0x49, 0x08, 0x0A, 0x44, 0x06, 0x48, 0x06, 0x49,
+ 0x44, 0xD3, 0xD4, 0x44, 0xDE, 0x44, 0x0E, 0x00, 0x44, 0x0B, 0x0E, 0x40,
+};
+// 0x00000000: str w0, [sp, #-64]!
+// 0x00000004: .cfi_def_cfa_offset: 64
+// 0x00000004: stp x19, x20, [sp, #40]
+// 0x00000008: .cfi_offset: r19 at cfa-24
+// 0x00000008: .cfi_offset: r20 at cfa-16
+// 0x00000008: str lr, [sp, #56]
+// 0x0000000c: .cfi_offset: r30 at cfa-8
+// 0x0000000c: stp d8, d9, [sp, #24]
+// 0x00000010: .cfi_offset_extended: r72 at cfa-40
+// 0x00000010: .cfi_offset_extended: r73 at cfa-32
+// 0x00000010: .cfi_remember_state
+// 0x00000010: ldp d8, d9, [sp, #24]
+// 0x00000014: .cfi_restore_extended: r72
+// 0x00000014: .cfi_restore_extended: r73
+// 0x00000014: ldp x19, x20, [sp, #40]
+// 0x00000018: .cfi_restore: r19
+// 0x00000018: .cfi_restore: r20
+// 0x00000018: ldr lr, [sp, #56]
+// 0x0000001c: .cfi_restore: r30
+// 0x0000001c: add sp, sp, #0x40 (64)
+// 0x00000020: .cfi_def_cfa_offset: 0
+// 0x00000020: ret
+// 0x00000024: .cfi_restore_state
+// 0x00000024: .cfi_def_cfa_offset: 64
+
+static constexpr uint8_t expected_asm_kX86[] = {
+ 0x56, 0x55, 0x83, 0xEC, 0x34, 0x89, 0x04, 0x24, 0x83, 0xC4, 0x34, 0x5D,
+ 0x5E, 0xC3,
+};
+static constexpr uint8_t expected_cfi_kX86[] = {
+ 0x41, 0x0E, 0x08, 0x86, 0x02, 0x41, 0x0E, 0x0C, 0x85, 0x03, 0x43, 0x0E,
+ 0x40, 0x43, 0x0A, 0x43, 0x0E, 0x0C, 0x41, 0x0E, 0x08, 0xC5, 0x41, 0x0E,
+ 0x04, 0xC6, 0x41, 0x0B, 0x0E, 0x40,
+};
+// 0x00000000: push esi
+// 0x00000001: .cfi_def_cfa_offset: 8
+// 0x00000001: .cfi_offset: r6 at cfa-8
+// 0x00000001: push ebp
+// 0x00000002: .cfi_def_cfa_offset: 12
+// 0x00000002: .cfi_offset: r5 at cfa-12
+// 0x00000002: sub esp, 52
+// 0x00000005: .cfi_def_cfa_offset: 64
+// 0x00000005: mov [esp], eax
+// 0x00000008: .cfi_remember_state
+// 0x00000008: add esp, 52
+// 0x0000000b: .cfi_def_cfa_offset: 12
+// 0x0000000b: pop ebp
+// 0x0000000c: .cfi_def_cfa_offset: 8
+// 0x0000000c: .cfi_restore: r5
+// 0x0000000c: pop esi
+// 0x0000000d: .cfi_def_cfa_offset: 4
+// 0x0000000d: .cfi_restore: r6
+// 0x0000000d: ret
+// 0x0000000e: .cfi_restore_state
+// 0x0000000e: .cfi_def_cfa_offset: 64
+
+static constexpr uint8_t expected_asm_kX86_64[] = {
+ 0x55, 0x53, 0x48, 0x83, 0xEC, 0x28, 0xF2, 0x44, 0x0F, 0x11, 0x6C, 0x24,
+ 0x20, 0xF2, 0x44, 0x0F, 0x11, 0x64, 0x24, 0x18, 0x89, 0x3C, 0x24, 0xF2,
+ 0x44, 0x0F, 0x10, 0x64, 0x24, 0x18, 0xF2, 0x44, 0x0F, 0x10, 0x6C, 0x24,
+ 0x20, 0x48, 0x83, 0xC4, 0x28, 0x5B, 0x5D, 0xC3,
+};
+static constexpr uint8_t expected_cfi_kX86_64[] = {
+ 0x41, 0x0E, 0x10, 0x86, 0x04, 0x41, 0x0E, 0x18, 0x83, 0x06, 0x44, 0x0E,
+ 0x40, 0x47, 0x9E, 0x08, 0x47, 0x9D, 0x0A, 0x43, 0x0A, 0x47, 0xDD, 0x47,
+ 0xDE, 0x44, 0x0E, 0x18, 0x41, 0x0E, 0x10, 0xC3, 0x41, 0x0E, 0x08, 0xC6,
+ 0x41, 0x0B, 0x0E, 0x40,
+};
+// 0x00000000: push rbp
+// 0x00000001: .cfi_def_cfa_offset: 16
+// 0x00000001: .cfi_offset: r6 at cfa-16
+// 0x00000001: push rbx
+// 0x00000002: .cfi_def_cfa_offset: 24
+// 0x00000002: .cfi_offset: r3 at cfa-24
+// 0x00000002: subq rsp, 40
+// 0x00000006: .cfi_def_cfa_offset: 64
+// 0x00000006: movsd [rsp + 32], xmm13
+// 0x0000000d: .cfi_offset: r30 at cfa-32
+// 0x0000000d: movsd [rsp + 24], xmm12
+// 0x00000014: .cfi_offset: r29 at cfa-40
+// 0x00000014: mov [rsp], edi
+// 0x00000017: .cfi_remember_state
+// 0x00000017: movsd xmm12, [rsp + 24]
+// 0x0000001e: .cfi_restore: r29
+// 0x0000001e: movsd xmm13, [rsp + 32]
+// 0x00000025: .cfi_restore: r30
+// 0x00000025: addq rsp, 40
+// 0x00000029: .cfi_def_cfa_offset: 24
+// 0x00000029: pop rbx
+// 0x0000002a: .cfi_def_cfa_offset: 16
+// 0x0000002a: .cfi_restore: r3
+// 0x0000002a: pop rbp
+// 0x0000002b: .cfi_def_cfa_offset: 8
+// 0x0000002b: .cfi_restore: r6
+// 0x0000002b: ret
+// 0x0000002c: .cfi_restore_state
+// 0x0000002c: .cfi_def_cfa_offset: 64
+
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 12798edac5..9cb0004aa5 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -50,6 +50,7 @@
#include "ssa_builder.h"
#include "ssa_phi_elimination.h"
#include "ssa_liveness_analysis.h"
+#include "utils/assembler.h"
#include "reference_type_propagation.h"
namespace art {
@@ -395,12 +396,17 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
CodeVectorAllocator allocator;
codegen->CompileOptimized(&allocator);
+ DefaultSrcMap src_mapping_table;
+ if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ codegen->BuildSourceMap(&src_mapping_table);
+ }
+
std::vector<uint8_t> stack_map;
codegen->BuildStackMaps(&stack_map);
compilation_stats_.RecordStat(MethodCompilationStat::kCompiledOptimized);
- return CompiledMethod::SwapAllocCompiledMethodStackMap(
+ return CompiledMethod::SwapAllocCompiledMethod(
compiler_driver,
codegen->GetInstructionSet(),
ArrayRef<const uint8_t>(allocator.GetMemory()),
@@ -410,7 +416,12 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
codegen->GetCoreSpillMask(),
codegen->GetFpuSpillMask(),
- ArrayRef<const uint8_t>(stack_map));
+ &src_mapping_table,
+ ArrayRef<const uint8_t>(), // mapping_table.
+ ArrayRef<const uint8_t>(stack_map),
+ ArrayRef<const uint8_t>(), // native_gc_map.
+ ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
+ ArrayRef<const LinkerPatch>());
}
@@ -422,9 +433,11 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
codegen->CompileBaseline(&allocator);
std::vector<uint8_t> mapping_table;
+ codegen->BuildMappingTable(&mapping_table);
DefaultSrcMap src_mapping_table;
- bool include_debug_symbol = compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols();
- codegen->BuildMappingTable(&mapping_table, include_debug_symbol ? &src_mapping_table : nullptr);
+ if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ codegen->BuildSourceMap(&src_mapping_table);
+ }
std::vector<uint8_t> vmap_table;
codegen->BuildVMapTable(&vmap_table);
std::vector<uint8_t> gc_map;
@@ -445,7 +458,8 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
AlignVectorSize(mapping_table),
AlignVectorSize(vmap_table),
AlignVectorSize(gc_map),
- ArrayRef<const uint8_t>());
+ ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
+ ArrayRef<const LinkerPatch>());
}
CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
@@ -511,6 +525,8 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
compilation_stats_.RecordStat(MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
+ codegen->GetAssembler()->cfi().SetEnabled(
+ compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols());
PassInfoPrinter pass_info_printer(graph,
method_name.c_str(),