path: root/compiler
author     Ian Rogers <irogers@google.com>    2014-10-31 00:33:20 -0700
committer  Ian Rogers <irogers@google.com>    2014-11-03 20:01:04 -0800
commit     6a3c1fcb4ba42ad4d5d142c17a3712a6ddd3866f (patch)
tree       9df58b57af13240a93a6da4eefcf03f70cce9ad9 /compiler
parent     c6e0955737e15f7c0c3575d4e13789b3411f4993 (diff)
Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.
Fix associated errors about unused parameters and implicit sign conversions. For sign conversion this was largely in the area of enums, so add ostream operators for the affected enums and fix tools/generate-operator-out.py. Tidy arena allocation code and arena-allocated data types rather than fixing new and delete operators. Remove dead code.

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
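The two patterns this change applies throughout the diff below are: marking genuinely unused parameters (ATTRIBUTE_UNUSED at declarations, UNUSED(...) in bodies) so the tree builds cleanly with -Wunused-parameter, and giving enums an operator<< so they can be streamed into LOG/DCHECK messages rather than relying on implicit integer conversions. A minimal standalone C++ sketch of both follows; the macro definition, enum values, and operator body here are illustrative stand-ins, not the actual ART headers or the exact output of tools/generate-operator-out.py.

#include <iostream>
#include <ostream>

// Stand-in for the ATTRIBUTE_UNUSED macro that ART defines in base/macros.h.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))

// Illustrative enum; the real RegisterClass lives in dex/compiler_enums.h.
enum RegisterClass {
  kInvalidRegClass,
  kCoreReg,
  kFPReg,
  kRefReg,
  kAnyReg,
};

// generate-operator-out.py emits operators of roughly this shape for every
// header listed in LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES.
std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs) {
  switch (rhs) {
    case kInvalidRegClass: return os << "kInvalidRegClass";
    case kCoreReg:         return os << "kCoreReg";
    case kFPReg:           return os << "kFPReg";
    case kRefReg:          return os << "kRefReg";
    case kAnyReg:          return os << "kAnyReg";
  }
  // Values outside the enumerators still print something readable.
  return os << "RegisterClass[" << static_cast<int>(rhs) << "]";
}

// An unused parameter annotated at the declaration keeps the signature intact
// for overriders while silencing -Wunused-parameter.
int NumReservableVectorRegisters(bool long_or_fp ATTRIBUTE_UNUSED) {
  return 0;  // Default backend reserves no vector registers.
}

int main() {
  std::cout << kCoreReg << "\n";                            // prints kCoreReg
  std::cout << NumReservableVectorRegisters(true) << "\n";  // prints 0
  return 0;
}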
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/Android.mk | 25
-rw-r--r--  compiler/common_compiler_test.cc | 3
-rw-r--r--  compiler/compiler.cc | 48
-rw-r--r--  compiler/compiler.h | 1
-rw-r--r--  compiler/dex/backend.h | 7
-rw-r--r--  compiler/dex/compiler_enums.h | 5
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 9
-rw-r--r--  compiler/dex/global_value_numbering.h | 39
-rw-r--r--  compiler/dex/local_value_numbering.cc | 20
-rw-r--r--  compiler/dex/local_value_numbering.h | 13
-rw-r--r--  compiler/dex/mir_dataflow.cc | 2
-rw-r--r--  compiler/dex/mir_graph.cc | 57
-rw-r--r--  compiler/dex/mir_graph.h | 30
-rw-r--r--  compiler/dex/mir_optimization.cc | 13
-rw-r--r--  compiler/dex/pass.h | 17
-rw-r--r--  compiler/dex/pass_me.h | 32
-rw-r--r--  compiler/dex/portable/mir_to_gbc.h | 2
-rw-r--r--  compiler/dex/quick/arm/arm_lir.h | 4
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc | 4
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h | 6
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc | 207
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc | 9
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc | 13
-rw-r--r--  compiler/dex/quick/arm64/arm64_lir.h | 8
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc | 4
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h | 6
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc | 46
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc | 9
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc | 25
-rw-r--r--  compiler/dex/quick/codegen_util.cc | 89
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc | 22
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h | 8
-rw-r--r--  compiler/dex/quick/gen_common.cc | 8
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc | 51
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc | 4
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc | 4
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h | 6
-rw-r--r--  compiler/dex/quick/mips/fp_mips.cc | 7
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc | 52
-rw-r--r--  compiler/dex/quick/mips/mips_lir.h | 16
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc | 10
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc | 21
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc | 11
-rw-r--r--  compiler/dex/quick/mir_to_lir.h | 27
-rw-r--r--  compiler/dex/quick/quick_compiler.cc | 1
-rw-r--r--  compiler/dex/quick/ralloc_util.cc | 9
-rw-r--r--  compiler/dex/quick/resource_mask.h | 12
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc | 6
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc | 3
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h | 77
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc | 6
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc | 83
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc | 140
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc | 38
-rw-r--r--  compiler/dex/quick/x86/x86_lir.h | 20
-rw-r--r--  compiler/dex/reg_storage.h | 8
-rw-r--r--  compiler/dex/verification_results.cc | 2
-rw-r--r--  compiler/driver/compiler_driver.h | 1
-rw-r--r--  compiler/driver/compiler_options.h | 2
-rw-r--r--  compiler/elf_builder.h | 2
-rw-r--r--  compiler/elf_writer_quick.cc | 6
-rw-r--r--  compiler/image_writer.cc | 5
-rw-r--r--  compiler/jni/jni_compiler_test.cc | 21
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc | 1
-rw-r--r--  compiler/llvm/llvm_compiler.cc | 6
-rw-r--r--  compiler/oat_writer.cc | 19
-rw-r--r--  compiler/optimizing/code_generator.h | 14
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 13
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 10
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 12
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 12
-rw-r--r--  compiler/optimizing/gvn.h | 4
-rw-r--r--  compiler/optimizing/locations.h | 4
-rw-r--r--  compiler/optimizing/nodes.cc | 12
-rw-r--r--  compiler/optimizing/nodes.h | 72
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 1
-rw-r--r--  compiler/optimizing/parallel_move_test.cc | 4
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 14
-rw-r--r--  compiler/output_stream.cc | 31
-rw-r--r--  compiler/output_stream.h | 5
-rw-r--r--  compiler/utils/arena_allocator.h | 2
-rw-r--r--  compiler/utils/arena_bit_vector.cc | 9
-rw-r--r--  compiler/utils/arena_bit_vector.h | 18
-rw-r--r--  compiler/utils/arena_containers.h | 4
-rw-r--r--  compiler/utils/arena_object.h | 29
-rw-r--r--  compiler/utils/arm/assembler_arm.h | 38
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc | 5
-rw-r--r--  compiler/utils/arm/assembler_arm32.h | 5
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc | 43
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h | 59
-rw-r--r--  compiler/utils/arm/constants_arm.h | 83
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc | 2
-rw-r--r--  compiler/utils/array_ref.h | 12
-rw-r--r--  compiler/utils/assembler.cc | 66
-rw-r--r--  compiler/utils/assembler.h | 2
-rw-r--r--  compiler/utils/growable_array.h | 22
-rw-r--r--  compiler/utils/scoped_arena_containers.h | 3
-rw-r--r--  compiler/utils/stack_checks.h | 1
99 files changed, 1129 insertions, 954 deletions
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 2d38629db1..610f453816 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -58,22 +58,22 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/x86/target_x86.cc \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
+ dex/bb_optimizations.cc \
+ dex/compiler_ir.cc \
+ dex/frontend.cc \
+ dex/mir_analysis.cc \
dex/mir_dataflow.cc \
dex/mir_field_info.cc \
+ dex/mir_graph.cc \
dex/mir_method_info.cc \
dex/mir_optimization.cc \
- dex/bb_optimizations.cc \
- dex/compiler_ir.cc \
dex/post_opt_passes.cc \
dex/pass_driver_me_opts.cc \
dex/pass_driver_me_post_opt.cc \
- dex/frontend.cc \
- dex/mir_graph.cc \
- dex/mir_analysis.cc \
+ dex/ssa_transformation.cc \
dex/verified_method.cc \
dex/verification_results.cc \
dex/vreg_analysis.cc \
- dex/ssa_transformation.cc \
dex/quick_compiler_callbacks.cc \
driver/compiler_driver.cc \
driver/dex_compilation_unit.cc \
@@ -133,6 +133,7 @@ LIBART_COMPILER_SRC_FILES := \
file_output_stream.cc \
image_writer.cc \
oat_writer.cc \
+ output_stream.cc \
vector_output_stream.cc
ifeq ($(ART_SEA_IR_MODE),true)
@@ -168,7 +169,17 @@ LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h
+ dex/quick/arm/arm_lir.h \
+ dex/quick/arm64/arm64_lir.h \
+ dex/quick/mips/mips_lir.h \
+ dex/quick/resource_mask.h \
+ dex/compiler_enums.h \
+ dex/global_value_numbering.h \
+ dex/pass_me.h \
+ driver/compiler_driver.h \
+ driver/compiler_options.h \
+ optimizing/locations.h \
+ utils/arm/constants_arm.h
# $(1): target or host
# $(2): ndebug or debug
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 7e19e15961..bfdb537427 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -144,8 +144,7 @@ void CommonCompilerTest::SetUp() {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(
- runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 744bafa0fe..b9fcf5bab6 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -25,13 +25,24 @@
namespace art {
#ifdef ART_SEA_IR_MODE
-extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
+constexpr bool kCanUseSeaIR = true;
+#else
+constexpr bool kCanUseSeaIR = false;
+#endif
+
+extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t access_flags ATTRIBUTE_UNUSED,
+ art::InvokeType invoke_type ATTRIBUTE_UNUSED,
+ uint16_t class_def_idx ATTRIBUTE_UNUSED,
+ uint32_t method_idx ATTRIBUTE_UNUSED,
+ jobject class_loader ATTRIBUTE_UNUSED,
+ const art::DexFile& dex_file ATTRIBUTE_UNUSED)
+#ifdef ART_SEA_IR_MODE
+; // NOLINT(whitespace/semicolon)
+#else
+{
+ UNREACHABLE();
+}
#endif
@@ -42,19 +53,18 @@ CompiledMethod* Compiler::TryCompileWithSeaIR(const art::DexFile::CodeItem* code
uint32_t method_idx,
jobject class_loader,
const art::DexFile& dex_file) {
-#ifdef ART_SEA_IR_MODE
- bool use_sea = (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) {
- LOG(INFO) << "Using SEA IR to compile..." << std::endl;
- return SeaIrCompileMethod(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
+ bool use_sea = kCanUseSeaIR &&
+ (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+ if (use_sea) {
+ LOG(INFO) << "Using SEA IR to compile..." << std::endl;
+ return SeaIrCompileMethod(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
}
-#endif
return nullptr;
}
diff --git a/compiler/compiler.h b/compiler/compiler.h
index b92eda7942..c2c15ff9cf 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -115,6 +115,7 @@ class Compiler {
*/
virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
const {
+ UNUSED(driver);
return nullptr;
}
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index cab3427e38..9cad9338fc 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -38,7 +38,7 @@ class Backend {
/*
* Return the number of reservable vector registers supported
- * @param long_or_fp ‘true’ if floating point computations will be
+ * @param long_or_fp, true if floating point computations will be
* executed or the operations will be long type while vector
* registers are reserved.
* @return the number of vector registers that are available
@@ -46,7 +46,10 @@ class Backend {
* are held back to generate scalar code without exhausting vector
* registers, if scalar code also uses the vector registers.
*/
- virtual int NumReservableVectorRegisters(bool long_or_fp) { return 0; }
+ virtual int NumReservableVectorRegisters(bool long_or_fp) {
+ UNUSED(long_or_fp);
+ return 0;
+ }
protected:
explicit Backend(ArenaAllocator* arena) : arena_(arena) {}
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 0b769991b3..1297ba9c7f 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -28,6 +28,7 @@ enum RegisterClass {
kRefReg,
kAnyReg,
};
+std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs);
enum BitsUsed {
kSize32Bits,
@@ -82,6 +83,7 @@ enum RegLocationType {
kLocCompilerTemp,
kLocInvalid
};
+std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs);
enum BBType {
kNullBlock,
@@ -91,6 +93,7 @@ enum BBType {
kExceptionHandling,
kDead,
};
+std::ostream& operator<<(std::ostream& os, const BBType& code);
// Shared pseudo opcodes - must be < 0.
enum LIRPseudoOpcode {
@@ -111,6 +114,7 @@ enum LIRPseudoOpcode {
kPseudoEHBlockLabel = -2,
kPseudoNormalBlockLabel = -1,
};
+std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs);
enum ExtendedMIROpcode {
kMirOpFirst = kNumPackedOpcodes,
@@ -334,6 +338,7 @@ enum BlockListType {
kPackedSwitch,
kSparseSwitch,
};
+std::ostream& operator<<(std::ostream& os, const BlockListType& rhs);
enum AssemblerStatus {
kSuccess,
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f9a05c2eba..205a5218f2 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -282,10 +282,11 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst,
} // namespace art
extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, art::InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file,
- art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ uint32_t access_flags, art::InvokeType invoke_type,
+ uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+ const art::DexFile& dex_file,
+ art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index a4a7602c4b..72d111244d 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -19,14 +19,14 @@
#include "base/macros.h"
#include "compiler_internals.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
class LocalValueNumbering;
class MirFieldInfo;
-class GlobalValueNumbering {
+class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
public:
enum Mode {
kModeGvn,
@@ -55,33 +55,17 @@ class GlobalValueNumbering {
}
// Allow modifications.
- void StartPostProcessing() {
- DCHECK(Good());
- DCHECK_EQ(mode_, kModeGvn);
- mode_ = kModeGvnPostProcessing;
- }
+ void StartPostProcessing();
bool CanModify() const {
return modifications_allowed_ && Good();
}
- // GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
static constexpr uint16_t kNoValue = 0xffffu;
// Allocate a new value name.
- uint16_t NewValueName() {
- DCHECK_NE(mode_, kModeGvnPostProcessing);
- ++last_value_;
- return last_value_;
- }
+ uint16_t NewValueName();
// Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
@@ -228,7 +212,7 @@ class GlobalValueNumbering {
}
CompilationUnit* const cu_;
- MIRGraph* mir_graph_;
+ MIRGraph* const mir_graph_;
ScopedArenaAllocator* const allocator_;
// The maximum number of nested loops that we accept for GVN.
@@ -270,6 +254,19 @@ class GlobalValueNumbering {
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
};
+std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
+
+inline void GlobalValueNumbering::StartPostProcessing() {
+ DCHECK(Good());
+ DCHECK_EQ(mode_, kModeGvn);
+ mode_ = kModeGvnPostProcessing;
+}
+
+inline uint16_t GlobalValueNumbering::NewValueName() {
+ DCHECK_NE(mode_, kModeGvnPostProcessing);
+ ++last_value_;
+ return last_value_;
+}
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a171d7c2b5..a7d93538d6 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -107,7 +107,8 @@ class LocalValueNumbering::AliasingIFieldVersions {
class LocalValueNumbering::NonAliasingArrayVersions {
public:
- static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+ static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
uint16_t array) {
return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
}
@@ -129,8 +130,9 @@ class LocalValueNumbering::NonAliasingArrayVersions {
gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t array) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
+ uint16_t array ATTRIBUTE_UNUSED) {
return false; // Not affected by global_memory_version_.
}
@@ -164,8 +166,9 @@ class LocalValueNumbering::AliasingArrayVersions {
return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
}
- static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type, uint16_t location) {
+ static uint16_t LookupMergeValue(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED, uint16_t location) {
// If the location is non-aliasing in lvn, use the non-aliasing value.
uint16_t array = gvn->GetArrayLocationBase(location);
if (lvn->IsNonAliasingArray(array, type)) {
@@ -176,8 +179,11 @@ class LocalValueNumbering::AliasingArrayVersions {
gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED) {
+ UNUSED(gvn);
+ UNUSED(type);
return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
}
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index dd8d2db8f4..535fba18b3 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,7 @@
#include "compiler_internals.h"
#include "global_value_numbering.h"
-#include "utils/scoped_arena_allocator.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
@@ -31,7 +30,7 @@ class DexFile;
// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
-class LocalValueNumbering {
+class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
private:
static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
@@ -69,14 +68,6 @@ class LocalValueNumbering {
uint16_t GetValueNumber(MIR* mir);
- // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
// A set of value names.
typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index e6a8cead00..5b7ac3ca1b 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1343,7 +1343,7 @@ void MIRGraph::CompilerInitializeSSAConversion() {
* counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
*/
-void MIRGraph::CountUses(struct BasicBlock* bb) {
+void MIRGraph::CountUses(class BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e0f471ebeb..b87ab66347 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -302,28 +302,28 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
* (by the caller)
* Utilizes a map for fast lookup of the typical cases.
*/
-BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
BasicBlock** immed_pred_block_p) {
if (code_offset >= current_code_item_->insns_size_in_code_units_) {
- return NULL;
+ return nullptr;
}
int block_id = dex_pc_to_block_map_[code_offset];
BasicBlock* bb = GetBasicBlock(block_id);
- if ((bb != NULL) && (bb->start_offset == code_offset)) {
+ if ((bb != nullptr) && (bb->start_offset == code_offset)) {
// Does this containing block start with the desired instruction?
return bb;
}
// No direct hit.
if (!create) {
- return NULL;
+ return nullptr;
}
- if (bb != NULL) {
+ if (bb != nullptr) {
// The target exists somewhere in an existing block.
- return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : NULL);
+ return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
}
// Create a new block.
@@ -360,8 +360,7 @@ void MIRGraph::ProcessTryCatchBlocks() {
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- FindBlock(address, false /* split */, true /*create*/,
- /* immed_pred_block_p */ NULL);
+ FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -466,7 +465,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
}
CountBranch(target);
- BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
+ BasicBlock* taken_block = FindBlock(target, /* create */ true,
/* immed_pred_block_p */ &cur_block);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -474,19 +473,6 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
BasicBlock* fallthrough_block = FindBlock(cur_offset + width,
- /*
- * If the method is processed
- * in sequential order from the
- * beginning, we don't need to
- * specify split for continue
- * blocks. However, this
- * routine can be called by
- * compileLoop, which starts
- * parsing the method from an
- * arbitrary address in the
- * method body.
- */
- true,
/* create */
true,
/* immed_pred_block_p */
@@ -494,8 +480,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
return cur_block;
}
@@ -503,6 +488,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags) {
+ UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
int size;
@@ -554,8 +540,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
cur_block->successor_blocks.reserve(size);
for (i = 0; i < size; i++) {
- BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
- /* create */ true, /* immed_pred_block_p */ &cur_block);
+ BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
+ /* immed_pred_block_p */ &cur_block);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -568,8 +554,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
}
/* Fall-through case */
- BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
- /* create */ true, /* immed_pred_block_p */ NULL);
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
+ /* immed_pred_block_p */ nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -579,6 +565,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags, ArenaBitVector* try_block_addr,
const uint16_t* code_ptr, const uint16_t* code_end) {
+ UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
@@ -593,8 +580,8 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
}
for (; iterator.HasNext(); iterator.Next()) {
- BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
- false /* creat */, NULL /* immed_pred_block_p */);
+ BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
+ nullptr /* immed_pred_block_p */);
if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
// Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -629,8 +616,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
cur_block->explicit_throw = true;
if (code_ptr < code_end) {
// Force creation of new block following THROW via side-effect.
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
if (!in_try_block) {
// Don't split a THROW that can't rethrow - we're done.
@@ -813,8 +799,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
* Create a fallthrough block for real instructions
* (incl. NOP).
*/
- FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
} else if (flags & Instruction::kThrow) {
cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
@@ -837,8 +822,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
}
current_offset_ += width;
- BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
- false, /* immed_pred_block_p */ NULL);
+ BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
+ /* immed_pred_block_p */ nullptr);
if (next_block) {
/*
* The next instruction could be the target of a previously parsed
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fd4c473444..76f68e28c0 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -228,7 +228,8 @@ struct SSARepresentation {
* The Midlevel Intermediate Representation node, which may be largely considered a
* wrapper around a Dalvik byte code.
*/
-struct MIR {
+class MIR : public ArenaObject<kArenaAllocMIR> {
+ public:
/*
* TODO: remove embedded DecodedInstruction to save space, keeping only opcode. Recover
* additional fields on as-needed basis. Question: how to support MIR Pseudo-ops; probably
@@ -344,16 +345,12 @@ struct MIR {
MIR* Copy(CompilationUnit *c_unit);
MIR* Copy(MIRGraph* mir_Graph);
-
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
- }
- static void operator delete(void* p) {} // Nop.
};
struct SuccessorBlockInfo;
-struct BasicBlock {
+class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
+ public:
BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
: id(block_id),
dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
@@ -457,10 +454,8 @@ struct BasicBlock {
MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
bool IsExceptionBlock() const;
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
- }
- static void operator delete(void* p) {} // Nop.
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
/*
@@ -548,7 +543,7 @@ class MIRGraph {
/* Find existing block */
BasicBlock* FindBlock(DexOffset code_offset) {
- return FindBlock(code_offset, false, false, NULL);
+ return FindBlock(code_offset, false, NULL);
}
const uint16_t* GetCurrentInsns() const {
@@ -627,7 +622,7 @@ class MIRGraph {
return def_count_;
}
- ArenaAllocator* GetArena() {
+ ArenaAllocator* GetArena() const {
return arena_;
}
@@ -1135,7 +1130,7 @@ class MIRGraph {
* @brief Count the uses in the BasicBlock
* @param bb the BasicBlock
*/
- void CountUses(struct BasicBlock* bb);
+ void CountUses(class BasicBlock* bb);
static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1208,8 +1203,7 @@ class MIRGraph {
bool ContentIsInsn(const uint16_t* code_ptr);
BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
BasicBlock** immed_pred_block_p);
- BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
- BasicBlock** immed_pred_block_p);
+ BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
void ProcessTryCatchBlocks();
bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
@@ -1233,7 +1227,7 @@ class MIRGraph {
void ComputeDomPostOrderTraversal(BasicBlock* bb);
int GetSSAUseCount(int s_reg);
bool BasicBlockOpt(BasicBlock* bb);
- bool BuildExtendedBBList(struct BasicBlock* bb);
+ bool BuildExtendedBBList(class BasicBlock* bb);
bool FillDefBlockMatrix(BasicBlock* bb);
void InitializeDominationInfo(BasicBlock* bb);
bool ComputeblockIDom(BasicBlock* bb);
@@ -1305,7 +1299,7 @@ class MIRGraph {
int method_sreg_;
unsigned int attributes_;
Checkstats* checkstats_;
- ArenaAllocator* arena_;
+ ArenaAllocator* const arena_;
int backward_branches_;
int forward_branches_;
size_t num_non_special_compiler_temps_; // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 8e583ccfff..3604eb9e07 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -16,12 +16,11 @@
#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
+#include "dataflow_iterator-inl.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/global_value_numbering.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "quick/dex_file_method_inliner.h"
+#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
#include "utils/scoped_arena_containers.h"
@@ -660,7 +659,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
/* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(struct BasicBlock* bb) {
+void MIRGraph::CountChecks(class BasicBlock* bb) {
if (bb->data_flow_info != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
if (mir->ssa_rep == NULL) {
@@ -750,7 +749,7 @@ bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
}
/* Combine any basic blocks terminated by instructions that we now know can't throw */
-void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
+void MIRGraph::CombineBlocks(class BasicBlock* bb) {
// Loop here to allow combining a sequence of blocks
while ((bb->block_type == kDalvikByteCode) &&
(bb->last_mir_insn != nullptr) &&
@@ -1510,7 +1509,7 @@ void MIRGraph::DumpCheckStats() {
}
}
-bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
+bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (bb->visited) return false;
if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock))) {
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index e349eed76d..d3e54a0bf7 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,11 +19,13 @@
#include <string>
-#include "base/macros.h"
+#include "compiler_ir.h"
+#include "base/logging.h"
+
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -81,15 +83,10 @@ class Pass {
* @param data the object containing data necessary for the pass.
* @return whether or not there is a change when walking the BasicBlock
*/
- virtual bool Worker(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Passes that do all their work in Start() or End() should not allow useless node iteration.
- DCHECK(false) << "Unsupported default Worker() used for " << GetName();
-
- // BasicBlock did not change.
- return false;
+ LOG(FATAL) << "Unsupported default Worker() used for " << GetName();
+ UNREACHABLE();
}
static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 2f3c8b2217..d0b450a3ac 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -23,7 +23,7 @@
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -32,10 +32,11 @@ class Pass;
* @details Each enum should be a power of 2 to be correctly used.
*/
enum OptimizationFlag {
- kOptimizationBasicBlockChange = 1, /**< @brief Has there been a change to a BasicBlock? */
- kOptimizationDefUsesChange = 2, /**< @brief Has there been a change to a def-use? */
- kLoopStructureChange = 4, /**< @brief Has there been a loop structural change? */
+ kOptimizationBasicBlockChange = 1, /// @brief Has there been a change to a BasicBlock?
+ kOptimizationDefUsesChange = 2, /// @brief Has there been a change to a def-use?
+ kLoopStructureChange = 4, /// @brief Has there been a loop structural change?
};
+std::ostream& operator<<(std::ostream& os, const OptimizationFlag& rhs);
// Data holder class.
class PassMEDataHolder: public PassDataHolder {
@@ -47,24 +48,25 @@ class PassMEDataHolder: public PassDataHolder {
};
enum DataFlowAnalysisMode {
- kAllNodes = 0, /**< @brief All nodes. */
- kPreOrderDFSTraversal, /**< @brief Depth-First-Search / Pre-Order. */
- kRepeatingPreOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Pre-Order. */
- kReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Reverse Post-Order. */
- kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */
- kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
- kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
- kTopologicalSortTraversal, /**< @brief Topological Order traversal. */
- kLoopRepeatingTopologicalSortTraversal, /**< @brief Loop-repeating Topological Order traversal. */
- kNoNodes, /**< @brief Skip BasicBlock traversal. */
+ kAllNodes = 0, /// @brief All nodes.
+ kPreOrderDFSTraversal, /// @brief Depth-First-Search / Pre-Order.
+ kRepeatingPreOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Pre-Order.
+ kReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Reverse Post-Order.
+ kRepeatingPostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Post-Order.
+ kRepeatingReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Reverse Post-Order.
+ kPostOrderDOMTraversal, /// @brief Dominator tree / Post-Order.
+ kTopologicalSortTraversal, /// @brief Topological Order traversal.
+ kLoopRepeatingTopologicalSortTraversal, /// @brief Loop-repeating Topological Order traversal.
+ kNoNodes, /// @brief Skip BasicBlock traversal.
};
+std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& rhs);
/**
* @class Pass
* @brief Pass is the Pass structure for the optimizations.
* @details The following structure has the different optimization passes that we are going to do.
*/
-class PassME: public Pass {
+class PassME : public Pass {
public:
explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
unsigned int flags = 0u, const char* dump = "")
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 94ae3f7e5f..bc4f5c4100 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -73,7 +73,7 @@ class LLVMInfo {
std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
};
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct MIR;
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 36cb7a4efc..b2db36d831 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -109,7 +109,7 @@ enum ArmResourceEncodingPos {
kArmRegEnd = 48,
};
-enum ArmNativeRegisterPool {
+enum ArmNativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
r0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
r1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
r2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -546,6 +546,7 @@ enum ArmOpcode {
kThumb2StrdI8, // strd rt, rt2, [rn +-/1024].
kArmLast,
};
+std::ostream& operator<<(std::ostream& os, const ArmOpcode& rhs);
enum ArmOpDmbOptions {
kSY = 0xf,
@@ -577,6 +578,7 @@ enum ArmEncodingKind {
kFmtOff24, // 24-bit Thumb2 unconditional branch encoding.
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const ArmEncodingKind& rhs);
// Struct used to define the snippet positions for each Thumb opcode.
struct ArmEncodingMap {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 13b9bf09ed..b4eebb320e 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -475,9 +475,9 @@ static bool ArmUseRelativeCall(CompilationUnit* cu, const MethodReference& targe
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 442c4fcec6..179ba02175 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -196,7 +196,7 @@ class ArmMir2Lir FINAL : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -251,10 +251,10 @@ class ArmMir2Lir FINAL : public Mir2Lir {
RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocPreservedSingle(int s_reg);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index ce31b278a8..ebf1905579 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -209,7 +209,8 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -232,6 +233,7 @@ void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
}
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -504,6 +506,7 @@ static const MagicTable magic_table[] = {
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
@@ -687,14 +690,17 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
}
RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -1072,6 +1078,7 @@ LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
@@ -1168,108 +1175,109 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- /*
- * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
- * dest = src1.lo * src2.lo;
- * tmp1 += src1.lo * src2.hi;
- * dest.hi += tmp1;
- *
- * To pull off inline multiply, we have a worst-case requirement of 7 temporary
- * registers. Normally for Arm, we get 5. We can get to 6 by including
- * lr in the temp set. The only problematic case is all operands and result are
- * distinct, and none have been promoted. In that case, we can succeed by aggressively
- * freeing operand temp registers after they are no longer needed. All other cases
- * can proceed normally. We'll just punt on the case of the result having a misaligned
- * overlap with either operand and send that case to a runtime handler.
- */
- RegLocation rl_result;
- if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
- FlushAllRegs();
- CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
- rl_result = GetReturnWide(kCoreReg);
- StoreValueWide(rl_dest, rl_result);
- return;
- }
+ UNUSED(opcode);
+ /*
+ * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
+ * dest = src1.lo * src2.lo;
+ * tmp1 += src1.lo * src2.hi;
+ * dest.hi += tmp1;
+ *
+ * To pull off inline multiply, we have a worst-case requirement of 7 temporary
+ * registers. Normally for Arm, we get 5. We can get to 6 by including
+ * lr in the temp set. The only problematic case is all operands and result are
+ * distinct, and none have been promoted. In that case, we can succeed by aggressively
+ * freeing operand temp registers after they are no longer needed. All other cases
+ * can proceed normally. We'll just punt on the case of the result having a misaligned
+ * overlap with either operand and send that case to a runtime handler.
+ */
+ RegLocation rl_result;
+ if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
+ FlushAllRegs();
+ CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(kCoreReg);
+ StoreValueWide(rl_dest, rl_result);
+ return;
+ }
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
- int reg_status = 0;
- RegStorage res_lo;
- RegStorage res_hi;
- bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
- !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
- bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
- bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
- // Check if rl_dest is *not* either operand and we have enough temp registers.
- if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
- (dest_promoted || src1_promoted || src2_promoted)) {
- // In this case, we do not need to manually allocate temp registers for result.
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- res_lo = rl_result.reg.GetLow();
- res_hi = rl_result.reg.GetHigh();
- } else {
- res_lo = AllocTemp();
- if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
- // In this case, we have enough temp registers to be allocated for result.
- res_hi = AllocTemp();
- reg_status = 1;
- } else {
- // In this case, all temps are now allocated.
- // res_hi will be allocated after we can free src1_hi.
- reg_status = 2;
- }
- }
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- // Temporarily add LR to the temp pool, and assign it to tmp1
- MarkTemp(rs_rARM_LR);
- FreeTemp(rs_rARM_LR);
- RegStorage tmp1 = rs_rARM_LR;
- LockTemp(rs_rARM_LR);
-
- if (rl_src1.reg == rl_src2.reg) {
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ int reg_status = 0;
+ RegStorage res_lo;
+ RegStorage res_hi;
+ bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+ !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
+ bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
+ bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
+ // Check if rl_dest is *not* either operand and we have enough temp registers.
+ if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
+ (dest_promoted || src1_promoted || src2_promoted)) {
+ // In this case, we do not need to manually allocate temp registers for result.
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ res_lo = rl_result.reg.GetLow();
+ res_hi = rl_result.reg.GetHigh();
+ } else {
+ res_lo = AllocTemp();
+ if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
+ // In this case, we have enough temp registers to be allocated for result.
+ res_hi = AllocTemp();
+ reg_status = 1;
} else {
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- if (reg_status == 2) {
- DCHECK(!res_hi.Valid());
- DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
- DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- // Will force free src1_hi, so must clobber.
- Clobber(rl_src1.reg);
- FreeTemp(rl_src1.reg.GetHigh());
- res_hi = AllocTemp();
- }
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
- tmp1.GetReg());
- NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
- if (reg_status == 2) {
- FreeTemp(rl_src1.reg.GetLow());
- }
+ // In this case, all temps are now allocated.
+ // res_hi will be allocated after we can free src1_hi.
+ reg_status = 2;
}
+ }
- // Now, restore lr to its non-temp status.
- FreeTemp(tmp1);
- Clobber(rs_rARM_LR);
- UnmarkTemp(rs_rARM_LR);
+ // Temporarily add LR to the temp pool, and assign it to tmp1
+ MarkTemp(rs_rARM_LR);
+ FreeTemp(rs_rARM_LR);
+ RegStorage tmp1 = rs_rARM_LR;
+ LockTemp(rs_rARM_LR);
- if (reg_status != 0) {
- // We had manually allocated registers for rl_result.
- // Now construct a RegLocation.
- rl_result = GetReturnWide(kCoreReg); // Just using as a template.
- rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+ if (rl_src1.reg == rl_src2.reg) {
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ } else {
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ if (reg_status == 2) {
+ DCHECK(!res_hi.Valid());
+ DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ // Will force free src1_hi, so must clobber.
+ Clobber(rl_src1.reg);
+ FreeTemp(rl_src1.reg.GetHigh());
+ res_hi = AllocTemp();
}
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+ tmp1.GetReg());
+ NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
+ if (reg_status == 2) {
+ FreeTemp(rl_src1.reg.GetLow());
+ }
+ }
- StoreValueWide(rl_dest, rl_result);
+ // Now, restore lr to its non-temp status.
+ FreeTemp(tmp1);
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
+
+ if (reg_status != 0) {
+ // We had manually allocated registers for rl_result.
+ // Now construct a RegLocation.
+ rl_result = GetReturnWide(kCoreReg); // Just using as a template.
+ rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+ }
+
+ StoreValueWide(rl_dest, rl_result);
}
void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -1471,6 +1479,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
int flags) {
+ UNUSED(flags);
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7100a285a6..1f26a815da 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -559,11 +559,10 @@ ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
call_method_insns_.reserve(100);
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kArmLast; i++) {
- if (ArmMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(ArmMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index ce2de65abf..448e80f715 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -373,18 +373,21 @@ LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
}
LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1167,11 +1170,13 @@ LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a87b06aeb4..973279e8b7 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -127,7 +127,7 @@ enum A64ResourceEncodingPos {
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// Registers (integer) values.
-enum A64NativeRegisterPool {
+enum A64NativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
# define A64_DEFINE_REGISTERS(nr) \
rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
@@ -362,9 +362,10 @@ enum A64Opcode {
kA64Tbz3rht, // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Last,
- kA64NotWide = 0, // Flag used to select the first instruction variant.
- kA64Wide = 0x1000 // Flag used to select the second instruction variant.
+ kA64NotWide = kA64First, // 0 - Flag used to select the first instruction variant.
+ kA64Wide = 0x1000 // Flag used to select the second instruction variant.
};
+std::ostream& operator<<(std::ostream& os, const A64Opcode& rhs);
/*
* The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
@@ -414,6 +415,7 @@ enum A64EncodingKind {
kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const A64EncodingKind & rhs);
// Struct used to define the snippet positions for each A64 opcode.
struct A64EncodingMap {
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c898e2d205..a9a58a3bfb 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -401,6 +401,7 @@ void Arm64Mir2Lir::GenSpecialExitSequence() {
}
static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
+ UNUSED(cu, target_method);
// Always emit relative calls.
return true;
}
@@ -411,9 +412,10 @@ static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& tar
*/
static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, unused_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_code != 0 && direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 9f0260635d..bd363c4fd2 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -188,7 +188,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
@@ -249,10 +249,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this) OVERRIDE;
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 418d81efe6..965759b59d 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -32,11 +32,13 @@ LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage s
}
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
+ UNUSED(ccode, guide);
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -174,13 +176,14 @@ void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode
void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(rs_dest.Valid());
OpRegReg(kOpCmp, left_op, right_op);
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -406,6 +409,7 @@ static const MagicTable magic_table[] = {
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -450,6 +454,7 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d
bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -590,13 +595,16 @@ bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_d
}
bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
- return false;
+ UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -615,8 +623,9 @@ RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
@@ -929,25 +938,27 @@ LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
}
LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
+ int first_bit, int second_bit) {
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}
@@ -1311,7 +1322,7 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
- int flags) {
+ int flags ATTRIBUTE_UNUSED) {
OpKind op = kOpBkpt;
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1467,8 +1478,8 @@ static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t
}
}
-static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
int core_count = POPCOUNT(core_reg_mask);
@@ -1490,7 +1501,7 @@ static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg
}
static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+ uint32_t fp_reg_mask) {
// Otherwise, spill both core and fp regs at the same time.
// The very first instruction will be an stp with pre-indexed address, moving the stack pointer
// down. From then on, we fill upwards. This will generate overall the same number of instructions
@@ -1613,9 +1624,9 @@ int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp
// This case is also optimal when we have an odd number of core spills, and an even (non-zero)
// number of fp spills.
if ((RoundUp(frame_size, 8) / 8 <= 63)) {
- return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
} else {
- return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
}
}
@@ -1653,6 +1664,7 @@ static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32
void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
int frame_size) {
+ DCHECK(base == rs_sp);
// Restore saves and drop stack frame.
// 2 versions:
//
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index ba47883d9e..094ff51eee 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -589,11 +589,10 @@ Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAlloca
call_method_insns_(arena->Adapter()) {
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kA64Last; i++) {
- if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
- LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i)
+ << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 6985b73845..47ccc46976 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -306,7 +306,7 @@ static int GetNumFastHalfWords(uint64_t value) {
// algorithm will give it a low priority for promotion, even when it is referenced many times in
// the code.
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
// A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
// We therefore return true and give it a low priority for promotion.
return true;
@@ -673,19 +673,24 @@ LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
+ MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
return nullptr;
}
LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1386,16 +1391,20 @@ LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage
}
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ UNUSED(r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
- return NULL;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt,
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
+ // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 80a1ac4c52..4bc8967ce5 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -105,17 +105,17 @@ void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
first_lir_insn_ = lir->next;
- if (lir->next != NULL) {
- lir->next->prev = NULL;
+ if (lir->next != nullptr) {
+ lir->next->prev = nullptr;
} else {
- DCHECK(lir->next == NULL);
+ DCHECK(lir->next == nullptr);
DCHECK(lir == last_lir_insn_);
- last_lir_insn_ = NULL;
+ last_lir_insn_ = nullptr;
}
} else if (lir == last_lir_insn_) {
last_lir_insn_ = lir->prev;
- lir->prev->next = NULL;
- } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+ lir->prev->next = nullptr;
+ } else if ((lir->prev != nullptr) && (lir->next != nullptr)) {
lir->prev->next = lir->next;
lir->next->prev = lir->prev;
}
@@ -334,10 +334,10 @@ void Mir2Lir::CodegenDump() {
<< static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
DumpPromotionMap();
UpdateLIROffsets();
- for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = first_lir_insn_; lir_insn != nullptr; lir_insn = lir_insn->next) {
DumpLIRInsn(lir_insn, 0);
}
- for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = literal_list_; lir_insn != nullptr; lir_insn = lir_insn->next) {
LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
lir_insn->operands[0]);
}
@@ -368,13 +368,13 @@ LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
return data_target;
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
bool lo_match = false;
- LIR* lo_target = NULL;
+ LIR* lo_target = nullptr;
while (data_target) {
if (lo_match && (data_target->operands[0] == val_hi)) {
// Record high word in case we need to expand this later.
@@ -388,7 +388,7 @@ LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
}
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact method match */
@@ -431,7 +431,7 @@ LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
estimated_native_code_size_ += sizeof(value);
return new_value;
}
- return NULL;
+ return nullptr;
}
/* Add a 64-bit constant to the constant pool or mixed with code */
@@ -469,14 +469,14 @@ static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
void Mir2Lir::InstallLiteralPools() {
AlignBuffer(code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
Push32(code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// TODO: patches_.reserve() as needed.
// Push code and method literals, record offsets for the compiler to patch.
data_lir = code_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -486,7 +486,7 @@ void Mir2Lir::InstallLiteralPools() {
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -497,7 +497,7 @@ void Mir2Lir::InstallLiteralPools() {
}
// Push class literals.
data_lir = class_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_type_idx = data_lir->operands[0];
const DexFile* class_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -577,7 +577,7 @@ void Mir2Lir::InstallFillArrayData() {
}
static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += 4;
}
@@ -588,7 +588,7 @@ static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
unsigned int element_size) {
// Align to natural pointer size.
offset = RoundUp(offset, element_size);
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += element_size;
}
@@ -642,7 +642,7 @@ void Mir2Lir::CreateMappingTables() {
uint32_t dex2pc_entries = 0u;
uint32_t dex2pc_offset = 0u;
uint32_t dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
pc2dex_src_entries++;
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
pc2dex_entries += 1;
@@ -682,7 +682,7 @@ void Mir2Lir::CreateMappingTables() {
pc2dex_dalvik_offset = 0u;
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (generate_src_map && !tgt_lir->flags.is_nop) {
src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
static_cast<int32_t>(tgt_lir->dalvik_offset)}));
@@ -717,7 +717,7 @@ void Mir2Lir::CreateMappingTables() {
CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
auto it = table.PcToDexBegin();
auto it2 = table.DexToPcBegin();
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
@@ -758,7 +758,7 @@ void Mir2Lir::CreateNativeGcMap() {
uint32_t native_offset = it.NativePcOffset();
uint32_t dex_pc = it.DexPc();
const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
- CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
+ CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
native_gc_map_builder.AddEntry(native_offset, references);
}
@@ -904,6 +904,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+ UNUSED(offset);
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -925,7 +926,7 @@ bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src
case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
- is_taken = false;
+ UNREACHABLE();
}
return is_taken;
}
@@ -941,8 +942,8 @@ ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
case kCondLe: res = kCondGe; break;
case kCondGe: res = kCondLe; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -957,8 +958,8 @@ ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
case kCondLe: res = kCondGt; break;
case kCondGe: res = kCondLt; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -966,11 +967,11 @@ ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Backend(arena),
- literal_list_(NULL),
- method_literal_list_(NULL),
- class_literal_list_(NULL),
- code_literal_list_(NULL),
- first_fixup_(NULL),
+ literal_list_(nullptr),
+ method_literal_list_(nullptr),
+ class_literal_list_(nullptr),
+ code_literal_list_(nullptr),
+ first_fixup_(nullptr),
cu_(cu),
mir_graph_(mir_graph),
switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
@@ -980,8 +981,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
pointer_storage_(arena->Adapter()),
data_offset_(0),
total_size_(0),
- block_label_list_(NULL),
- promotion_map_(NULL),
+ block_label_list_(nullptr),
+ promotion_map_(nullptr),
current_dalvik_offset_(0),
estimated_native_code_size_(0),
reg_pool_(nullptr),
@@ -994,8 +995,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
frame_size_(0),
core_spill_mask_(0),
fp_spill_mask_(0),
- first_lir_insn_(NULL),
- last_lir_insn_(NULL),
+ first_lir_insn_(nullptr),
+ last_lir_insn_(nullptr),
slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena) {
@@ -1005,8 +1006,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for NULL.
- size_t null_idx = WrapPointer(NULL);
+ // Reserve pointer id 0 for nullptr.
+ size_t null_idx = WrapPointer(nullptr);
DCHECK_EQ(null_idx, 0U);
}
@@ -1126,14 +1127,14 @@ int Mir2Lir::ComputeFrameSize() {
* unit
*/
void Mir2Lir::AppendLIR(LIR* lir) {
- if (first_lir_insn_ == NULL) {
- DCHECK(last_lir_insn_ == NULL);
+ if (first_lir_insn_ == nullptr) {
+ DCHECK(last_lir_insn_ == nullptr);
last_lir_insn_ = first_lir_insn_ = lir;
- lir->prev = lir->next = NULL;
+ lir->prev = lir->next = nullptr;
} else {
last_lir_insn_->next = lir;
lir->prev = last_lir_insn_;
- lir->next = NULL;
+ lir->next = nullptr;
last_lir_insn_ = lir;
}
}
@@ -1145,7 +1146,7 @@ void Mir2Lir::AppendLIR(LIR* lir) {
* prev_lir <-> new_lir <-> current_lir
*/
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
- DCHECK(current_lir->prev != NULL);
+ DCHECK(current_lir->prev != nullptr);
LIR *prev_lir = current_lir->prev;
prev_lir->next = new_lir;
@@ -1216,7 +1217,7 @@ void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1233,7 +1234,7 @@ void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType t
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1291,7 +1292,9 @@ RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
}
void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
+ UNREACHABLE();
}
} // namespace art
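
A recurring change in codegen_util.cc (EvaluateBranch, FlipComparisonOrder, NegateComparison) is dropping the dummy assignment before LOG(FATAL) and ending the default case with UNREACHABLE(), so no enum value has to be conjured up just to satisfy the compiler. A sketch of the pattern with stand-in names (not ART's ConditionCode):

    // Stand-in enum and function, illustrative only.
    #include <cstdlib>

    enum ConditionCodeEx { kCondEqEx, kCondNeEx };

    ConditionCodeEx NegateEx(ConditionCodeEx before) {
      switch (before) {
        case kCondEqEx: return kCondNeEx;
        case kCondNeEx: return kCondEqEx;
        default:
          std::abort();             // stand-in for LOG(FATAL) << "Unexpected ccode"
          __builtin_unreachable();  // stand-in for UNREACHABLE(); no dummy return needed
      }
    }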
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0f1d765d8c..453a7f0e34 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -101,7 +101,7 @@ COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafePut], UnsafePut_must_not_be_s
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
SystemArrayCopyCharArray_must_be_static);
-MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
+MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
MIR* insn = mir_graph->NewMIR();
insn->offset = invoke->offset;
insn->optimization_flags = MIR_CALLEE;
@@ -555,11 +555,11 @@ bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* i
break;
case kInlineOpIGet:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIGet(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIGet(mir_graph, bb, invoke, move_result, method);
break;
case kInlineOpIPut:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIPut(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
default:
LOG(FATAL) << "Unexpected inline op: " << method.opcode;
@@ -737,7 +737,7 @@ bool DexFileMethodInliner::GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, M
method.d.data == 0u));
// Insert the CONST instruction.
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
@@ -775,7 +775,7 @@ bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* b
}
// Insert the move instruction
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
@@ -784,8 +784,7 @@ bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* b
}
bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -819,7 +818,7 @@ bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MI
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->offset = invoke->offset;
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -836,8 +835,7 @@ bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MI
}
bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -881,7 +879,7 @@ bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MI
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = src_reg;
insn->dalvikInsn.vB = object_reg;
@@ -895,7 +893,7 @@ bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MI
bb->InsertMIRAfter(invoke, insn);
if (move_result != nullptr) {
- MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* move = AllocReplacementMIR(mir_graph, invoke);
move->offset = move_result->offset;
if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 30a2d9081a..cb521da9df 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,9 +31,9 @@ namespace verifier {
class MethodVerifier;
} // namespace verifier
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
-struct MIR;
+class MIR;
class MIRGraph;
class Mir2Lir;
@@ -315,9 +315,9 @@ class DexFileMethodInliner {
static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
MIR* move_result, const InlineMethod& method);
static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
ReaderWriterMutex lock_;
/*
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 215257ba2f..9410f7e83b 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -212,8 +212,7 @@ void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
}
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken,
- LIR* fall_through) {
+ RegLocation rl_src2, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
switch (opcode) {
@@ -276,8 +275,7 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
-void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
- LIR* fall_through) {
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
rl_src = LoadValue(rl_src, reg_class);
@@ -2134,12 +2132,14 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bc4d00b6cd..c7449c8eae 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -473,8 +473,7 @@ static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
cg->MarkPossibleNullPointerException(info->opt_flags);
}
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
- const RegStorage* alt_from,
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
@@ -492,9 +491,10 @@ static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
*/
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info);
DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
cu->instruction_set != kArm64);
@@ -547,7 +547,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
break; // kInvokeTgt := arg0_ref->entrypoint
}
} else {
@@ -571,8 +571,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
- InvokeType unused3) {
+ uint32_t method_idx, uintptr_t, uintptr_t,
+ InvokeType) {
+ UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -595,7 +596,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
break;
}
case 3:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -614,8 +615,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
*/
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t direct_method, InvokeType unused2) {
+ uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
switch (state) {
@@ -641,7 +641,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
break;
}
case 4:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -655,9 +655,9 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
QuickEntrypointEnum trampoline, int state,
const MethodReference& target_method, uint32_t method_idx) {
+ UNUSED(info, method_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
/*
* This handles the case in which the base method is not fully
* resolved at compile time, we bail to a runtime helper.
@@ -684,32 +684,28 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -717,8 +713,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -1400,28 +1395,34 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
}
bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, size);
return false;
}
bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, is_min, is_double);
return false;
}
bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ UNUSED(info, is_double);
return false;
}
@@ -1448,6 +1449,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
}
bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ UNUSED(info);
return false;
}
@@ -1690,7 +1692,6 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
- BeginInvoke(info);
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
@@ -1734,7 +1735,6 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_insn = GenCallInsn(method_info);
- EndInvoke(info);
MarkSafepointPC(call_insn);
ClobberCallerSave();
@@ -1755,6 +1755,7 @@ NextCallInsn Mir2Lir::GetNextSDCallInsn() {
}
LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
+ UNUSED(method_info);
DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
cu_->instruction_set != kArm64);
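
In gen_invoke.cc the NextCallInsn-style helpers keep their fixed signature but drop the names of arguments they ignore (or route them through UNUSED), which is the standard way to satisfy -Wunused-parameter without changing the function-pointer type. A small self-contained sketch, with invented names rather than ART's helpers:

    #include <cstdint>

    // Assumed variadic helper in the spirit of ART's UNUSED; see the sketch further above.
    template <typename... T> inline void UnusedEx(const T&...) {}

    // Style A: leave the ignored parameters unnamed.
    static int NextCallA(int state, uint32_t /* method_idx */, uintptr_t) {
      return state + 1;
    }

    // Style B: keep the names for readability and consume them explicitly.
    static int NextCallB(int state, uint32_t method_idx, uintptr_t direct_code) {
      UnusedEx(method_idx, direct_code);
      return state + 1;
    }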
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 01d1a1e0db..310e1e980b 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -146,12 +146,10 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
"div", "!0r,!1r", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsExt, 0x7c000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
"ext", "!0r,!1r,!2d,!3D", 4),
-#endif
ENCODING_MAP(kMipsJal, 0x0c000000,
kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -240,7 +238,6 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
"sb", "!0r,!1d(!2r)", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsSeb, 0x7c000420,
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -249,7 +246,6 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"seh", "!0r,!1r", 4),
-#endif
ENCODING_MAP(kMipsSh, 0xA4000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8b5bc45683..01784e2fec 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,9 +24,9 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
// TODO
+ UNUSED(bb, mir, special);
return false;
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 508d474404..dc6930c45b 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -121,7 +121,7 @@ class MipsMir2Lir FINAL : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -172,10 +172,10 @@ class MipsMir2Lir FINAL : public Mir2Lir {
bool InexpensiveConstantLong(int64_t value);
bool InexpensiveConstantDouble(int64_t value);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3a4128a79c..431591512e 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -207,8 +207,8 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
- bool gt_bias, bool is_double) {
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+ UNUSED(bb, mir, gt_bias, is_double);
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -230,7 +230,8 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
}
bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
- // TODO: need Mips implementation
+ // TODO: need Mips implementation.
+ UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index baf7311398..d58ddb0d09 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -217,7 +217,8 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -228,10 +229,12 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
}
void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -262,34 +265,39 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
- return rl_dest;
+ UNREACHABLE();
}
bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info, is_long, is_object);
return false;
}
bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info);
return false;
}
@@ -325,23 +333,27 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
}
LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ UNUSED(reg, target);
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -373,27 +385,31 @@ LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targ
bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
- return false;
+ UNREACHABLE();
}
bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
- return false;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
-
void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -416,6 +432,7 @@ void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -629,6 +646,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift, int flags) {
+ UNUSED(flags);
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
}
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 495eb1686c..3615916201 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -142,7 +142,7 @@ enum MipsResourceEncodingPos {
// This bit determines how the CPU access FP registers.
#define FR_BIT 0
-enum MipsNativeRegisterPool {
+enum MipsNativeRegisterPool { // private marker to keep generate-operator-out.py from processing this enum.
rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -408,9 +408,7 @@ enum MipsOpCode {
kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-#if __mips_isa_rev >= 2
kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-#endif
kMipsJal, // jal t [000011] t[25..0].
kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
@@ -433,10 +431,8 @@ enum MipsOpCode {
kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-#if __mips_isa_rev >= 2
kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-#endif
kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
@@ -481,15 +477,17 @@ enum MipsOpCode {
kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
kMipsLast
};
+std::ostream& operator<<(std::ostream& os, const MipsOpCode& rhs);
// Instruction assembly field_loc kind.
enum MipsEncodingKind {
kFmtUnused,
- kFmtBitBlt, /* Bit string using end/start */
- kFmtDfp, /* Double FP reg */
- kFmtSfp, /* Single FP reg */
- kFmtBlt5_2, /* Same 5-bit field to 2 locations */
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+ kFmtSfp, // Single FP reg.
+ kFmtBlt5_2, // Same 5-bit field to 2 locations.
};
+std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
// Struct used to define the snippet positions for each MIPS opcode.
struct MipsEncodingMap {
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index d3719ab312..4a340ecca0 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -421,6 +421,7 @@ void MipsMir2Lir::FreeCallTemps() {
}
bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+ UNUSED(barrier_kind);
#if ANDROID_SMP != 0
NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
return true;
@@ -574,11 +575,10 @@ RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volati
MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena) {
for (int i = 0; i < kMipsLast; i++) {
- if (MipsMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 7178edec8b..044972cc5f 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,14 +56,17 @@ bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
@@ -320,25 +323,28 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
default:
LOG(FATAL) << "Bad case in OpRegReg";
- break;
+ UNREACHABLE();
}
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
@@ -681,16 +687,19 @@ LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
}
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+ UNUSED(cc, target);
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 408606d366..533a6778b5 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -647,7 +647,6 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GT:
case Instruction::IF_LE: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const && rl_src[1].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
@@ -664,7 +663,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
}
break;
}
@@ -676,7 +675,6 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
@@ -692,7 +690,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
+ GenCompareZeroAndBranch(opcode, rl_src[0], taken);
}
break;
}
@@ -1377,8 +1375,9 @@ void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const
}
size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNIMPLEMENTED(FATAL) << "Unsuppored GetInstructionOffset()";
- return 0;
+ UNUSED(lir);
+ UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+ UNREACHABLE();
}
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f4e6dfead2..ef1e7e3c14 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -33,6 +33,7 @@
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
+#include "utils/arena_object.h"
#include "utils/stack_checks.h"
namespace art {
@@ -129,11 +130,11 @@ namespace art {
#define INVALID_SREG (-1)
#endif
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
-struct MIR;
+class MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
@@ -501,12 +502,11 @@ class Mir2Lir : public Backend {
// has completed.
//
- class LIRSlowPath {
+ class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
public:
LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
LIR* cont = nullptr) :
m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
- m2l->StartSlowPath(this);
}
virtual ~LIRSlowPath() {}
virtual void Compile() = 0;
@@ -694,11 +694,6 @@ class Mir2Lir : public Backend {
void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
- virtual void StartSlowPath(LIRSlowPath* slowpath) {}
- virtual void BeginInvoke(CallInfo* info) {}
- virtual void EndInvoke(CallInfo* info) {}
-
-
// Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
virtual RegLocation NarrowRegLoc(RegLocation loc);
@@ -822,10 +817,9 @@ class Mir2Lir : public Backend {
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
- void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken, LIR* fall_through);
- void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
- LIR* taken, LIR* fall_through);
+ void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
+ LIR* taken);
+ void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
@@ -1350,7 +1344,7 @@ class Mir2Lir : public Backend {
*/
virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) = 0;
+ RegisterClass dest_reg_class) = 0;
/**
* @brief Used to generate a memory barrier in an architecture specific way.
@@ -1452,6 +1446,7 @@ class Mir2Lir : public Backend {
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ UNUSED(opcode);
return InexpensiveConstantInt(value);
}
@@ -1642,12 +1637,12 @@ class Mir2Lir : public Backend {
/**
* Returns true iff wide GPRs are just different views on the same physical register.
*/
- virtual bool WideGPRsAreAliases() = 0;
+ virtual bool WideGPRsAreAliases() const = 0;
/**
* Returns true iff wide FPRs are just different views on the same physical register.
*/
- virtual bool WideFPRsAreAliases() = 0;
+ virtual bool WideFPRsAreAliases() const = 0;
enum class WidenessCheck { // private
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 8f7bd3033a..5d49a9133d 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -625,6 +625,7 @@ bool QuickCompiler::WriteElf(art::File* file,
}
Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
case kThumb2:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6305b22ded..0a98c800a8 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -316,16 +316,16 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
- return res;
+ UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
- return res;
+ UNREACHABLE();
}
@@ -1392,6 +1392,7 @@ int Mir2Lir::GetSRegHi(int lowSreg) {
}
bool Mir2Lir::LiveOut(int s_reg) {
+ UNUSED(s_reg);
// For now.
return true;
}
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
index 436cdb5440..78e81b2ed1 100644
--- a/compiler/dex/quick/resource_mask.h
+++ b/compiler/dex/quick/resource_mask.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include "base/logging.h"
+#include "base/value_object.h"
#include "dex/reg_storage.h"
namespace art {
@@ -113,10 +114,7 @@ class ResourceMask {
return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
}
- void SetBit(size_t bit) {
- DCHECK_LE(bit, kHighestCommonResource);
- masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
- }
+ void SetBit(size_t bit);
constexpr bool HasBit(size_t bit) const {
return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
@@ -139,6 +137,12 @@ class ResourceMask {
friend class ResourceMaskCache;
};
+std::ostream& operator<<(std::ostream& os, const ResourceMask::ResourceBit& rhs);
+
+inline void ResourceMask::SetBit(size_t bit) {
+ DCHECK_LE(bit, kHighestCommonResource);
+ masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
+}
constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
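Hoisting SetBit below the class and declaring a stream operator for ResourceBit keeps the method's DCHECK_LE usable, presumably because a failed check streams kHighestCommonResource and therefore needs the enum's operator<< declared first. The bit layout itself is unchanged: resources 0..63 live in masks_[0], 64..127 in masks_[1]. A small usage sketch of that indexing (values are illustrative):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t masks[2] = {0u, 0u};
      size_t bit = 70;                                 // hypothetical resource id
      masks[bit / 64u] |= UINT64_C(1) << (bit & 63u);  // lands in word 1, bit 6
      bool set = (masks[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
      std::printf("bit %zu -> word %zu, set=%d\n", bit, bit / 64u, set);
      return 0;
    }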
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index dce2b73fff..85a3b3210d 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -547,6 +547,11 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86RepneScasw, kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
+ os << X86Mir2Lir::EncodingMap[rhs].name;
+ return os;
+}
+
static bool NeedsRex(int32_t raw_reg) {
return RegStorage::RegNum(raw_reg) > 7;
}
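The new operator streams an X86OpCode by looking up its assembler name in EncodingMap, so log and check macros can print the mnemonic instead of a raw integer. A self-contained sketch of the same table-backed pattern (names are illustrative, not the real ART tables):

    #include <iostream>

    enum SketchOpCode { kSketchAdd, kSketchSub, kSketchLast };
    static const char* const kSketchNames[kSketchLast] = { "Add", "Sub" };

    std::ostream& operator<<(std::ostream& os, const SketchOpCode& rhs) {
      return os << kSketchNames[rhs];  // table lookup, like EncodingMap[rhs].name
    }

    int main() {
      std::cout << "assembling " << kSketchAdd << "\n";  // prints "assembling Add"
      return 0;
    }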
@@ -1631,6 +1636,7 @@ void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
* sequence or request that the trace be shortened and retried.
*/
AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+ UNUSED(start_addr);
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 86efc1e52c..497ef94c27 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -290,9 +290,10 @@ void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
*/
static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, direct_code);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 7b5b831e38..dec99aefab 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -250,7 +250,7 @@ class X86Mir2Lir : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMoveException(RegLocation rl_dest) OVERRIDE;
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -499,7 +499,7 @@ class X86Mir2Lir : public Mir2Lir {
void GenConstWide(RegLocation rl_dest, int64_t value);
void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
- void GenShiftByteVector(BasicBlock *bb, MIR *mir);
+ void GenShiftByteVector(MIR* mir);
void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
uint32_t m4);
void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
@@ -557,88 +557,80 @@ class X86Mir2Lir : public Mir2Lir {
/*
* @brief Load 128 bit constant into vector register.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector
* @note vA is the TypeSize for the register.
* @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
*/
- void GenConst128(BasicBlock* bb, MIR* mir);
+ void GenConst128(MIR* mir);
/*
* @brief MIR to move a vectorized register to another.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination
* @note vC: source
*/
- void GenMoveVector(BasicBlock *bb, MIR *mir);
+ void GenMoveVector(MIR* mir);
/*
* @brief Packed multiply of units in two vector registers: vB = vB .* vC using vA to know
* the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+ void GenMultiplyVector(MIR* mir);
/*
* @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenAddVector(BasicBlock *bb, MIR *mir);
+ void GenAddVector(MIR* mir);
/*
* @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenSubtractVector(BasicBlock *bb, MIR *mir);
+ void GenSubtractVector(MIR* mir);
/*
* @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+ void GenShiftLeftVector(MIR* mir);
/*
* @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
* know the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenSignedShiftRightVector(MIR* mir);
/*
* @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
* to know the type of the vector.
- * @param bb The basic block in which the MIR is from..
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenUnsignedShiftRightVector(MIR* mir);
/*
* @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
@@ -647,51 +639,47 @@ class X86Mir2Lir : public Mir2Lir {
* @note vB: destination and source
* @note vC: source
*/
- void GenAndVector(BasicBlock *bb, MIR *mir);
+ void GenAndVector(MIR* mir);
/*
* @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenOrVector(BasicBlock *bb, MIR *mir);
+ void GenOrVector(MIR* mir);
/*
* @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenXorVector(BasicBlock *bb, MIR *mir);
+ void GenXorVector(MIR* mir);
/*
* @brief Reduce a 128-bit packed element into a single VR by taking lower bits
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
* @note vA: TypeSize
* @note vB: destination and source VR (not vector register)
* @note vC: source (vector register)
*/
- void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+ void GenAddReduceVector(MIR* mir);
/*
* @brief Extract a packed element into a single VR.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination VR (not vector register)
* @note vC: source (vector register)
* @note arg[0]: The index to use for extraction from vector register (which packed element).
*/
- void GenReduceVector(BasicBlock *bb, MIR *mir);
+ void GenReduceVector(MIR* mir);
/*
* @brief Create a vector value, with all TypeSize values equal to vC
@@ -701,21 +689,21 @@ class X86Mir2Lir : public Mir2Lir {
* @note vB: destination vector register.
* @note vC: source VR (not vector register).
*/
- void GenSetVector(BasicBlock *bb, MIR *mir);
+ void GenSetVector(MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayGet.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayGet.
*/
- void GenPackedArrayGet(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayPut.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayPut.
*/
- void GenPackedArrayPut(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
/*
* @brief Generate code for a vector opcode.
@@ -890,8 +878,8 @@ class X86Mir2Lir : public Mir2Lir {
* the value is live in a temp register of the correct class. Additionally, if the value is in
* a temp register of the wrong register class, it will be clobbered.
*/
- RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
- RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
+ RegLocation UpdateLocTyped(RegLocation loc);
+ RegLocation UpdateLocWideTyped(RegLocation loc);
/*
* @brief Analyze MIR before generating code, to prepare for the code generation.
@@ -902,7 +890,7 @@ class X86Mir2Lir : public Mir2Lir {
* @brief Analyze one basic block.
* @param bb Basic block to analyze.
*/
- void AnalyzeBB(BasicBlock * bb);
+ void AnalyzeBB(BasicBlock* bb);
/*
* @brief Analyze one extended MIR instruction
@@ -910,7 +898,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Extended instruction to analyze.
*/
- void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR instruction
@@ -918,7 +906,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR float/double instruction
@@ -926,7 +914,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one use of a double operand.
@@ -940,7 +928,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
// Information derived from analysis of MIR
@@ -987,12 +975,11 @@ class X86Mir2Lir : public Mir2Lir {
*/
LIR* AddVectorLiteral(int32_t* constants);
- InToRegStorageMapping in_to_reg_storage_mapping_;
-
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return cu_->target64; // On 64b, we have 64b GPRs.
}
- bool WideFPRsAreAliases() OVERRIDE {
+
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // xmm registers have 64b views even on x86.
}
@@ -1002,11 +989,17 @@ class X86Mir2Lir : public Mir2Lir {
*/
static void DumpRegLocation(RegLocation loc);
- static const X86EncodingMap EncodingMap[kX86Last];
+ InToRegStorageMapping in_to_reg_storage_mapping_;
private:
void SwapBits(RegStorage result_reg, int shift, int32_t value);
void SwapBits64(RegStorage result_reg, int shift, int64_t value);
+
+ static const X86EncodingMap EncodingMap[kX86Last];
+
+ friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
};
} // namespace art
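The @note annotations above describe the operand encoding for the packed operations; the Gen*Vector implementations later in this patch read the vector width and element size from the low and high halves of vC and the destination-and-source register from vA, so the sketch below follows the code. Reading the second source register from vB is an assumption based on the surrounding calls:

    #include <cstdint>

    // Sketch: only the MIR fields used by the vector helpers are modelled.
    struct MirSketch {
      struct { uint32_t vA, vB, vC; } dalvikInsn;
    };

    void DecodePackedOp(const MirSketch* mir) {
      uint32_t width  = mir->dalvikInsn.vC & 0xFFFF;  // expected to be 128
      uint32_t opsize = mir->dalvikInsn.vC >> 16;     // packed element type
      uint32_t dest   = mir->dalvikInsn.vA;           // destination/source XMM number
      uint32_t src2   = mir->dalvikInsn.vB;           // second source (assumed)
      (void)width; (void)opsize; (void)dest; (void)src2;
    }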
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 21d1a5cec2..254d90fa8c 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -169,8 +169,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
/*
* We already know that the result is in a physical register but do not know if it is the
@@ -431,8 +430,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index aa1bf7fe6d..7229318331 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -208,7 +208,7 @@ void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
@@ -268,6 +268,7 @@ void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
}
void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -594,8 +595,9 @@ void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& sh
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
+ UNUSED(rl_dest, reg_lo, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
@@ -763,12 +765,14 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) {
+ UNUSED(rl_dest, reg_lo, reg_hi, is_div);
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest);
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -1022,7 +1026,7 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// In 32-bit mode, only the EAX..EDX registers can be used with Mov8MR.
if (!cu_->target64 && size == kSignedByte) {
- rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+ rl_src_value = UpdateLocTyped(rl_src_value);
if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
RegStorage temp = AllocateByteRegister();
OpRegCopy(temp, rl_src_value.reg);
@@ -1309,18 +1313,21 @@ LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
}
LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for x86";
- return NULL;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1453,22 +1460,27 @@ LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targe
bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
- return false;
+ UNREACHABLE();
}
bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
- return false;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
+ UNREACHABLE();
}
void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
@@ -1486,6 +1498,7 @@ void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
}
void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+ UNUSED(sreg);
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1616,7 +1629,7 @@ bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64
int32_t val_hi = High32Bits(val);
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
bool src1_in_reg = rl_src1.location == kLocPhysReg;
int displacement = SRegOffset(rl_src1.s_reg_low);
@@ -1700,8 +1713,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// At this point, the VRs are in their home locations.
bool src1_in_reg = rl_src1.location == kLocPhysReg;
@@ -1837,12 +1850,12 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if (rl_dest.location == kLocPhysReg) {
// Ensure we are in a register pair
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1850,7 +1863,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
// Handle the case when src and dest intersect.
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1910,7 +1923,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
rl_result = ForceTempWide(rl_result);
// Perform the operation using the RHS.
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
GenLongRegOrMemOp(rl_result, rl_src2, op);
// And now record that the result is in the temp.
@@ -1919,10 +1932,9 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
}
// It wasn't in registers, so it better be in memory.
- DCHECK((rl_dest.location == kLocDalvikFrame) ||
- (rl_dest.location == kLocCompilerTemp));
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -2088,7 +2100,7 @@ void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
} else {
// Only need this once. Multiply directly from the value.
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
if (rl_src.location != kLocPhysReg) {
// Okay, we can do this from memory.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2395,6 +2407,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int shift_amount, int flags) {
+ UNUSED(flags);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
@@ -2692,7 +2705,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_h
return in_mem ? kX86Xor32MI : kX86Xor32RI;
default:
LOG(FATAL) << "Unexpected opcode: " << op;
- return kX86Add32MI;
+ UNREACHABLE();
}
}
@@ -2706,7 +2719,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
return false;
}
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
@@ -2736,7 +2749,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
@@ -2812,8 +2825,8 @@ bool X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
@@ -3035,7 +3048,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
if (unary) {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegReg(op, rl_result.reg, rl_lhs.reg);
} else {
@@ -3045,7 +3058,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
OpMemReg(op, rl_result, t_reg.GetReg());
@@ -3068,12 +3081,12 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Multiply is 3 operand only (sort of).
if (is_two_addr && op != kOpMul) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Can we do this from memory directly?
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_rhs = UpdateLocTyped(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
OpRegMem(op, rl_result.reg, rl_rhs);
StoreFinalValue(rl_dest, rl_result);
@@ -3088,7 +3101,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// It might happen rl_rhs and rl_dest are the same VR
// in this case rl_dest is in reg after LoadValue while
// rl_result is not updated yet, so do this
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
@@ -3105,8 +3118,8 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
}
} else {
// Try to use reg/memory instructions.
- rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_lhs = UpdateLocTyped(rl_lhs);
+ rl_rhs = UpdateLocTyped(rl_rhs);
// We can't optimize with FP registers.
if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
// Something is difficult, so fall back to the standard case.
@@ -3178,7 +3191,7 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
Mir2Lir::GenIntToLong(rl_dest, rl_src);
return;
}
- rl_src = UpdateLocTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocTyped(rl_src);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
@@ -3278,7 +3291,7 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
LoadValueDirectFixed(rl_shift, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocWideTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 79d5eebe17..9616d8fa71 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -143,25 +143,6 @@ static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
RegStorage rs_rX86_SP;
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
@@ -237,8 +218,9 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ UNUSED(reg);
LOG(FATAL) << "Do not use this function!!!";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
/*
@@ -795,14 +777,11 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
class_type_address_insns_.reserve(100);
call_method_insns_.reserve(100);
store_method_addr_used_ = false;
- if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
- if (X86Mir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
- }
- }
+ DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
if (cu_->target64) {
rs_rX86_SP = rs_rX86_SP_64;
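Collapsing the explicit kIsDebugBuild block into DCHECK_EQ relies on the debug-check macros already compiling to nothing in release builds, so the guard added no information. A hedged reduction of that behaviour (not the real base/logging.h definition, which also accepts the streamed failure message seen above):

    #include <cassert>

    // Sketch: a check that is real only in debug builds.
    #ifdef NDEBUG
    #define SKETCH_DCHECK_EQ(a, b) do { (void)(a); (void)(b); } while (false)
    #else
    #define SKETCH_DCHECK_EQ(a, b) assert((a) == (b))
    #endif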
@@ -821,20 +800,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
rs_rX86_FARG5 = rs_fr5;
rs_rX86_FARG6 = rs_fr6;
rs_rX86_FARG7 = rs_fr7;
- rX86_ARG0 = rDI;
- rX86_ARG1 = rSI;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rCX;
- rX86_ARG4 = r8;
- rX86_ARG5 = r9;
- rX86_FARG0 = fr0;
- rX86_FARG1 = fr1;
- rX86_FARG2 = fr2;
- rX86_FARG3 = fr3;
- rX86_FARG4 = fr4;
- rX86_FARG5 = fr5;
- rX86_FARG6 = fr6;
- rX86_FARG7 = fr7;
rs_rX86_INVOKE_TGT = rs_rDI;
} else {
rs_rX86_SP = rs_rX86_SP_32;
@@ -853,14 +818,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
rs_rX86_FARG5 = RegStorage::InvalidReg();
rs_rX86_FARG6 = RegStorage::InvalidReg();
rs_rX86_FARG7 = RegStorage::InvalidReg();
- rX86_ARG0 = rAX;
- rX86_ARG1 = rCX;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rBX;
- rX86_FARG0 = rAX;
- rX86_FARG1 = rCX;
- rX86_FARG2 = rDX;
- rX86_FARG3 = rBX;
rs_rX86_INVOKE_TGT = rs_rAX;
// TODO(64): Initialize with invalid reg
// rX86_ARG4 = RegStorage::InvalidReg();
@@ -869,10 +826,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
rs_rX86_RET0 = rs_rAX;
rs_rX86_RET1 = rs_rDX;
rs_rX86_COUNT = rs_rCX;
- rX86_RET0 = rAX;
- rX86_RET1 = rDX;
- rX86_INVOKE_TGT = rAX;
- rX86_COUNT = rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +835,9 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
// Not used in x86(-64)
RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline);
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -1548,46 +1502,46 @@ void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
ReturnVectorRegisters(mir);
break;
case kMirOpConstVector:
- GenConst128(bb, mir);
+ GenConst128(mir);
break;
case kMirOpMoveVector:
- GenMoveVector(bb, mir);
+ GenMoveVector(mir);
break;
case kMirOpPackedMultiply:
- GenMultiplyVector(bb, mir);
+ GenMultiplyVector(mir);
break;
case kMirOpPackedAddition:
- GenAddVector(bb, mir);
+ GenAddVector(mir);
break;
case kMirOpPackedSubtract:
- GenSubtractVector(bb, mir);
+ GenSubtractVector(mir);
break;
case kMirOpPackedShiftLeft:
- GenShiftLeftVector(bb, mir);
+ GenShiftLeftVector(mir);
break;
case kMirOpPackedSignedShiftRight:
- GenSignedShiftRightVector(bb, mir);
+ GenSignedShiftRightVector(mir);
break;
case kMirOpPackedUnsignedShiftRight:
- GenUnsignedShiftRightVector(bb, mir);
+ GenUnsignedShiftRightVector(mir);
break;
case kMirOpPackedAnd:
- GenAndVector(bb, mir);
+ GenAndVector(mir);
break;
case kMirOpPackedOr:
- GenOrVector(bb, mir);
+ GenOrVector(mir);
break;
case kMirOpPackedXor:
- GenXorVector(bb, mir);
+ GenXorVector(mir);
break;
case kMirOpPackedAddReduce:
- GenAddReduceVector(bb, mir);
+ GenAddReduceVector(mir);
break;
case kMirOpPackedReduce:
- GenReduceVector(bb, mir);
+ GenReduceVector(mir);
break;
case kMirOpPackedSet:
- GenSetVector(bb, mir);
+ GenSetVector(mir);
break;
case kMirOpMemBarrier:
GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1592,7 @@ void X86Mir2Lir::ReturnVectorRegisters(MIR* mir) {
}
}
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
Clobber(rs_dest);
@@ -1689,7 +1643,7 @@ void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
load->target = data_target;
}
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1759,7 @@ void X86Mir2Lir::GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_sr
NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
}
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1793,7 @@ void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1828,7 @@ void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1863,7 @@ void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
// Destination does not need to be clobbered because it has already been clobbered as part
// of the general packed shift handler (the caller of this method).
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1907,7 @@ void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
}
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1927,7 @@ void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1936,7 @@ void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1953,18 @@ void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
case k64:
// TODO Implement emulated shift algorithm.
default:
LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
- break;
+ UNREACHABLE();
}
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1984,7 @@ void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1993,7 @@ void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2002,7 @@ void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2011,7 @@ void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2038,7 @@ void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32
AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2219,7 +2173,7 @@ void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
// except the rhs is not a VR but a physical register allocated above.
// No load of source VR is done because it assumes that rl_result will
// share physical register / memory location.
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2186,7 @@ void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
}
}
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegLocation rl_dest = mir_graph_->GetDest(mir);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2240,7 @@ void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
} else {
int extract_index = mir->dalvikInsn.arg[0];
int extr_opcode = 0;
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
// Handle the rest of integral types now.
switch (opsize) {
@@ -2302,7 +2256,7 @@ void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
break;
default:
LOG(FATAL) << "Unsupported vector reduce " << opsize;
- return;
+ UNREACHABLE();
}
if (rl_result.location == kLocPhysReg) {
@@ -2331,7 +2285,7 @@ void X86Mir2Lir::LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src,
}
}
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@ void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8d5dabc5fd..cb9a24a336 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -54,14 +54,16 @@ LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
+ UNUSED(value);
return true;
}
bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
- return false;
+ return value == 0;
}
bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return true;
}
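The x86 override of InexpensiveConstantFloat now reports only the all-zero bit pattern as cheap; the likely reasoning (an assumption, the patch does not say) is that 0.0f can be materialized by clearing an XMM register, while any other literal needs a constant-pool load. A sketch of the same check expressed over raw float bits:

    #include <cstdint>
    #include <cstring>

    // Only +0.0f (bit pattern 0) qualifies; -0.0f has the sign bit set and
    // would still need a memory load under this rule.
    bool IsCheapFloatConstant(float f) {
      int32_t bits;
      std::memcpy(&bits, &f, sizeof(bits));
      return bits == 0;
    }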
@@ -934,13 +936,14 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) {
- LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
- offset, check_value);
- if (compare != nullptr) {
- *compare = inst;
- }
- LIR* branch = OpCondBranch(cond, target);
- return branch;
+ UNUSED(temp_reg); // Comparison performed directly with memory.
+ LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+ offset, check_value);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
+ LIR* branch = OpCondBranch(cond, target);
+ return branch;
}
void X86Mir2Lir::AnalyzeMIR() {
@@ -965,13 +968,13 @@ void X86Mir2Lir::AnalyzeMIR() {
}
}
-void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
+void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
if (bb->block_type == kDead) {
// Ignore dead blocks
return;
}
- for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
AnalyzeExtendedMIR(opcode, bb, mir);
@@ -982,7 +985,7 @@ void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
}
-void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
switch (opcode) {
// Instructions referencing doubles.
case kMirOpFusedCmplDouble:
@@ -1009,7 +1012,7 @@ void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
}
}
-void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
// Looking for
// - Do we need a pointer to the code (used for packed switches and double lits)?
@@ -1046,7 +1049,8 @@ void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
}
}
-void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
// Look at all the uses, and see if they are double constants.
uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
int next_sreg = 0;
@@ -1080,7 +1084,7 @@ void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
}
}
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
loc = UpdateLoc(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1094,7 +1098,7 @@ RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
return loc;
}
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
loc = UpdateLocWide(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1108,7 +1112,8 @@ RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
return loc;
}
-void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(opcode, bb);
// For now this is only relevant for x86-32.
if (cu_->target64) {
return;
@@ -1132,6 +1137,7 @@ void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
}
LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 22a2f30d4d..afdc244dac 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -313,25 +313,6 @@ constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-extern X86NativeRegisterPool rX86_ARG0;
-extern X86NativeRegisterPool rX86_ARG1;
-extern X86NativeRegisterPool rX86_ARG2;
-extern X86NativeRegisterPool rX86_ARG3;
-extern X86NativeRegisterPool rX86_ARG4;
-extern X86NativeRegisterPool rX86_ARG5;
-extern X86NativeRegisterPool rX86_FARG0;
-extern X86NativeRegisterPool rX86_FARG1;
-extern X86NativeRegisterPool rX86_FARG2;
-extern X86NativeRegisterPool rX86_FARG3;
-extern X86NativeRegisterPool rX86_FARG4;
-extern X86NativeRegisterPool rX86_FARG5;
-extern X86NativeRegisterPool rX86_FARG6;
-extern X86NativeRegisterPool rX86_FARG7;
-extern X86NativeRegisterPool rX86_RET0;
-extern X86NativeRegisterPool rX86_RET1;
-extern X86NativeRegisterPool rX86_INVOKE_TGT;
-extern X86NativeRegisterPool rX86_COUNT;
-
extern RegStorage rs_rX86_ARG0;
extern RegStorage rs_rX86_ARG1;
extern RegStorage rs_rX86_ARG2;
@@ -674,6 +655,7 @@ enum X86OpCode {
kX86RepneScasw, // repne scasw
kX86Last
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
/* Instruction assembly field_loc kind */
enum X86EncodingKind {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 706933a1b4..4a84ff2516 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_REG_STORAGE_H_
#include "base/logging.h"
+#include "base/value_object.h"
#include "compiler_enums.h" // For WideKind
namespace art {
@@ -72,7 +73,7 @@ namespace art {
* records.
*/
-class RegStorage {
+class RegStorage : public ValueObject {
public:
enum RegStorageKind {
kValidMask = 0x8000,
@@ -112,7 +113,7 @@ class RegStorage {
}
constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
: reg_(
- DCHECK_CONSTEXPR(rs_kind == k64BitPair, << rs_kind, 0u)
+ DCHECK_CONSTEXPR(rs_kind == k64BitPair, << static_cast<int>(rs_kind), 0u)
DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
<< low_reg << ", " << high_reg, 0u)
DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
@@ -331,9 +332,8 @@ class RegStorage {
case k256BitSolo: return 32;
case k512BitSolo: return 64;
case k1024BitSolo: return 128;
- default: LOG(FATAL) << "Unexpected shape";
+ default: LOG(FATAL) << "Unexpected shape"; UNREACHABLE();
}
- return 0;
}
private:
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a8e6b3c8df..4929b5b2b0 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -106,6 +106,8 @@ bool VerificationResults::IsCandidateForCompilation(MethodReference& method_ref,
if (use_sea) {
return true;
}
+#else
+ UNUSED(method_ref);
#endif
if (!compiler_options_->IsCompilationEnabled()) {
return false;
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 65a842d605..682b17a7d1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -75,6 +75,7 @@ enum DexToDexCompilationLevel {
kRequired, // Dex-to-dex compilation required for correctness.
kOptimize // Perform required transformation and peep-hole optimizations.
};
+std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
class CompilerDriver {
public:
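The stream operator declared above lets DexToDexCompilationLevel values be logged by name; elsewhere in this change such operators come from tools/generate-operator-out.py. A hand-written sketch of the usual shape, limited to the two enumerators visible in this hunk:

    #include <ostream>

    // Sketch enum standing in for DexToDexCompilationLevel; earlier enumerators
    // are omitted because they are not visible in this hunk.
    enum SketchLevel { kSketchRequired, kSketchOptimize };

    std::ostream& operator<<(std::ostream& os, const SketchLevel& rhs) {
      switch (rhs) {
        case kSketchRequired: return os << "kRequired";
        case kSketchOptimize: return os << "kOptimize";
      }
      return os << "SketchLevel[" << static_cast<int>(rhs) << "]";  // fallback
    }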
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fb7aeb9b06..0592f0cf1e 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -21,6 +21,7 @@
#include <vector>
#include "base/macros.h"
+#include "globals.h"
namespace art {
@@ -242,6 +243,7 @@ class CompilerOptions FINAL {
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
+std::ostream& operator<<(std::ostream& os, const CompilerOptions::CompilerFilter& rhs);
} // namespace art
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index c32bdb45dc..7f3056500c 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -494,7 +494,7 @@ class ElfFileOatTextPiece FINAL : public ElfFilePiece<Elf_Word> {
output_(output) {}
protected:
- bool DoActualWrite(File* elf_file) OVERRIDE {
+ bool DoActualWrite(File* elf_file ATTRIBUTE_UNUSED) OVERRIDE {
// All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
// piece. This is for future flexibility.
UNUSED(output_);
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 575886b8e0..25cf086696 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -222,9 +222,9 @@ template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
typename Elf_Phdr, typename Elf_Shdr>
bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files_unused,
- const std::string& android_root_unused,
- bool is_host_unused) {
+ const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
+ const std::string& android_root_unused ATTRIBUTE_UNUSED,
+ bool is_host_unused ATTRIBUTE_UNUSED) {
constexpr bool debug = false;
const OatHeader& oat_header = oat_writer->GetOatHeader();
Elf_Word oat_data_size = oat_header.GetExecutableOffset();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 861a182a71..2fd5a52745 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -265,7 +265,7 @@ bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
return true;
}
-void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
+void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
@@ -661,7 +661,8 @@ class FixupClassVisitor FINAL : public FixupVisitor {
}
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
+ mirror::Reference* ref ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0fea2a7de7..113204635c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -530,7 +530,7 @@ JNI_TEST(CompileAndRunStaticDoubleDoubleMethod)
// point return value would be in xmm0. We use log to somehow ensure
// the compiler will use the floating point stack.
-jdouble Java_MyClassNatives_logD(JNIEnv* env, jclass klass, jdouble x) {
+jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) {
return log(x);
}
@@ -544,7 +544,7 @@ void JniCompilerTest::RunStaticLogDoubleMethodImpl() {
JNI_TEST(RunStaticLogDoubleMethod)
-jfloat Java_MyClassNatives_logF(JNIEnv* env, jclass klass, jfloat x) {
+jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) {
return logf(x);
}
@@ -558,15 +558,15 @@ void JniCompilerTest::RunStaticLogFloatMethodImpl() {
JNI_TEST(RunStaticLogFloatMethod)
-jboolean Java_MyClassNatives_returnTrue(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) {
return JNI_TRUE;
}
-jboolean Java_MyClassNatives_returnFalse(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) {
return JNI_FALSE;
}
-jint Java_MyClassNatives_returnInt(JNIEnv* env, jclass klass) {
+jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) {
return 42;
}
@@ -1056,7 +1056,10 @@ void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() {
JNI_TEST(CompileAndRunFloatFloatMethod)
-void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
+void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED,
+ jobject thisObj ATTRIBUTE_UNUSED,
+ jint i1 ATTRIBUTE_UNUSED,
+ jlong l1 ATTRIBUTE_UNUSED) {
// EXPECT_EQ(kNative, Thread::Current()->GetState());
// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
// EXPECT_TRUE(thisObj != nullptr);
@@ -1520,7 +1523,7 @@ void JniCompilerTest::WithoutImplementationImpl() {
JNI_TEST(WithoutImplementation)
-void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3,
+void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
jfloat f5, jfloat f6, jfloat f7, jfloat f8, jfloat f9,
@@ -1591,7 +1594,7 @@ void JniCompilerTest::StackArgsIntsFirstImpl() {
JNI_TEST(StackArgsIntsFirst)
-void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat f1, jfloat f2,
+void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat f2,
jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
jint i3, jint i4, jint i5, jint i6, jint i7, jint i8,
@@ -1662,7 +1665,7 @@ void JniCompilerTest::StackArgsFloatsFirstImpl() {
JNI_TEST(StackArgsFloatsFirst)
-void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jfloat f1, jint i2,
+void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jint i2,
jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
jfloat f8, jint i9, jfloat f9, jint i10, jfloat f10) {
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 525f05c522..a100552695 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,6 +38,7 @@ ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
}
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ UNUSED(jni);
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
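In the hunk above, ReturnRegisterForShorty never consults its jni flag on x86-64, so the body calls UNUSED(jni) instead of annotating the declaration; the JNI tests earlier take a third route and simply drop the parameter names. Assuming ART's UNUSED accepts any number of arguments, as the later UNUSED(bytes, kind) and UNUSED(stack_index, reg_id) hunks suggest, one plausible stand-in formulation looks like this (illustrative, not the real base/macros.h definition):

#include <cstddef>
#include <cstdio>

// Hypothetical variadic helper standing in for ART's UNUSED.
template <typename... T>
void UNUSED(const T&...) {}

size_t Reserve(size_t offset, const void* method) {
  UNUSED(method);  // the argument is deliberately ignored in this configuration
  return offset;
}

int main() {
  std::printf("%zu\n", Reserve(16, nullptr));  // prints 16
  return 0;
}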
diff --git a/compiler/llvm/llvm_compiler.cc b/compiler/llvm/llvm_compiler.cc
index 55af6145be..fa93e00e19 100644
--- a/compiler/llvm/llvm_compiler.cc
+++ b/compiler/llvm/llvm_compiler.cc
@@ -16,6 +16,7 @@
#include "llvm_compiler.h"
+#include "base/macros.h"
#ifdef ART_USE_PORTABLE_COMPILER
#include "compiler.h"
#include "compiler_llvm.h"
@@ -152,9 +153,10 @@ class LLVMCompiler FINAL : public Compiler {
Compiler* CreateLLVMCompiler(CompilerDriver* driver) {
#ifdef ART_USE_PORTABLE_COMPILER
- return new llvm::LLVMCompiler(driver);
+ return new llvm::LLVMCompiler(driver);
#else
- return nullptr;
+ UNUSED(driver);
+ return nullptr;
#endif
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index be52f40a0b..6138411b11 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -70,16 +70,18 @@ class OatWriter::NoRelativeCallPatcher FINAL : public RelativeCallPatcher {
public:
NoRelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no patches expected.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no patches expected.
}
- void Patch(std::vector<uint8_t>* code, uint32_t literal_offset, uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ void Patch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, uint32_t literal_offset ATTRIBUTE_UNUSED,
+ uint32_t patch_offset ATTRIBUTE_UNUSED,
+ uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
LOG(FATAL) << "Unexpected relative patch.";
}
@@ -91,11 +93,12 @@ class OatWriter::X86RelativeCallPatcher FINAL : public RelativeCallPatcher {
public:
X86RelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no limit on relative call distance.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no limit on relative call distance.
}
@@ -648,7 +651,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it) {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// num_non_null_compiled_methods_ since we only want to allocate
@@ -860,7 +863,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
: OatDexMethodVisitor(writer, offset) {
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 71f0b1b850..01c5cc9637 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -51,7 +51,7 @@ struct PcInfo {
uintptr_t native_pc;
};
-class SlowPathCode : public ArenaObject {
+class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {}
virtual ~SlowPathCode() {}
@@ -62,7 +62,7 @@ class SlowPathCode : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
-class CodeGenerator : public ArenaObject {
+class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
@@ -115,12 +115,14 @@ class CodeGenerator : public ArenaObject {
// Restores the register from the stack. Returns the size taken on stack.
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
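Replacing "LOG(FATAL); return 0u;" with UNIMPLEMENTED(FATAL) plus UNREACHABLE() removes the dead dummy return: once the compiler knows control cannot fall out of that branch, no value is required. A self-contained sketch of the idea, using stand-in macros rather than ART's real logging:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

#define FATAL_UNIMPLEMENTED() (std::fprintf(stderr, "Unimplemented\n"), std::abort())
#define UNREACHABLE() __builtin_unreachable()  // assumes a GCC/Clang-style no-return builtin

size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
  (void)stack_index;  // parameters only matter to the overriding implementations
  (void)reg_id;
  FATAL_UNIMPLEMENTED();
  UNREACHABLE();  // no dummy "return 0u;" needed after a call that never returns
}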
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c812f6b416..0cec4b404a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -794,6 +794,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ bkpt(0);
@@ -959,6 +960,7 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -986,6 +988,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -996,6 +999,7 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1006,6 +1010,7 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -1016,6 +1021,7 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1026,6 +1032,7 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
@@ -1033,6 +1040,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1043,6 +1051,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1508,6 +1517,7 @@ void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM::VisitNot(HNot* not_) {
@@ -1596,6 +1606,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1998,9 +2009,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ec9af73a71..6ac7a31d3f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -556,6 +556,7 @@ enum UnimplementedInstructionBreakCode {
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ UNUSED(instr); \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -711,6 +712,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
__ Brk(0); // TODO: Introduce special markers for such code locations.
@@ -877,6 +879,7 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
@@ -967,6 +970,7 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM64::VisitLocal(HLocal* local) {
@@ -984,6 +988,7 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -1109,6 +1114,7 @@ void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
@@ -1120,6 +1126,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1164,6 +1171,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
}
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ UNUSED(instruction);
codegen_->GenerateFrameExit();
__ Br(lr);
}
@@ -1191,6 +1199,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -1242,6 +1251,7 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
} // namespace arm64
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a4003ffea5..5530f46065 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -180,11 +180,15 @@ class CodeGeneratorARM64 : public CodeGenerator {
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
return 0;
}
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
return 0;
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d41d5a00a8..ac328c319c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -645,6 +645,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -734,6 +735,7 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -762,6 +764,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86::VisitCondition(HCondition* comp) {
@@ -851,6 +854,7 @@ void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -861,6 +865,7 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -871,6 +876,7 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -881,6 +887,7 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -888,6 +895,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1456,6 +1464,7 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
}
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
+ UNUSED(instruction);
}
void LocationsBuilderX86::VisitNot(HNot* not_) {
@@ -1550,6 +1559,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -2026,9 +2036,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index bda3520708..0bc2bad896 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -556,6 +556,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -644,6 +645,7 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) {
void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -671,6 +673,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
@@ -793,6 +796,7 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -803,6 +807,7 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -813,6 +818,7 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -823,6 +829,7 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -830,6 +837,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1381,6 +1389,7 @@ void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
@@ -1423,6 +1432,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -1902,9 +1912,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index a98d714476..8d2c77475c 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -25,7 +25,7 @@ namespace art {
* A node in the collision list of a ValueSet. Encodes the instruction,
* the hash code, and the next node in the collision list.
*/
-class ValueSetNode : public ArenaObject {
+class ValueSetNode : public ArenaObject<kArenaAllocMisc> {
public:
ValueSetNode(HInstruction* instruction, size_t hash_code, ValueSetNode* next)
: instruction_(instruction), hash_code_(hash_code), next_(next) {}
@@ -52,7 +52,7 @@ class ValueSetNode : public ArenaObject {
* if there is one in the set. In GVN, we would say those instructions have the
* same "number".
*/
-class ValueSet : public ArenaObject {
+class ValueSet : public ArenaObject<kArenaAllocMisc> {
public:
explicit ValueSet(ArenaAllocator* allocator)
: allocator_(allocator), number_of_entries_(0), collisions_(nullptr) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index d7295aa112..914a0c46cb 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -351,6 +351,8 @@ class Location : public ValueObject {
// way that none of them can be interpreted as a kConstant tag.
uintptr_t value_;
};
+std::ostream& operator<<(std::ostream& os, const Location::Kind& rhs);
+std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
@@ -401,7 +403,7 @@ class RegisterSet : public ValueObject {
* The intent is to have the code for generating the instruction independent of
* register allocation. A register allocator just has to provide a LocationSummary.
*/
-class LocationSummary : public ArenaObject {
+class LocationSummary : public ArenaObject<kArenaAllocMisc> {
public:
enum CallKind {
kNoCall,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d624ad5e5e..8cb2ef6de8 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -647,4 +647,16 @@ bool HInstruction::Equals(HInstruction* other) const {
return true;
}
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs) {
+#define DECLARE_CASE(type, super) case HInstruction::k##type: os << #type; break;
+ switch (rhs) {
+ FOR_EACH_INSTRUCTION(DECLARE_CASE)
+ default:
+ os << "Unknown instruction kind " << static_cast<int>(rhs);
+ break;
+ }
+#undef DECLARE_CASE
+ return os;
+}
+
} // namespace art
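The operator<< added above leans on the existing FOR_EACH_INSTRUCTION X-macro so the switch stays in sync with the instruction list, similar in spirit to the operators generate-operator-out.py produces for plain enums elsewhere in this change. A small self-contained illustration of the pattern with a hypothetical enum (FOR_EACH_SHAPE is not an ART macro):

#include <iostream>

#define FOR_EACH_SHAPE(M) M(Circle) M(Square) M(Triangle)

enum ShapeKind {
#define DECLARE_KIND(name) k##name,
  FOR_EACH_SHAPE(DECLARE_KIND)
#undef DECLARE_KIND
};

std::ostream& operator<<(std::ostream& os, ShapeKind rhs) {
#define DECLARE_CASE(name) case k##name: os << #name; break;
  switch (rhs) {
    FOR_EACH_SHAPE(DECLARE_CASE)
    default: os << "Unknown shape kind " << static_cast<int>(rhs); break;
  }
#undef DECLARE_CASE
  return os;
}

int main() {
  std::cout << kSquare << std::endl;  // prints "Square"
  return 0;
}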
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 33bfe19081..7549ebfbe4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -79,12 +79,14 @@ class HInstructionList {
};
// Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject {
+class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
explicit HGraph(ArenaAllocator* arena)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
@@ -199,7 +201,7 @@ class HGraph : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-class HLoopInformation : public ArenaObject {
+class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
public:
HLoopInformation(HBasicBlock* header, HGraph* graph)
: header_(header),
@@ -278,7 +280,7 @@ static constexpr uint32_t kNoDexPc = -1;
// as a double linked list. Each block knows its predecessors and
// successors.
-class HBasicBlock : public ArenaObject {
+class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
@@ -537,7 +539,7 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
virtual void Accept(HGraphVisitor* visitor)
template <typename T>
-class HUseListNode : public ArenaObject {
+class HUseListNode : public ArenaObject<kArenaAllocMisc> {
public:
HUseListNode(T* user, size_t index, HUseListNode* tail)
: user_(user), index_(index), tail_(tail) {}
@@ -619,7 +621,7 @@ class SideEffects : public ValueObject {
size_t flags_;
};
-class HInstruction : public ArenaObject {
+class HInstruction : public ArenaObject<kArenaAllocMisc> {
public:
explicit HInstruction(SideEffects side_effects)
: previous_(nullptr),
@@ -738,12 +740,18 @@ class HInstruction : public ArenaObject {
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionTypeEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether two instructions are equal, that is:
// 1) They have the same type and contain the same data,
@@ -808,6 +816,7 @@ class HInstruction : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
template<typename T>
class HUseIterator : public ValueObject {
@@ -833,7 +842,7 @@ class HUseIterator : public ValueObject {
};
// A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject {
+class HEnvironment : public ArenaObject<kArenaAllocMisc> {
public:
HEnvironment(ArenaAllocator* arena, size_t number_of_vregs) : vregs_(arena, number_of_vregs) {
vregs_.SetSize(number_of_vregs);
@@ -965,14 +974,14 @@ class EmbeddedArray<T, 0> {
public:
intptr_t length() const { return 0; }
const T& operator[](intptr_t i) const {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
T& operator[](intptr_t i) {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
};
@@ -1110,7 +1119,10 @@ class HUnaryOperation : public HExpression<1> {
Primitive::Type GetResultType() const { return GetType(); }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1143,7 +1155,10 @@ class HBinaryOperation : public HExpression<2> {
virtual bool IsCommutative() { return false; }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1732,7 +1747,10 @@ class HNot : public HUnaryOperation {
: HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
@@ -1792,7 +1810,10 @@ class HNullCheck : public HExpression<1> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -1884,7 +1905,10 @@ class HArrayGet : public HExpression<2> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
void SetType(Primitive::Type type) { type_ = type; }
DECLARE_INSTRUCTION(ArrayGet);
@@ -1948,7 +1972,10 @@ class HArrayLength : public HExpression<1> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -1966,7 +1993,10 @@ class HBoundsCheck : public HExpression<2> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -2177,7 +2207,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
-class MoveOperands : public ArenaObject {
+class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
: source_(source), destination_(destination), instruction_(instruction) {}
@@ -2278,7 +2308,7 @@ class HGraphVisitor : public ValueObject {
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) {}
+ virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d3fe1c4afc..08b74c7988 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -213,6 +213,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
+ UNUSED(invoke_type);
total_compiled_methods_++;
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 2bdcc61b04..62629bcd0c 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -50,8 +50,8 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
<< ")";
}
- virtual void SpillScratch(int reg) {}
- virtual void RestoreScratch(int reg) {}
+ virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
+ virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
std::string GetMessage() const {
return message_.str();
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 8811ac8939..ca08d5b3e6 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -25,7 +25,7 @@ class CodeGenerator;
static constexpr int kNoRegister = -1;
-class BlockInfo : public ArenaObject {
+class BlockInfo : public ArenaObject<kArenaAllocMisc> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
@@ -53,7 +53,7 @@ class BlockInfo : public ArenaObject {
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange : public ArenaObject {
+class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -64,16 +64,16 @@ class LiveRange : public ArenaObject {
size_t GetEnd() const { return end_; }
LiveRange* GetNext() const { return next_; }
- bool IntersectsWith(const LiveRange& other) {
+ bool IntersectsWith(const LiveRange& other) const {
return (start_ >= other.start_ && start_ < other.end_)
|| (other.start_ >= start_ && other.start_ < end_);
}
- bool IsBefore(const LiveRange& other) {
+ bool IsBefore(const LiveRange& other) const {
return end_ <= other.start_;
}
- void Dump(std::ostream& stream) {
+ void Dump(std::ostream& stream) const {
stream << "[" << start_ << ", " << end_ << ")";
}
@@ -90,7 +90,7 @@ class LiveRange : public ArenaObject {
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject {
+class UsePosition : public ArenaObject<kArenaAllocMisc> {
public:
UsePosition(HInstruction* user,
size_t input_index,
@@ -137,7 +137,7 @@ class UsePosition : public ArenaObject {
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
*/
-class LiveInterval : public ArenaObject {
+class LiveInterval : public ArenaObject<kArenaAllocMisc> {
public:
static LiveInterval* MakeInterval(ArenaAllocator* allocator,
Primitive::Type type,
diff --git a/compiler/output_stream.cc b/compiler/output_stream.cc
new file mode 100644
index 0000000000..a8b64ca1ce
--- /dev/null
+++ b/compiler/output_stream.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "output_stream.h"
+
+namespace art {
+
+std::ostream& operator<<(std::ostream& os, const Whence& rhs) {
+ switch (rhs) {
+ case kSeekSet: os << "SEEK_SET"; break;
+ case kSeekCurrent: os << "SEEK_CUR"; break;
+ case kSeekEnd: os << "SEEK_END"; break;
+ default: UNREACHABLE();
+ }
+ return os;
+}
+
+} // namespace art
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 97ccc2caa6..4d30b83234 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -17,9 +17,7 @@
#ifndef ART_COMPILER_OUTPUT_STREAM_H_
#define ART_COMPILER_OUTPUT_STREAM_H_
-#include <stdint.h>
-#include <sys/types.h>
-
+#include <ostream>
#include <string>
#include "base/macros.h"
@@ -31,6 +29,7 @@ enum Whence {
kSeekCurrent = SEEK_CUR,
kSeekEnd = SEEK_END,
};
+std::ostream& operator<<(std::ostream& os, const Whence& rhs);
class OutputStream {
public:
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index b2f5ca9755..6d213991d3 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -82,7 +82,7 @@ class ArenaAllocatorStatsImpl<false> {
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes); UNUSED(kind); }
+ void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index de35f3d197..f17e5a92a4 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,12 +16,12 @@
#include "arena_allocator.h"
#include "arena_bit_vector.h"
-#include "base/allocator.h"
namespace art {
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator,
+ public ArenaObject<kArenaAllocGrowableBitMap> {
public:
explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
~ArenaBitVectorAllocator() {}
@@ -32,11 +32,6 @@ class ArenaBitVectorAllocator FINAL : public Allocator {
virtual void Free(void*) {} // Nop.
- static void* operator new(size_t size, ArenaAlloc* arena) {
- return arena->Alloc(sizeof(ArenaBitVectorAllocator), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAlloc* const arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index c92658f7d6..34f1ca9129 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -17,12 +17,14 @@
#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#include "arena_object.h"
#include "base/bit_vector.h"
-#include "utils/arena_allocator.h"
-#include "utils/scoped_arena_allocator.h"
namespace art {
+class ArenaAllocator;
+class ScopedArenaAllocator;
+
// Type of growable bitmap for memory tuning.
enum OatBitMapKind {
kBitMapMisc = 0,
@@ -50,7 +52,7 @@ std::ostream& operator<<(std::ostream& os, const OatBitMapKind& kind);
/*
* A BitVector implementation that uses Arena allocation.
*/
-class ArenaBitVector : public BitVector {
+class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
public:
ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
OatBitMapKind kind = kBitMapMisc);
@@ -58,16 +60,10 @@ class ArenaBitVector : public BitVector {
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
+
+ DISALLOW_COPY_AND_ASSIGN(ArenaBitVector);
};
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
index c48b0c81ec..825259157a 100644
--- a/compiler/utils/arena_containers.h
+++ b/compiler/utils/arena_containers.h
@@ -66,7 +66,7 @@ template <>
class ArenaAllocatorAdapterKindImpl<false> {
public:
// Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
- explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { UNUSED(kind); }
ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
ArenaAllocKind Kind() { return kArenaAllocSTL; }
};
@@ -159,11 +159,13 @@ class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocato
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p, n);
}
void construct(pointer p, const_reference val) {
diff --git a/compiler/utils/arena_object.h b/compiler/utils/arena_object.h
index 8f6965edc5..d64c419954 100644
--- a/compiler/utils/arena_object.h
+++ b/compiler/utils/arena_object.h
@@ -19,14 +19,21 @@
#include "arena_allocator.h"
#include "base/logging.h"
+#include "scoped_arena_allocator.h"
namespace art {
+// Parent for arena allocated objects giving appropriate new and delete operators.
+template<enum ArenaAllocKind kAllocKind>
class ArenaObject {
public:
// Allocate a new ArenaObject of 'size' bytes in the Arena.
void* operator new(size_t size, ArenaAllocator* allocator) {
- return allocator->Alloc(size, kArenaAllocMisc);
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
@@ -35,6 +42,26 @@ class ArenaObject {
}
};
+
+// Parent for arena allocated objects that get deleted, gives appropriate new and delete operators.
+// Currently this is used by the quick compiler for debug reference counting arena allocations.
+template<enum ArenaAllocKind kAllocKind>
+class DeletableArenaObject {
+ public:
+ // Allocate a new ArenaObject of 'size' bytes in the Arena.
+ void* operator new(size_t size, ArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
+ }
+
+ void operator delete(void*, size_t) {
+ // Nop.
+ }
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_ARENA_OBJECT_H_
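The templated base class above is what lets the earlier hunks replace "public ArenaObject" with "public ArenaObject<kArenaAllocMisc>" and drop per-class operator new overloads: the allocation kind travels with the base, so every subclass is tagged in the arena's per-kind statistics. A condensed, self-contained sketch of the mechanism; the toy Arena below merely stands in for ART's ArenaAllocator:

#include <cstddef>
#include <cstdlib>
#include <iostream>

enum AllocKind { kAllocMisc, kAllocSlowPaths, kNumAllocKinds };

class Arena {
 public:
  void* Alloc(size_t size, AllocKind kind) {
    bytes_[kind] += size;        // per-kind accounting, as in ArenaAllocatorStatsImpl
    return std::malloc(size);    // a real arena bump-allocates and frees in bulk
  }
  size_t BytesFor(AllocKind kind) const { return bytes_[kind]; }

 private:
  size_t bytes_[kNumAllocKinds] = {};
};

template <AllocKind kKind>
class ArenaObject {
 public:
  void* operator new(size_t size, Arena* arena) { return arena->Alloc(size, kKind); }
  void operator delete(void*, size_t) {}  // nop: arena memory is reclaimed wholesale
};

class SlowPath : public ArenaObject<kAllocSlowPaths> {};

int main() {
  Arena arena;
  SlowPath* p = new (&arena) SlowPath();  // routed through ArenaObject<kAllocSlowPaths>
  (void)p;                                // never freed here; the arena owns the memory
  std::cout << arena.BytesFor(kAllocSlowPaths) << " bytes tagged kAllocSlowPaths\n";
  return 0;
}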
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index c1ed6a239d..dca2ab7517 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/logging.h"
+#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
@@ -179,8 +180,12 @@ enum BlockAddressMode {
DB_W = (8|0|1) << 21, // decrement before with writeback to base
IB_W = (8|4|1) << 21 // increment before with writeback to base
};
+inline std::ostream& operator<<(std::ostream& os, const BlockAddressMode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
-class Address {
+class Address : public ValueObject {
public:
// Memory operand addressing mode (in ARM encoding form. For others we need
// to adjust)
@@ -260,13 +265,17 @@ class Address {
}
private:
- Register rn_;
- Register rm_;
- int32_t offset_; // Used as shift amount for register offset.
- Mode am_;
- bool is_immed_offset_;
- Shift shift_;
+ const Register rn_;
+ const Register rm_;
+ const int32_t offset_; // Used as shift amount for register offset.
+ const Mode am_;
+ const bool is_immed_offset_;
+ const Shift shift_;
};
+inline std::ostream& operator<<(std::ostream& os, const Address::Mode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
// Instruction encoding bits.
enum {
@@ -344,10 +353,6 @@ constexpr uint32_t kInvalidModifiedImmediate = -1;
extern const char* kRegisterNames[];
extern const char* kConditionNames[];
-extern std::ostream& operator<<(std::ostream& os, const Register& rhs);
-extern std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const Condition& rhs);
// This is an abstract ARM assembler. Subclasses provide assemblers for the individual
// instruction sets (ARM32, Thumb2, etc.)
@@ -448,8 +453,10 @@ class ArmAssembler : public Assembler {
virtual void bkpt(uint16_t imm16) = 0;
virtual void svc(uint32_t imm24) = 0;
- virtual void it(Condition firstcond, ItState i1 = kItOmitted,
- ItState i2 = kItOmitted, ItState i3 = kItOmitted) {
+ virtual void it(Condition firstcond ATTRIBUTE_UNUSED,
+ ItState i1 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i2 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i3 ATTRIBUTE_UNUSED = kItOmitted) {
// Ignored if not supported.
}
@@ -537,14 +544,9 @@ class ArmAssembler : public Assembler {
Condition cond = AL) = 0;
virtual void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) = 0;
- virtual void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) = 0;
// Load and Store. May clobber IP.
virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
- virtual void LoadSImmediate(SRegister sd, float value, Condition cond = AL) = 0;
- virtual void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) = 0;
virtual void MarkExceptionHandler(Label* label) = 0;
virtual void LoadFromOffset(LoadOperandType type,
Register reg,
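The stream operators added above are defined in a header, so they are marked inline; otherwise every .cc file including assembler_arm.h would emit its own definition and break the one-definition rule at link time. A trimmed-down illustration with made-up names:

#include <iostream>

enum AddressingMode { kOffsetMode, kPreIndexMode, kPostIndexMode };

// inline keeps multiple inclusions of this header ODR-safe.
inline std::ostream& operator<<(std::ostream& os, const AddressingMode& rhs) {
  return os << static_cast<int>(rhs);  // fallback: print the numeric value, as above
}

int main() {
  std::cout << kPreIndexMode << "\n";  // prints "1"
  return 0;
}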
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index d262b6acd6..c8a57b1873 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1303,7 +1303,6 @@ void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value
}
}
-
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
@@ -1483,12 +1482,12 @@ void Arm32Assembler::dmb(DmbOptions flavor) {
}
-void Arm32Assembler::cbz(Register rn, Label* target) {
+void Arm32Assembler::cbz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbz is not supported on ARM32";
}
-void Arm32Assembler::cbnz(Register rn, Label* target) {
+void Arm32Assembler::cbnz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbnz is not supported on ARM32";
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index cfc300b8a6..dbabb99933 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -238,14 +238,9 @@ class Arm32Assembler FINAL : public ArmAssembler {
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 633f55b929..fd2613a89e 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -152,6 +152,8 @@ void Thumb2Assembler::mvns(Register rd, const ShifterOperand& so, Condition cond
void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
// 16 bit.
int16_t encoding = B14 | B9 | B8 | B6 |
@@ -176,6 +178,8 @@ void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond)
void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 0U /* 0b00 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -192,6 +196,8 @@ void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 01 /* 0b01 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -208,6 +214,8 @@ void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 2U /* 0b010; */;
uint32_t op2 = 0U /* 0b0000 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
@@ -223,6 +231,8 @@ void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
@@ -238,6 +248,8 @@ void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond
void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
@@ -293,6 +305,7 @@ void Thumb2Assembler::ldrsh(Register rd, const Address& ad, Condition cond) {
void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
@@ -304,6 +317,7 @@ void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 |
@@ -609,9 +623,9 @@ void Thumb2Assembler::Emit16(int16_t value) {
}
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
+bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -727,9 +741,9 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
}
-void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
+void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -789,7 +803,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
imm;
@@ -798,7 +812,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
// Register (possibly shifted)
encoding = B31 | B30 | B29 | B27 | B25 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
so.encodingThumb();
@@ -809,7 +823,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -936,9 +950,9 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
// ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond,
+void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1075,7 +1089,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond,
void Thumb2Assembler::EmitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1405,7 +1419,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
void Thumb2Assembler::EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
+ BlockAddressMode bam,
bool load,
Register base,
RegList regs) {
@@ -1417,7 +1431,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
must_be_32bit = true;
}
- uint32_t w_bit = am == IA_W || am == DB_W || am == DA_W || am == IB_W;
+ bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
// 16 bit always uses writeback.
if (!w_bit) {
must_be_32bit = true;
@@ -1425,7 +1439,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
if (must_be_32bit) {
uint32_t op = 0;
- switch (am) {
+ switch (bam) {
case IA:
case IA_W:
op = 1U /* 0b01 */;
@@ -1438,7 +1452,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
case IB:
case DA_W:
case IB_W:
- LOG(FATAL) << "LDM/STM mode not supported on thumb: " << am;
+ LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
}
if (load) {
// Cannot have SP in the list.
@@ -2354,7 +2368,6 @@ void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t valu
}
}
-
void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
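Narrowing set_cc from int to bool changes how it enters the instruction encoding: the hunks above spell the promotion out as (set_cc ? 1 : 0) << 20 so bit 20 is set explicitly and no implicit sign conversion is involved. The same idea in isolation, with an illustrative helper name:

#include <cstdint>
#include <cstdio>

// Place the S (set condition codes) flag in bit 20 of a 32-bit encoding.
uint32_t EncodeSetCc(uint32_t base_encoding, bool set_cc) {
  return base_encoding | (static_cast<uint32_t>(set_cc ? 1 : 0) << 20);
}

int main() {
  std::printf("0x%08x\n", EncodeSetCc(0u, true));  // prints 0x00100000
  return 0;
}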
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index b26173fe28..9ccdef7e1e 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -269,14 +269,9 @@ class Thumb2Assembler FINAL : public ArmAssembler {
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
@@ -324,40 +319,40 @@ class Thumb2Assembler FINAL : public ArmAssembler {
private:
// Emit a single 32 or 16 bit data processing instruction.
void EmitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Must the instruction be 32 bits or can it possibly be encoded
// in 16 bits?
bool Is32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 32 bit data processing instruction.
void Emit32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 16 bit data processing instruction.
void Emit16BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
void Emit16BitAddSub(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -365,12 +360,12 @@ class Thumb2Assembler FINAL : public ArmAssembler {
uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);
void EmitLoadStore(Condition cond,
- bool load,
- bool byte,
- bool half,
- bool is_signed,
- Register rd,
- const Address& ad);
+ bool load,
+ bool byte,
+ bool half,
+ bool is_signed,
+ Register rd,
+ const Address& ad);
void EmitMemOpAddressMode3(Condition cond,
int32_t mode,
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 702e03a277..1513296c2d 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -38,15 +38,6 @@ namespace arm {
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
-
-// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
-// a time, so that compile time optimizations can be applied.
-// Warning: VFPv3-D32 is untested.
-#define VFPv3_D16
-#if defined(VFPv3_D16) == defined(VFPv3_D32)
-#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
-#endif
-
// 4 bits option for the dmb instruction.
// Order and values follows those of the ARM Architecture Reference Manual.
enum DmbOptions {
@@ -66,26 +57,23 @@ enum ScaleFactor {
};
// Values for double-precision floating point registers.
-enum DRegister {
- D0 = 0,
- D1 = 1,
- D2 = 2,
- D3 = 3,
- D4 = 4,
- D5 = 5,
- D6 = 6,
- D7 = 7,
- D8 = 8,
- D9 = 9,
+enum DRegister { // private marker to keep generate-operator-out.py from processing this enum.
+ D0 = 0,
+ D1 = 1,
+ D2 = 2,
+ D3 = 3,
+ D4 = 4,
+ D5 = 5,
+ D6 = 6,
+ D7 = 7,
+ D8 = 8,
+ D9 = 9,
D10 = 10,
D11 = 11,
D12 = 12,
D13 = 13,
D14 = 14,
D15 = 15,
-#ifdef VFPv3_D16
- kNumberOfDRegisters = 16,
-#else
D16 = 16,
D17 = 17,
D18 = 18,
@@ -103,7 +91,6 @@ enum DRegister {
D30 = 30,
D31 = 31,
kNumberOfDRegisters = 32,
-#endif
kNumberOfOverlappingDRegisters = 16,
kNoDRegister = -1,
};
@@ -111,18 +98,18 @@ std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
// Values for the condition field as defined in section A3.2.
-enum Condition {
+enum Condition { // private marker to avoid generate-operator-out.py from processing.
kNoCondition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set/unsigned higher or same
+ CC = 3, // carry clear/unsigned lower
+ MI = 4, // minus/negative
+ PL = 5, // plus/positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
GE = 10, // signed greater than or equal
LT = 11, // signed less than
GT = 12, // signed greater than
@@ -138,16 +125,16 @@ std::ostream& operator<<(std::ostream& os, const Condition& rhs);
// as defined in section A3.4
enum Opcode {
kNoOperand = -1,
- AND = 0, // Logical AND
- EOR = 1, // Logical Exclusive OR
- SUB = 2, // Subtract
- RSB = 3, // Reverse Subtract
- ADD = 4, // Add
- ADC = 5, // Add with Carry
- SBC = 6, // Subtract with Carry
- RSC = 7, // Reverse Subtract with Carry
- TST = 8, // Test
- TEQ = 9, // Test Equivalence
+ AND = 0, // Logical AND
+ EOR = 1, // Logical Exclusive OR
+ SUB = 2, // Subtract
+ RSB = 3, // Reverse Subtract
+ ADD = 4, // Add
+ ADC = 5, // Add with Carry
+ SBC = 6, // Subtract with Carry
+ RSC = 7, // Reverse Subtract with Carry
+ TST = 8, // Test
+ TEQ = 9, // Test Equivalence
CMP = 10, // Compare
CMN = 11, // Compare Negated
ORR = 12, // Logical (inclusive) OR
@@ -156,7 +143,7 @@ enum Opcode {
MVN = 15, // Move Not
kMaxOperand = 16
};
-
+std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
@@ -168,11 +155,11 @@ enum Shift {
RRX = 4, // Rotate right with extend.
kMaxShift
};
-
+std::ostream& operator<<(std::ostream& os, const Shift& rhs);
// Constants used for the decoding or encoding of the individual fields of
// instructions. Based on the "Figure 3-1 ARM instruction set summary".
-enum InstructionFields {
+enum InstructionFields { // private marker to avoid generate-operator-out.py from processing.
kConditionShift = 28,
kConditionBits = 4,
kTypeShift = 25,
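
The Opcode and Shift enums above gain operator<< declarations whose definitions are produced by tools/generate-operator-out.py, while enums carrying the "private marker" comment are skipped by that script. A hand-written sketch of what such a generated operator looks like (MiniShift is a hypothetical stand-in; the real bodies live in a generated source file):

#include <iostream>
#include <ostream>

// Hypothetical stand-in enum; the real Shift enum and its generated
// operator<< belong to ART's constants_arm.h and its generated sources.
enum MiniShift { LSL = 0, LSR = 1, ASR = 2, ROR = 3, RRX = 4 };

std::ostream& operator<<(std::ostream& os, const MiniShift& rhs) {
  switch (rhs) {
    case LSL: os << "LSL"; break;
    case LSR: os << "LSR"; break;
    case ASR: os << "ASR"; break;
    case ROR: os << "ROR"; break;
    case RRX: os << "RRX"; break;
    default:  os << "MiniShift[" << static_cast<int>(rhs) << "]"; break;
  }
  return os;
}

int main() {
  std::cout << ASR << std::endl;  // prints "ASR"; handy in DCHECK/LOG output
  return 0;
}
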
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1af7374a3e..02011b87a0 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -474,7 +474,7 @@ void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
+void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
// TODO: Should we check that m_scratch is IP? - see arm.
#if ANDROID_SMP != 0
___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
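
MemoryBarrier keeps its scratch parameter for interface compatibility but never reads it on this path, so the parameter itself is annotated instead of the warning being disabled for the whole build. A self-contained sketch of the pattern (the local macro mirrors what a shared ART header provides; the names are illustrative):

// Illustrative local definition; in ART the macro comes from a shared header.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))

struct BarrierEmitter {
  // The parameter stays in the signature so every backend shares one
  // interface, but this backend ignores it, so it is annotated to satisfy
  // -Wunused-parameter.
  void MemoryBarrier(int scratch_reg ATTRIBUTE_UNUSED) {
    __sync_synchronize();  // stand-in for emitting a real dmb instruction
  }
};

int main() {
  BarrierEmitter e;
  e.MemoryBarrier(12);
  return 0;
}
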
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index e6b4a6a47c..c137e46804 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -68,7 +68,8 @@ class ArrayRef {
template <typename U, size_t size>
constexpr ArrayRef(U (&array)[size],
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::type
+ t ATTRIBUTE_UNUSED = tag())
: array_(array), size_(size) {
}
@@ -76,12 +77,6 @@ class ArrayRef {
: array_(array), size_(size) {
}
- template <typename U>
- constexpr ArrayRef(U* array, size_t size,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
- : array_(array), size_(size) {
- }
-
template <typename Alloc>
explicit ArrayRef(std::vector<T, Alloc>& v)
: array_(v.data()), size_(v.size()) {
@@ -89,7 +84,8 @@ class ArrayRef {
template <typename U, typename Alloc>
ArrayRef(const std::vector<U, Alloc>& v,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::tag t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::tag
+ t ATTRIBUTE_UNUSED = tag())
: array_(v.data()), size_(v.size()) {
}
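
The constructors above use a defaulted tag parameter gated by std::enable_if: the argument exists only so SFINAE can remove the overload when T is not const U, which is why it can safely be marked unused. A compact sketch of the trick under those assumptions (MiniRef is hypothetical, not ART's ArrayRef):

#include <cstddef>
#include <type_traits>
#include <vector>

// Hypothetical MiniRef: the extra defaulted parameter only exists to disable
// the constructor via SFINAE when T is not 'const U'; it is never read.
template <typename T>
class MiniRef {
 private:
  struct tag {};

 public:
  template <typename U, typename Alloc>
  MiniRef(const std::vector<U, Alloc>& v,
          typename std::enable_if<std::is_same<T, const U>::value, tag>::type = tag())
      : data_(v.data()), size_(v.size()) {}

  std::size_t size() const { return size_; }

 private:
  T* data_;
  std::size_t size_;
};

int main() {
  std::vector<int> v = {1, 2, 3};
  MiniRef<const int> ref(v);  // OK: T is 'const int', U is 'int'
  // MiniRef<int> bad(v);     // would not compile: the constructor SFINAEs away
  return ref.size() == 3 ? 0 : 1;
}
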
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 8a1289dc17..68345129c3 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -125,77 +125,91 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) {
+void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) {
+void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) {
+void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) {
+void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 2b0c94c9e0..e1b6d7c21d 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -366,7 +366,7 @@ class Assembler {
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { }
+ virtual void Comment(const char* format, ...) { UNUSED(format); }
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
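
Comment() takes a printf-style format, but the base implementation is a no-op, so the argument is consumed with UNUSED() inside the body; that is the usual choice when an attribute on the parameter is inconvenient, as with variadic functions. A sketch with a local stand-in macro (illustrative, not ART's definition):

// Illustrative stand-in; ART defines UNUSED() in a shared base header.
#define UNUSED(x) ((void)(x))

struct NopAssembler {
  // The base implementation ignores the comment entirely; a derived assembler
  // backed by a disassembler could override this and actually format the text.
  virtual void Comment(const char* format, ...) { UNUSED(format); }
  virtual ~NopAssembler() {}
};

int main() {
  NopAssembler a;
  a.Comment("frame size = %d", 64);
  return 0;
}
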
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 61e420c222..fde65e79c3 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -19,26 +19,20 @@
#include <stdint.h>
#include <stddef.h>
-#include "arena_allocator.h"
-namespace art {
+#include "arena_object.h"
-// Type of growable list for memory tuning.
-enum OatListKind {
- kGrowableArrayMisc = 0,
- kGNumListKinds
-};
+namespace art {
// Deprecated
// TODO: Replace all uses with ArenaVector<T>.
template<typename T>
-class GrowableArray {
+class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
public:
- GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+ GrowableArray(ArenaAllocator* arena, size_t init_length)
: arena_(arena),
num_allocated_(init_length),
- num_used_(0),
- kind_(kind) {
+ num_used_(0) {
elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
kArenaAllocGrowableArray));
}
@@ -152,16 +146,10 @@ class GrowableArray {
T* GetRawStorage() const { return elem_list_; }
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(GrowableArray<T>), kArenaAllocGrowableArray);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAllocator* const arena_;
size_t num_allocated_;
size_t num_used_;
- OatListKind kind_;
T* elem_list_;
};
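
GrowableArray no longer declares its own placement operator new and no-op delete; it inherits them from ArenaObject<kArenaAllocGrowableArray>, so arena-allocated types pick up the same allocation behaviour from one base class. A simplified sketch of that pattern with a toy arena (ToyArena and ToyArenaObject are hypothetical, not ART's ArenaAllocator or ArenaObject):

#include <cstddef>
#include <cstdlib>
#include <vector>

// Toy arena standing in for ART's ArenaAllocator: allocations are tracked and
// released together when the arena is destroyed.
class ToyArena {
 public:
  void* Alloc(std::size_t size) {
    void* p = std::malloc(size);
    blocks_.push_back(p);
    return p;
  }
  ~ToyArena() {
    for (void* p : blocks_) std::free(p);
  }
 private:
  std::vector<void*> blocks_;
};

// Base class that routes 'new (arena) T(...)' into the arena, the way
// ArenaObject<kind> does in ART (simplified: no allocation-kind tagging).
class ToyArenaObject {
 public:
  static void* operator new(std::size_t size, ToyArena* arena) {
    return arena->Alloc(size);
  }
  static void operator delete(void*) {}  // memory is reclaimed with the arena
};

class Node : public ToyArenaObject {
 public:
  explicit Node(int value) : value_(value) {}
  int value() const { return value_; }
 private:
  int value_;
};

int main() {
  ToyArena arena;
  Node* n = new (&arena) Node(42);
  // No explicit delete: the arena reclaims all blocks when it is destroyed.
  return (n->value() == 42) ? 0 : 1;
}
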
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 0de7403c07..df93b273d1 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -140,12 +140,15 @@ class ScopedArenaAllocatorAdapter
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p);
+ UNUSED(n);
DebugStackIndirectTopRef::CheckTop();
}
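
In the adapter above, allocate() ignores the placement hint and deallocate() is deliberately empty, because scoped-arena memory is only reclaimed when the whole arena frame is popped; the UNUSED() calls just keep that intent explicit under -Wunused-parameter. A compact sketch of an arena-backed allocator usable with standard containers (ToyArena, ToyArenaAllocator, and the lack of alignment handling are all simplifications, not ART code):

#include <cstddef>
#include <cstdlib>
#include <vector>

// Toy bump-pointer arena: memory is handed out in order and freed wholesale.
class ToyArena {
 public:
  explicit ToyArena(std::size_t capacity)
      : buffer_(static_cast<char*>(std::malloc(capacity))), used_(0) {}
  ~ToyArena() { std::free(buffer_); }
  void* Alloc(std::size_t bytes) {  // sketch only: no alignment or bounds checks
    void* p = buffer_ + used_;
    used_ += bytes;
    return p;
  }
 private:
  char* buffer_;
  std::size_t used_;
};

// Minimal C++11 allocator over ToyArena: deallocate() is a no-op, mirroring
// the adapter above where objects live until the arena itself goes away.
template <typename T>
class ToyArenaAllocator {
 public:
  using value_type = T;
  explicit ToyArenaAllocator(ToyArena* arena) : arena_(arena) {}
  template <typename U>
  ToyArenaAllocator(const ToyArenaAllocator<U>& other) : arena_(other.arena_) {}
  T* allocate(std::size_t n) {
    return static_cast<T*>(arena_->Alloc(n * sizeof(T)));
  }
  void deallocate(T*, std::size_t) {}  // bulk-freed with the arena
  ToyArena* arena_;  // public for brevity in this sketch
};

template <typename T, typename U>
bool operator==(const ToyArenaAllocator<T>& a, const ToyArenaAllocator<U>& b) {
  return a.arena_ == b.arena_;
}
template <typename T, typename U>
bool operator!=(const ToyArenaAllocator<T>& a, const ToyArenaAllocator<U>& b) {
  return !(a == b);
}

int main() {
  ToyArena arena(4096);
  ToyArenaAllocator<int> alloc(&arena);
  std::vector<int, ToyArenaAllocator<int>> v(alloc);
  for (int i = 0; i < 16; ++i) v.push_back(i);
  return v.size() == 16 ? 0 : 1;
}
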
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index ce01077808..e762f7d266 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -35,6 +35,7 @@ static constexpr size_t kSmallFrameSize = 1 * KB;
//
// A frame is considered large when it's above kLargeFrameSize.
static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+ UNUSED(isa);
return size >= kLargeFrameSize;
}