author     Ian Rogers <irogers@google.com>    2014-10-31 00:33:20 -0700
committer  Ian Rogers <irogers@google.com>    2014-11-03 20:01:04 -0800
commit     6a3c1fcb4ba42ad4d5d142c17a3712a6ddd3866f (patch)
tree       9df58b57af13240a93a6da4eefcf03f70cce9ad9
parent     c6e0955737e15f7c0c3575d4e13789b3411f4993 (diff)
Remove -Wno-unused-parameter and -Wno-sign-promo from base cflags.
Fix associated errors about unused parameters and implicit sign conversions. For sign conversion this was largely in the area of enums, so add ostream operators for the affected enums and fix tools/generate-operator-out.py. Tidy arena allocation code and arena-allocated data types, rather than fixing new and delete operators. Remove dead code.

Change-Id: I5b433e722d2f75baacfacae4d32aef4a828bfe1b
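For context, a minimal sketch (not part of the patch) of the two idioms this change leans on: marking parameters as intentionally unused so -Wunused-parameter stays quiet, and giving enums an ostream operator so they can be logged without implicit sign conversion. ATTRIBUTE_UNUSED and UNUSED come from runtime/base/macros.h; the simplified definitions and the operator<< body below are illustrative stand-ins for those macros and for what tools/generate-operator-out.py emits, not the exact ART code.

#include <ostream>

// Simplified stand-ins for the macros defined in runtime/base/macros.h.
#define ATTRIBUTE_UNUSED __attribute__((__unused__))
#define UNUSED(x) ((void)(x))

// Enum as in compiler/dex/compiler_enums.h; the operator<< is declared next to
// the enum and defined in a generated <module>_operator_out.cc file.
enum RegisterClass { kInvalidRegClass, kCoreReg, kFPReg, kRefReg, kAnyReg };

std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs) {
  switch (rhs) {
    case kInvalidRegClass: return os << "kInvalidRegClass";
    case kCoreReg:         return os << "kCoreReg";
    case kFPReg:           return os << "kFPReg";
    case kRefReg:          return os << "kRefReg";
    case kAnyReg:          return os << "kAnyReg";
  }
  return os << "RegisterClass[" << static_cast<int>(rhs) << "]";
}

// The two ways the patch silences -Wunused-parameter without renaming APIs:
// annotate the parameter, or consume it with UNUSED() in the body (as done in
// compiler/dex/backend.h).
int NumReservableVectorRegisters(bool long_or_fp ATTRIBUTE_UNUSED) { return 0; }
int NumReservableVectorRegistersAlt(bool long_or_fp) { UNUSED(long_or_fp); return 0; }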
-rw-r--r--  build/Android.common_build.mk  2
-rw-r--r--  build/Android.gtest.mk  1
-rw-r--r--  compiler/Android.mk  25
-rw-r--r--  compiler/common_compiler_test.cc  3
-rw-r--r--  compiler/compiler.cc  48
-rw-r--r--  compiler/compiler.h  1
-rw-r--r--  compiler/dex/backend.h  7
-rw-r--r--  compiler/dex/compiler_enums.h  5
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc  9
-rw-r--r--  compiler/dex/global_value_numbering.h  39
-rw-r--r--  compiler/dex/local_value_numbering.cc  20
-rw-r--r--  compiler/dex/local_value_numbering.h  13
-rw-r--r--  compiler/dex/mir_dataflow.cc  2
-rw-r--r--  compiler/dex/mir_graph.cc  57
-rw-r--r--  compiler/dex/mir_graph.h  30
-rw-r--r--  compiler/dex/mir_optimization.cc  13
-rw-r--r--  compiler/dex/pass.h  17
-rw-r--r--  compiler/dex/pass_me.h  32
-rw-r--r--  compiler/dex/portable/mir_to_gbc.h  2
-rw-r--r--  compiler/dex/quick/arm/arm_lir.h  4
-rw-r--r--  compiler/dex/quick/arm/call_arm.cc  4
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h  6
-rw-r--r--  compiler/dex/quick/arm/int_arm.cc  207
-rw-r--r--  compiler/dex/quick/arm/target_arm.cc  9
-rw-r--r--  compiler/dex/quick/arm/utility_arm.cc  13
-rw-r--r--  compiler/dex/quick/arm64/arm64_lir.h  8
-rw-r--r--  compiler/dex/quick/arm64/call_arm64.cc  4
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h  6
-rw-r--r--  compiler/dex/quick/arm64/int_arm64.cc  46
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc  9
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc  25
-rw-r--r--  compiler/dex/quick/codegen_util.cc  89
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc  22
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.h  8
-rw-r--r--  compiler/dex/quick/gen_common.cc  8
-rwxr-xr-x  compiler/dex/quick/gen_invoke.cc  51
-rw-r--r--  compiler/dex/quick/mips/assemble_mips.cc  4
-rw-r--r--  compiler/dex/quick/mips/call_mips.cc  4
-rw-r--r--  compiler/dex/quick/mips/codegen_mips.h  6
-rw-r--r--  compiler/dex/quick/mips/fp_mips.cc  7
-rw-r--r--  compiler/dex/quick/mips/int_mips.cc  52
-rw-r--r--  compiler/dex/quick/mips/mips_lir.h  16
-rw-r--r--  compiler/dex/quick/mips/target_mips.cc  10
-rw-r--r--  compiler/dex/quick/mips/utility_mips.cc  21
-rw-r--r--  compiler/dex/quick/mir_to_lir.cc  11
-rw-r--r--  compiler/dex/quick/mir_to_lir.h  27
-rw-r--r--  compiler/dex/quick/quick_compiler.cc  1
-rw-r--r--  compiler/dex/quick/ralloc_util.cc  9
-rw-r--r--  compiler/dex/quick/resource_mask.h  12
-rw-r--r--  compiler/dex/quick/x86/assemble_x86.cc  6
-rw-r--r--  compiler/dex/quick/x86/call_x86.cc  3
-rw-r--r--  compiler/dex/quick/x86/codegen_x86.h  77
-rwxr-xr-x  compiler/dex/quick/x86/fp_x86.cc  6
-rwxr-xr-x  compiler/dex/quick/x86/int_x86.cc  83
-rwxr-xr-x  compiler/dex/quick/x86/target_x86.cc  140
-rw-r--r--  compiler/dex/quick/x86/utility_x86.cc  38
-rw-r--r--  compiler/dex/quick/x86/x86_lir.h  20
-rw-r--r--  compiler/dex/reg_storage.h  8
-rw-r--r--  compiler/dex/verification_results.cc  2
-rw-r--r--  compiler/driver/compiler_driver.h  1
-rw-r--r--  compiler/driver/compiler_options.h  2
-rw-r--r--  compiler/elf_builder.h  2
-rw-r--r--  compiler/elf_writer_quick.cc  6
-rw-r--r--  compiler/image_writer.cc  5
-rw-r--r--  compiler/jni/jni_compiler_test.cc  21
-rw-r--r--  compiler/jni/quick/x86_64/calling_convention_x86_64.cc  1
-rw-r--r--  compiler/llvm/llvm_compiler.cc  6
-rw-r--r--  compiler/oat_writer.cc  19
-rw-r--r--  compiler/optimizing/code_generator.h  14
-rw-r--r--  compiler/optimizing/code_generator_arm.cc  13
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc  10
-rw-r--r--  compiler/optimizing/code_generator_arm64.h  4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc  12
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  12
-rw-r--r--  compiler/optimizing/gvn.h  4
-rw-r--r--  compiler/optimizing/locations.h  4
-rw-r--r--  compiler/optimizing/nodes.cc  12
-rw-r--r--  compiler/optimizing/nodes.h  72
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc  1
-rw-r--r--  compiler/optimizing/parallel_move_test.cc  4
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h  14
-rw-r--r--  compiler/output_stream.cc  31
-rw-r--r--  compiler/output_stream.h  5
-rw-r--r--  compiler/utils/arena_allocator.h  2
-rw-r--r--  compiler/utils/arena_bit_vector.cc  9
-rw-r--r--  compiler/utils/arena_bit_vector.h  18
-rw-r--r--  compiler/utils/arena_containers.h  4
-rw-r--r--  compiler/utils/arena_object.h  29
-rw-r--r--  compiler/utils/arm/assembler_arm.h  38
-rw-r--r--  compiler/utils/arm/assembler_arm32.cc  5
-rw-r--r--  compiler/utils/arm/assembler_arm32.h  5
-rw-r--r--  compiler/utils/arm/assembler_thumb2.cc  43
-rw-r--r--  compiler/utils/arm/assembler_thumb2.h  59
-rw-r--r--  compiler/utils/arm/constants_arm.h  83
-rw-r--r--  compiler/utils/arm64/assembler_arm64.cc  2
-rw-r--r--  compiler/utils/array_ref.h  12
-rw-r--r--  compiler/utils/assembler.cc  66
-rw-r--r--  compiler/utils/assembler.h  2
-rw-r--r--  compiler/utils/growable_array.h  22
-rw-r--r--  compiler/utils/scoped_arena_containers.h  3
-rw-r--r--  compiler/utils/stack_checks.h  1
-rw-r--r--  dex2oat/dex2oat.cc  4
-rw-r--r--  oatdump/oatdump.cc  24
-rw-r--r--  patchoat/patchoat.cc  5
-rw-r--r--  runtime/Android.mk  8
-rw-r--r--  runtime/arch/arch_test.cc  2
-rw-r--r--  runtime/arch/stub_test.cc  60
-rw-r--r--  runtime/arch/x86/context_x86.cc  10
-rw-r--r--  runtime/arch/x86/context_x86.h  10
-rw-r--r--  runtime/arch/x86/fault_handler_x86.cc  8
-rw-r--r--  runtime/base/allocator.cc  12
-rw-r--r--  runtime/base/allocator.h  2
-rw-r--r--  runtime/base/macros.h  14
-rw-r--r--  runtime/base/unix_file/mapped_file.cc  166
-rw-r--r--  runtime/base/unix_file/mapped_file.h  102
-rw-r--r--  runtime/base/unix_file/mapped_file_test.cc  272
-rw-r--r--  runtime/base/unix_file/null_file.cc  6
-rw-r--r--  runtime/check_jni.cc  8
-rw-r--r--  runtime/class_linker-inl.h  5
-rw-r--r--  runtime/class_linker.cc  14
-rw-r--r--  runtime/class_linker.h  8
-rw-r--r--  runtime/common_runtime_test.h  2
-rw-r--r--  runtime/debugger.cc  11
-rw-r--r--  runtime/debugger.h  3
-rw-r--r--  runtime/dex_file.h  2
-rw-r--r--  runtime/dex_instruction.h  7
-rw-r--r--  runtime/entrypoints/entrypoint_utils-inl.h  11
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h  3
-rw-r--r--  runtime/entrypoints/portable/portable_fillarray_entrypoints.cc  1
-rw-r--r--  runtime/entrypoints/portable/portable_trampoline_entrypoints.cc  1
-rw-r--r--  runtime/entrypoints/quick/quick_alloc_entrypoints.cc  9
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints.cc  22
-rw-r--r--  runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc  2
-rw-r--r--  runtime/exception_test.cc  2
-rw-r--r--  runtime/fault_handler.cc  2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc  2
-rw-r--r--  runtime/gc/accounting/remembered_set.cc  1
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc  2
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc  12
-rw-r--r--  runtime/gc/allocator/rosalloc.cc  6
-rw-r--r--  runtime/gc/allocator/rosalloc.h  23
-rw-r--r--  runtime/gc/allocator_type.h  3
-rw-r--r--  runtime/gc/collector/concurrent_copying.h  4
-rw-r--r--  runtime/gc/collector/mark_sweep.cc  1
-rw-r--r--  runtime/gc/collector/sticky_mark_sweep.cc  1
-rw-r--r--  runtime/gc/heap.cc  15
-rw-r--r--  runtime/gc/heap.h  3
-rw-r--r--  runtime/gc/space/dlmalloc_space.cc  1
-rw-r--r--  runtime/gc/space/large_object_space.cc  6
-rw-r--r--  runtime/gc/space/valgrind_malloc_space.h  1
-rw-r--r--  runtime/gc/space/zygote_space.cc  22
-rw-r--r--  runtime/handle_scope.h  2
-rw-r--r--  runtime/instruction_set.cc  5
-rw-r--r--  runtime/instrumentation.cc  1
-rw-r--r--  runtime/instrumentation.h  15
-rw-r--r--  runtime/intern_table.cc  3
-rw-r--r--  runtime/interpreter/interpreter.cc  7
-rw-r--r--  runtime/jdwp/jdwp_handler.cc  1
-rw-r--r--  runtime/jdwp/object_registry.cc  1
-rw-r--r--  runtime/jni_internal.cc  3
-rw-r--r--  runtime/lock_word.h  2
-rw-r--r--  runtime/mem_map.cc  3
-rw-r--r--  runtime/mirror/array-inl.h  1
-rw-r--r--  runtime/mirror/class.h  2
-rw-r--r--  runtime/mirror/object-inl.h  2
-rw-r--r--  runtime/monitor_pool.h  3
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  16
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc  3
-rw-r--r--  runtime/native/java_lang_reflect_Field.cc  20
-rw-r--r--  runtime/noop_compiler_callbacks.h  4
-rw-r--r--  runtime/parsed_options.cc  2
-rw-r--r--  runtime/profiler_options.h  1
-rw-r--r--  runtime/quick_exception_handler.cc  4
-rw-r--r--  runtime/read_barrier-inl.h  4
-rw-r--r--  runtime/reflection.cc  5
-rw-r--r--  runtime/runtime.cc  10
-rw-r--r--  runtime/runtime.h  8
-rw-r--r--  runtime/stack.h  6
-rw-r--r--  runtime/thread_list.cc  8
-rw-r--r--  runtime/thread_pool.cc  50
-rw-r--r--  runtime/thread_pool.h  4
-rw-r--r--  runtime/thread_state.h  3
-rw-r--r--  runtime/trace.cc  26
-rw-r--r--  runtime/transaction.cc  62
-rw-r--r--  runtime/transaction.h  17
-rw-r--r--  runtime/utils.h  7
-rw-r--r--  test/004-JniTest/jni_test.cc  12
-rw-r--r--  test/004-SignalTest/signaltest.cc  9
-rw-r--r--  test/115-native-bridge/nativebridge.cc  8
-rw-r--r--  test/116-nodex2oat/nodex2oat.cc  2
-rw-r--r--  test/118-noimage-dex2oat/noimage-dex2oat.cc  4
-rwxr-xr-x  tools/generate-operator-out.py  28
192 files changed, 1543 insertions, 1838 deletions
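One recurring pattern in the hunks below is worth calling out before the diff: classes that used to declare their own placement operator new/delete for arena allocation (GlobalValueNumbering, LocalValueNumbering, MIR, BasicBlock) now derive from shared ArenaObject<>/DeletableArenaObject<> base classes from compiler/utils/arena_object.h. A minimal sketch of that base-class idiom, using a simplified allocator rather than ART's real ArenaAllocator:

#include <cstddef>
#include <cstdlib>

// Simplified stand-ins for ART's allocation-kind tags and ArenaAllocator.
enum ArenaAllocKind { kArenaAllocMisc, kArenaAllocMIR, kArenaAllocBB };

class ArenaAllocator {
 public:
  void* Alloc(size_t bytes, ArenaAllocKind kind) {
    (void)kind;                 // Real arenas bucket allocations by kind for statistics.
    return std::malloc(bytes);  // Real arenas bump-allocate and free everything in bulk.
  }
};

// Shared base class: objects are created with "new (arena) T(...)"; individual
// deletion is a no-op because the arena owns all of the memory.
template <ArenaAllocKind kAllocKind>
class ArenaObject {
 public:
  static void* operator new(size_t size, ArenaAllocator* allocator) {
    return allocator->Alloc(size, kAllocKind);
  }
  static void operator delete(void*, size_t) {}  // Nop.
};

// Mirrors "class MIR : public ArenaObject<kArenaAllocMIR>" from mir_graph.h.
class MIR : public ArenaObject<kArenaAllocMIR> {
 public:
  int opcode = 0;
};

int main() {
  ArenaAllocator arena;
  MIR* mir = new (&arena) MIR();  // Allocated in the arena, never deleted.
  return mir->opcode;
}

The DeletableArenaObject variant used for BasicBlock and the value-numbering classes additionally allows a delete-expression to run the destructor while still leaving deallocation to the arena, matching the "destroy without deallocation" comments in the code this patch removes.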
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index d90a31e832..84d77f8ab4 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -184,8 +184,6 @@ art_cflags := \
-Wall \
-Werror \
-Wextra \
- -Wno-sign-promo \
- -Wno-unused-parameter \
-Wstrict-aliasing \
-fstrict-aliasing \
-Wunreachable-code \
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk
index cc3fb208e9..63200b7848 100644
--- a/build/Android.gtest.mk
+++ b/build/Android.gtest.mk
@@ -83,7 +83,6 @@ RUNTIME_GTEST_COMMON_SRC_FILES := \
runtime/base/stringprintf_test.cc \
runtime/base/timing_logger_test.cc \
runtime/base/unix_file/fd_file_test.cc \
- runtime/base/unix_file/mapped_file_test.cc \
runtime/base/unix_file/null_file_test.cc \
runtime/base/unix_file/random_access_file_utils_test.cc \
runtime/base/unix_file/string_file_test.cc \
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 2d38629db1..610f453816 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -58,22 +58,22 @@ LIBART_COMPILER_SRC_FILES := \
dex/quick/x86/target_x86.cc \
dex/quick/x86/utility_x86.cc \
dex/dex_to_dex_compiler.cc \
+ dex/bb_optimizations.cc \
+ dex/compiler_ir.cc \
+ dex/frontend.cc \
+ dex/mir_analysis.cc \
dex/mir_dataflow.cc \
dex/mir_field_info.cc \
+ dex/mir_graph.cc \
dex/mir_method_info.cc \
dex/mir_optimization.cc \
- dex/bb_optimizations.cc \
- dex/compiler_ir.cc \
dex/post_opt_passes.cc \
dex/pass_driver_me_opts.cc \
dex/pass_driver_me_post_opt.cc \
- dex/frontend.cc \
- dex/mir_graph.cc \
- dex/mir_analysis.cc \
+ dex/ssa_transformation.cc \
dex/verified_method.cc \
dex/verification_results.cc \
dex/vreg_analysis.cc \
- dex/ssa_transformation.cc \
dex/quick_compiler_callbacks.cc \
driver/compiler_driver.cc \
driver/dex_compilation_unit.cc \
@@ -133,6 +133,7 @@ LIBART_COMPILER_SRC_FILES := \
file_output_stream.cc \
image_writer.cc \
oat_writer.cc \
+ output_stream.cc \
vector_output_stream.cc
ifeq ($(ART_SEA_IR_MODE),true)
@@ -168,7 +169,17 @@ LIBART_COMPILER_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
LIBART_COMPILER_ENUM_OPERATOR_OUT_HEADER_FILES := \
- dex/compiler_enums.h
+ dex/quick/arm/arm_lir.h \
+ dex/quick/arm64/arm64_lir.h \
+ dex/quick/mips/mips_lir.h \
+ dex/quick/resource_mask.h \
+ dex/compiler_enums.h \
+ dex/global_value_numbering.h \
+ dex/pass_me.h \
+ driver/compiler_driver.h \
+ driver/compiler_options.h \
+ optimizing/locations.h \
+ utils/arm/constants_arm.h
# $(1): target or host
# $(2): ndebug or debug
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 7e19e15961..bfdb537427 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -144,8 +144,7 @@ void CommonCompilerTest::SetUp() {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(
- runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
index 744bafa0fe..b9fcf5bab6 100644
--- a/compiler/compiler.cc
+++ b/compiler/compiler.cc
@@ -25,13 +25,24 @@
namespace art {
#ifdef ART_SEA_IR_MODE
-extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
+constexpr bool kCanUseSeaIR = true;
+#else
+constexpr bool kCanUseSeaIR = false;
+#endif
+
+extern "C" art::CompiledMethod* SeaIrCompileMethod(const art::DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t access_flags ATTRIBUTE_UNUSED,
+ art::InvokeType invoke_type ATTRIBUTE_UNUSED,
+ uint16_t class_def_idx ATTRIBUTE_UNUSED,
+ uint32_t method_idx ATTRIBUTE_UNUSED,
+ jobject class_loader ATTRIBUTE_UNUSED,
+ const art::DexFile& dex_file ATTRIBUTE_UNUSED)
+#ifdef ART_SEA_IR_MODE
+; // NOLINT(whitespace/semicolon)
+#else
+{
+ UNREACHABLE();
+}
#endif
@@ -42,19 +53,18 @@ CompiledMethod* Compiler::TryCompileWithSeaIR(const art::DexFile::CodeItem* code
uint32_t method_idx,
jobject class_loader,
const art::DexFile& dex_file) {
-#ifdef ART_SEA_IR_MODE
- bool use_sea = (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) {
- LOG(INFO) << "Using SEA IR to compile..." << std::endl;
- return SeaIrCompileMethod(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
+ bool use_sea = kCanUseSeaIR &&
+ (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+ if (use_sea) {
+ LOG(INFO) << "Using SEA IR to compile..." << std::endl;
+ return SeaIrCompileMethod(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
}
-#endif
return nullptr;
}
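The compiler.cc hunk above replaces #ifdef-guarded call sites with a constexpr flag (kCanUseSeaIR) so the call always compiles, every parameter stays "used", and the disabled branch folds away as dead code. A minimal sketch of the same idea with hypothetical names (USE_FEATURE, kCanUseFeature, FeatureCompile), not taken from the patch:

#include <iostream>

#ifdef USE_FEATURE
constexpr bool kCanUseFeature = true;
#else
constexpr bool kCanUseFeature = false;
#endif

// Declared either way. When the feature is compiled out, a stub keeps the
// program linking; it is never reached because kCanUseFeature is false.
int FeatureCompile(int input)
#ifdef USE_FEATURE
;  // Real definition lives in the optional backend.
#else
{ (void)input; return 0; }
#endif

int Compile(int input) {
  // An ordinary branch instead of #ifdef: 'input' is always used, and the
  // optimizer removes the call when kCanUseFeature is false.
  if (kCanUseFeature) {
    return FeatureCompile(input);
  }
  return input;
}

int main() {
  std::cout << Compile(7) << std::endl;
  return 0;
}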
diff --git a/compiler/compiler.h b/compiler/compiler.h
index b92eda7942..c2c15ff9cf 100644
--- a/compiler/compiler.h
+++ b/compiler/compiler.h
@@ -115,6 +115,7 @@ class Compiler {
*/
virtual std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver)
const {
+ UNUSED(driver);
return nullptr;
}
diff --git a/compiler/dex/backend.h b/compiler/dex/backend.h
index cab3427e38..9cad9338fc 100644
--- a/compiler/dex/backend.h
+++ b/compiler/dex/backend.h
@@ -38,7 +38,7 @@ class Backend {
/*
* Return the number of reservable vector registers supported
- * @param long_or_fp ‘true’ if floating point computations will be
+ * @param long_or_fp, true if floating point computations will be
* executed or the operations will be long type while vector
* registers are reserved.
* @return the number of vector registers that are available
@@ -46,7 +46,10 @@ class Backend {
* are held back to generate scalar code without exhausting vector
* registers, if scalar code also uses the vector registers.
*/
- virtual int NumReservableVectorRegisters(bool long_or_fp) { return 0; }
+ virtual int NumReservableVectorRegisters(bool long_or_fp) {
+ UNUSED(long_or_fp);
+ return 0;
+ }
protected:
explicit Backend(ArenaAllocator* arena) : arena_(arena) {}
diff --git a/compiler/dex/compiler_enums.h b/compiler/dex/compiler_enums.h
index 0b769991b3..1297ba9c7f 100644
--- a/compiler/dex/compiler_enums.h
+++ b/compiler/dex/compiler_enums.h
@@ -28,6 +28,7 @@ enum RegisterClass {
kRefReg,
kAnyReg,
};
+std::ostream& operator<<(std::ostream& os, const RegisterClass& rhs);
enum BitsUsed {
kSize32Bits,
@@ -82,6 +83,7 @@ enum RegLocationType {
kLocCompilerTemp,
kLocInvalid
};
+std::ostream& operator<<(std::ostream& os, const RegLocationType& rhs);
enum BBType {
kNullBlock,
@@ -91,6 +93,7 @@ enum BBType {
kExceptionHandling,
kDead,
};
+std::ostream& operator<<(std::ostream& os, const BBType& code);
// Shared pseudo opcodes - must be < 0.
enum LIRPseudoOpcode {
@@ -111,6 +114,7 @@ enum LIRPseudoOpcode {
kPseudoEHBlockLabel = -2,
kPseudoNormalBlockLabel = -1,
};
+std::ostream& operator<<(std::ostream& os, const LIRPseudoOpcode& rhs);
enum ExtendedMIROpcode {
kMirOpFirst = kNumPackedOpcodes,
@@ -334,6 +338,7 @@ enum BlockListType {
kPackedSwitch,
kSparseSwitch,
};
+std::ostream& operator<<(std::ostream& os, const BlockListType& rhs);
enum AssemblerStatus {
kSuccess,
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index f9a05c2eba..205a5218f2 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -282,10 +282,11 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst,
} // namespace art
extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item,
- uint32_t access_flags, art::InvokeType invoke_type,
- uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
- const art::DexFile& dex_file,
- art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ uint32_t access_flags, art::InvokeType invoke_type,
+ uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+ const art::DexFile& dex_file,
+ art::DexToDexCompilationLevel dex_to_dex_compilation_level) {
+ UNUSED(invoke_type);
if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) {
art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(),
dex_file, code_item, class_def_idx, method_idx, access_flags,
diff --git a/compiler/dex/global_value_numbering.h b/compiler/dex/global_value_numbering.h
index a4a7602c4b..72d111244d 100644
--- a/compiler/dex/global_value_numbering.h
+++ b/compiler/dex/global_value_numbering.h
@@ -19,14 +19,14 @@
#include "base/macros.h"
#include "compiler_internals.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
class LocalValueNumbering;
class MirFieldInfo;
-class GlobalValueNumbering {
+class GlobalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
public:
enum Mode {
kModeGvn,
@@ -55,33 +55,17 @@ class GlobalValueNumbering {
}
// Allow modifications.
- void StartPostProcessing() {
- DCHECK(Good());
- DCHECK_EQ(mode_, kModeGvn);
- mode_ = kModeGvnPostProcessing;
- }
+ void StartPostProcessing();
bool CanModify() const {
return modifications_allowed_ && Good();
}
- // GlobalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(GlobalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a GlobalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
static constexpr uint16_t kNoValue = 0xffffu;
// Allocate a new value name.
- uint16_t NewValueName() {
- DCHECK_NE(mode_, kModeGvnPostProcessing);
- ++last_value_;
- return last_value_;
- }
+ uint16_t NewValueName();
// Key is concatenation of opcode, operand1, operand2 and modifier, value is value name.
typedef ScopedArenaSafeMap<uint64_t, uint16_t> ValueMap;
@@ -228,7 +212,7 @@ class GlobalValueNumbering {
}
CompilationUnit* const cu_;
- MIRGraph* mir_graph_;
+ MIRGraph* const mir_graph_;
ScopedArenaAllocator* const allocator_;
// The maximum number of nested loops that we accept for GVN.
@@ -270,6 +254,19 @@ class GlobalValueNumbering {
DISALLOW_COPY_AND_ASSIGN(GlobalValueNumbering);
};
+std::ostream& operator<<(std::ostream& os, const GlobalValueNumbering::Mode& rhs);
+
+inline void GlobalValueNumbering::StartPostProcessing() {
+ DCHECK(Good());
+ DCHECK_EQ(mode_, kModeGvn);
+ mode_ = kModeGvnPostProcessing;
+}
+
+inline uint16_t GlobalValueNumbering::NewValueName() {
+ DCHECK_NE(mode_, kModeGvnPostProcessing);
+ ++last_value_;
+ return last_value_;
+}
} // namespace art
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a171d7c2b5..a7d93538d6 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -107,7 +107,8 @@ class LocalValueNumbering::AliasingIFieldVersions {
class LocalValueNumbering::NonAliasingArrayVersions {
public:
- static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
+ static uint16_t StartMemoryVersion(GlobalValueNumbering* gvn,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
uint16_t array) {
return gvn->LookupValue(kNonAliasingArrayStartVersionOp, array, kNoValue, kNoValue);
}
@@ -129,8 +130,9 @@ class LocalValueNumbering::NonAliasingArrayVersions {
gvn, lvn, &lvn->non_aliasing_array_value_map_, array, index);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t array) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn ATTRIBUTE_UNUSED,
+ uint16_t array ATTRIBUTE_UNUSED) {
return false; // Not affected by global_memory_version_.
}
@@ -164,8 +166,9 @@ class LocalValueNumbering::AliasingArrayVersions {
return gvn->LookupValue(kAliasingArrayOp, type, location, memory_version);
}
- static uint16_t LookupMergeValue(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type, uint16_t location) {
+ static uint16_t LookupMergeValue(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED, uint16_t location) {
// If the location is non-aliasing in lvn, use the non-aliasing value.
uint16_t array = gvn->GetArrayLocationBase(location);
if (lvn->IsNonAliasingArray(array, type)) {
@@ -176,8 +179,11 @@ class LocalValueNumbering::AliasingArrayVersions {
gvn, lvn, &lvn->aliasing_array_value_map_, type, location);
}
- static bool HasNewBaseVersion(GlobalValueNumbering* gvn, const LocalValueNumbering* lvn,
- uint16_t type) {
+ static bool HasNewBaseVersion(GlobalValueNumbering* gvn ATTRIBUTE_UNUSED,
+ const LocalValueNumbering* lvn,
+ uint16_t type ATTRIBUTE_UNUSED) {
+ UNUSED(gvn);
+ UNUSED(type);
return lvn->global_memory_version_ == lvn->merge_new_memory_version_;
}
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index dd8d2db8f4..535fba18b3 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -21,8 +21,7 @@
#include "compiler_internals.h"
#include "global_value_numbering.h"
-#include "utils/scoped_arena_allocator.h"
-#include "utils/scoped_arena_containers.h"
+#include "utils/arena_object.h"
namespace art {
@@ -31,7 +30,7 @@ class DexFile;
// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;
-class LocalValueNumbering {
+class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
private:
static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;
@@ -69,14 +68,6 @@ class LocalValueNumbering {
uint16_t GetValueNumber(MIR* mir);
- // LocalValueNumbering should be allocated on the ArenaStack (or the native stack).
- static void* operator new(size_t size, ScopedArenaAllocator* allocator) {
- return allocator->Alloc(sizeof(LocalValueNumbering), kArenaAllocMisc);
- }
-
- // Allow delete-expression to destroy a LocalValueNumbering object without deallocation.
- static void operator delete(void* ptr) { UNUSED(ptr); }
-
private:
// A set of value names.
typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
diff --git a/compiler/dex/mir_dataflow.cc b/compiler/dex/mir_dataflow.cc
index e6a8cead00..5b7ac3ca1b 100644
--- a/compiler/dex/mir_dataflow.cc
+++ b/compiler/dex/mir_dataflow.cc
@@ -1343,7 +1343,7 @@ void MIRGraph::CompilerInitializeSSAConversion() {
* counts explicitly used s_regs. A later phase will add implicit
* counts for things such as Method*, null-checked references, etc.
*/
-void MIRGraph::CountUses(struct BasicBlock* bb) {
+void MIRGraph::CountUses(class BasicBlock* bb) {
if (bb->block_type != kDalvikByteCode) {
return;
}
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index e0f471ebeb..b87ab66347 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -302,28 +302,28 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
* (by the caller)
* Utilizes a map for fast lookup of the typical cases.
*/
-BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
+BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool create,
BasicBlock** immed_pred_block_p) {
if (code_offset >= current_code_item_->insns_size_in_code_units_) {
- return NULL;
+ return nullptr;
}
int block_id = dex_pc_to_block_map_[code_offset];
BasicBlock* bb = GetBasicBlock(block_id);
- if ((bb != NULL) && (bb->start_offset == code_offset)) {
+ if ((bb != nullptr) && (bb->start_offset == code_offset)) {
// Does this containing block start with the desired instruction?
return bb;
}
// No direct hit.
if (!create) {
- return NULL;
+ return nullptr;
}
- if (bb != NULL) {
+ if (bb != nullptr) {
// The target exists somewhere in an existing block.
- return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : NULL);
+ return SplitBlock(code_offset, bb, bb == *immed_pred_block_p ? immed_pred_block_p : nullptr);
}
// Create a new block.
@@ -360,8 +360,7 @@ void MIRGraph::ProcessTryCatchBlocks() {
CatchHandlerIterator iterator(handlers_ptr);
for (; iterator.HasNext(); iterator.Next()) {
uint32_t address = iterator.GetHandlerAddress();
- FindBlock(address, false /* split */, true /*create*/,
- /* immed_pred_block_p */ NULL);
+ FindBlock(address, true /*create*/, /* immed_pred_block_p */ nullptr);
}
handlers_ptr = iterator.EndDataPointer();
}
@@ -466,7 +465,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
LOG(FATAL) << "Unexpected opcode(" << insn->dalvikInsn.opcode << ") with kBranch set";
}
CountBranch(target);
- BasicBlock* taken_block = FindBlock(target, /* split */ true, /* create */ true,
+ BasicBlock* taken_block = FindBlock(target, /* create */ true,
/* immed_pred_block_p */ &cur_block);
cur_block->taken = taken_block->id;
taken_block->predecessors.push_back(cur_block->id);
@@ -474,19 +473,6 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Always terminate the current block for conditional branches */
if (flags & Instruction::kContinue) {
BasicBlock* fallthrough_block = FindBlock(cur_offset + width,
- /*
- * If the method is processed
- * in sequential order from the
- * beginning, we don't need to
- * specify split for continue
- * blocks. However, this
- * routine can be called by
- * compileLoop, which starts
- * parsing the method from an
- * arbitrary address in the
- * method body.
- */
- true,
/* create */
true,
/* immed_pred_block_p */
@@ -494,8 +480,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
} else if (code_ptr < code_end) {
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
return cur_block;
}
@@ -503,6 +488,7 @@ BasicBlock* MIRGraph::ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffs
/* Process instructions with the kSwitch flag */
BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags) {
+ UNUSED(flags);
const uint16_t* switch_data =
reinterpret_cast<const uint16_t*>(GetCurrentInsns() + cur_offset + insn->dalvikInsn.vB);
int size;
@@ -554,8 +540,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
cur_block->successor_blocks.reserve(size);
for (i = 0; i < size; i++) {
- BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* split */ true,
- /* create */ true, /* immed_pred_block_p */ &cur_block);
+ BasicBlock* case_block = FindBlock(cur_offset + target_table[i], /* create */ true,
+ /* immed_pred_block_p */ &cur_block);
SuccessorBlockInfo* successor_block_info =
static_cast<SuccessorBlockInfo*>(arena_->Alloc(sizeof(SuccessorBlockInfo),
kArenaAllocSuccessor));
@@ -568,8 +554,8 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
}
/* Fall-through case */
- BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* split */ false,
- /* create */ true, /* immed_pred_block_p */ NULL);
+ BasicBlock* fallthrough_block = FindBlock(cur_offset + width, /* create */ true,
+ /* immed_pred_block_p */ nullptr);
cur_block->fall_through = fallthrough_block->id;
fallthrough_block->predecessors.push_back(cur_block->id);
return cur_block;
@@ -579,6 +565,7 @@ BasicBlock* MIRGraph::ProcessCanSwitch(BasicBlock* cur_block, MIR* insn, DexOffs
BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset,
int width, int flags, ArenaBitVector* try_block_addr,
const uint16_t* code_ptr, const uint16_t* code_end) {
+ UNUSED(flags);
bool in_try_block = try_block_addr->IsBitSet(cur_offset);
bool is_throw = (insn->dalvikInsn.opcode == Instruction::THROW);
@@ -593,8 +580,8 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
}
for (; iterator.HasNext(); iterator.Next()) {
- BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* split*/,
- false /* creat */, NULL /* immed_pred_block_p */);
+ BasicBlock* catch_block = FindBlock(iterator.GetHandlerAddress(), false /* create */,
+ nullptr /* immed_pred_block_p */);
if (insn->dalvikInsn.opcode == Instruction::MONITOR_EXIT &&
IsBadMonitorExitCatch(insn->offset, catch_block->start_offset)) {
// Don't allow monitor-exit to catch its own exception, http://b/15745363 .
@@ -629,8 +616,7 @@ BasicBlock* MIRGraph::ProcessCanThrow(BasicBlock* cur_block, MIR* insn, DexOffse
cur_block->explicit_throw = true;
if (code_ptr < code_end) {
// Force creation of new block following THROW via side-effect.
- FindBlock(cur_offset + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(cur_offset + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
if (!in_try_block) {
// Don't split a THROW that can't rethrow - we're done.
@@ -813,8 +799,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
* Create a fallthrough block for real instructions
* (incl. NOP).
*/
- FindBlock(current_offset_ + width, /* split */ false, /* create */ true,
- /* immed_pred_block_p */ NULL);
+ FindBlock(current_offset_ + width, /* create */ true, /* immed_pred_block_p */ nullptr);
}
} else if (flags & Instruction::kThrow) {
cur_block = ProcessCanThrow(cur_block, insn, current_offset_, width, flags, try_block_addr_,
@@ -837,8 +822,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
}
}
current_offset_ += width;
- BasicBlock* next_block = FindBlock(current_offset_, /* split */ false, /* create */
- false, /* immed_pred_block_p */ NULL);
+ BasicBlock* next_block = FindBlock(current_offset_, /* create */ false,
+ /* immed_pred_block_p */ nullptr);
if (next_block) {
/*
* The next instruction could be the target of a previously parsed
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index fd4c473444..76f68e28c0 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -228,7 +228,8 @@ struct SSARepresentation {
* The Midlevel Intermediate Representation node, which may be largely considered a
* wrapper around a Dalvik byte code.
*/
-struct MIR {
+class MIR : public ArenaObject<kArenaAllocMIR> {
+ public:
/*
* TODO: remove embedded DecodedInstruction to save space, keeping only opcode. Recover
* additional fields on as-needed basis. Question: how to support MIR Pseudo-ops; probably
@@ -344,16 +345,12 @@ struct MIR {
MIR* Copy(CompilationUnit *c_unit);
MIR* Copy(MIRGraph* mir_Graph);
-
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(MIR), kArenaAllocMIR);
- }
- static void operator delete(void* p) {} // Nop.
};
struct SuccessorBlockInfo;
-struct BasicBlock {
+class BasicBlock : public DeletableArenaObject<kArenaAllocBB> {
+ public:
BasicBlock(BasicBlockId block_id, BBType type, ArenaAllocator* allocator)
: id(block_id),
dfs_id(), start_offset(), fall_through(), taken(), i_dom(), nesting_depth(),
@@ -457,10 +454,8 @@ struct BasicBlock {
MIR* GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current);
bool IsExceptionBlock() const;
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(BasicBlock), kArenaAllocBB);
- }
- static void operator delete(void* p) {} // Nop.
+ private:
+ DISALLOW_COPY_AND_ASSIGN(BasicBlock);
};
/*
@@ -548,7 +543,7 @@ class MIRGraph {
/* Find existing block */
BasicBlock* FindBlock(DexOffset code_offset) {
- return FindBlock(code_offset, false, false, NULL);
+ return FindBlock(code_offset, false, NULL);
}
const uint16_t* GetCurrentInsns() const {
@@ -627,7 +622,7 @@ class MIRGraph {
return def_count_;
}
- ArenaAllocator* GetArena() {
+ ArenaAllocator* GetArena() const {
return arena_;
}
@@ -1135,7 +1130,7 @@ class MIRGraph {
* @brief Count the uses in the BasicBlock
* @param bb the BasicBlock
*/
- void CountUses(struct BasicBlock* bb);
+ void CountUses(class BasicBlock* bb);
static uint64_t GetDataFlowAttributes(Instruction::Code opcode);
static uint64_t GetDataFlowAttributes(MIR* mir);
@@ -1208,8 +1203,7 @@ class MIRGraph {
bool ContentIsInsn(const uint16_t* code_ptr);
BasicBlock* SplitBlock(DexOffset code_offset, BasicBlock* orig_block,
BasicBlock** immed_pred_block_p);
- BasicBlock* FindBlock(DexOffset code_offset, bool split, bool create,
- BasicBlock** immed_pred_block_p);
+ BasicBlock* FindBlock(DexOffset code_offset, bool create, BasicBlock** immed_pred_block_p);
void ProcessTryCatchBlocks();
bool IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset, NarrowDexOffset catch_offset);
BasicBlock* ProcessCanBranch(BasicBlock* cur_block, MIR* insn, DexOffset cur_offset, int width,
@@ -1233,7 +1227,7 @@ class MIRGraph {
void ComputeDomPostOrderTraversal(BasicBlock* bb);
int GetSSAUseCount(int s_reg);
bool BasicBlockOpt(BasicBlock* bb);
- bool BuildExtendedBBList(struct BasicBlock* bb);
+ bool BuildExtendedBBList(class BasicBlock* bb);
bool FillDefBlockMatrix(BasicBlock* bb);
void InitializeDominationInfo(BasicBlock* bb);
bool ComputeblockIDom(BasicBlock* bb);
@@ -1305,7 +1299,7 @@ class MIRGraph {
int method_sreg_;
unsigned int attributes_;
Checkstats* checkstats_;
- ArenaAllocator* arena_;
+ ArenaAllocator* const arena_;
int backward_branches_;
int forward_branches_;
size_t num_non_special_compiler_temps_; // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 8e583ccfff..3604eb9e07 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -16,12 +16,11 @@
#include "base/bit_vector-inl.h"
#include "compiler_internals.h"
+#include "dataflow_iterator-inl.h"
#include "global_value_numbering.h"
#include "local_value_numbering.h"
-#include "dataflow_iterator-inl.h"
-#include "dex/global_value_numbering.h"
-#include "dex/quick/dex_file_method_inliner.h"
-#include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "quick/dex_file_method_inliner.h"
+#include "quick/dex_file_to_method_inliner_map.h"
#include "stack.h"
#include "utils/scoped_arena_containers.h"
@@ -660,7 +659,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
}
/* Collect stats on number of checks removed */
-void MIRGraph::CountChecks(struct BasicBlock* bb) {
+void MIRGraph::CountChecks(class BasicBlock* bb) {
if (bb->data_flow_info != NULL) {
for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
if (mir->ssa_rep == NULL) {
@@ -750,7 +749,7 @@ bool MIRGraph::LayoutBlocks(BasicBlock* bb) {
}
/* Combine any basic blocks terminated by instructions that we now know can't throw */
-void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
+void MIRGraph::CombineBlocks(class BasicBlock* bb) {
// Loop here to allow combining a sequence of blocks
while ((bb->block_type == kDalvikByteCode) &&
(bb->last_mir_insn != nullptr) &&
@@ -1510,7 +1509,7 @@ void MIRGraph::DumpCheckStats() {
}
}
-bool MIRGraph::BuildExtendedBBList(struct BasicBlock* bb) {
+bool MIRGraph::BuildExtendedBBList(class BasicBlock* bb) {
if (bb->visited) return false;
if (!((bb->block_type == kEntryBlock) || (bb->block_type == kDalvikByteCode)
|| (bb->block_type == kExitBlock))) {
diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h
index e349eed76d..d3e54a0bf7 100644
--- a/compiler/dex/pass.h
+++ b/compiler/dex/pass.h
@@ -19,11 +19,13 @@
#include <string>
-#include "base/macros.h"
+#include "compiler_ir.h"
+#include "base/logging.h"
+
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -81,15 +83,10 @@ class Pass {
* @param data the object containing data necessary for the pass.
* @return whether or not there is a change when walking the BasicBlock
*/
- virtual bool Worker(PassDataHolder* data) const {
- // Unused parameter.
- UNUSED(data);
-
+ virtual bool Worker(PassDataHolder* data ATTRIBUTE_UNUSED) const {
// Passes that do all their work in Start() or End() should not allow useless node iteration.
- DCHECK(false) << "Unsupported default Worker() used for " << GetName();
-
- // BasicBlock did not change.
- return false;
+ LOG(FATAL) << "Unsupported default Worker() used for " << GetName();
+ UNREACHABLE();
}
static void BasePrintMessage(CompilationUnit* c_unit, const char* pass_name, const char* message, ...) {
diff --git a/compiler/dex/pass_me.h b/compiler/dex/pass_me.h
index 2f3c8b2217..d0b450a3ac 100644
--- a/compiler/dex/pass_me.h
+++ b/compiler/dex/pass_me.h
@@ -23,7 +23,7 @@
namespace art {
// Forward declarations.
-struct BasicBlock;
+class BasicBlock;
struct CompilationUnit;
class Pass;
@@ -32,10 +32,11 @@ class Pass;
* @details Each enum should be a power of 2 to be correctly used.
*/
enum OptimizationFlag {
- kOptimizationBasicBlockChange = 1, /**< @brief Has there been a change to a BasicBlock? */
- kOptimizationDefUsesChange = 2, /**< @brief Has there been a change to a def-use? */
- kLoopStructureChange = 4, /**< @brief Has there been a loop structural change? */
+ kOptimizationBasicBlockChange = 1, /// @brief Has there been a change to a BasicBlock?
+ kOptimizationDefUsesChange = 2, /// @brief Has there been a change to a def-use?
+ kLoopStructureChange = 4, /// @brief Has there been a loop structural change?
};
+std::ostream& operator<<(std::ostream& os, const OptimizationFlag& rhs);
// Data holder class.
class PassMEDataHolder: public PassDataHolder {
@@ -47,24 +48,25 @@ class PassMEDataHolder: public PassDataHolder {
};
enum DataFlowAnalysisMode {
- kAllNodes = 0, /**< @brief All nodes. */
- kPreOrderDFSTraversal, /**< @brief Depth-First-Search / Pre-Order. */
- kRepeatingPreOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Pre-Order. */
- kReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Reverse Post-Order. */
- kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */
- kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */
- kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */
- kTopologicalSortTraversal, /**< @brief Topological Order traversal. */
- kLoopRepeatingTopologicalSortTraversal, /**< @brief Loop-repeating Topological Order traversal. */
- kNoNodes, /**< @brief Skip BasicBlock traversal. */
+ kAllNodes = 0, /// @brief All nodes.
+ kPreOrderDFSTraversal, /// @brief Depth-First-Search / Pre-Order.
+ kRepeatingPreOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Pre-Order.
+ kReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Reverse Post-Order.
+ kRepeatingPostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Post-Order.
+ kRepeatingReversePostOrderDFSTraversal, /// @brief Depth-First-Search / Repeating Reverse Post-Order.
+ kPostOrderDOMTraversal, /// @brief Dominator tree / Post-Order.
+ kTopologicalSortTraversal, /// @brief Topological Order traversal.
+ kLoopRepeatingTopologicalSortTraversal, /// @brief Loop-repeating Topological Order traversal.
+ kNoNodes, /// @brief Skip BasicBlock traversal.
};
+std::ostream& operator<<(std::ostream& os, const DataFlowAnalysisMode& rhs);
/**
* @class Pass
* @brief Pass is the Pass structure for the optimizations.
* @details The following structure has the different optimization passes that we are going to do.
*/
-class PassME: public Pass {
+class PassME : public Pass {
public:
explicit PassME(const char* name, DataFlowAnalysisMode type = kAllNodes,
unsigned int flags = 0u, const char* dump = "")
diff --git a/compiler/dex/portable/mir_to_gbc.h b/compiler/dex/portable/mir_to_gbc.h
index 94ae3f7e5f..bc4f5c4100 100644
--- a/compiler/dex/portable/mir_to_gbc.h
+++ b/compiler/dex/portable/mir_to_gbc.h
@@ -73,7 +73,7 @@ class LLVMInfo {
std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
};
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct MIR;
diff --git a/compiler/dex/quick/arm/arm_lir.h b/compiler/dex/quick/arm/arm_lir.h
index 36cb7a4efc..b2db36d831 100644
--- a/compiler/dex/quick/arm/arm_lir.h
+++ b/compiler/dex/quick/arm/arm_lir.h
@@ -109,7 +109,7 @@ enum ArmResourceEncodingPos {
kArmRegEnd = 48,
};
-enum ArmNativeRegisterPool {
+enum ArmNativeRegisterPool { // private marker to avoid generate-operator-out.py from processing.
r0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
r1 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
r2 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -546,6 +546,7 @@ enum ArmOpcode {
kThumb2StrdI8, // strd rt, rt2, [rn +-/1024].
kArmLast,
};
+std::ostream& operator<<(std::ostream& os, const ArmOpcode& rhs);
enum ArmOpDmbOptions {
kSY = 0xf,
@@ -577,6 +578,7 @@ enum ArmEncodingKind {
kFmtOff24, // 24-bit Thumb2 unconditional branch encoding.
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const ArmEncodingKind& rhs);
// Struct used to define the snippet positions for each Thumb opcode.
struct ArmEncodingMap {
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 13b9bf09ed..b4eebb320e 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -475,9 +475,9 @@ static bool ArmUseRelativeCall(CompilationUnit* cu, const MethodReference& targe
* Bit of a hack here - in the absence of a real scheduling pass,
* emit the next instruction in static & direct invoke sequences.
*/
-static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info,
+static int ArmNextSDCallInsn(CompilationUnit* cu, CallInfo* info ATTRIBUTE_UNUSED,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx ATTRIBUTE_UNUSED,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 442c4fcec6..179ba02175 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -196,7 +196,7 @@ class ArmMir2Lir FINAL : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMonitorEnter(int opt_flags, RegLocation rl_src);
void GenMonitorExit(int opt_flags, RegLocation rl_src);
@@ -251,10 +251,10 @@ class ArmMir2Lir FINAL : public Mir2Lir {
RegStorage AllocPreservedDouble(int s_reg);
RegStorage AllocPreservedSingle(int s_reg);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index ce31b278a8..ebf1905579 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -209,7 +209,8 @@ void ArmMir2Lir::GenFusedLongCmpImmBranch(BasicBlock* bb, RegLocation rl_src1,
void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// TODO: Generalize the IT below to accept more than one-instruction loads.
DCHECK(InexpensiveConstantInt(true_val));
DCHECK(InexpensiveConstantInt(false_val));
@@ -232,6 +233,7 @@ void ArmMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
}
void ArmMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -504,6 +506,7 @@ static const MagicTable magic_table[] = {
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool ArmMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(sizeof(magic_table)/sizeof(magic_table[0])))) {
return false;
}
@@ -687,14 +690,17 @@ bool ArmMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit)
}
RegLocation ArmMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
+ RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation ArmMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -1072,6 +1078,7 @@ LIR* ArmMir2Lir::OpVstm(RegStorage r_base, int count) {
void ArmMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg,
EncodeShift(kArmLsl, second_bit - first_bit));
if (first_bit != 0) {
@@ -1168,108 +1175,109 @@ void ArmMir2Lir::GenNegLong(RegLocation rl_dest, RegLocation rl_src) {
void ArmMir2Lir::GenMulLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
- /*
- * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
- * dest = src1.lo * src2.lo;
- * tmp1 += src1.lo * src2.hi;
- * dest.hi += tmp1;
- *
- * To pull off inline multiply, we have a worst-case requirement of 7 temporary
- * registers. Normally for Arm, we get 5. We can get to 6 by including
- * lr in the temp set. The only problematic case is all operands and result are
- * distinct, and none have been promoted. In that case, we can succeed by aggressively
- * freeing operand temp registers after they are no longer needed. All other cases
- * can proceed normally. We'll just punt on the case of the result having a misaligned
- * overlap with either operand and send that case to a runtime handler.
- */
- RegLocation rl_result;
- if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
- FlushAllRegs();
- CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
- rl_result = GetReturnWide(kCoreReg);
- StoreValueWide(rl_dest, rl_result);
- return;
- }
+ UNUSED(opcode);
+ /*
+ * tmp1 = src1.hi * src2.lo; // src1.hi is no longer needed
+ * dest = src1.lo * src2.lo;
+ * tmp1 += src1.lo * src2.hi;
+ * dest.hi += tmp1;
+ *
+ * To pull off inline multiply, we have a worst-case requirement of 7 temporary
+ * registers. Normally for Arm, we get 5. We can get to 6 by including
+ * lr in the temp set. The only problematic case is all operands and result are
+ * distinct, and none have been promoted. In that case, we can succeed by aggressively
+ * freeing operand temp registers after they are no longer needed. All other cases
+ * can proceed normally. We'll just punt on the case of the result having a misaligned
+ * overlap with either operand and send that case to a runtime handler.
+ */
+ RegLocation rl_result;
+ if (PartiallyIntersects(rl_src1, rl_dest) || (PartiallyIntersects(rl_src2, rl_dest))) {
+ FlushAllRegs();
+ CallRuntimeHelperRegLocationRegLocation(kQuickLmul, rl_src1, rl_src2, false);
+ rl_result = GetReturnWide(kCoreReg);
+ StoreValueWide(rl_dest, rl_result);
+ return;
+ }
- rl_src1 = LoadValueWide(rl_src1, kCoreReg);
- rl_src2 = LoadValueWide(rl_src2, kCoreReg);
-
- int reg_status = 0;
- RegStorage res_lo;
- RegStorage res_hi;
- bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
- !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
- bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
- bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
- // Check if rl_dest is *not* either operand and we have enough temp registers.
- if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
- (dest_promoted || src1_promoted || src2_promoted)) {
- // In this case, we do not need to manually allocate temp registers for result.
- rl_result = EvalLoc(rl_dest, kCoreReg, true);
- res_lo = rl_result.reg.GetLow();
- res_hi = rl_result.reg.GetHigh();
- } else {
- res_lo = AllocTemp();
- if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
- // In this case, we have enough temp registers to be allocated for result.
- res_hi = AllocTemp();
- reg_status = 1;
- } else {
- // In this case, all temps are now allocated.
- // res_hi will be allocated after we can free src1_hi.
- reg_status = 2;
- }
- }
+ rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+ rl_src2 = LoadValueWide(rl_src2, kCoreReg);
- // Temporarily add LR to the temp pool, and assign it to tmp1
- MarkTemp(rs_rARM_LR);
- FreeTemp(rs_rARM_LR);
- RegStorage tmp1 = rs_rARM_LR;
- LockTemp(rs_rARM_LR);
-
- if (rl_src1.reg == rl_src2.reg) {
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ int reg_status = 0;
+ RegStorage res_lo;
+ RegStorage res_hi;
+ bool dest_promoted = rl_dest.location == kLocPhysReg && rl_dest.reg.Valid() &&
+ !IsTemp(rl_dest.reg.GetLow()) && !IsTemp(rl_dest.reg.GetHigh());
+ bool src1_promoted = !IsTemp(rl_src1.reg.GetLow()) && !IsTemp(rl_src1.reg.GetHigh());
+ bool src2_promoted = !IsTemp(rl_src2.reg.GetLow()) && !IsTemp(rl_src2.reg.GetHigh());
+ // Check if rl_dest is *not* either operand and we have enough temp registers.
+ if ((rl_dest.s_reg_low != rl_src1.s_reg_low && rl_dest.s_reg_low != rl_src2.s_reg_low) &&
+ (dest_promoted || src1_promoted || src2_promoted)) {
+ // In this case, we do not need to manually allocate temp registers for result.
+ rl_result = EvalLoc(rl_dest, kCoreReg, true);
+ res_lo = rl_result.reg.GetLow();
+ res_hi = rl_result.reg.GetHigh();
+ } else {
+ res_lo = AllocTemp();
+ if ((rl_src1.s_reg_low == rl_src2.s_reg_low) || src1_promoted || src2_promoted) {
+ // In this case, we have enough temp registers to be allocated for result.
+ res_hi = AllocTemp();
+ reg_status = 1;
} else {
- NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
- if (reg_status == 2) {
- DCHECK(!res_hi.Valid());
- DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
- DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
- // Will force free src1_hi, so must clobber.
- Clobber(rl_src1.reg);
- FreeTemp(rl_src1.reg.GetHigh());
- res_hi = AllocTemp();
- }
- DCHECK(res_hi.Valid());
- DCHECK(res_lo.Valid());
- NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
- rl_src1.reg.GetLowReg());
- NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
- tmp1.GetReg());
- NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
- if (reg_status == 2) {
- FreeTemp(rl_src1.reg.GetLow());
- }
+ // In this case, all temps are now allocated.
+ // res_hi will be allocated after we can free src1_hi.
+ reg_status = 2;
}
+ }
- // Now, restore lr to its non-temp status.
- FreeTemp(tmp1);
- Clobber(rs_rARM_LR);
- UnmarkTemp(rs_rARM_LR);
+ // Temporarily add LR to the temp pool, and assign it to tmp1
+ MarkTemp(rs_rARM_LR);
+ FreeTemp(rs_rARM_LR);
+ RegStorage tmp1 = rs_rARM_LR;
+ LockTemp(rs_rARM_LR);
- if (reg_status != 0) {
- // We had manually allocated registers for rl_result.
- // Now construct a RegLocation.
- rl_result = GetReturnWide(kCoreReg); // Just using as a template.
- rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+ if (rl_src1.reg == rl_src2.reg) {
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src1.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ OpRegRegRegShift(kOpAdd, res_hi, res_hi, tmp1, EncodeShift(kArmLsl, 1));
+ } else {
+ NewLIR3(kThumb2MulRRR, tmp1.GetReg(), rl_src2.reg.GetLowReg(), rl_src1.reg.GetHighReg());
+ if (reg_status == 2) {
+ DCHECK(!res_hi.Valid());
+ DCHECK_NE(rl_src1.reg.GetLowReg(), rl_src2.reg.GetLowReg());
+ DCHECK_NE(rl_src1.reg.GetHighReg(), rl_src2.reg.GetHighReg());
+ // Will force free src1_hi, so must clobber.
+ Clobber(rl_src1.reg);
+ FreeTemp(rl_src1.reg.GetHigh());
+ res_hi = AllocTemp();
}
+ DCHECK(res_hi.Valid());
+ DCHECK(res_lo.Valid());
+ NewLIR4(kThumb2Umull, res_lo.GetReg(), res_hi.GetReg(), rl_src2.reg.GetLowReg(),
+ rl_src1.reg.GetLowReg());
+ NewLIR4(kThumb2Mla, tmp1.GetReg(), rl_src1.reg.GetLowReg(), rl_src2.reg.GetHighReg(),
+ tmp1.GetReg());
+ NewLIR4(kThumb2AddRRR, res_hi.GetReg(), tmp1.GetReg(), res_hi.GetReg(), 0);
+ if (reg_status == 2) {
+ FreeTemp(rl_src1.reg.GetLow());
+ }
+ }
- StoreValueWide(rl_dest, rl_result);
+ // Now, restore lr to its non-temp status.
+ FreeTemp(tmp1);
+ Clobber(rs_rARM_LR);
+ UnmarkTemp(rs_rARM_LR);
+
+ if (reg_status != 0) {
+ // We had manually allocated registers for rl_result.
+ // Now construct a RegLocation.
+ rl_result = GetReturnWide(kCoreReg); // Just using as a template.
+ rl_result.reg = RegStorage::MakeRegPair(res_lo, res_hi);
+ }
+
+ StoreValueWide(rl_dest, rl_result);
}
void ArmMir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest, RegLocation rl_src1,
@@ -1471,6 +1479,7 @@ void ArmMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void ArmMir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
int flags) {
+ UNUSED(flags);
rl_src = LoadValueWide(rl_src, kCoreReg);
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
diff --git a/compiler/dex/quick/arm/target_arm.cc b/compiler/dex/quick/arm/target_arm.cc
index 7100a285a6..1f26a815da 100644
--- a/compiler/dex/quick/arm/target_arm.cc
+++ b/compiler/dex/quick/arm/target_arm.cc
@@ -559,11 +559,10 @@ ArmMir2Lir::ArmMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
call_method_insns_.reserve(100);
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kArmLast; i++) {
- if (ArmMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(ArmMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << ArmMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(ArmMir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc
index ce2de65abf..448e80f715 100644
--- a/compiler/dex/quick/arm/utility_arm.cc
+++ b/compiler/dex/quick/arm/utility_arm.cc
@@ -373,18 +373,21 @@ LIR* ArmMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
}
LIR* ArmMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1167,11 +1170,13 @@ LIR* ArmMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
LIR* ArmMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm";
- return NULL;
+ UNREACHABLE();
}
LIR* ArmMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a87b06aeb4..973279e8b7 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -127,7 +127,7 @@ enum A64ResourceEncodingPos {
R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
// Registers (integer) values.
-enum A64NativeRegisterPool {
+enum A64NativeRegisterPool { // private marker to keep generate-operator-out.py from processing it.
# define A64_DEFINE_REGISTERS(nr) \
rw##nr = RegStorage::k32BitSolo | RegStorage::kCoreRegister | nr, \
rx##nr = RegStorage::k64BitSolo | RegStorage::kCoreRegister | nr, \
@@ -362,9 +362,10 @@ enum A64Opcode {
kA64Tbz3rht, // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
kA64Ubfm4rrdd, // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
kA64Last,
- kA64NotWide = 0, // Flag used to select the first instruction variant.
- kA64Wide = 0x1000 // Flag used to select the second instruction variant.
+ kA64NotWide = kA64First, // 0 - Flag used to select the first instruction variant.
+ kA64Wide = 0x1000 // Flag used to select the second instruction variant.
};
+std::ostream& operator<<(std::ostream& os, const A64Opcode& rhs);
/*
* The A64 instruction set provides two variants for many instructions. For example, "mov wN, wM"
@@ -414,6 +415,7 @@ enum A64EncodingKind {
kFmtExtend, // Register extend, 9-bit at [23..21, 15..10].
kFmtSkip, // Unused field, but continue to next.
};
+std::ostream& operator<<(std::ostream& os, const A64EncodingKind& rhs);
// Struct used to define the snippet positions for each A64 opcode.
struct A64EncodingMap {
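
The operator<< declarations added for A64Opcode and A64EncodingKind are satisfied by code emitted from tools/generate-operator-out.py, which prints the enumerator names, while the "private marker" comment above keeps the script away from the register-pool enum. A rough, hypothetical sketch of the generated shape, showing only the A64EncodingKind values visible in this hunk:

    std::ostream& operator<<(std::ostream& os, const A64EncodingKind& rhs) {
      switch (rhs) {
        case kFmtUnused: os << "kFmtUnused"; break;
        case kFmtExtend: os << "kFmtExtend"; break;
        case kFmtSkip:   os << "kFmtSkip"; break;
        default: os << "A64EncodingKind[" << static_cast<int>(rhs) << "]"; break;
      }
      return os;
    }
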
diff --git a/compiler/dex/quick/arm64/call_arm64.cc b/compiler/dex/quick/arm64/call_arm64.cc
index c898e2d205..a9a58a3bfb 100644
--- a/compiler/dex/quick/arm64/call_arm64.cc
+++ b/compiler/dex/quick/arm64/call_arm64.cc
@@ -401,6 +401,7 @@ void Arm64Mir2Lir::GenSpecialExitSequence() {
}
static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& target_method) {
+ UNUSED(cu, target_method);
// Always emit relative calls.
return true;
}
@@ -411,9 +412,10 @@ static bool Arm64UseRelativeCall(CompilationUnit* cu, const MethodReference& tar
*/
static int Arm64NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t unused_idx,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, unused_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_code != 0 && direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 9f0260635d..bd363c4fd2 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -188,7 +188,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMonitorEnter(int opt_flags, RegLocation rl_src) OVERRIDE;
@@ -249,10 +249,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
uintptr_t direct_code, uintptr_t direct_method, InvokeType type,
bool skip_this) OVERRIDE;
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // 64b architecture.
}
diff --git a/compiler/dex/quick/arm64/int_arm64.cc b/compiler/dex/quick/arm64/int_arm64.cc
index 418d81efe6..965759b59d 100644
--- a/compiler/dex/quick/arm64/int_arm64.cc
+++ b/compiler/dex/quick/arm64/int_arm64.cc
@@ -32,11 +32,13 @@ LIR* Arm64Mir2Lir::OpCmpBranch(ConditionCode cond, RegStorage src1, RegStorage s
}
LIR* Arm64Mir2Lir::OpIT(ConditionCode ccode, const char* guide) {
+ UNUSED(ccode, guide);
LOG(FATAL) << "Unexpected use of OpIT for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT for Arm64";
}
@@ -174,13 +176,14 @@ void Arm64Mir2Lir::GenSelect(int32_t true_val, int32_t false_val, ConditionCode
void Arm64Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(rs_dest.Valid());
OpRegReg(kOpCmp, left_op, right_op);
GenSelect(true_val, false_val, code, rs_dest, dest_reg_class);
}
void Arm64Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
rl_src = LoadValue(rl_src, rl_src.ref ? kRefReg : kCoreReg);
// rl_src may be aliased with rl_result/rl_dest, so do compare early.
@@ -406,6 +409,7 @@ static const MagicTable magic_table[] = {
// Integer division by constant via reciprocal multiply (Hacker's Delight, 10-4)
bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -450,6 +454,7 @@ bool Arm64Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_d
bool Arm64Mir2Lir::SmallLiteralDivRem64(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int64_t lit) {
+ UNUSED(dalvik_opcode);
if ((lit < 0) || (lit >= static_cast<int>(arraysize(magic_table)))) {
return false;
}
@@ -590,13 +595,16 @@ bool Arm64Mir2Lir::HandleEasyDivRem64(Instruction::Code dalvik_opcode, bool is_d
}
bool Arm64Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of EasyMultiply for Arm64";
- return false;
+ UNREACHABLE();
}
-RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int lit, bool is_div) {
@@ -615,8 +623,9 @@ RegLocation Arm64Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Arm64";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegStorage r_src2,
@@ -929,25 +938,27 @@ LIR* Arm64Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
}
LIR* Arm64Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Arm64";
- return NULL;
+ UNREACHABLE();
}
void Arm64Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit) {
+ RegLocation rl_result, int lit ATTRIBUTE_UNUSED,
+ int first_bit, int second_bit) {
OpRegRegRegShift(kOpAdd, rl_result.reg, rl_src.reg, rl_src.reg, EncodeShift(kA64Lsl, second_bit - first_bit));
if (first_bit != 0) {
OpRegRegImm(kOpLsl, rl_result.reg, rl_result.reg, first_bit);
}
}
-void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg) {
+void Arm64Mir2Lir::GenDivZeroCheckWide(RegStorage reg ATTRIBUTE_UNUSED) {
LOG(FATAL) << "Unexpected use of GenDivZero for Arm64";
}
@@ -1311,7 +1322,7 @@ void Arm64Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void Arm64Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode,
RegLocation rl_dest, RegLocation rl_src, RegLocation rl_shift,
- int flags) {
+ int flags ATTRIBUTE_UNUSED) {
OpKind op = kOpBkpt;
// Per spec, we only care about low 6 bits of shift amount.
int shift_amount = mir_graph_->ConstantValue(rl_shift) & 0x3f;
@@ -1467,8 +1478,8 @@ static void SpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32_t
}
}
-static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+static int SpillRegsPreSub(Arm64Mir2Lir* m2l, uint32_t core_reg_mask, uint32_t fp_reg_mask,
+ int frame_size) {
m2l->OpRegRegImm(kOpSub, rs_sp, rs_sp, frame_size);
int core_count = POPCOUNT(core_reg_mask);
@@ -1490,7 +1501,7 @@ static int SpillRegsPreSub(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg
}
static int SpillRegsPreIndexed(Arm64Mir2Lir* m2l, RegStorage base, uint32_t core_reg_mask,
- uint32_t fp_reg_mask, int frame_size) {
+ uint32_t fp_reg_mask) {
// Otherwise, spill both core and fp regs at the same time.
// The very first instruction will be an stp with pre-indexed address, moving the stack pointer
// down. From then on, we fill upwards. This will generate overall the same number of instructions
@@ -1613,9 +1624,9 @@ int Arm64Mir2Lir::SpillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp
// This case is also optimal when we have an odd number of core spills, and an even (non-zero)
// number of fp spills.
if ((RoundUp(frame_size, 8) / 8 <= 63)) {
- return SpillRegsPreSub(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreSub(this, core_reg_mask, fp_reg_mask, frame_size);
} else {
- return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask, frame_size);
+ return SpillRegsPreIndexed(this, base, core_reg_mask, fp_reg_mask);
}
}
@@ -1653,6 +1664,7 @@ static void UnSpillFPRegs(Arm64Mir2Lir* m2l, RegStorage base, int offset, uint32
void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t fp_reg_mask,
int frame_size) {
+ DCHECK(base == rs_sp);
// Restore saves and drop stack frame.
// 2 versions:
//
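
Several of the arm64 signatures above tag individual parameters with ATTRIBUTE_UNUSED instead of adding a separate UNUSED(...) statement. This assumes the usual attribute wrapper from base/macros.h, roughly:

    // Assumed definition; marks a declaration as intentionally unused for GCC/Clang.
    #define ATTRIBUTE_UNUSED __attribute__((__unused__))

Either form silences -Wunused-parameter; the attribute keeps the information in the signature itself.
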
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index ba47883d9e..094ff51eee 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -589,11 +589,10 @@ Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAlloca
call_method_insns_(arena->Adapter()) {
// Sanity check - make sure encoding map lines up.
for (int i = 0; i < kA64Last; i++) {
- if (UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode) != i) {
- LOG(FATAL) << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(UNWIDE(Arm64Mir2Lir::EncodingMap[i].opcode), i)
+ << "Encoding order for " << Arm64Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 6985b73845..47ccc46976 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -306,7 +306,7 @@ static int GetNumFastHalfWords(uint64_t value) {
// algorithm will give it a low priority for promotion, even when it is referenced many times in
// the code.
-bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value) {
+bool Arm64Mir2Lir::InexpensiveConstantInt(int32_t value ATTRIBUTE_UNUSED) {
// A 32-bit int can always be loaded with 2 instructions (and without using the literal pool).
// We therefore return true and give it a low priority for promotion.
return true;
@@ -673,19 +673,24 @@ LIR* Arm64Mir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2
}
}
-LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
+ MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src,
+ MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
return nullptr;
}
LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_src1,
@@ -1386,16 +1391,20 @@ LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage
}
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
+ UNUSED(r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
- return NULL;
+ UNREACHABLE();
}
LIR* Arm64Mir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for Arm64";
- return NULL;
+ UNREACHABLE();
}
-LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+LIR* Arm64Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt,
+ QuickEntrypointEnum trampoline ATTRIBUTE_UNUSED) {
+ // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 80a1ac4c52..4bc8967ce5 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -105,17 +105,17 @@ void Mir2Lir::MarkSafepointPCAfter(LIR* after) {
void Mir2Lir::UnlinkLIR(LIR* lir) {
if (UNLIKELY(lir == first_lir_insn_)) {
first_lir_insn_ = lir->next;
- if (lir->next != NULL) {
- lir->next->prev = NULL;
+ if (lir->next != nullptr) {
+ lir->next->prev = nullptr;
} else {
- DCHECK(lir->next == NULL);
+ DCHECK(lir->next == nullptr);
DCHECK(lir == last_lir_insn_);
- last_lir_insn_ = NULL;
+ last_lir_insn_ = nullptr;
}
} else if (lir == last_lir_insn_) {
last_lir_insn_ = lir->prev;
- lir->prev->next = NULL;
- } else if ((lir->prev != NULL) && (lir->next != NULL)) {
+ lir->prev->next = nullptr;
+ } else if ((lir->prev != nullptr) && (lir->next != nullptr)) {
lir->prev->next = lir->next;
lir->next->prev = lir->prev;
}
@@ -334,10 +334,10 @@ void Mir2Lir::CodegenDump() {
<< static_cast<float>(total_size_) / static_cast<float>(insns_size * 2);
DumpPromotionMap();
UpdateLIROffsets();
- for (lir_insn = first_lir_insn_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = first_lir_insn_; lir_insn != nullptr; lir_insn = lir_insn->next) {
DumpLIRInsn(lir_insn, 0);
}
- for (lir_insn = literal_list_; lir_insn != NULL; lir_insn = lir_insn->next) {
+ for (lir_insn = literal_list_; lir_insn != nullptr; lir_insn = lir_insn->next) {
LOG(INFO) << StringPrintf("%x (%04x): .word (%#x)", lir_insn->offset, lir_insn->offset,
lir_insn->operands[0]);
}
@@ -368,13 +368,13 @@ LIR* Mir2Lir::ScanLiteralPool(LIR* data_target, int value, unsigned int delta) {
return data_target;
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact wide match */
LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
bool lo_match = false;
- LIR* lo_target = NULL;
+ LIR* lo_target = nullptr;
while (data_target) {
if (lo_match && (data_target->operands[0] == val_hi)) {
// Record high word in case we need to expand this later.
@@ -388,7 +388,7 @@ LIR* Mir2Lir::ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi) {
}
data_target = data_target->next;
}
- return NULL;
+ return nullptr;
}
/* Search the existing constants in the literal pool for an exact method match */
@@ -431,7 +431,7 @@ LIR* Mir2Lir::AddWordData(LIR* *constant_list_p, int value) {
estimated_native_code_size_ += sizeof(value);
return new_value;
}
- return NULL;
+ return nullptr;
}
/* Add a 64-bit constant to the constant pool or mixed with code */
@@ -469,14 +469,14 @@ static void AlignBuffer(std::vector<uint8_t>&buf, size_t offset) {
void Mir2Lir::InstallLiteralPools() {
AlignBuffer(code_buffer_, data_offset_);
LIR* data_lir = literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
Push32(code_buffer_, data_lir->operands[0]);
data_lir = NEXT_LIR(data_lir);
}
// TODO: patches_.reserve() as needed.
// Push code and method literals, record offsets for the compiler to patch.
data_lir = code_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -486,7 +486,7 @@ void Mir2Lir::InstallLiteralPools() {
data_lir = NEXT_LIR(data_lir);
}
data_lir = method_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_method_idx = data_lir->operands[0];
const DexFile* target_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -497,7 +497,7 @@ void Mir2Lir::InstallLiteralPools() {
}
// Push class literals.
data_lir = class_literal_list_;
- while (data_lir != NULL) {
+ while (data_lir != nullptr) {
uint32_t target_type_idx = data_lir->operands[0];
const DexFile* class_dex_file =
reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
@@ -577,7 +577,7 @@ void Mir2Lir::InstallFillArrayData() {
}
static int AssignLiteralOffsetCommon(LIR* lir, CodeOffset offset) {
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += 4;
}
@@ -588,7 +588,7 @@ static int AssignLiteralPointerOffsetCommon(LIR* lir, CodeOffset offset,
unsigned int element_size) {
// Align to natural pointer size.
offset = RoundUp(offset, element_size);
- for (; lir != NULL; lir = lir->next) {
+ for (; lir != nullptr; lir = lir->next) {
lir->offset = offset;
offset += element_size;
}
@@ -642,7 +642,7 @@ void Mir2Lir::CreateMappingTables() {
uint32_t dex2pc_entries = 0u;
uint32_t dex2pc_offset = 0u;
uint32_t dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
pc2dex_src_entries++;
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
pc2dex_entries += 1;
@@ -682,7 +682,7 @@ void Mir2Lir::CreateMappingTables() {
pc2dex_dalvik_offset = 0u;
dex2pc_offset = 0u;
dex2pc_dalvik_offset = 0u;
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (generate_src_map && !tgt_lir->flags.is_nop) {
src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
static_cast<int32_t>(tgt_lir->dalvik_offset)}));
@@ -717,7 +717,7 @@ void Mir2Lir::CreateMappingTables() {
CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
auto it = table.PcToDexBegin();
auto it2 = table.DexToPcBegin();
- for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+ for (LIR* tgt_lir = first_lir_insn_; tgt_lir != nullptr; tgt_lir = NEXT_LIR(tgt_lir)) {
if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
CHECK_EQ(tgt_lir->offset, it.NativePcOffset());
CHECK_EQ(tgt_lir->dalvik_offset, it.DexPc());
@@ -758,7 +758,7 @@ void Mir2Lir::CreateNativeGcMap() {
uint32_t native_offset = it.NativePcOffset();
uint32_t dex_pc = it.DexPc();
const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
- CHECK(references != NULL) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
+ CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc <<
": " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
native_gc_map_builder.AddEntry(native_offset, references);
}
@@ -904,6 +904,7 @@ void Mir2Lir::DumpPackedSwitchTable(const uint16_t* table) {
/* Set up special LIR to mark a Dalvik byte-code instruction start for pretty printing */
void Mir2Lir::MarkBoundary(DexOffset offset, const char* inst_str) {
+ UNUSED(offset);
// NOTE: only used for debug listings.
NewLIR1(kPseudoDalvikByteCodeBoundary, WrapPointer(ArenaStrdup(inst_str)));
}
@@ -925,7 +926,7 @@ bool Mir2Lir::EvaluateBranch(Instruction::Code opcode, int32_t src1, int32_t src
case Instruction::IF_LEZ: is_taken = (src1 <= 0); break;
default:
LOG(FATAL) << "Unexpected opcode " << opcode;
- is_taken = false;
+ UNREACHABLE();
}
return is_taken;
}
@@ -941,8 +942,8 @@ ConditionCode Mir2Lir::FlipComparisonOrder(ConditionCode before) {
case kCondLe: res = kCondGe; break;
case kCondGe: res = kCondLe; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -957,8 +958,8 @@ ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
case kCondLe: res = kCondGt; break;
case kCondGe: res = kCondLt; break;
default:
- res = static_cast<ConditionCode>(0);
LOG(FATAL) << "Unexpected ccode " << before;
+ UNREACHABLE();
}
return res;
}
@@ -966,11 +967,11 @@ ConditionCode Mir2Lir::NegateComparison(ConditionCode before) {
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Backend(arena),
- literal_list_(NULL),
- method_literal_list_(NULL),
- class_literal_list_(NULL),
- code_literal_list_(NULL),
- first_fixup_(NULL),
+ literal_list_(nullptr),
+ method_literal_list_(nullptr),
+ class_literal_list_(nullptr),
+ code_literal_list_(nullptr),
+ first_fixup_(nullptr),
cu_(cu),
mir_graph_(mir_graph),
switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
@@ -980,8 +981,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
pointer_storage_(arena->Adapter()),
data_offset_(0),
total_size_(0),
- block_label_list_(NULL),
- promotion_map_(NULL),
+ block_label_list_(nullptr),
+ promotion_map_(nullptr),
current_dalvik_offset_(0),
estimated_native_code_size_(0),
reg_pool_(nullptr),
@@ -994,8 +995,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
frame_size_(0),
core_spill_mask_(0),
fp_spill_mask_(0),
- first_lir_insn_(NULL),
- last_lir_insn_(NULL),
+ first_lir_insn_(nullptr),
+ last_lir_insn_(nullptr),
slow_paths_(arena->Adapter(kArenaAllocSlowPaths)),
mem_ref_type_(ResourceMask::kHeapRef),
mask_cache_(arena) {
@@ -1005,8 +1006,8 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena
reginfo_map_.reserve(RegStorage::kMaxRegs);
pointer_storage_.reserve(128);
slow_paths_.reserve(32);
- // Reserve pointer id 0 for NULL.
- size_t null_idx = WrapPointer(NULL);
+ // Reserve pointer id 0 for nullptr.
+ size_t null_idx = WrapPointer(nullptr);
DCHECK_EQ(null_idx, 0U);
}
@@ -1126,14 +1127,14 @@ int Mir2Lir::ComputeFrameSize() {
* unit
*/
void Mir2Lir::AppendLIR(LIR* lir) {
- if (first_lir_insn_ == NULL) {
- DCHECK(last_lir_insn_ == NULL);
+ if (first_lir_insn_ == nullptr) {
+ DCHECK(last_lir_insn_ == nullptr);
last_lir_insn_ = first_lir_insn_ = lir;
- lir->prev = lir->next = NULL;
+ lir->prev = lir->next = nullptr;
} else {
last_lir_insn_->next = lir;
lir->prev = last_lir_insn_;
- lir->next = NULL;
+ lir->next = nullptr;
last_lir_insn_ = lir;
}
}
@@ -1145,7 +1146,7 @@ void Mir2Lir::AppendLIR(LIR* lir) {
* prev_lir <-> new_lir <-> current_lir
*/
void Mir2Lir::InsertLIRBefore(LIR* current_lir, LIR* new_lir) {
- DCHECK(current_lir->prev != NULL);
+ DCHECK(current_lir->prev != nullptr);
LIR *prev_lir = current_lir->prev;
prev_lir->next = new_lir;
@@ -1216,7 +1217,7 @@ void Mir2Lir::AddSlowPath(LIRSlowPath* slowpath) {
void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(code_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&code_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1233,7 +1234,7 @@ void Mir2Lir::LoadCodeAddress(const MethodReference& target_method, InvokeType t
void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType type,
SpecialTargetRegister symbolic_reg) {
LIR* data_target = ScanLiteralPoolMethod(method_literal_list_, target_method);
- if (data_target == NULL) {
+ if (data_target == nullptr) {
data_target = AddWordData(&method_literal_list_, target_method.dex_method_index);
data_target->operands[1] = WrapPointer(const_cast<DexFile*>(target_method.dex_file));
// NOTE: The invoke type doesn't contribute to the literal identity. In fact, we can have
@@ -1291,7 +1292,9 @@ RegLocation Mir2Lir::NarrowRegLoc(RegLocation loc) {
}
void Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
LOG(FATAL) << "Unknown MIR opcode not supported on this architecture";
+ UNREACHABLE();
}
} // namespace art
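
In the FlipComparisonOrder and NegateComparison hunks earlier in this file, the placeholder assignments res = static_cast<ConditionCode>(0) before LOG(FATAL) could be dropped because UNREACHABLE() now tells the compiler that the default case never falls through. A minimal sketch of the pattern, assuming LOG(FATAL) aborts at runtime but is not declared noreturn:

    int Negate(int cond) {
      int res;
      switch (cond) {
        case 0: res = 1; break;
        case 1: res = 0; break;
        default:
          LOG(FATAL) << "Unexpected cond " << cond;
          UNREACHABLE();  // without this hint, 'res' could be flagged as possibly uninitialized
      }
      return res;
    }
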
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index 0f1d765d8c..453a7f0e34 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -101,7 +101,7 @@ COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicUnsafePut], UnsafePut_must_not_be_s
COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicSystemArrayCopyCharArray],
SystemArrayCopyCharArray_must_be_static);
-MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke, MIR* move_return) {
+MIR* AllocReplacementMIR(MIRGraph* mir_graph, MIR* invoke) {
MIR* insn = mir_graph->NewMIR();
insn->offset = invoke->offset;
insn->optimization_flags = MIR_CALLEE;
@@ -555,11 +555,11 @@ bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* i
break;
case kInlineOpIGet:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIGet(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIGet(mir_graph, bb, invoke, move_result, method);
break;
case kInlineOpIPut:
move_result = mir_graph->FindMoveResult(bb, invoke);
- result = GenInlineIPut(mir_graph, bb, invoke, move_result, method, method_idx);
+ result = GenInlineIPut(mir_graph, bb, invoke, move_result, method);
break;
default:
LOG(FATAL) << "Unexpected inline op: " << method.opcode;
@@ -737,7 +737,7 @@ bool DexFileMethodInliner::GenInlineConst(MIRGraph* mir_graph, BasicBlock* bb, M
method.d.data == 0u));
// Insert the CONST instruction.
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = Instruction::CONST;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = method.d.data;
@@ -775,7 +775,7 @@ bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* b
}
// Insert the move instruction
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
insn->dalvikInsn.vB = arg;
@@ -784,8 +784,7 @@ bool DexFileMethodInliner::GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* b
}
bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -819,7 +818,7 @@ bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MI
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->offset = invoke->offset;
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = move_result->dalvikInsn.vA;
@@ -836,8 +835,7 @@ bool DexFileMethodInliner::GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MI
}
bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method,
- uint32_t method_idx) {
+ MIR* move_result, const InlineMethod& method) {
CompilationUnit* cu = mir_graph->GetCurrentDexCompilationUnit()->GetCompilationUnit();
if (cu->enable_debug & (1 << kDebugSlowFieldPath)) {
return false;
@@ -881,7 +879,7 @@ bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MI
invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
}
- MIR* insn = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* insn = AllocReplacementMIR(mir_graph, invoke);
insn->dalvikInsn.opcode = opcode;
insn->dalvikInsn.vA = src_reg;
insn->dalvikInsn.vB = object_reg;
@@ -895,7 +893,7 @@ bool DexFileMethodInliner::GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MI
bb->InsertMIRAfter(invoke, insn);
if (move_result != nullptr) {
- MIR* move = AllocReplacementMIR(mir_graph, invoke, move_result);
+ MIR* move = AllocReplacementMIR(mir_graph, invoke);
move->offset = move_result->offset;
if (move_result->dalvikInsn.opcode == Instruction::MOVE_RESULT) {
move->dalvikInsn.opcode = Instruction::MOVE_FROM16;
diff --git a/compiler/dex/quick/dex_file_method_inliner.h b/compiler/dex/quick/dex_file_method_inliner.h
index 30a2d9081a..cb521da9df 100644
--- a/compiler/dex/quick/dex_file_method_inliner.h
+++ b/compiler/dex/quick/dex_file_method_inliner.h
@@ -31,9 +31,9 @@ namespace verifier {
class MethodVerifier;
} // namespace verifier
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
-struct MIR;
+class MIR;
class MIRGraph;
class Mir2Lir;
@@ -315,9 +315,9 @@ class DexFileMethodInliner {
static bool GenInlineReturnArg(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
MIR* move_result, const InlineMethod& method);
static bool GenInlineIGet(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
static bool GenInlineIPut(MIRGraph* mir_graph, BasicBlock* bb, MIR* invoke,
- MIR* move_result, const InlineMethod& method, uint32_t method_idx);
+ MIR* move_result, const InlineMethod& method);
ReaderWriterMutex lock_;
/*
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 215257ba2f..9410f7e83b 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -212,8 +212,7 @@ void Mir2Lir::ForceImplicitNullCheck(RegStorage reg, int opt_flags) {
}
void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken,
- LIR* fall_through) {
+ RegLocation rl_src2, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = (rl_src1.ref || rl_src2.ref) ? kRefReg : kCoreReg;
switch (opcode) {
@@ -276,8 +275,7 @@ void Mir2Lir::GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
OpCmpBranch(cond, rl_src1.reg, rl_src2.reg, taken);
}
-void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken,
- LIR* fall_through) {
+void Mir2Lir::GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken) {
ConditionCode cond;
RegisterClass reg_class = rl_src.ref ? kRefReg : kCoreReg;
rl_src = LoadValue(rl_src, reg_class);
@@ -2134,12 +2132,14 @@ void Mir2Lir::GenSuspendTestAndBranch(int opt_flags, LIR* target) {
/* Call out to helper assembly routine that will null check obj and then lock it. */
void Mir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickLockObject, rl_src, true);
}
/* Call out to helper assembly routine that will null check obj and then unlock it. */
void Mir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
+ UNUSED(opt_flags); // TODO: avoid null check with specialized non-null helper.
FlushAllRegs();
CallRuntimeHelperRegLocation(kQuickUnlockObject, rl_src, true);
}
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index bc4d00b6cd..c7449c8eae 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -473,8 +473,7 @@ static void CommonCallCodeLoadClassIntoArg0(const CallInfo* info, Mir2Lir* cg) {
cg->MarkPossibleNullPointerException(info->opt_flags);
}
-static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
- const RegStorage* alt_from,
+static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const RegStorage* alt_from,
const CompilationUnit* cu, Mir2Lir* cg) {
if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
// Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
@@ -492,9 +491,10 @@ static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
*/
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info);
DCHECK(cu->instruction_set != kX86 && cu->instruction_set != kX86_64 &&
cu->instruction_set != kThumb2 && cu->instruction_set != kArm &&
cu->instruction_set != kArm64);
@@ -547,7 +547,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
break;
case 3: // Grab the code from the method*
if (direct_code == 0) {
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, &arg0_ref, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(&arg0_ref, cu, cg)) {
break; // kInvokeTgt := arg0_ref->entrypoint
}
} else {
@@ -571,8 +571,9 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
*/
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
- InvokeType unused3) {
+ uint32_t method_idx, uintptr_t, uintptr_t,
+ InvokeType) {
+ UNUSED(target_method);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
/*
* This is the fast path in which the target virtual method is
@@ -595,7 +596,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
break;
}
case 3:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -614,8 +615,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
*/
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t method_idx, uintptr_t unused,
- uintptr_t direct_method, InvokeType unused2) {
+ uint32_t method_idx, uintptr_t, uintptr_t, InvokeType) {
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
switch (state) {
@@ -641,7 +641,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
break;
}
case 4:
- if (CommonCallCodeLoadCodePointerIntoInvokeTgt(info, nullptr, cu, cg)) {
+ if (CommonCallCodeLoadCodePointerIntoInvokeTgt(nullptr, cu, cg)) {
break; // kInvokeTgt := kArg0->entrypoint
}
DCHECK(cu->instruction_set == kX86 || cu->instruction_set == kX86_64);
@@ -655,9 +655,9 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
QuickEntrypointEnum trampoline, int state,
const MethodReference& target_method, uint32_t method_idx) {
+ UNUSED(info, method_idx);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
-
/*
* This handles the case in which the base method is not fully
* resolved at compile time, we bail to a runtime helper.
@@ -684,32 +684,28 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info,
static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeStaticTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeDirectTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeSuperTrampolineWithAccessCheck, state,
target_method, 0);
}
static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeVirtualTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -717,8 +713,7 @@ static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
CallInfo* info, int state,
const MethodReference& target_method,
- uint32_t unused, uintptr_t unused2,
- uintptr_t unused3, InvokeType unused4) {
+ uint32_t, uintptr_t, uintptr_t, InvokeType) {
return NextInvokeInsnSP(cu, info, kQuickInvokeInterfaceTrampolineWithAccessCheck, state,
target_method, 0);
}
@@ -1400,28 +1395,34 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
}
bool Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, size);
return false;
}
bool Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
- // Currently implemented only for ARM64
+ // Currently implemented only for ARM64.
+ UNUSED(info, is_min, is_double);
return false;
}
bool Mir2Lir::GenInlinedCeil(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedFloor(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRint(CallInfo* info) {
+ UNUSED(info);
return false;
}
bool Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
+ UNUSED(info, is_double);
return false;
}
@@ -1448,6 +1449,7 @@ bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
}
bool Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
+ UNUSED(info);
return false;
}
@@ -1690,7 +1692,6 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
const MirMethodLoweringInfo& method_info = mir_graph_->GetMethodLoweringInfo(info->mir);
cu_->compiler_driver->ProcessedInvoke(method_info.GetInvokeType(), method_info.StatsFlags());
- BeginInvoke(info);
InvokeType original_type = static_cast<InvokeType>(method_info.GetInvokeType());
info->type = method_info.GetSharpType();
bool fast_path = method_info.FastPath();
@@ -1734,7 +1735,6 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
method_info.DirectCode(), method_info.DirectMethod(), original_type);
}
LIR* call_insn = GenCallInsn(method_info);
- EndInvoke(info);
MarkSafepointPC(call_insn);
ClobberCallerSave();
@@ -1755,6 +1755,7 @@ NextCallInsn Mir2Lir::GetNextSDCallInsn() {
}
LIR* Mir2Lir::GenCallInsn(const MirMethodLoweringInfo& method_info) {
+ UNUSED(method_info);
DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64 &&
cu_->instruction_set != kThumb2 && cu_->instruction_set != kArm &&
cu_->instruction_set != kArm64);
diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc
index 01d1a1e0db..310e1e980b 100644
--- a/compiler/dex/quick/mips/assemble_mips.cc
+++ b/compiler/dex/quick/mips/assemble_mips.cc
@@ -146,12 +146,10 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 25, 21, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF_HI | REG_DEF_LO | REG_USE01,
"div", "!0r,!1r", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsExt, 0x7c000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 25, 21, kFmtBitBlt, 10, 6,
kFmtBitBlt, 15, 11, IS_QUAD_OP | REG_DEF0 | REG_USE1,
"ext", "!0r,!1r,!2d,!3D", 4),
-#endif
ENCODING_MAP(kMipsJal, 0x0c000000,
kFmtBitBlt, 25, 0, kFmtUnused, -1, -1, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_UNARY_OP | IS_BRANCH | REG_DEF_LR,
@@ -240,7 +238,6 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
"sb", "!0r,!1d(!2r)", 4),
-#if __mips_isa_rev >= 2
ENCODING_MAP(kMipsSeb, 0x7c000420,
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
@@ -249,7 +246,6 @@ const MipsEncodingMap MipsMir2Lir::EncodingMap[kMipsLast] = {
kFmtBitBlt, 15, 11, kFmtBitBlt, 20, 16, kFmtUnused, -1, -1,
kFmtUnused, -1, -1, IS_BINARY_OP | REG_DEF0_USE1,
"seh", "!0r,!1r", 4),
-#endif
ENCODING_MAP(kMipsSh, 0xA4000000,
kFmtBitBlt, 20, 16, kFmtBitBlt, 15, 0, kFmtBitBlt, 25, 21,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE02 | IS_STORE,
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index 8b5bc45683..01784e2fec 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -24,9 +24,9 @@
namespace art {
-bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
- const InlineMethod& special) {
+bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir, const InlineMethod& special) {
// TODO
+ UNUSED(bb, mir, special);
return false;
}
diff --git a/compiler/dex/quick/mips/codegen_mips.h b/compiler/dex/quick/mips/codegen_mips.h
index 508d474404..dc6930c45b 100644
--- a/compiler/dex/quick/mips/codegen_mips.h
+++ b/compiler/dex/quick/mips/codegen_mips.h
@@ -121,7 +121,7 @@ class MipsMir2Lir FINAL : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir);
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind);
void GenMoveException(RegLocation rl_dest);
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -172,10 +172,10 @@ class MipsMir2Lir FINAL : public Mir2Lir {
bool InexpensiveConstantLong(int64_t value);
bool InexpensiveConstantDouble(int64_t value);
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return false; // Wide GPRs are formed by pairing.
}
- bool WideFPRsAreAliases() OVERRIDE {
+ bool WideFPRsAreAliases() const OVERRIDE {
return false; // Wide FPRs are formed by pairing.
}
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 3a4128a79c..431591512e 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -207,8 +207,8 @@ void MipsMir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
StoreValue(rl_dest, rl_result);
}
-void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir,
- bool gt_bias, bool is_double) {
+void MipsMir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) {
+ UNUSED(bb, mir, gt_bias, is_double);
UNIMPLEMENTED(FATAL) << "Need codegen for fused fp cmp branch";
}
@@ -230,7 +230,8 @@ void MipsMir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
}
bool MipsMir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
- // TODO: need Mips implementation
+ // TODO: need Mips implementation.
+ UNUSED(info, is_min, is_long);
return false;
}
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index baf7311398..d58ddb0d09 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -217,7 +217,8 @@ void MipsMir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
+ UNUSED(dest_reg_class);
// Implement as a branch-over.
// TODO: Conditional move?
LoadConstant(rs_dest, true_val);
@@ -228,10 +229,12 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
}
void MipsMir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for select";
}
void MipsMir2Lir::GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Need codegen for fused long cmp branch";
}
@@ -262,34 +265,39 @@ RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg1, int
return rl_result;
}
-RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
- RegLocation rl_src2, bool is_div, int flags) {
+RegLocation MipsMir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2,
+ bool is_div, int flags) {
+ UNUSED(rl_dest, rl_src1, rl_src2, is_div, flags);
LOG(FATAL) << "Unexpected use of GenDivRem for Mips";
- return rl_dest;
+ UNREACHABLE();
}
-RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit, bool is_div) {
+RegLocation MipsMir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src1, int lit,
+ bool is_div) {
+ UNUSED(rl_dest, rl_src1, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for Mips";
- return rl_dest;
+ UNREACHABLE();
}
bool MipsMir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info, is_long, is_object);
return false;
}
bool MipsMir2Lir::GenInlinedAbsFloat(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedAbsDouble(CallInfo* info) {
- // TODO - add Mips implementation
+ UNUSED(info);
+ // TODO: add Mips implementation.
return false;
}
bool MipsMir2Lir::GenInlinedSqrt(CallInfo* info) {
- DCHECK_NE(cu_->instruction_set, kThumb2);
+ UNUSED(info);
return false;
}
@@ -325,23 +333,27 @@ bool MipsMir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
}
LIR* MipsMir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
+ UNUSED(reg, target);
LOG(FATAL) << "Unexpected use of OpPcRelLoad for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for Mips";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -373,27 +385,31 @@ LIR* MipsMir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targ
bool MipsMir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in Mips";
- return false;
+ UNREACHABLE();
}
bool MipsMir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in Mips";
- return false;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in Mips";
- return NULL;
+ UNREACHABLE();
}
void MipsMir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in Mips";
}
-
void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -416,6 +432,7 @@ void MipsMir2Lir::GenAddLong(Instruction::Code opcode, RegLocation rl_dest,
void MipsMir2Lir::GenSubLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_src2) {
+ UNUSED(opcode);
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
rl_src2 = LoadValueWide(rl_src2, kCoreReg);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -629,6 +646,7 @@ void MipsMir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
void MipsMir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src1, RegLocation rl_shift, int flags) {
+ UNUSED(flags);
// Default implementation is just to ignore the constant case.
GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
}
diff --git a/compiler/dex/quick/mips/mips_lir.h b/compiler/dex/quick/mips/mips_lir.h
index 495eb1686c..3615916201 100644
--- a/compiler/dex/quick/mips/mips_lir.h
+++ b/compiler/dex/quick/mips/mips_lir.h
@@ -142,7 +142,7 @@ enum MipsResourceEncodingPos {
// This bit determines how the CPU access FP registers.
#define FR_BIT 0
-enum MipsNativeRegisterPool {
+enum MipsNativeRegisterPool { // private marker to keep generate-operator-out.py from processing it.
rZERO = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
rAT = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
rV0 = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
@@ -408,9 +408,7 @@ enum MipsOpCode {
kMipsBnez, // bnez s,o [000101] s[25..21] [00000] o[15..0].
kMipsBne, // bne s,t,o [000101] s[25..21] t[20..16] o[15..0].
kMipsDiv, // div s,t [000000] s[25..21] t[20..16] [0000000000011010].
-#if __mips_isa_rev >= 2
kMipsExt, // ext t,s,p,z [011111] s[25..21] t[20..16] z[15..11] p[10..6] [000000].
-#endif
kMipsJal, // jal t [000011] t[25..0].
kMipsJalr, // jalr d,s [000000] s[25..21] [00000] d[15..11] hint[10..6] [001001].
kMipsJr, // jr s [000000] s[25..21] [0000000000] hint[10..6] [001000].
@@ -433,10 +431,8 @@ enum MipsOpCode {
kMipsOri, // ori t,s,imm16 [001001] s[25..21] t[20..16] imm16[15..0].
kMipsPref, // pref h,o(b) [101011] b[25..21] h[20..16] o[15..0].
kMipsSb, // sb t,o(b) [101000] b[25..21] t[20..16] o[15..0].
-#if __mips_isa_rev >= 2
kMipsSeb, // seb d,t [01111100000] t[20..16] d[15..11] [10000100000].
kMipsSeh, // seh d,t [01111100000] t[20..16] d[15..11] [11000100000].
-#endif
kMipsSh, // sh t,o(b) [101001] b[25..21] t[20..16] o[15..0].
kMipsSll, // sll d,t,a [00000000000] t[20..16] d[15..11] a[10..6] [000000].
kMipsSllv, // sllv d,t,s [000000] s[25..21] t[20..16] d[15..11] [00000000100].
@@ -481,15 +477,17 @@ enum MipsOpCode {
kMipsUndefined, // undefined [011001xxxxxxxxxxxxxxxx].
kMipsLast
};
+std::ostream& operator<<(std::ostream& os, const MipsOpCode& rhs);
// Instruction assembly field_loc kind.
enum MipsEncodingKind {
kFmtUnused,
- kFmtBitBlt, /* Bit string using end/start */
- kFmtDfp, /* Double FP reg */
- kFmtSfp, /* Single FP reg */
- kFmtBlt5_2, /* Same 5-bit field to 2 locations */
+ kFmtBitBlt, // Bit string using end/start.
+ kFmtDfp, // Double FP reg.
+ kFmtSfp, // Single FP reg.
+ kFmtBlt5_2, // Same 5-bit field to 2 locations.
};
+std::ostream& operator<<(std::ostream& os, const MipsEncodingKind& rhs);
// Struct used to define the snippet positions for each MIPS opcode.
struct MipsEncodingMap {
diff --git a/compiler/dex/quick/mips/target_mips.cc b/compiler/dex/quick/mips/target_mips.cc
index d3719ab312..4a340ecca0 100644
--- a/compiler/dex/quick/mips/target_mips.cc
+++ b/compiler/dex/quick/mips/target_mips.cc
@@ -421,6 +421,7 @@ void MipsMir2Lir::FreeCallTemps() {
}
bool MipsMir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
+ UNUSED(barrier_kind);
#if ANDROID_SMP != 0
NewLIR1(kMipsSync, 0 /* Only stype currently supported */);
return true;
@@ -574,11 +575,10 @@ RegisterClass MipsMir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volati
MipsMir2Lir::MipsMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
: Mir2Lir(cu, mir_graph, arena) {
for (int i = 0; i < kMipsLast; i++) {
- if (MipsMir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
- }
+ DCHECK_EQ(MipsMir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << MipsMir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(MipsMir2Lir::EncodingMap[i].opcode);
}
}
diff --git a/compiler/dex/quick/mips/utility_mips.cc b/compiler/dex/quick/mips/utility_mips.cc
index 7178edec8b..044972cc5f 100644
--- a/compiler/dex/quick/mips/utility_mips.cc
+++ b/compiler/dex/quick/mips/utility_mips.cc
@@ -56,14 +56,17 @@ bool MipsMir2Lir::InexpensiveConstantInt(int32_t value) {
}
bool MipsMir2Lir::InexpensiveConstantFloat(int32_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
bool MipsMir2Lir::InexpensiveConstantDouble(int64_t value) {
+ UNUSED(value);
return false; // TUNING
}
@@ -320,25 +323,28 @@ LIR* MipsMir2Lir::OpRegReg(OpKind op, RegStorage r_dest_src1, RegStorage r_src2)
return NewLIR3(kMipsAndi, r_dest_src1.GetReg(), r_src2.GetReg(), 0xFFFF);
default:
LOG(FATAL) << "Bad case in OpRegReg";
- break;
+ UNREACHABLE();
}
return NewLIR2(opcode, r_dest_src1.GetReg(), r_src2.GetReg());
}
LIR* MipsMir2Lir::OpMovRegMem(RegStorage r_dest, RegStorage r_base, int offset,
MoveType move_type) {
+ UNUSED(r_dest, r_base, offset, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) {
+ UNUSED(r_base, offset, r_src, move_type);
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) {
+ UNUSED(op, cc, r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpCondRegReg for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
@@ -681,16 +687,19 @@ LIR* MipsMir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
}
LIR* MipsMir2Lir::OpMem(OpKind op, RegStorage r_base, int disp) {
+ UNUSED(op, r_base, disp);
LOG(FATAL) << "Unexpected use of OpMem for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
+ UNUSED(cc, target);
LOG(FATAL) << "Unexpected use of OpCondBranch for MIPS";
- return NULL;
+ UNREACHABLE();
}
LIR* MipsMir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline); // The address of the trampoline is already loaded into r_tgt.
return OpReg(op, r_tgt);
}
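Most hunks in this file repeat one pattern: parameters of stubs that should never be reached are fed to UNUSED(), and the dead `return nullptr;` / `return NULL;` after a LOG(FATAL) is replaced with UNREACHABLE(). A minimal sketch of what those helpers amount to is below; ART's real definitions live in base/macros.h, and these spellings are an assumption rather than a quote of them.

#include <cstdlib>

// Consumes any number of arguments so the compiler treats them as used.
template <typename... T>
static void UNUSED(const T&...) {}

// Marks a point control flow can never reach, so no dead return is needed.
#define UNREACHABLE() __builtin_unreachable()

// Usage mirroring the stubs above (abort() stands in for LOG(FATAL) here):
int Stub(int arg) {
  UNUSED(arg);      // Silences -Wunused-parameter.
  abort();
  UNREACHABLE();    // Lets the function omit an unreachable return statement.
}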
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index 408606d366..533a6778b5 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -647,7 +647,6 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GT:
case Instruction::IF_LE: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const && rl_src[1].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg),
@@ -664,7 +663,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken, fall_through);
+ GenCompareAndBranch(opcode, rl_src[0], rl_src[1], taken);
}
break;
}
@@ -676,7 +675,6 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
case Instruction::IF_GTZ:
case Instruction::IF_LEZ: {
LIR* taken = &label_list[bb->taken];
- LIR* fall_through = &label_list[bb->fall_through];
// Result known at compile time?
if (rl_src[0].is_const) {
bool is_taken = EvaluateBranch(opcode, mir_graph_->ConstantValue(rl_src[0].orig_sreg), 0);
@@ -692,7 +690,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
!mir_graph_->HasSuspendTestBetween(bb, bb->fall_through))) {
GenSuspendTest(opt_flags);
}
- GenCompareZeroAndBranch(opcode, rl_src[0], taken, fall_through);
+ GenCompareZeroAndBranch(opcode, rl_src[0], taken);
}
break;
}
@@ -1377,8 +1375,9 @@ void Mir2Lir::CheckRegLocationImpl(RegLocation rl, bool fail, bool report) const
}
size_t Mir2Lir::GetInstructionOffset(LIR* lir) {
- UNIMPLEMENTED(FATAL) << "Unsuppored GetInstructionOffset()";
- return 0;
+ UNUSED(lir);
+ UNIMPLEMENTED(FATAL) << "Unsupported GetInstructionOffset()";
+ UNREACHABLE();
}
} // namespace art
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index f4e6dfead2..ef1e7e3c14 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -33,6 +33,7 @@
#include "utils/array_ref.h"
#include "utils/arena_allocator.h"
#include "utils/arena_containers.h"
+#include "utils/arena_object.h"
#include "utils/stack_checks.h"
namespace art {
@@ -129,11 +130,11 @@ namespace art {
#define INVALID_SREG (-1)
#endif
-struct BasicBlock;
+class BasicBlock;
struct CallInfo;
struct CompilationUnit;
struct InlineMethod;
-struct MIR;
+class MIR;
struct LIR;
struct RegisterInfo;
class DexFileMethodInliner;
@@ -501,12 +502,11 @@ class Mir2Lir : public Backend {
// has completed.
//
- class LIRSlowPath {
+ class LIRSlowPath : public ArenaObject<kArenaAllocSlowPaths> {
public:
LIRSlowPath(Mir2Lir* m2l, const DexOffset dexpc, LIR* fromfast,
LIR* cont = nullptr) :
m2l_(m2l), cu_(m2l->cu_), current_dex_pc_(dexpc), fromfast_(fromfast), cont_(cont) {
- m2l->StartSlowPath(this);
}
virtual ~LIRSlowPath() {}
virtual void Compile() = 0;
@@ -694,11 +694,6 @@ class Mir2Lir : public Backend {
void MarkPackedCaseLabels(Mir2Lir::SwitchTable* tab_rec);
void MarkSparseCaseLabels(Mir2Lir::SwitchTable* tab_rec);
- virtual void StartSlowPath(LIRSlowPath* slowpath) {}
- virtual void BeginInvoke(CallInfo* info) {}
- virtual void EndInvoke(CallInfo* info) {}
-
-
// Handle bookkeeping to convert a wide RegLocation to a narrow RegLocation. No code generated.
virtual RegLocation NarrowRegLoc(RegLocation loc);
@@ -822,10 +817,9 @@ class Mir2Lir : public Backend {
LIR* GenNullCheck(RegStorage m_reg, int opt_flags);
LIR* GenExplicitNullCheck(RegStorage m_reg, int opt_flags);
virtual void GenImplicitNullCheck(RegStorage reg, int opt_flags);
- void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1,
- RegLocation rl_src2, LIR* taken, LIR* fall_through);
- void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src,
- LIR* taken, LIR* fall_through);
+ void GenCompareAndBranch(Instruction::Code opcode, RegLocation rl_src1, RegLocation rl_src2,
+ LIR* taken);
+ void GenCompareZeroAndBranch(Instruction::Code opcode, RegLocation rl_src, LIR* taken);
virtual void GenIntToLong(RegLocation rl_dest, RegLocation rl_src);
void GenIntNarrowing(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src);
@@ -1350,7 +1344,7 @@ class Mir2Lir : public Backend {
*/
virtual void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) = 0;
+ RegisterClass dest_reg_class) = 0;
/**
* @brief Used to generate a memory barrier in an architecture specific way.
@@ -1452,6 +1446,7 @@ class Mir2Lir : public Backend {
virtual bool InexpensiveConstantLong(int64_t value) = 0;
virtual bool InexpensiveConstantDouble(int64_t value) = 0;
virtual bool InexpensiveConstantInt(int32_t value, Instruction::Code opcode) {
+ UNUSED(opcode);
return InexpensiveConstantInt(value);
}
@@ -1642,12 +1637,12 @@ class Mir2Lir : public Backend {
/**
* Returns true iff wide GPRs are just different views on the same physical register.
*/
- virtual bool WideGPRsAreAliases() = 0;
+ virtual bool WideGPRsAreAliases() const = 0;
/**
* Returns true iff wide FPRs are just different views on the same physical register.
*/
- virtual bool WideFPRsAreAliases() = 0;
+ virtual bool WideFPRsAreAliases() const = 0;
enum class WidenessCheck { // private
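Deriving LIRSlowPath from ArenaObject<kArenaAllocSlowPaths> is the "tidy arena allocated data types" part of this change: instead of each class carrying its own operator new/delete, arena-allocated types share a small base that routes allocation to the ArenaAllocator. Roughly, such a base looks like the sketch below (an approximation of compiler/utils/arena_object.h, not a verbatim copy); slow paths can then be created with a placement-style `new (arena_) SomeSlowPath(...)`.

template <ArenaAllocKind kAllocKind>
class ArenaObject {
 public:
  // Route allocation through the arena; objects are never freed individually.
  void* operator new(size_t size, ArenaAllocator* allocator) {
    return allocator->Alloc(size, kAllocKind);
  }

  // Arena-allocated objects are released wholesale with the arena,
  // so a direct delete is a programming error.
  void operator delete(void*, size_t) {
    LOG(FATAL) << "UNREACHABLE";
  }
};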
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 8f7bd3033a..5d49a9133d 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -625,6 +625,7 @@ bool QuickCompiler::WriteElf(art::File* file,
}
Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {
case kThumb2:
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 6305b22ded..0a98c800a8 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -316,16 +316,16 @@ RegStorage Mir2Lir::AllocPreservedFpReg(int s_reg) {
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedDouble";
- return res;
+ UNREACHABLE();
}
// TODO: this is Thumb2 only. Remove when DoPromotion refactored.
RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
- RegStorage res;
+ UNUSED(s_reg);
UNIMPLEMENTED(FATAL) << "Unexpected use of AllocPreservedSingle";
- return res;
+ UNREACHABLE();
}
@@ -1392,6 +1392,7 @@ int Mir2Lir::GetSRegHi(int lowSreg) {
}
bool Mir2Lir::LiveOut(int s_reg) {
+ UNUSED(s_reg);
// For now.
return true;
}
diff --git a/compiler/dex/quick/resource_mask.h b/compiler/dex/quick/resource_mask.h
index 436cdb5440..78e81b2ed1 100644
--- a/compiler/dex/quick/resource_mask.h
+++ b/compiler/dex/quick/resource_mask.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include "base/logging.h"
+#include "base/value_object.h"
#include "dex/reg_storage.h"
namespace art {
@@ -113,10 +114,7 @@ class ResourceMask {
return (masks_[0] & other.masks_[0]) != 0u || (masks_[1] & other.masks_[1]) != 0u;
}
- void SetBit(size_t bit) {
- DCHECK_LE(bit, kHighestCommonResource);
- masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
- }
+ void SetBit(size_t bit);
constexpr bool HasBit(size_t bit) const {
return (masks_[bit / 64u] & (UINT64_C(1) << (bit & 63u))) != 0u;
@@ -139,6 +137,12 @@ class ResourceMask {
friend class ResourceMaskCache;
};
+std::ostream& operator<<(std::ostream& os, const ResourceMask::ResourceBit& rhs);
+
+inline void ResourceMask::SetBit(size_t bit) {
+ DCHECK_LE(bit, kHighestCommonResource);
+ masks_[bit / 64u] |= UINT64_C(1) << (bit & 63u);
+}
constexpr ResourceMask kEncodeNone = ResourceMask::NoBits();
constexpr ResourceMask kEncodeAll = ResourceMask::AllBits();
diff --git a/compiler/dex/quick/x86/assemble_x86.cc b/compiler/dex/quick/x86/assemble_x86.cc
index dce2b73fff..85a3b3210d 100644
--- a/compiler/dex/quick/x86/assemble_x86.cc
+++ b/compiler/dex/quick/x86/assemble_x86.cc
@@ -547,6 +547,11 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
{ kX86RepneScasw, kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs) {
+ os << X86Mir2Lir::EncodingMap[rhs].name;
+ return os;
+}
+
static bool NeedsRex(int32_t raw_reg) {
return RegStorage::RegNum(raw_reg) > 7;
}
@@ -1631,6 +1636,7 @@ void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) {
* sequence or request that the trace be shortened and retried.
*/
AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
+ UNUSED(start_addr);
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 86efc1e52c..497ef94c27 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -290,9 +290,10 @@ void X86Mir2Lir::GenImplicitNullCheck(RegStorage reg, int opt_flags) {
*/
static int X86NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
int state, const MethodReference& target_method,
- uint32_t unused,
+ uint32_t,
uintptr_t direct_code, uintptr_t direct_method,
InvokeType type) {
+ UNUSED(info, direct_code);
Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
if (direct_method != 0) {
switch (state) {
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 7b5b831e38..dec99aefab 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -250,7 +250,7 @@ class X86Mir2Lir : public Mir2Lir {
void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) OVERRIDE;
+ RegisterClass dest_reg_class) OVERRIDE;
bool GenMemBarrier(MemBarrierKind barrier_kind) OVERRIDE;
void GenMoveException(RegLocation rl_dest) OVERRIDE;
void GenMultiplyByTwoBitMultiplier(RegLocation rl_src, RegLocation rl_result, int lit,
@@ -499,7 +499,7 @@ class X86Mir2Lir : public Mir2Lir {
void GenConstWide(RegLocation rl_dest, int64_t value);
void GenMultiplyVectorSignedByte(RegStorage rs_dest_src1, RegStorage rs_src2);
void GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_src2);
- void GenShiftByteVector(BasicBlock *bb, MIR *mir);
+ void GenShiftByteVector(MIR* mir);
void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
uint32_t m4);
void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
@@ -557,88 +557,80 @@ class X86Mir2Lir : public Mir2Lir {
/*
* @brief Load 128 bit constant into vector register.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector
* @note vA is the TypeSize for the register.
* @note vB is the destination XMM register. arg[0..3] are 32 bit constant values.
*/
- void GenConst128(BasicBlock* bb, MIR* mir);
+ void GenConst128(MIR* mir);
/*
* @brief MIR to move a vectorized register to another.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination
* @note vC: source
*/
- void GenMoveVector(BasicBlock *bb, MIR *mir);
+ void GenMoveVector(MIR* mir);
/*
* @brief Packed multiply of units in two vector registers: vB = vB .* @note vC using vA to know
* the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenMultiplyVector(BasicBlock *bb, MIR *mir);
+ void GenMultiplyVector(MIR* mir);
/*
* @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenAddVector(BasicBlock *bb, MIR *mir);
+ void GenAddVector(MIR* mir);
/*
* @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenSubtractVector(BasicBlock *bb, MIR *mir);
+ void GenSubtractVector(MIR* mir);
/*
* @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
+ void GenShiftLeftVector(MIR* mir);
/*
* @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
* know the type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenSignedShiftRightVector(MIR* mir);
/*
* @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
* to know the type of the vector.
- * @param bb The basic block in which the MIR is from..
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: immediate
*/
- void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
+ void GenUnsignedShiftRightVector(MIR* mir);
/*
* @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
@@ -647,51 +639,47 @@ class X86Mir2Lir : public Mir2Lir {
* @note vB: destination and source
* @note vC: source
*/
- void GenAndVector(BasicBlock *bb, MIR *mir);
+ void GenAndVector(MIR* mir);
/*
* @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenOrVector(BasicBlock *bb, MIR *mir);
+ void GenOrVector(MIR* mir);
/*
* @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
* type of the vector.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination and source
* @note vC: source
*/
- void GenXorVector(BasicBlock *bb, MIR *mir);
+ void GenXorVector(MIR* mir);
/*
* @brief Reduce a 128-bit packed element into a single VR by taking lower bits
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @details Instruction does a horizontal addition of the packed elements and then adds it to VR.
* @note vA: TypeSize
* @note vB: destination and source VR (not vector register)
* @note vC: source (vector register)
*/
- void GenAddReduceVector(BasicBlock *bb, MIR *mir);
+ void GenAddReduceVector(MIR* mir);
/*
* @brief Extract a packed element into a single VR.
- * @param bb The basic block in which the MIR is from.
* @param mir The MIR whose opcode is kMirConstVector.
* @note vA: TypeSize
* @note vB: destination VR (not vector register)
* @note vC: source (vector register)
* @note arg[0]: The index to use for extraction from vector register (which packed element).
*/
- void GenReduceVector(BasicBlock *bb, MIR *mir);
+ void GenReduceVector(MIR* mir);
/*
* @brief Create a vector value, with all TypeSize values equal to vC
@@ -701,21 +689,21 @@ class X86Mir2Lir : public Mir2Lir {
* @note vB: destination vector register.
* @note vC: source VR (not vector register).
*/
- void GenSetVector(BasicBlock *bb, MIR *mir);
+ void GenSetVector(MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayGet.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayGet.
*/
- void GenPackedArrayGet(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayGet(BasicBlock* bb, MIR* mir);
/**
* @brief Used to generate code for kMirOpPackedArrayPut.
* @param bb The basic block of MIR.
* @param mir The mir whose opcode is kMirOpPackedArrayPut.
*/
- void GenPackedArrayPut(BasicBlock *bb, MIR *mir);
+ void GenPackedArrayPut(BasicBlock* bb, MIR* mir);
/*
* @brief Generate code for a vector opcode.
@@ -890,8 +878,8 @@ class X86Mir2Lir : public Mir2Lir {
* the value is live in a temp register of the correct class. Additionally, if the value is in
* a temp register of the wrong register class, it will be clobbered.
*/
- RegLocation UpdateLocTyped(RegLocation loc, int reg_class);
- RegLocation UpdateLocWideTyped(RegLocation loc, int reg_class);
+ RegLocation UpdateLocTyped(RegLocation loc);
+ RegLocation UpdateLocWideTyped(RegLocation loc);
/*
* @brief Analyze MIR before generating code, to prepare for the code generation.
@@ -902,7 +890,7 @@ class X86Mir2Lir : public Mir2Lir {
* @brief Analyze one basic block.
* @param bb Basic block to analyze.
*/
- void AnalyzeBB(BasicBlock * bb);
+ void AnalyzeBB(BasicBlock* bb);
/*
* @brief Analyze one extended MIR instruction
@@ -910,7 +898,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Extended instruction to analyze.
*/
- void AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR instruction
@@ -918,7 +906,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one MIR float/double instruction
@@ -926,7 +914,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- virtual void AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir);
+ virtual void AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir);
/*
* @brief Analyze one use of a double operand.
@@ -940,7 +928,7 @@ class X86Mir2Lir : public Mir2Lir {
* @param bb Basic block containing instruction.
* @param mir Instruction to analyze.
*/
- void AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir);
+ void AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir);
// Information derived from analysis of MIR
@@ -987,12 +975,11 @@ class X86Mir2Lir : public Mir2Lir {
*/
LIR* AddVectorLiteral(int32_t* constants);
- InToRegStorageMapping in_to_reg_storage_mapping_;
-
- bool WideGPRsAreAliases() OVERRIDE {
+ bool WideGPRsAreAliases() const OVERRIDE {
return cu_->target64; // On 64b, we have 64b GPRs.
}
- bool WideFPRsAreAliases() OVERRIDE {
+
+ bool WideFPRsAreAliases() const OVERRIDE {
return true; // xmm registers have 64b views even on x86.
}
@@ -1002,11 +989,17 @@ class X86Mir2Lir : public Mir2Lir {
*/
static void DumpRegLocation(RegLocation loc);
- static const X86EncodingMap EncodingMap[kX86Last];
+ InToRegStorageMapping in_to_reg_storage_mapping_;
private:
void SwapBits(RegStorage result_reg, int shift, int32_t value);
void SwapBits64(RegStorage result_reg, int shift, int64_t value);
+
+ static const X86EncodingMap EncodingMap[kX86Last];
+
+ friend std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(X86Mir2Lir);
};
} // namespace art
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index 21d1a5cec2..254d90fa8c 100755
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -169,8 +169,7 @@ void X86Mir2Lir::GenLongToFP(RegLocation rl_dest, RegLocation rl_src, bool is_do
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
/*
* We already know that the result is in a physical register but do not know if it is the
@@ -431,8 +430,7 @@ void X86Mir2Lir::GenRemFP(RegLocation rl_dest, RegLocation rl_src1, RegLocation
* If the result's location is in memory, then we do not need to do anything
* more since the fstp has already placed the correct value in memory.
*/
- RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest, kFPReg) :
- UpdateLocTyped(rl_dest, kFPReg);
+ RegLocation rl_result = is_double ? UpdateLocWideTyped(rl_dest) : UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
rl_result = EvalLoc(rl_dest, kFPReg, true);
if (is_double) {
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index aa1bf7fe6d..7229318331 100755
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -208,7 +208,7 @@ void X86Mir2Lir::OpRegCopyWide(RegStorage r_dest, RegStorage r_src) {
void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, ConditionCode code,
int32_t true_val, int32_t false_val, RegStorage rs_dest,
- int dest_reg_class) {
+ RegisterClass dest_reg_class) {
DCHECK(!left_op.IsPair() && !right_op.IsPair() && !rs_dest.IsPair());
DCHECK(!left_op.IsFloat() && !right_op.IsFloat() && !rs_dest.IsFloat());
@@ -268,6 +268,7 @@ void X86Mir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Condi
}
void X86Mir2Lir::GenSelect(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
RegLocation rl_result;
RegLocation rl_src = mir_graph_->GetSrc(mir, 0);
RegLocation rl_dest = mir_graph_->GetDest(mir);
@@ -594,8 +595,9 @@ void X86Mir2Lir::CalculateMagicAndShift(int64_t divisor, int64_t& magic, int& sh
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegStorage reg_lo, int lit, bool is_div) {
+ UNUSED(rl_dest, reg_lo, lit, is_div);
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
@@ -763,12 +765,14 @@ RegLocation X86Mir2Lir::GenDivRemLit(RegLocation rl_dest, RegLocation rl_src,
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage reg_lo, RegStorage reg_hi,
bool is_div) {
+ UNUSED(rl_dest, reg_lo, reg_hi, is_div);
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
- return rl_dest;
+ UNREACHABLE();
}
RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
RegLocation rl_src2, bool is_div, int flags) {
+ UNUSED(rl_dest);
// We have to use fixed registers, so flush all the temps.
// Prepare for explicit register usage.
@@ -1022,7 +1026,7 @@ bool X86Mir2Lir::GenInlinedPoke(CallInfo* info, OpSize size) {
DCHECK(size == kSignedByte || size == kSignedHalf || size == k32);
// In 32-bit mode the only EAX..EDX registers can be used with Mov8MR.
if (!cu_->target64 && size == kSignedByte) {
- rl_src_value = UpdateLocTyped(rl_src_value, kCoreReg);
+ rl_src_value = UpdateLocTyped(rl_src_value);
if (rl_src_value.location == kLocPhysReg && !IsByteRegister(rl_src_value.reg)) {
RegStorage temp = AllocateByteRegister();
OpRegCopy(temp, rl_src_value.reg);
@@ -1309,18 +1313,21 @@ LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
}
LIR* X86Mir2Lir::OpVldm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVldm for x86";
- return NULL;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpVstm(RegStorage r_base, int count) {
+ UNUSED(r_base, count);
LOG(FATAL) << "Unexpected use of OpVstm for x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::GenMultiplyByTwoBitMultiplier(RegLocation rl_src,
RegLocation rl_result, int lit,
int first_bit, int second_bit) {
+ UNUSED(lit);
RegStorage t_reg = AllocTemp();
OpRegRegImm(kOpLsl, t_reg, rl_src.reg, second_bit - first_bit);
OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, t_reg);
@@ -1453,22 +1460,27 @@ LIR* X86Mir2Lir::OpDecAndBranch(ConditionCode c_code, RegStorage reg, LIR* targe
bool X86Mir2Lir::SmallLiteralDivRem(Instruction::Code dalvik_opcode, bool is_div,
RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(dalvik_opcode, is_div, rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
- return false;
+ UNREACHABLE();
}
bool X86Mir2Lir::EasyMultiply(RegLocation rl_src, RegLocation rl_dest, int lit) {
+ UNUSED(rl_src, rl_dest, lit);
LOG(FATAL) << "Unexpected use of easyMultiply in x86";
- return false;
+ UNREACHABLE();
}
LIR* X86Mir2Lir::OpIT(ConditionCode cond, const char* guide) {
+ UNUSED(cond, guide);
LOG(FATAL) << "Unexpected use of OpIT in x86";
- return NULL;
+ UNREACHABLE();
}
void X86Mir2Lir::OpEndIT(LIR* it) {
+ UNUSED(it);
LOG(FATAL) << "Unexpected use of OpEndIT in x86";
+ UNREACHABLE();
}
void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
@@ -1486,6 +1498,7 @@ void X86Mir2Lir::GenImulRegImm(RegStorage dest, RegStorage src, int val) {
}
void X86Mir2Lir::GenImulMemImm(RegStorage dest, int sreg, int displacement, int val) {
+ UNUSED(sreg);
// All memory accesses below reference dalvik regs.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -1616,7 +1629,7 @@ bool X86Mir2Lir::GenMulLongConst(RegLocation rl_dest, RegLocation rl_src1, int64
int32_t val_hi = High32Bits(val);
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
bool src1_in_reg = rl_src1.location == kLocPhysReg;
int displacement = SRegOffset(rl_src1.s_reg_low);
@@ -1700,8 +1713,8 @@ void X86Mir2Lir::GenMulLong(Instruction::Code, RegLocation rl_dest, RegLocation
// Prepare for explicit register usage.
ExplicitTempRegisterLock(this, 3, &rs_r0, &rs_r1, &rs_r2);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// At this point, the VRs are in their home locations.
bool src1_in_reg = rl_src1.location == kLocPhysReg;
@@ -1837,12 +1850,12 @@ void X86Mir2Lir::GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src,
}
void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op) {
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if (rl_dest.location == kLocPhysReg) {
// Ensure we are in a register pair
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1850,7 +1863,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src, Instructi
// Handle the case when src and dest are intersect.
rl_src = LoadValueWide(rl_src, kCoreReg);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
GenLongRegOrMemOp(rl_result, rl_src, op);
StoreFinalValueWide(rl_dest, rl_result);
return;
@@ -1910,7 +1923,7 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
rl_result = ForceTempWide(rl_result);
// Perform the operation using the RHS.
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
GenLongRegOrMemOp(rl_result, rl_src2, op);
// And now record that the result is in the temp.
@@ -1919,10 +1932,9 @@ void X86Mir2Lir::GenLongArith(RegLocation rl_dest, RegLocation rl_src1,
}
// It wasn't in registers, so it better be in memory.
- DCHECK((rl_dest.location == kLocDalvikFrame) ||
- (rl_dest.location == kLocCompilerTemp));
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
- rl_src2 = UpdateLocWideTyped(rl_src2, kCoreReg);
+ DCHECK((rl_dest.location == kLocDalvikFrame) || (rl_dest.location == kLocCompilerTemp));
+ rl_src1 = UpdateLocWideTyped(rl_src1);
+ rl_src2 = UpdateLocWideTyped(rl_src2);
// Get one of the source operands into temporary register.
rl_src1 = LoadValueWide(rl_src1, kCoreReg);
@@ -2088,7 +2100,7 @@ void X86Mir2Lir::GenDivRemLongLit(RegLocation rl_dest, RegLocation rl_src,
NewLIR1(kX86Imul64DaR, numerator_reg.GetReg());
} else {
// Only need this once. Multiply directly from the value.
- rl_src = UpdateLocWideTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocWideTyped(rl_src);
if (rl_src.location != kLocPhysReg) {
// Okay, we can do this from memory.
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
@@ -2395,6 +2407,7 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
RegLocation X86Mir2Lir::GenShiftImmOpLong(Instruction::Code opcode, RegLocation rl_dest,
RegLocation rl_src, int shift_amount, int flags) {
+ UNUSED(flags);
RegLocation rl_result = EvalLocWide(rl_dest, kCoreReg, true);
if (cu_->target64) {
OpKind op = static_cast<OpKind>(0); /* Make gcc happy */
@@ -2692,7 +2705,7 @@ X86OpCode X86Mir2Lir::GetOpcode(Instruction::Code op, RegLocation loc, bool is_h
return in_mem ? kX86Xor32MI : kX86Xor32RI;
default:
LOG(FATAL) << "Unexpected opcode: " << op;
- return kX86Add32MI;
+ UNREACHABLE();
}
}
@@ -2706,7 +2719,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
return false;
}
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
if ((rl_dest.location == kLocDalvikFrame) ||
(rl_dest.location == kLocCompilerTemp)) {
@@ -2736,7 +2749,7 @@ bool X86Mir2Lir::GenLongImm(RegLocation rl_dest, RegLocation rl_src, Instruction
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
// Can we just do this into memory?
if ((rl_dest.location == kLocDalvikFrame) ||
@@ -2812,8 +2825,8 @@ bool X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1,
int32_t val_lo = Low32Bits(val);
int32_t val_hi = High32Bits(val);
- rl_dest = UpdateLocWideTyped(rl_dest, kCoreReg);
- rl_src1 = UpdateLocWideTyped(rl_src1, kCoreReg);
+ rl_dest = UpdateLocWideTyped(rl_dest);
+ rl_src1 = UpdateLocWideTyped(rl_src1);
// Can we do this directly into the destination registers?
if (rl_dest.location == kLocPhysReg && rl_src1.location == kLocPhysReg &&
@@ -3035,7 +3048,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
if (unary) {
rl_lhs = LoadValue(rl_lhs, kCoreReg);
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
rl_result = EvalLoc(rl_dest, kCoreReg, true);
OpRegReg(op, rl_result.reg, rl_lhs.reg);
} else {
@@ -3045,7 +3058,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
LoadValueDirectFixed(rl_rhs, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
OpMemReg(op, rl_result, t_reg.GetReg());
@@ -3068,12 +3081,12 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// Multiply is 3 operand only (sort of).
if (is_two_addr && op != kOpMul) {
// Can we do this directly into memory?
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg
rl_result = EvalLoc(rl_dest, kCoreReg, true);
// Can we do this from memory directly?
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_rhs = UpdateLocTyped(rl_rhs);
if (rl_rhs.location != kLocPhysReg) {
OpRegMem(op, rl_result.reg, rl_rhs);
StoreFinalValue(rl_dest, rl_result);
@@ -3088,7 +3101,7 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
// It might happen rl_rhs and rl_dest are the same VR
// in this case rl_dest is in reg after LoadValue while
// rl_result is not updated yet, so do this
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory.
OpMemReg(op, rl_result, rl_rhs.reg.GetReg());
@@ -3105,8 +3118,8 @@ void X86Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
}
} else {
// Try to use reg/memory instructions.
- rl_lhs = UpdateLocTyped(rl_lhs, kCoreReg);
- rl_rhs = UpdateLocTyped(rl_rhs, kCoreReg);
+ rl_lhs = UpdateLocTyped(rl_lhs);
+ rl_rhs = UpdateLocTyped(rl_rhs);
// We can't optimize with FP registers.
if (!IsOperationSafeWithoutTemps(rl_lhs, rl_rhs)) {
// Something is difficult, so fall back to the standard case.
@@ -3178,7 +3191,7 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
Mir2Lir::GenIntToLong(rl_dest, rl_src);
return;
}
- rl_src = UpdateLocTyped(rl_src, kCoreReg);
+ rl_src = UpdateLocTyped(rl_src);
RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
if (rl_src.location == kLocPhysReg) {
NewLIR2(kX86MovsxdRR, rl_result.reg.GetReg(), rl_src.reg.GetReg());
@@ -3278,7 +3291,7 @@ void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
LoadValueDirectFixed(rl_shift, t_reg);
if (is_two_addr) {
// Can we do this directly into memory?
- rl_result = UpdateLocWideTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocWideTyped(rl_dest);
if (rl_result.location != kLocPhysReg) {
// Okay, we can do this into memory
ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 79d5eebe17..9616d8fa71 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -143,25 +143,6 @@ static constexpr ArrayRef<const RegStorage> xp_temps_64(xp_temps_arr_64);
RegStorage rs_rX86_SP;
-X86NativeRegisterPool rX86_ARG0;
-X86NativeRegisterPool rX86_ARG1;
-X86NativeRegisterPool rX86_ARG2;
-X86NativeRegisterPool rX86_ARG3;
-X86NativeRegisterPool rX86_ARG4;
-X86NativeRegisterPool rX86_ARG5;
-X86NativeRegisterPool rX86_FARG0;
-X86NativeRegisterPool rX86_FARG1;
-X86NativeRegisterPool rX86_FARG2;
-X86NativeRegisterPool rX86_FARG3;
-X86NativeRegisterPool rX86_FARG4;
-X86NativeRegisterPool rX86_FARG5;
-X86NativeRegisterPool rX86_FARG6;
-X86NativeRegisterPool rX86_FARG7;
-X86NativeRegisterPool rX86_RET0;
-X86NativeRegisterPool rX86_RET1;
-X86NativeRegisterPool rX86_INVOKE_TGT;
-X86NativeRegisterPool rX86_COUNT;
-
RegStorage rs_rX86_ARG0;
RegStorage rs_rX86_ARG1;
RegStorage rs_rX86_ARG2;
@@ -237,8 +218,9 @@ RegStorage X86Mir2Lir::TargetReg32(SpecialTargetRegister reg) {
}
RegStorage X86Mir2Lir::TargetReg(SpecialTargetRegister reg) {
+ UNUSED(reg);
LOG(FATAL) << "Do not use this function!!!";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
/*
@@ -795,14 +777,11 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
class_type_address_insns_.reserve(100);
call_method_insns_.reserve(100);
store_method_addr_used_ = false;
- if (kIsDebugBuild) {
for (int i = 0; i < kX86Last; i++) {
- if (X86Mir2Lir::EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
- }
- }
+ DCHECK_EQ(X86Mir2Lir::EncodingMap[i].opcode, i)
+ << "Encoding order for " << X86Mir2Lir::EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << static_cast<int>(X86Mir2Lir::EncodingMap[i].opcode);
}
if (cu_->target64) {
rs_rX86_SP = rs_rX86_SP_64;
@@ -821,20 +800,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
rs_rX86_FARG5 = rs_fr5;
rs_rX86_FARG6 = rs_fr6;
rs_rX86_FARG7 = rs_fr7;
- rX86_ARG0 = rDI;
- rX86_ARG1 = rSI;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rCX;
- rX86_ARG4 = r8;
- rX86_ARG5 = r9;
- rX86_FARG0 = fr0;
- rX86_FARG1 = fr1;
- rX86_FARG2 = fr2;
- rX86_FARG3 = fr3;
- rX86_FARG4 = fr4;
- rX86_FARG5 = fr5;
- rX86_FARG6 = fr6;
- rX86_FARG7 = fr7;
rs_rX86_INVOKE_TGT = rs_rDI;
} else {
rs_rX86_SP = rs_rX86_SP_32;
@@ -853,14 +818,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
rs_rX86_FARG5 = RegStorage::InvalidReg();
rs_rX86_FARG6 = RegStorage::InvalidReg();
rs_rX86_FARG7 = RegStorage::InvalidReg();
- rX86_ARG0 = rAX;
- rX86_ARG1 = rCX;
- rX86_ARG2 = rDX;
- rX86_ARG3 = rBX;
- rX86_FARG0 = rAX;
- rX86_FARG1 = rCX;
- rX86_FARG2 = rDX;
- rX86_FARG3 = rBX;
rs_rX86_INVOKE_TGT = rs_rAX;
// TODO(64): Initialize with invalid reg
// rX86_ARG4 = RegStorage::InvalidReg();
@@ -869,10 +826,6 @@ X86Mir2Lir::X86Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator*
rs_rX86_RET0 = rs_rAX;
rs_rX86_RET1 = rs_rDX;
rs_rX86_COUNT = rs_rCX;
- rX86_RET0 = rAX;
- rX86_RET1 = rDX;
- rX86_INVOKE_TGT = rAX;
- rX86_COUNT = rCX;
}
Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -882,8 +835,9 @@ Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
// Not used in x86(-64)
RegStorage X86Mir2Lir::LoadHelper(QuickEntrypointEnum trampoline) {
+ UNUSED(trampoline);
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
- return RegStorage::InvalidReg();
+ UNREACHABLE();
}
LIR* X86Mir2Lir::CheckSuspendUsingLoad() {
@@ -1548,46 +1502,46 @@ void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
ReturnVectorRegisters(mir);
break;
case kMirOpConstVector:
- GenConst128(bb, mir);
+ GenConst128(mir);
break;
case kMirOpMoveVector:
- GenMoveVector(bb, mir);
+ GenMoveVector(mir);
break;
case kMirOpPackedMultiply:
- GenMultiplyVector(bb, mir);
+ GenMultiplyVector(mir);
break;
case kMirOpPackedAddition:
- GenAddVector(bb, mir);
+ GenAddVector(mir);
break;
case kMirOpPackedSubtract:
- GenSubtractVector(bb, mir);
+ GenSubtractVector(mir);
break;
case kMirOpPackedShiftLeft:
- GenShiftLeftVector(bb, mir);
+ GenShiftLeftVector(mir);
break;
case kMirOpPackedSignedShiftRight:
- GenSignedShiftRightVector(bb, mir);
+ GenSignedShiftRightVector(mir);
break;
case kMirOpPackedUnsignedShiftRight:
- GenUnsignedShiftRightVector(bb, mir);
+ GenUnsignedShiftRightVector(mir);
break;
case kMirOpPackedAnd:
- GenAndVector(bb, mir);
+ GenAndVector(mir);
break;
case kMirOpPackedOr:
- GenOrVector(bb, mir);
+ GenOrVector(mir);
break;
case kMirOpPackedXor:
- GenXorVector(bb, mir);
+ GenXorVector(mir);
break;
case kMirOpPackedAddReduce:
- GenAddReduceVector(bb, mir);
+ GenAddReduceVector(mir);
break;
case kMirOpPackedReduce:
- GenReduceVector(bb, mir);
+ GenReduceVector(mir);
break;
case kMirOpPackedSet:
- GenSetVector(bb, mir);
+ GenSetVector(mir);
break;
case kMirOpMemBarrier:
GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
@@ -1638,7 +1592,7 @@ void X86Mir2Lir::ReturnVectorRegisters(MIR* mir) {
}
}
-void X86Mir2Lir::GenConst128(BasicBlock* bb, MIR* mir) {
+void X86Mir2Lir::GenConst128(MIR* mir) {
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
Clobber(rs_dest);
@@ -1689,7 +1643,7 @@ void X86Mir2Lir::AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir) {
load->target = data_target;
}
-void X86Mir2Lir::GenMoveVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMoveVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1805,7 +1759,7 @@ void X86Mir2Lir::GenMultiplyVectorLong(RegStorage rs_dest_src1, RegStorage rs_sr
NewLIR2(kX86PaddqRR, rs_dest_src1.GetReg(), rs_tmp_vector_1.GetReg());
}
-void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenMultiplyVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1839,7 +1793,7 @@ void X86Mir2Lir::GenMultiplyVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1874,7 +1828,7 @@ void X86Mir2Lir::GenAddVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSubtractVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1909,7 +1863,7 @@ void X86Mir2Lir::GenSubtractVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftByteVector(MIR* mir) {
// Destination does not need clobbered because it has already been as part
// of the general packed shift handler (caller of this method).
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1953,7 +1907,7 @@ void X86Mir2Lir::GenShiftByteVector(BasicBlock *bb, MIR *mir) {
AndMaskVectorRegister(rs_dest_src1, int_mask, int_mask, int_mask, int_mask);
}
-void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenShiftLeftVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1973,7 +1927,7 @@ void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector shift left " << opsize;
@@ -1982,7 +1936,7 @@ void X86Mir2Lir::GenShiftLeftVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -1999,18 +1953,18 @@ void X86Mir2Lir::GenSignedShiftRightVector(BasicBlock *bb, MIR *mir) {
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
case k64:
// TODO Implement emulated shift algorithm.
default:
LOG(FATAL) << "Unsupported vector signed shift right " << opsize;
- break;
+ UNREACHABLE();
}
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenUnsignedShiftRightVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2030,7 +1984,7 @@ void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
break;
case kSignedByte:
case kUnsignedByte:
- GenShiftByteVector(bb, mir);
+ GenShiftByteVector(mir);
return;
default:
LOG(FATAL) << "Unsupported vector unsigned shift right " << opsize;
@@ -2039,7 +1993,7 @@ void X86Mir2Lir::GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir) {
NewLIR2(opcode, rs_dest_src1.GetReg(), imm);
}
-void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAndVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2048,7 +2002,7 @@ void X86Mir2Lir::GenAndVector(BasicBlock *bb, MIR *mir) {
NewLIR2(kX86PandRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenOrVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2057,7 +2011,7 @@ void X86Mir2Lir::GenOrVector(BasicBlock *bb, MIR *mir) {
NewLIR2(kX86PorRR, rs_dest_src1.GetReg(), rs_src2.GetReg());
}
-void X86Mir2Lir::GenXorVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenXorVector(MIR* mir) {
// We only support 128 bit registers.
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
RegStorage rs_dest_src1 = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2084,7 +2038,7 @@ void X86Mir2Lir::MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32
AppendOpcodeWithConst(opcode, rs_src1.GetReg(), const_mirp);
}
-void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenAddReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
bool is_wide = opsize == k64 || opsize == kDouble;
@@ -2219,7 +2173,7 @@ void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
// except the rhs is not a VR but a physical register allocated above.
// No load of source VR is done because it assumes that rl_result will
// share physical register / memory location.
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
if (rl_result.location == kLocPhysReg) {
// Ensure res is in a core reg.
rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -2232,7 +2186,7 @@ void X86Mir2Lir::GenAddReduceVector(BasicBlock *bb, MIR *mir) {
}
}
-void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenReduceVector(MIR* mir) {
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegLocation rl_dest = mir_graph_->GetDest(mir);
RegStorage vector_src = RegStorage::Solo128(mir->dalvikInsn.vB);
@@ -2286,7 +2240,7 @@ void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
} else {
int extract_index = mir->dalvikInsn.arg[0];
int extr_opcode = 0;
- rl_result = UpdateLocTyped(rl_dest, kCoreReg);
+ rl_result = UpdateLocTyped(rl_dest);
// Handle the rest of integral types now.
switch (opsize) {
@@ -2302,7 +2256,7 @@ void X86Mir2Lir::GenReduceVector(BasicBlock *bb, MIR *mir) {
break;
default:
LOG(FATAL) << "Unsupported vector reduce " << opsize;
- return;
+ UNREACHABLE();
}
if (rl_result.location == kLocPhysReg) {
@@ -2331,7 +2285,7 @@ void X86Mir2Lir::LoadVectorRegister(RegStorage rs_dest, RegStorage rs_src,
}
}
-void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenSetVector(MIR* mir) {
DCHECK_EQ(mir->dalvikInsn.vC & 0xFFFF, 128U);
OpSize opsize = static_cast<OpSize>(mir->dalvikInsn.vC >> 16);
RegStorage rs_dest = RegStorage::Solo128(mir->dalvikInsn.vA);
@@ -2406,11 +2360,13 @@ void X86Mir2Lir::GenSetVector(BasicBlock *bb, MIR *mir) {
}
}
-void X86Mir2Lir::GenPackedArrayGet(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayGet(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayGet not supported.";
}
-void X86Mir2Lir::GenPackedArrayPut(BasicBlock *bb, MIR *mir) {
+void X86Mir2Lir::GenPackedArrayPut(BasicBlock* bb, MIR* mir) {
+ UNUSED(bb, mir);
UNIMPLEMENTED(FATAL) << "Extended opcode kMirOpPackedArrayPut not supported.";
}
diff --git a/compiler/dex/quick/x86/utility_x86.cc b/compiler/dex/quick/x86/utility_x86.cc
index 8d5dabc5fd..cb9a24a336 100644
--- a/compiler/dex/quick/x86/utility_x86.cc
+++ b/compiler/dex/quick/x86/utility_x86.cc
@@ -54,14 +54,16 @@ LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
}
bool X86Mir2Lir::InexpensiveConstantInt(int32_t value) {
+ UNUSED(value);
return true;
}
bool X86Mir2Lir::InexpensiveConstantFloat(int32_t value) {
- return false;
+ return value == 0;
}
bool X86Mir2Lir::InexpensiveConstantLong(int64_t value) {
+ UNUSED(value);
return true;
}
@@ -934,13 +936,14 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
LIR* X86Mir2Lir::OpCmpMemImmBranch(ConditionCode cond, RegStorage temp_reg, RegStorage base_reg,
int offset, int check_value, LIR* target, LIR** compare) {
- LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
- offset, check_value);
- if (compare != nullptr) {
- *compare = inst;
- }
- LIR* branch = OpCondBranch(cond, target);
- return branch;
+ UNUSED(temp_reg); // Comparison performed directly with memory.
+ LIR* inst = NewLIR3(IS_SIMM8(check_value) ? kX86Cmp32MI8 : kX86Cmp32MI, base_reg.GetReg(),
+ offset, check_value);
+ if (compare != nullptr) {
+ *compare = inst;
+ }
+ LIR* branch = OpCondBranch(cond, target);
+ return branch;
}
void X86Mir2Lir::AnalyzeMIR() {
@@ -965,13 +968,13 @@ void X86Mir2Lir::AnalyzeMIR() {
}
}
-void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
+void X86Mir2Lir::AnalyzeBB(BasicBlock* bb) {
if (bb->block_type == kDead) {
// Ignore dead blocks
return;
}
- for (MIR *mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
+ for (MIR* mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
int opcode = mir->dalvikInsn.opcode;
if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
AnalyzeExtendedMIR(opcode, bb, mir);
@@ -982,7 +985,7 @@ void X86Mir2Lir::AnalyzeBB(BasicBlock * bb) {
}
-void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock* bb, MIR* mir) {
switch (opcode) {
// Instructions referencing doubles.
case kMirOpFusedCmplDouble:
@@ -1009,7 +1012,7 @@ void X86Mir2Lir::AnalyzeExtendedMIR(int opcode, BasicBlock * bb, MIR *mir) {
}
}
-void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock* bb, MIR* mir) {
// Looking for
// - Do we need a pointer to the code (used for packed switches and double lits)?
@@ -1046,7 +1049,8 @@ void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
}
}
-void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeFPInstruction(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(bb);
// Look at all the uses, and see if they are double constants.
uint64_t attrs = MIRGraph::GetDataFlowAttributes(static_cast<Instruction::Code>(opcode));
int next_sreg = 0;
@@ -1080,7 +1084,7 @@ void X86Mir2Lir::AnalyzeDoubleUse(RegLocation use) {
}
}
-RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc) {
loc = UpdateLoc(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1094,7 +1098,7 @@ RegLocation X86Mir2Lir::UpdateLocTyped(RegLocation loc, int reg_class) {
return loc;
}
-RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
+RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc) {
loc = UpdateLocWide(loc);
if ((loc.location == kLocPhysReg) && (loc.fp != loc.reg.IsFloat())) {
if (GetRegInfo(loc.reg)->IsTemp()) {
@@ -1108,7 +1112,8 @@ RegLocation X86Mir2Lir::UpdateLocWideTyped(RegLocation loc, int reg_class) {
return loc;
}
-void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
+void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock* bb, MIR* mir) {
+ UNUSED(opcode, bb);
// For now this is only actual for x86-32.
if (cu_->target64) {
return;
@@ -1132,6 +1137,7 @@ void X86Mir2Lir::AnalyzeInvokeStatic(int opcode, BasicBlock * bb, MIR *mir) {
}
LIR* X86Mir2Lir::InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) {
+ UNUSED(r_tgt); // Call to absolute memory location doesn't need a temporary target register.
if (cu_->target64) {
return OpThreadMem(op, GetThreadOffset<8>(trampoline));
} else {
diff --git a/compiler/dex/quick/x86/x86_lir.h b/compiler/dex/quick/x86/x86_lir.h
index 22a2f30d4d..afdc244dac 100644
--- a/compiler/dex/quick/x86/x86_lir.h
+++ b/compiler/dex/quick/x86/x86_lir.h
@@ -313,25 +313,6 @@ constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);
-extern X86NativeRegisterPool rX86_ARG0;
-extern X86NativeRegisterPool rX86_ARG1;
-extern X86NativeRegisterPool rX86_ARG2;
-extern X86NativeRegisterPool rX86_ARG3;
-extern X86NativeRegisterPool rX86_ARG4;
-extern X86NativeRegisterPool rX86_ARG5;
-extern X86NativeRegisterPool rX86_FARG0;
-extern X86NativeRegisterPool rX86_FARG1;
-extern X86NativeRegisterPool rX86_FARG2;
-extern X86NativeRegisterPool rX86_FARG3;
-extern X86NativeRegisterPool rX86_FARG4;
-extern X86NativeRegisterPool rX86_FARG5;
-extern X86NativeRegisterPool rX86_FARG6;
-extern X86NativeRegisterPool rX86_FARG7;
-extern X86NativeRegisterPool rX86_RET0;
-extern X86NativeRegisterPool rX86_RET1;
-extern X86NativeRegisterPool rX86_INVOKE_TGT;
-extern X86NativeRegisterPool rX86_COUNT;
-
extern RegStorage rs_rX86_ARG0;
extern RegStorage rs_rX86_ARG1;
extern RegStorage rs_rX86_ARG2;
@@ -674,6 +655,7 @@ enum X86OpCode {
kX86RepneScasw, // repne scasw
kX86Last
};
+std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);
/* Instruction assembly field_loc kind */
enum X86EncodingKind {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 706933a1b4..4a84ff2516 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -18,6 +18,7 @@
#define ART_COMPILER_DEX_REG_STORAGE_H_
#include "base/logging.h"
+#include "base/value_object.h"
#include "compiler_enums.h" // For WideKind
namespace art {
@@ -72,7 +73,7 @@ namespace art {
* records.
*/
-class RegStorage {
+class RegStorage : public ValueObject {
public:
enum RegStorageKind {
kValidMask = 0x8000,
@@ -112,7 +113,7 @@ class RegStorage {
}
constexpr RegStorage(RegStorageKind rs_kind, int low_reg, int high_reg)
: reg_(
- DCHECK_CONSTEXPR(rs_kind == k64BitPair, << rs_kind, 0u)
+ DCHECK_CONSTEXPR(rs_kind == k64BitPair, << static_cast<int>(rs_kind), 0u)
DCHECK_CONSTEXPR((low_reg & kFloatingPoint) == (high_reg & kFloatingPoint),
<< low_reg << ", " << high_reg, 0u)
DCHECK_CONSTEXPR((high_reg & kRegNumMask) <= kHighRegNumMask,
@@ -331,9 +332,8 @@ class RegStorage {
case k256BitSolo: return 32;
case k512BitSolo: return 64;
case k1024BitSolo: return 128;
- default: LOG(FATAL) << "Unexpected shape";
+ default: LOG(FATAL) << "Unexpected shape"; UNREACHABLE();
}
- return 0;
}
private:
diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc
index a8e6b3c8df..4929b5b2b0 100644
--- a/compiler/dex/verification_results.cc
+++ b/compiler/dex/verification_results.cc
@@ -106,6 +106,8 @@ bool VerificationResults::IsCandidateForCompilation(MethodReference& method_ref,
if (use_sea) {
return true;
}
+#else
+ UNUSED(method_ref);
#endif
if (!compiler_options_->IsCompilationEnabled()) {
return false;
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 65a842d605..682b17a7d1 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -75,6 +75,7 @@ enum DexToDexCompilationLevel {
kRequired, // Dex-to-dex compilation required for correctness.
kOptimize // Perform required transformation and peep-hole optimizations.
};
+std::ostream& operator<<(std::ostream& os, const DexToDexCompilationLevel& rhs);
class CompilerDriver {
public:
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index fb7aeb9b06..0592f0cf1e 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -21,6 +21,7 @@
#include <vector>
#include "base/macros.h"
+#include "globals.h"
namespace art {
@@ -242,6 +243,7 @@ class CompilerOptions FINAL {
DISALLOW_COPY_AND_ASSIGN(CompilerOptions);
};
+std::ostream& operator<<(std::ostream& os, const CompilerOptions::CompilerFilter& rhs);
} // namespace art
diff --git a/compiler/elf_builder.h b/compiler/elf_builder.h
index c32bdb45dc..7f3056500c 100644
--- a/compiler/elf_builder.h
+++ b/compiler/elf_builder.h
@@ -494,7 +494,7 @@ class ElfFileOatTextPiece FINAL : public ElfFilePiece<Elf_Word> {
output_(output) {}
protected:
- bool DoActualWrite(File* elf_file) OVERRIDE {
+ bool DoActualWrite(File* elf_file ATTRIBUTE_UNUSED) OVERRIDE {
// All data is written by the ElfFileRodataPiece right now, as the oat writer writes in one
// piece. This is for future flexibility.
UNUSED(output_);
diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc
index 575886b8e0..25cf086696 100644
--- a/compiler/elf_writer_quick.cc
+++ b/compiler/elf_writer_quick.cc
@@ -222,9 +222,9 @@ template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
typename Elf_Phdr, typename Elf_Shdr>
bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
- const std::vector<const DexFile*>& dex_files_unused,
- const std::string& android_root_unused,
- bool is_host_unused) {
+ const std::vector<const DexFile*>& dex_files_unused ATTRIBUTE_UNUSED,
+ const std::string& android_root_unused ATTRIBUTE_UNUSED,
+ bool is_host_unused ATTRIBUTE_UNUSED) {
constexpr bool debug = false;
const OatHeader& oat_header = oat_writer->GetOatHeader();
Elf_Word oat_data_size = oat_header.GetExecutableOffset();
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 861a182a71..2fd5a52745 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -265,7 +265,7 @@ bool ImageWriter::ComputeLazyFieldsForClassesVisitor(Class* c, void* /*arg*/) {
return true;
}
-void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg) {
+void ImageWriter::ComputeEagerResolvedStringsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) {
if (!obj->GetClass()->IsStringClass()) {
return;
}
@@ -661,7 +661,8 @@ class FixupClassVisitor FINAL : public FixupVisitor {
}
}
- void operator()(mirror::Class* /*klass*/, mirror::Reference* ref) const
+ void operator()(mirror::Class* klass ATTRIBUTE_UNUSED,
+ mirror::Reference* ref ATTRIBUTE_UNUSED) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
LOG(FATAL) << "Reference not expected here.";
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index 0fea2a7de7..113204635c 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -530,7 +530,7 @@ JNI_TEST(CompileAndRunStaticDoubleDoubleMethod)
// point return value would be in xmm0. We use log, to somehow ensure
// the compiler will use the floating point stack.
-jdouble Java_MyClassNatives_logD(JNIEnv* env, jclass klass, jdouble x) {
+jdouble Java_MyClassNatives_logD(JNIEnv*, jclass, jdouble x) {
return log(x);
}
@@ -544,7 +544,7 @@ void JniCompilerTest::RunStaticLogDoubleMethodImpl() {
JNI_TEST(RunStaticLogDoubleMethod)
-jfloat Java_MyClassNatives_logF(JNIEnv* env, jclass klass, jfloat x) {
+jfloat Java_MyClassNatives_logF(JNIEnv*, jclass, jfloat x) {
return logf(x);
}
@@ -558,15 +558,15 @@ void JniCompilerTest::RunStaticLogFloatMethodImpl() {
JNI_TEST(RunStaticLogFloatMethod)
-jboolean Java_MyClassNatives_returnTrue(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnTrue(JNIEnv*, jclass) {
return JNI_TRUE;
}
-jboolean Java_MyClassNatives_returnFalse(JNIEnv* env, jclass klass) {
+jboolean Java_MyClassNatives_returnFalse(JNIEnv*, jclass) {
return JNI_FALSE;
}
-jint Java_MyClassNatives_returnInt(JNIEnv* env, jclass klass) {
+jint Java_MyClassNatives_returnInt(JNIEnv*, jclass) {
return 42;
}
@@ -1056,7 +1056,10 @@ void JniCompilerTest::CompileAndRunFloatFloatMethodImpl() {
JNI_TEST(CompileAndRunFloatFloatMethod)
-void Java_MyClassNatives_checkParameterAlign(JNIEnv* env, jobject thisObj, jint i1, jlong l1) {
+void Java_MyClassNatives_checkParameterAlign(JNIEnv* env ATTRIBUTE_UNUSED,
+ jobject thisObj ATTRIBUTE_UNUSED,
+ jint i1 ATTRIBUTE_UNUSED,
+ jlong l1 ATTRIBUTE_UNUSED) {
// EXPECT_EQ(kNative, Thread::Current()->GetState());
// EXPECT_EQ(Thread::Current()->GetJniEnv(), env);
// EXPECT_TRUE(thisObj != nullptr);
@@ -1520,7 +1523,7 @@ void JniCompilerTest::WithoutImplementationImpl() {
JNI_TEST(WithoutImplementation)
-void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv* env, jclass klass, jint i1, jint i2, jint i3,
+void Java_MyClassNatives_stackArgsIntsFirst(JNIEnv*, jclass, jint i1, jint i2, jint i3,
jint i4, jint i5, jint i6, jint i7, jint i8, jint i9,
jint i10, jfloat f1, jfloat f2, jfloat f3, jfloat f4,
jfloat f5, jfloat f6, jfloat f7, jfloat f8, jfloat f9,
@@ -1591,7 +1594,7 @@ void JniCompilerTest::StackArgsIntsFirstImpl() {
JNI_TEST(StackArgsIntsFirst)
-void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv* env, jclass klass, jfloat f1, jfloat f2,
+void Java_MyClassNatives_stackArgsFloatsFirst(JNIEnv*, jclass, jfloat f1, jfloat f2,
jfloat f3, jfloat f4, jfloat f5, jfloat f6, jfloat f7,
jfloat f8, jfloat f9, jfloat f10, jint i1, jint i2,
jint i3, jint i4, jint i5, jint i6, jint i7, jint i8,
@@ -1662,7 +1665,7 @@ void JniCompilerTest::StackArgsFloatsFirstImpl() {
JNI_TEST(StackArgsFloatsFirst)
-void Java_MyClassNatives_stackArgsMixed(JNIEnv* env, jclass klass, jint i1, jfloat f1, jint i2,
+void Java_MyClassNatives_stackArgsMixed(JNIEnv*, jclass, jint i1, jfloat f1, jint i2,
jfloat f2, jint i3, jfloat f3, jint i4, jfloat f4, jint i5,
jfloat f5, jint i6, jfloat f6, jint i7, jfloat f7, jint i8,
jfloat f8, jint i9, jfloat f9, jint i10, jfloat f10) {
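
A small sketch, not from the tree, contrasting the two idioms the hunks above use once -Wunused-parameter is enforced: drop the parameter name entirely when it is never referenced, or keep the name and tag it. ATTRIBUTE_UNUSED is assumed to expand to __attribute__((unused)) as in art's base/macros.h; the function names are made up:

// Assumed definition; in art this lives in base/macros.h.
#define ATTRIBUTE_UNUSED __attribute__((unused))

// Idiom 1: omit the name when the parameter is never used in the body.
static int ReturnAnswer(int /* request_id */, bool) {
  return 42;
}

// Idiom 2: keep the name (it still documents the signature) and mark it
// so -Wunused-parameter stays quiet.
static int ReturnOffset(int offset, const void* code ATTRIBUTE_UNUSED) {
  return offset;
}

int main() {
  return (ReturnAnswer(0, true) + ReturnOffset(0, nullptr)) == 42 ? 0 : 1;
}
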
diff --git a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
index 525f05c522..a100552695 100644
--- a/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
+++ b/compiler/jni/quick/x86_64/calling_convention_x86_64.cc
@@ -38,6 +38,7 @@ ManagedRegister X86_64JniCallingConvention::ReturnScratchRegister() const {
}
static ManagedRegister ReturnRegisterForShorty(const char* shorty, bool jni) {
+ UNUSED(jni);
if (shorty[0] == 'F' || shorty[0] == 'D') {
return X86_64ManagedRegister::FromXmmRegister(XMM0);
} else if (shorty[0] == 'J') {
diff --git a/compiler/llvm/llvm_compiler.cc b/compiler/llvm/llvm_compiler.cc
index 55af6145be..fa93e00e19 100644
--- a/compiler/llvm/llvm_compiler.cc
+++ b/compiler/llvm/llvm_compiler.cc
@@ -16,6 +16,7 @@
#include "llvm_compiler.h"
+#include "base/macros.h"
#ifdef ART_USE_PORTABLE_COMPILER
#include "compiler.h"
#include "compiler_llvm.h"
@@ -152,9 +153,10 @@ class LLVMCompiler FINAL : public Compiler {
Compiler* CreateLLVMCompiler(CompilerDriver* driver) {
#ifdef ART_USE_PORTABLE_COMPILER
- return new llvm::LLVMCompiler(driver);
+ return new llvm::LLVMCompiler(driver);
#else
- return nullptr;
+ UNUSED(driver);
+ return nullptr;
#endif
}
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index be52f40a0b..6138411b11 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -70,16 +70,18 @@ class OatWriter::NoRelativeCallPatcher FINAL : public RelativeCallPatcher {
public:
NoRelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no patches expected.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no patches expected.
}
- void Patch(std::vector<uint8_t>* code, uint32_t literal_offset, uint32_t patch_offset,
- uint32_t target_offset) OVERRIDE {
+ void Patch(std::vector<uint8_t>* code ATTRIBUTE_UNUSED, uint32_t literal_offset ATTRIBUTE_UNUSED,
+ uint32_t patch_offset ATTRIBUTE_UNUSED,
+ uint32_t target_offset ATTRIBUTE_UNUSED) OVERRIDE {
LOG(FATAL) << "Unexpected relative patch.";
}
@@ -91,11 +93,12 @@ class OatWriter::X86RelativeCallPatcher FINAL : public RelativeCallPatcher {
public:
X86RelativeCallPatcher() { }
- uint32_t ReserveSpace(uint32_t offset, const CompiledMethod* compiled_method) OVERRIDE {
+ uint32_t ReserveSpace(uint32_t offset,
+ const CompiledMethod* compiled_method ATTRIBUTE_UNUSED) OVERRIDE {
return offset; // No space reserved; no limit on relative call distance.
}
- uint32_t WriteThunks(OutputStream* out, uint32_t offset) OVERRIDE {
+ uint32_t WriteThunks(OutputStream* out ATTRIBUTE_UNUSED, uint32_t offset) OVERRIDE {
return offset; // No thunks added; no limit on relative call distance.
}
@@ -648,7 +651,7 @@ class OatWriter::InitOatClassesMethodVisitor : public DexMethodVisitor {
return true;
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) {
+ bool VisitMethod(size_t class_def_method_index ATTRIBUTE_UNUSED, const ClassDataItemIterator& it) {
// Fill in the compiled_methods_ array for methods that have a
// CompiledMethod. We track the number of non-null entries in
// num_non_null_compiled_methods_ since we only want to allocate
@@ -860,7 +863,7 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor {
: OatDexMethodVisitor(writer, offset) {
}
- bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it)
+ bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
OatClass* oat_class = writer_->oat_classes_[oat_class_index_];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 71f0b1b850..01c5cc9637 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -51,7 +51,7 @@ struct PcInfo {
uintptr_t native_pc;
};
-class SlowPathCode : public ArenaObject {
+class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {}
virtual ~SlowPathCode() {}
@@ -62,7 +62,7 @@ class SlowPathCode : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
-class CodeGenerator : public ArenaObject {
+class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
@@ -115,12 +115,14 @@ class CodeGenerator : public ArenaObject {
// Restores the register from the stack. Returns the size taken on stack.
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index c812f6b416..0cec4b404a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -794,6 +794,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ bkpt(0);
@@ -959,6 +960,7 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -986,6 +988,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -996,6 +999,7 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -1006,6 +1010,7 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -1016,6 +1021,7 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -1026,6 +1032,7 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
@@ -1033,6 +1040,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1043,6 +1051,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1508,6 +1517,7 @@ void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM::VisitNot(HNot* not_) {
@@ -1596,6 +1606,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1998,9 +2009,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ec9af73a71..6ac7a31d3f 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -556,6 +556,7 @@ enum UnimplementedInstructionBreakCode {
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ UNUSED(instr); \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -711,6 +712,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
__ Brk(0); // TODO: Introduce special markers for such code locations.
@@ -877,6 +879,7 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
@@ -967,6 +970,7 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM64::VisitLocal(HLocal* local) {
@@ -984,6 +988,7 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM64::VisitMul(HMul* mul) {
@@ -1109,6 +1114,7 @@ void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
@@ -1120,6 +1126,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1164,6 +1171,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
}
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ UNUSED(instruction);
codegen_->GenerateFrameExit();
__ Br(lr);
}
@@ -1191,6 +1199,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -1242,6 +1251,7 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
} // namespace arm64
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a4003ffea5..5530f46065 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -180,11 +180,15 @@ class CodeGeneratorARM64 : public CodeGenerator {
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
return 0;
}
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
return 0;
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index d41d5a00a8..ac328c319c 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -645,6 +645,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -734,6 +735,7 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -762,6 +764,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86::VisitCondition(HCondition* comp) {
@@ -851,6 +854,7 @@ void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -861,6 +865,7 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -871,6 +876,7 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -881,6 +887,7 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -888,6 +895,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1456,6 +1464,7 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
}
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
+ UNUSED(instruction);
}
void LocationsBuilderX86::VisitNot(HNot* not_) {
@@ -1550,6 +1559,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -2026,9 +2036,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index bda3520708..0bc2bad896 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -556,6 +556,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -644,6 +645,7 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) {
void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -671,6 +673,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
@@ -793,6 +796,7 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -803,6 +807,7 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -813,6 +818,7 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -823,6 +829,7 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -830,6 +837,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1381,6 +1389,7 @@ void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
@@ -1423,6 +1432,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -1902,9 +1912,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index a98d714476..8d2c77475c 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -25,7 +25,7 @@ namespace art {
* A node in the collision list of a ValueSet. Encodes the instruction,
* the hash code, and the next node in the collision list.
*/
-class ValueSetNode : public ArenaObject {
+class ValueSetNode : public ArenaObject<kArenaAllocMisc> {
public:
ValueSetNode(HInstruction* instruction, size_t hash_code, ValueSetNode* next)
: instruction_(instruction), hash_code_(hash_code), next_(next) {}
@@ -52,7 +52,7 @@ class ValueSetNode : public ArenaObject {
* if there is one in the set. In GVN, we would say those instructions have the
* same "number".
*/
-class ValueSet : public ArenaObject {
+class ValueSet : public ArenaObject<kArenaAllocMisc> {
public:
explicit ValueSet(ArenaAllocator* allocator)
: allocator_(allocator), number_of_entries_(0), collisions_(nullptr) {
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index d7295aa112..914a0c46cb 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -351,6 +351,8 @@ class Location : public ValueObject {
// way that none of them can be interpreted as a kConstant tag.
uintptr_t value_;
};
+std::ostream& operator<<(std::ostream& os, const Location::Kind& rhs);
+std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
@@ -401,7 +403,7 @@ class RegisterSet : public ValueObject {
* The intent is to have the code for generating the instruction independent of
* register allocation. A register allocator just has to provide a LocationSummary.
*/
-class LocationSummary : public ArenaObject {
+class LocationSummary : public ArenaObject<kArenaAllocMisc> {
public:
enum CallKind {
kNoCall,
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d624ad5e5e..8cb2ef6de8 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -647,4 +647,16 @@ bool HInstruction::Equals(HInstruction* other) const {
return true;
}
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs) {
+#define DECLARE_CASE(type, super) case HInstruction::k##type: os << #type; break;
+ switch (rhs) {
+ FOR_EACH_INSTRUCTION(DECLARE_CASE)
+ default:
+ os << "Unknown instruction kind " << static_cast<int>(rhs);
+ break;
+ }
+#undef DECLARE_CASE
+ return os;
+}
+
} // namespace art
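
A self-contained sketch of the X-macro technique the nodes.cc hunk above relies on to print instruction kinds by name; FOR_EACH_KIND and the three toy kinds are stand-ins, not the real FOR_EACH_INSTRUCTION list:

#include <iostream>

// Toy kind list; the real macro enumerates every HInstruction subclass.
#define FOR_EACH_KIND(M) \
  M(Add)                 \
  M(Mul)                 \
  M(Return)

enum class Kind {
#define DECLARE_ENUM(type) k##type,
  FOR_EACH_KIND(DECLARE_ENUM)
#undef DECLARE_ENUM
};

std::ostream& operator<<(std::ostream& os, Kind kind) {
#define DECLARE_CASE(type) case Kind::k##type: os << #type; break;
  switch (kind) {
    FOR_EACH_KIND(DECLARE_CASE)
    default: os << "Unknown kind " << static_cast<int>(kind); break;
  }
#undef DECLARE_CASE
  return os;
}

int main() {
  std::cout << Kind::kMul << "\n";  // prints "Mul"
}
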
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 33bfe19081..7549ebfbe4 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -79,12 +79,14 @@ class HInstructionList {
};
// Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject {
+class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
explicit HGraph(ArenaAllocator* arena)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
@@ -199,7 +201,7 @@ class HGraph : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-class HLoopInformation : public ArenaObject {
+class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
public:
HLoopInformation(HBasicBlock* header, HGraph* graph)
: header_(header),
@@ -278,7 +280,7 @@ static constexpr uint32_t kNoDexPc = -1;
// as a double linked list. Each block knows its predecessors and
// successors.
-class HBasicBlock : public ArenaObject {
+class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
@@ -537,7 +539,7 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
virtual void Accept(HGraphVisitor* visitor)
template <typename T>
-class HUseListNode : public ArenaObject {
+class HUseListNode : public ArenaObject<kArenaAllocMisc> {
public:
HUseListNode(T* user, size_t index, HUseListNode* tail)
: user_(user), index_(index), tail_(tail) {}
@@ -619,7 +621,7 @@ class SideEffects : public ValueObject {
size_t flags_;
};
-class HInstruction : public ArenaObject {
+class HInstruction : public ArenaObject<kArenaAllocMisc> {
public:
explicit HInstruction(SideEffects side_effects)
: previous_(nullptr),
@@ -738,12 +740,18 @@ class HInstruction : public ArenaObject {
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionTypeEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether two instructions are equal, that is:
// 1) They have the same type and contain the same data,
@@ -808,6 +816,7 @@ class HInstruction : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
template<typename T>
class HUseIterator : public ValueObject {
@@ -833,7 +842,7 @@ class HUseIterator : public ValueObject {
};
// A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject {
+class HEnvironment : public ArenaObject<kArenaAllocMisc> {
public:
HEnvironment(ArenaAllocator* arena, size_t number_of_vregs) : vregs_(arena, number_of_vregs) {
vregs_.SetSize(number_of_vregs);
@@ -965,14 +974,14 @@ class EmbeddedArray<T, 0> {
public:
intptr_t length() const { return 0; }
const T& operator[](intptr_t i) const {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
T& operator[](intptr_t i) {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
};
@@ -1110,7 +1119,10 @@ class HUnaryOperation : public HExpression<1> {
Primitive::Type GetResultType() const { return GetType(); }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1143,7 +1155,10 @@ class HBinaryOperation : public HExpression<2> {
virtual bool IsCommutative() { return false; }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1732,7 +1747,10 @@ class HNot : public HUnaryOperation {
: HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
@@ -1792,7 +1810,10 @@ class HNullCheck : public HExpression<1> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -1884,7 +1905,10 @@ class HArrayGet : public HExpression<2> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
void SetType(Primitive::Type type) { type_ = type; }
DECLARE_INSTRUCTION(ArrayGet);
@@ -1948,7 +1972,10 @@ class HArrayLength : public HExpression<1> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -1966,7 +1993,10 @@ class HBoundsCheck : public HExpression<2> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -2177,7 +2207,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
-class MoveOperands : public ArenaObject {
+class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
: source_(source), destination_(destination), instruction_(instruction) {}
@@ -2278,7 +2308,7 @@ class HGraphVisitor : public ValueObject {
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) {}
+ virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index d3fe1c4afc..08b74c7988 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -213,6 +213,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
+ UNUSED(invoke_type);
total_compiled_methods_++;
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 2bdcc61b04..62629bcd0c 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -50,8 +50,8 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
<< ")";
}
- virtual void SpillScratch(int reg) {}
- virtual void RestoreScratch(int reg) {}
+ virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
+ virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
std::string GetMessage() const {
return message_.str();
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 8811ac8939..ca08d5b3e6 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -25,7 +25,7 @@ class CodeGenerator;
static constexpr int kNoRegister = -1;
-class BlockInfo : public ArenaObject {
+class BlockInfo : public ArenaObject<kArenaAllocMisc> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
@@ -53,7 +53,7 @@ class BlockInfo : public ArenaObject {
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange : public ArenaObject {
+class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -64,16 +64,16 @@ class LiveRange : public ArenaObject {
size_t GetEnd() const { return end_; }
LiveRange* GetNext() const { return next_; }
- bool IntersectsWith(const LiveRange& other) {
+ bool IntersectsWith(const LiveRange& other) const {
return (start_ >= other.start_ && start_ < other.end_)
|| (other.start_ >= start_ && other.start_ < end_);
}
- bool IsBefore(const LiveRange& other) {
+ bool IsBefore(const LiveRange& other) const {
return end_ <= other.start_;
}
- void Dump(std::ostream& stream) {
+ void Dump(std::ostream& stream) const {
stream << "[" << start_ << ", " << end_ << ")";
}
@@ -90,7 +90,7 @@ class LiveRange : public ArenaObject {
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject {
+class UsePosition : public ArenaObject<kArenaAllocMisc> {
public:
UsePosition(HInstruction* user,
size_t input_index,
@@ -137,7 +137,7 @@ class UsePosition : public ArenaObject {
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
*/
-class LiveInterval : public ArenaObject {
+class LiveInterval : public ArenaObject<kArenaAllocMisc> {
public:
static LiveInterval* MakeInterval(ArenaAllocator* allocator,
Primitive::Type type,
diff --git a/compiler/output_stream.cc b/compiler/output_stream.cc
new file mode 100644
index 0000000000..a8b64ca1ce
--- /dev/null
+++ b/compiler/output_stream.cc
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "output_stream.h"
+
+namespace art {
+
+std::ostream& operator<<(std::ostream& os, const Whence& rhs) {
+ switch (rhs) {
+ case kSeekSet: os << "SEEK_SET"; break;
+ case kSeekCurrent: os << "SEEK_CUR"; break;
+ case kSeekEnd: os << "SEEK_END"; break;
+ default: UNREACHABLE();
+ }
+ return os;
+}
+
+} // namespace art
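
A brief usage sketch, assuming art's LOG/CHECK macros stream their arguments like a std::ostream: once Whence has an operator<< (mirroring the new file above), failure messages can name the seek mode instead of relying on an implicit integer conversion. Everything outside the operator body is illustrative:

#include <cstdio>    // SEEK_SET, SEEK_CUR, SEEK_END
#include <iostream>

enum Whence { kSeekSet = SEEK_SET, kSeekCurrent = SEEK_CUR, kSeekEnd = SEEK_END };

std::ostream& operator<<(std::ostream& os, const Whence& rhs) {
  switch (rhs) {
    case kSeekSet:     os << "SEEK_SET"; break;
    case kSeekCurrent: os << "SEEK_CUR"; break;
    case kSeekEnd:     os << "SEEK_END"; break;
  }
  return os;
}

int main() {
  Whence whence = kSeekCurrent;
  // Prints "seek failed, whence=SEEK_CUR" rather than a bare integer.
  std::cout << "seek failed, whence=" << whence << "\n";
}
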
diff --git a/compiler/output_stream.h b/compiler/output_stream.h
index 97ccc2caa6..4d30b83234 100644
--- a/compiler/output_stream.h
+++ b/compiler/output_stream.h
@@ -17,9 +17,7 @@
#ifndef ART_COMPILER_OUTPUT_STREAM_H_
#define ART_COMPILER_OUTPUT_STREAM_H_
-#include <stdint.h>
-#include <sys/types.h>
-
+#include <ostream>
#include <string>
#include "base/macros.h"
@@ -31,6 +29,7 @@ enum Whence {
kSeekCurrent = SEEK_CUR,
kSeekEnd = SEEK_END,
};
+std::ostream& operator<<(std::ostream& os, const Whence& rhs);
class OutputStream {
public:
diff --git a/compiler/utils/arena_allocator.h b/compiler/utils/arena_allocator.h
index b2f5ca9755..6d213991d3 100644
--- a/compiler/utils/arena_allocator.h
+++ b/compiler/utils/arena_allocator.h
@@ -82,7 +82,7 @@ class ArenaAllocatorStatsImpl<false> {
ArenaAllocatorStatsImpl& operator = (const ArenaAllocatorStatsImpl& other) = delete;
void Copy(const ArenaAllocatorStatsImpl& other) { UNUSED(other); }
- void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes); UNUSED(kind); }
+ void RecordAlloc(size_t bytes, ArenaAllocKind kind) { UNUSED(bytes, kind); }
size_t NumAllocations() const { return 0u; }
size_t BytesAllocated() const { return 0u; }
void Dump(std::ostream& os, const Arena* first, ssize_t lost_bytes_adjustment) const {
diff --git a/compiler/utils/arena_bit_vector.cc b/compiler/utils/arena_bit_vector.cc
index de35f3d197..f17e5a92a4 100644
--- a/compiler/utils/arena_bit_vector.cc
+++ b/compiler/utils/arena_bit_vector.cc
@@ -16,12 +16,12 @@
#include "arena_allocator.h"
#include "arena_bit_vector.h"
-#include "base/allocator.h"
namespace art {
template <typename ArenaAlloc>
-class ArenaBitVectorAllocator FINAL : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator,
+ public ArenaObject<kArenaAllocGrowableBitMap> {
public:
explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
~ArenaBitVectorAllocator() {}
@@ -32,11 +32,6 @@ class ArenaBitVectorAllocator FINAL : public Allocator {
virtual void Free(void*) {} // Nop.
- static void* operator new(size_t size, ArenaAlloc* arena) {
- return arena->Alloc(sizeof(ArenaBitVectorAllocator), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAlloc* const arena_;
DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
diff --git a/compiler/utils/arena_bit_vector.h b/compiler/utils/arena_bit_vector.h
index c92658f7d6..34f1ca9129 100644
--- a/compiler/utils/arena_bit_vector.h
+++ b/compiler/utils/arena_bit_vector.h
@@ -17,12 +17,14 @@
#ifndef ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
#define ART_COMPILER_UTILS_ARENA_BIT_VECTOR_H_
+#include "arena_object.h"
#include "base/bit_vector.h"
-#include "utils/arena_allocator.h"
-#include "utils/scoped_arena_allocator.h"
namespace art {
+class ArenaAllocator;
+class ScopedArenaAllocator;
+
// Type of growable bitmap for memory tuning.
enum OatBitMapKind {
kBitMapMisc = 0,
@@ -50,7 +52,7 @@ std::ostream& operator<<(std::ostream& os, const OatBitMapKind& kind);
/*
* A BitVector implementation that uses Arena allocation.
*/
-class ArenaBitVector : public BitVector {
+class ArenaBitVector : public BitVector, public ArenaObject<kArenaAllocGrowableBitMap> {
public:
ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
OatBitMapKind kind = kBitMapMisc);
@@ -58,16 +60,10 @@ class ArenaBitVector : public BitVector {
OatBitMapKind kind = kBitMapMisc);
~ArenaBitVector() {}
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void* operator new(size_t size, ScopedArenaAllocator* arena) {
- return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
const OatBitMapKind kind_; // for memory use tuning. TODO: currently unused.
+
+ DISALLOW_COPY_AND_ASSIGN(ArenaBitVector);
};
diff --git a/compiler/utils/arena_containers.h b/compiler/utils/arena_containers.h
index c48b0c81ec..825259157a 100644
--- a/compiler/utils/arena_containers.h
+++ b/compiler/utils/arena_containers.h
@@ -66,7 +66,7 @@ template <>
class ArenaAllocatorAdapterKindImpl<false> {
public:
// Not tracking allocations, ignore the supplied kind and arbitrarily provide kArenaAllocSTL.
- explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { }
+ explicit ArenaAllocatorAdapterKindImpl(ArenaAllocKind kind) { UNUSED(kind); }
ArenaAllocatorAdapterKindImpl& operator=(const ArenaAllocatorAdapterKindImpl& other) = default;
ArenaAllocKind Kind() { return kArenaAllocSTL; }
};
@@ -159,11 +159,13 @@ class ArenaAllocatorAdapter : private DebugStackReference, private ArenaAllocato
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
return reinterpret_cast<T*>(arena_allocator_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p, n);
}
void construct(pointer p, const_reference val) {
diff --git a/compiler/utils/arena_object.h b/compiler/utils/arena_object.h
index 8f6965edc5..d64c419954 100644
--- a/compiler/utils/arena_object.h
+++ b/compiler/utils/arena_object.h
@@ -19,14 +19,21 @@
#include "arena_allocator.h"
#include "base/logging.h"
+#include "scoped_arena_allocator.h"
namespace art {
+// Parent for arena allocated objects giving appropriate new and delete operators.
+template<enum ArenaAllocKind kAllocKind>
class ArenaObject {
public:
// Allocate a new ArenaObject of 'size' bytes in the Arena.
void* operator new(size_t size, ArenaAllocator* allocator) {
- return allocator->Alloc(size, kArenaAllocMisc);
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
}
void operator delete(void*, size_t) {
@@ -35,6 +42,26 @@ class ArenaObject {
}
};
+
+// Parent for arena allocated objects that get deleted, gives appropriate new and delete operators.
+// Currently this is used by the quick compiler for debug reference counting arena allocations.
+template<enum ArenaAllocKind kAllocKind>
+class DeletableArenaObject {
+ public:
+ // Allocate a new ArenaObject of 'size' bytes in the Arena.
+ void* operator new(size_t size, ArenaAllocator* allocator) {
+ return allocator->Alloc(size, kAllocKind);
+ }
+
+ static void* operator new(size_t size, ScopedArenaAllocator* arena) {
+ return arena->Alloc(size, kAllocKind);
+ }
+
+ void operator delete(void*, size_t) {
+ // Nop.
+ }
+};
+
} // namespace art
#endif // ART_COMPILER_UTILS_ARENA_OBJECT_H_
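
A condensed sketch of how the templated ArenaObject introduced above is meant to be used: a subclass names its allocation kind once, and instances are placement-new'd against an arena that is freed in bulk. The bump-allocating ArenaAllocator and the SlowPathCode subclass here are simplified stand-ins, not art's real classes:

#include <cstddef>
#include <cstdlib>
#include <vector>

enum ArenaAllocKind { kArenaAllocMisc, kArenaAllocSlowPaths };

// Tiny stand-in for art's ArenaAllocator: bump allocation, bulk free.
class ArenaAllocator {
 public:
  void* Alloc(size_t size, ArenaAllocKind /* kind */) {
    blocks_.push_back(std::malloc(size));
    return blocks_.back();
  }
  ~ArenaAllocator() {
    for (void* block : blocks_) std::free(block);
  }
 private:
  std::vector<void*> blocks_;
};

// Mirrors the patch: the allocation kind is a template parameter, so each
// subclass states once how its allocations are accounted.
template <ArenaAllocKind kAllocKind>
class ArenaObject {
 public:
  void* operator new(size_t size, ArenaAllocator* allocator) {
    return allocator->Alloc(size, kAllocKind);
  }
  void operator delete(void*, size_t) {}  // Nop: arena memory is bulk-freed.
};

class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
 public:
  explicit SlowPathCode(int id) : id_(id) {}
  int id() const { return id_; }
 private:
  int id_;
};

int main() {
  ArenaAllocator arena;
  // Placement-new against the arena; the object is never deleted individually.
  SlowPathCode* code = new (&arena) SlowPathCode(7);
  return code->id() == 7 ? 0 : 1;
}
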
diff --git a/compiler/utils/arm/assembler_arm.h b/compiler/utils/arm/assembler_arm.h
index c1ed6a239d..dca2ab7517 100644
--- a/compiler/utils/arm/assembler_arm.h
+++ b/compiler/utils/arm/assembler_arm.h
@@ -20,6 +20,7 @@
#include <vector>
#include "base/logging.h"
+#include "base/value_object.h"
#include "constants_arm.h"
#include "utils/arm/managed_register_arm.h"
#include "utils/assembler.h"
@@ -179,8 +180,12 @@ enum BlockAddressMode {
DB_W = (8|0|1) << 21, // decrement before with writeback to base
IB_W = (8|4|1) << 21 // increment before with writeback to base
};
+inline std::ostream& operator<<(std::ostream& os, const BlockAddressMode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
-class Address {
+class Address : public ValueObject {
public:
// Memory operand addressing mode (in ARM encoding form. For others we need
// to adjust)
@@ -260,13 +265,17 @@ class Address {
}
private:
- Register rn_;
- Register rm_;
- int32_t offset_; // Used as shift amount for register offset.
- Mode am_;
- bool is_immed_offset_;
- Shift shift_;
+ const Register rn_;
+ const Register rm_;
+ const int32_t offset_; // Used as shift amount for register offset.
+ const Mode am_;
+ const bool is_immed_offset_;
+ const Shift shift_;
};
+inline std::ostream& operator<<(std::ostream& os, const Address::Mode& rhs) {
+ os << static_cast<int>(rhs);
+ return os;
+}
// Instruction encoding bits.
enum {
@@ -344,10 +353,6 @@ constexpr uint32_t kInvalidModifiedImmediate = -1;
extern const char* kRegisterNames[];
extern const char* kConditionNames[];
-extern std::ostream& operator<<(std::ostream& os, const Register& rhs);
-extern std::ostream& operator<<(std::ostream& os, const SRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
-extern std::ostream& operator<<(std::ostream& os, const Condition& rhs);
// This is an abstract ARM assembler. Subclasses provide assemblers for the individual
// instruction sets (ARM32, Thumb2, etc.)
@@ -448,8 +453,10 @@ class ArmAssembler : public Assembler {
virtual void bkpt(uint16_t imm16) = 0;
virtual void svc(uint32_t imm24) = 0;
- virtual void it(Condition firstcond, ItState i1 = kItOmitted,
- ItState i2 = kItOmitted, ItState i3 = kItOmitted) {
+ virtual void it(Condition firstcond ATTRIBUTE_UNUSED,
+ ItState i1 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i2 ATTRIBUTE_UNUSED = kItOmitted,
+ ItState i3 ATTRIBUTE_UNUSED = kItOmitted) {
// Ignored if not supported.
}
@@ -537,14 +544,9 @@ class ArmAssembler : public Assembler {
Condition cond = AL) = 0;
virtual void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) = 0;
- virtual void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) = 0;
// Load and Store. May clobber IP.
virtual void LoadImmediate(Register rd, int32_t value, Condition cond = AL) = 0;
- virtual void LoadSImmediate(SRegister sd, float value, Condition cond = AL) = 0;
- virtual void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) = 0;
virtual void MarkExceptionHandler(Label* label) = 0;
virtual void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_arm32.cc b/compiler/utils/arm/assembler_arm32.cc
index d262b6acd6..c8a57b1873 100644
--- a/compiler/utils/arm/assembler_arm32.cc
+++ b/compiler/utils/arm/assembler_arm32.cc
@@ -1303,7 +1303,6 @@ void Arm32Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t value
}
}
-
void Arm32Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldArm(value, &shifter_op)) {
@@ -1483,12 +1482,12 @@ void Arm32Assembler::dmb(DmbOptions flavor) {
}
-void Arm32Assembler::cbz(Register rn, Label* target) {
+void Arm32Assembler::cbz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbz is not supported on ARM32";
}
-void Arm32Assembler::cbnz(Register rn, Label* target) {
+void Arm32Assembler::cbnz(Register rn ATTRIBUTE_UNUSED, Label* target ATTRIBUTE_UNUSED) {
LOG(FATAL) << "cbnz is not supported on ARM32";
}
diff --git a/compiler/utils/arm/assembler_arm32.h b/compiler/utils/arm/assembler_arm32.h
index cfc300b8a6..dbabb99933 100644
--- a/compiler/utils/arm/assembler_arm32.h
+++ b/compiler/utils/arm/assembler_arm32.h
@@ -238,14 +238,9 @@ class Arm32Assembler FINAL : public ArmAssembler {
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc
index 633f55b929..fd2613a89e 100644
--- a/compiler/utils/arm/assembler_thumb2.cc
+++ b/compiler/utils/arm/assembler_thumb2.cc
@@ -152,6 +152,8 @@ void Thumb2Assembler::mvns(Register rd, const ShifterOperand& so, Condition cond
void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
if (rd == rm && !IsHighRegister(rd) && !IsHighRegister(rn) && !force_32bit_) {
// 16 bit.
int16_t encoding = B14 | B9 | B8 | B6 |
@@ -176,6 +178,8 @@ void Thumb2Assembler::mul(Register rd, Register rn, Register rm, Condition cond)
void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 0U /* 0b00 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -192,6 +196,8 @@ void Thumb2Assembler::mla(Register rd, Register rn, Register rm, Register ra,
void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 0U /* 0b000 */;
uint32_t op2 = 01 /* 0b01 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 |
@@ -208,6 +214,8 @@ void Thumb2Assembler::mls(Register rd, Register rn, Register rm, Register ra,
void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 2U /* 0b010; */;
uint32_t op2 = 0U /* 0b0000 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 |
@@ -223,6 +231,8 @@ void Thumb2Assembler::umull(Register rd_lo, Register rd_hi, Register rn,
void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B20 |
@@ -238,6 +248,8 @@ void Thumb2Assembler::sdiv(Register rd, Register rn, Register rm, Condition cond
void Thumb2Assembler::udiv(Register rd, Register rn, Register rm, Condition cond) {
+ CheckCondition(cond);
+
uint32_t op1 = 1U /* 0b001 */;
uint32_t op2 = 15U /* 0b1111 */;
int32_t encoding = B31 | B30 | B29 | B28 | B27 | B25 | B24 | B23 | B21 | B20 |
@@ -293,6 +305,7 @@ void Thumb2Assembler::ldrsh(Register rd, const Address& ad, Condition cond) {
void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 | B20 |
@@ -304,6 +317,7 @@ void Thumb2Assembler::ldrd(Register rd, const Address& ad, Condition cond) {
void Thumb2Assembler::strd(Register rd, const Address& ad, Condition cond) {
+ CheckCondition(cond);
CHECK_EQ(rd % 2, 0);
// This is different from other loads. The encoding is like ARM.
int32_t encoding = B31 | B30 | B29 | B27 | B22 |
@@ -609,9 +623,9 @@ void Thumb2Assembler::Emit16(int16_t value) {
}
-bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
+bool Thumb2Assembler::Is32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -727,9 +741,9 @@ bool Thumb2Assembler::Is32BitDataProcessing(Condition cond,
}
-void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
+void Thumb2Assembler::Emit32BitDataProcessing(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -789,7 +803,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
}
encoding = B31 | B30 | B29 | B28 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
imm;
@@ -798,7 +812,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
// Register (possibly shifted)
encoding = B31 | B30 | B29 | B27 | B25 |
thumb_opcode << 21 |
- set_cc << 20 |
+ (set_cc ? 1 : 0) << 20 |
rn << 16 |
rd << 8 |
so.encodingThumb();
@@ -809,7 +823,7 @@ void Thumb2Assembler::Emit32BitDataProcessing(Condition cond,
void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -936,9 +950,9 @@ void Thumb2Assembler::Emit16BitDataProcessing(Condition cond,
// ADD and SUB are complex enough to warrant their own emitter.
-void Thumb2Assembler::Emit16BitAddSub(Condition cond,
+void Thumb2Assembler::Emit16BitAddSub(Condition cond ATTRIBUTE_UNUSED,
Opcode opcode,
- int set_cc,
+ bool set_cc ATTRIBUTE_UNUSED,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1075,7 +1089,7 @@ void Thumb2Assembler::Emit16BitAddSub(Condition cond,
void Thumb2Assembler::EmitDataProcessing(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so) {
@@ -1405,7 +1419,7 @@ void Thumb2Assembler::EmitLoadStore(Condition cond,
void Thumb2Assembler::EmitMultiMemOp(Condition cond,
- BlockAddressMode am,
+ BlockAddressMode bam,
bool load,
Register base,
RegList regs) {
@@ -1417,7 +1431,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
must_be_32bit = true;
}
- uint32_t w_bit = am == IA_W || am == DB_W || am == DA_W || am == IB_W;
+ bool w_bit = bam == IA_W || bam == DB_W || bam == DA_W || bam == IB_W;
// 16 bit always uses writeback.
if (!w_bit) {
must_be_32bit = true;
@@ -1425,7 +1439,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
if (must_be_32bit) {
uint32_t op = 0;
- switch (am) {
+ switch (bam) {
case IA:
case IA_W:
op = 1U /* 0b01 */;
@@ -1438,7 +1452,7 @@ void Thumb2Assembler::EmitMultiMemOp(Condition cond,
case IB:
case DA_W:
case IB_W:
- LOG(FATAL) << "LDM/STM mode not supported on thumb: " << am;
+ LOG(FATAL) << "LDM/STM mode not supported on thumb: " << bam;
}
if (load) {
// Cannot have SP in the list.
@@ -2354,7 +2368,6 @@ void Thumb2Assembler::AddConstantSetFlags(Register rd, Register rn, int32_t valu
}
}
-
void Thumb2Assembler::LoadImmediate(Register rd, int32_t value, Condition cond) {
ShifterOperand shifter_op;
if (ShifterOperand::CanHoldThumb(rd, R0, MOV, value, &shifter_op)) {
diff --git a/compiler/utils/arm/assembler_thumb2.h b/compiler/utils/arm/assembler_thumb2.h
index b26173fe28..9ccdef7e1e 100644
--- a/compiler/utils/arm/assembler_thumb2.h
+++ b/compiler/utils/arm/assembler_thumb2.h
@@ -269,14 +269,9 @@ class Thumb2Assembler FINAL : public ArmAssembler {
Condition cond = AL) OVERRIDE;
void AddConstantSetFlags(Register rd, Register rn, int32_t value,
Condition cond = AL) OVERRIDE;
- void AddConstantWithCarry(Register rd, Register rn, int32_t value,
- Condition cond = AL) {}
// Load and Store. May clobber IP.
void LoadImmediate(Register rd, int32_t value, Condition cond = AL) OVERRIDE;
- void LoadSImmediate(SRegister sd, float value, Condition cond = AL) {}
- void LoadDImmediate(DRegister dd, double value,
- Register scratch, Condition cond = AL) {}
void MarkExceptionHandler(Label* label) OVERRIDE;
void LoadFromOffset(LoadOperandType type,
Register reg,
@@ -324,40 +319,40 @@ class Thumb2Assembler FINAL : public ArmAssembler {
private:
// Emit a single 32 or 16 bit data processing instruction.
void EmitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Must the instruction be 32 bits or can it possibly be encoded
// in 16 bits?
bool Is32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 32 bit data processing instruction.
void Emit32BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
// Emit a 16 bit data processing instruction.
void Emit16BitDataProcessing(Condition cond,
- Opcode opcode,
- int set_cc,
- Register rn,
- Register rd,
- const ShifterOperand& so);
+ Opcode opcode,
+ bool set_cc,
+ Register rn,
+ Register rd,
+ const ShifterOperand& so);
void Emit16BitAddSub(Condition cond,
Opcode opcode,
- int set_cc,
+ bool set_cc,
Register rn,
Register rd,
const ShifterOperand& so);
@@ -365,12 +360,12 @@ class Thumb2Assembler FINAL : public ArmAssembler {
uint16_t EmitCompareAndBranch(Register rn, uint16_t prev, bool n);
void EmitLoadStore(Condition cond,
- bool load,
- bool byte,
- bool half,
- bool is_signed,
- Register rd,
- const Address& ad);
+ bool load,
+ bool byte,
+ bool half,
+ bool is_signed,
+ Register rd,
+ const Address& ad);
void EmitMemOpAddressMode3(Condition cond,
int32_t mode,
diff --git a/compiler/utils/arm/constants_arm.h b/compiler/utils/arm/constants_arm.h
index 702e03a277..1513296c2d 100644
--- a/compiler/utils/arm/constants_arm.h
+++ b/compiler/utils/arm/constants_arm.h
@@ -38,15 +38,6 @@ namespace arm {
// Constants for specific fields are defined in their respective named enums.
// General constants are in an anonymous enum in class Instr.
-
-// We support both VFPv3-D16 and VFPv3-D32 profiles, but currently only one at
-// a time, so that compile time optimizations can be applied.
-// Warning: VFPv3-D32 is untested.
-#define VFPv3_D16
-#if defined(VFPv3_D16) == defined(VFPv3_D32)
-#error "Exactly one of VFPv3_D16 or VFPv3_D32 can be defined at a time."
-#endif
-
// 4 bits option for the dmb instruction.
// Order and values follows those of the ARM Architecture Reference Manual.
enum DmbOptions {
@@ -66,26 +57,23 @@ enum ScaleFactor {
};
// Values for double-precision floating point registers.
-enum DRegister {
- D0 = 0,
- D1 = 1,
- D2 = 2,
- D3 = 3,
- D4 = 4,
- D5 = 5,
- D6 = 6,
- D7 = 7,
- D8 = 8,
- D9 = 9,
+enum DRegister {  // private marker to keep generate-operator-out.py from processing this enum.
+ D0 = 0,
+ D1 = 1,
+ D2 = 2,
+ D3 = 3,
+ D4 = 4,
+ D5 = 5,
+ D6 = 6,
+ D7 = 7,
+ D8 = 8,
+ D9 = 9,
D10 = 10,
D11 = 11,
D12 = 12,
D13 = 13,
D14 = 14,
D15 = 15,
-#ifdef VFPv3_D16
- kNumberOfDRegisters = 16,
-#else
D16 = 16,
D17 = 17,
D18 = 18,
@@ -103,7 +91,6 @@ enum DRegister {
D30 = 30,
D31 = 31,
kNumberOfDRegisters = 32,
-#endif
kNumberOfOverlappingDRegisters = 16,
kNoDRegister = -1,
};
@@ -111,18 +98,18 @@ std::ostream& operator<<(std::ostream& os, const DRegister& rhs);
// Values for the condition field as defined in section A3.2.
-enum Condition {
+enum Condition {  // private marker to keep generate-operator-out.py from processing this enum.
kNoCondition = -1,
- EQ = 0, // equal
- NE = 1, // not equal
- CS = 2, // carry set/unsigned higher or same
- CC = 3, // carry clear/unsigned lower
- MI = 4, // minus/negative
- PL = 5, // plus/positive or zero
- VS = 6, // overflow
- VC = 7, // no overflow
- HI = 8, // unsigned higher
- LS = 9, // unsigned lower or same
+ EQ = 0, // equal
+ NE = 1, // not equal
+ CS = 2, // carry set/unsigned higher or same
+ CC = 3, // carry clear/unsigned lower
+ MI = 4, // minus/negative
+ PL = 5, // plus/positive or zero
+ VS = 6, // overflow
+ VC = 7, // no overflow
+ HI = 8, // unsigned higher
+ LS = 9, // unsigned lower or same
GE = 10, // signed greater than or equal
LT = 11, // signed less than
GT = 12, // signed greater than
@@ -138,16 +125,16 @@ std::ostream& operator<<(std::ostream& os, const Condition& rhs);
// as defined in section A3.4
enum Opcode {
kNoOperand = -1,
- AND = 0, // Logical AND
- EOR = 1, // Logical Exclusive OR
- SUB = 2, // Subtract
- RSB = 3, // Reverse Subtract
- ADD = 4, // Add
- ADC = 5, // Add with Carry
- SBC = 6, // Subtract with Carry
- RSC = 7, // Reverse Subtract with Carry
- TST = 8, // Test
- TEQ = 9, // Test Equivalence
+ AND = 0, // Logical AND
+ EOR = 1, // Logical Exclusive OR
+ SUB = 2, // Subtract
+ RSB = 3, // Reverse Subtract
+ ADD = 4, // Add
+ ADC = 5, // Add with Carry
+ SBC = 6, // Subtract with Carry
+ RSC = 7, // Reverse Subtract with Carry
+ TST = 8, // Test
+ TEQ = 9, // Test Equivalence
CMP = 10, // Compare
CMN = 11, // Compare Negated
ORR = 12, // Logical (inclusive) OR
@@ -156,7 +143,7 @@ enum Opcode {
MVN = 15, // Move Not
kMaxOperand = 16
};
-
+std::ostream& operator<<(std::ostream& os, const Opcode& rhs);
// Shifter types for Data-processing operands as defined in section A5.1.2.
enum Shift {
@@ -168,11 +155,11 @@ enum Shift {
RRX = 4, // Rotate right with extend.
kMaxShift
};
-
+std::ostream& operator<<(std::ostream& os, const Shift& rhs);
// Constants used for the decoding or encoding of the individual fields of
// instructions. Based on the "Figure 3-1 ARM instruction set summary".
-enum InstructionFields {
+enum InstructionFields {  // private marker to keep generate-operator-out.py from processing this enum.
kConditionShift = 28,
kConditionBits = 4,
kTypeShift = 25,
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index 1af7374a3e..02011b87a0 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -474,7 +474,7 @@ void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}
-void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
+void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch ATTRIBUTE_UNUSED) {
// TODO: Should we check that m_scratch is IP? - see arm.
#if ANDROID_SMP != 0
___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h
index e6b4a6a47c..c137e46804 100644
--- a/compiler/utils/array_ref.h
+++ b/compiler/utils/array_ref.h
@@ -68,7 +68,8 @@ class ArrayRef {
template <typename U, size_t size>
constexpr ArrayRef(U (&array)[size],
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::type
+ t ATTRIBUTE_UNUSED = tag())
: array_(array), size_(size) {
}
@@ -76,12 +77,6 @@ class ArrayRef {
: array_(array), size_(size) {
}
- template <typename U>
- constexpr ArrayRef(U* array, size_t size,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::type t = tag())
- : array_(array), size_(size) {
- }
-
template <typename Alloc>
explicit ArrayRef(std::vector<T, Alloc>& v)
: array_(v.data()), size_(v.size()) {
@@ -89,7 +84,8 @@ class ArrayRef {
template <typename U, typename Alloc>
ArrayRef(const std::vector<U, Alloc>& v,
- typename std::enable_if<std::is_same<T, const U>::value, tag>::tag t = tag())
+ typename std::enable_if<std::is_same<T, const U>::value, tag>::tag
+ t ATTRIBUTE_UNUSED = tag())
: array_(v.data()), size_(v.size()) {
}
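
The ArrayRef constructors keep the enable_if tag parameter that restricts them to const element types and mark it ATTRIBUTE_UNUSED now that unused parameters warn. A reduced sketch of that pattern; ArrayRefSketch and the local ATTRIBUTE_UNUSED definition are stand-ins for illustration:

#include <cstddef>
#include <type_traits>
#include <vector>

#define ATTRIBUTE_UNUSED __attribute__((__unused__))

template <typename T>
class ArrayRefSketch {
  struct tag {};  // Private tag type used only for overload selection.

 public:
  // Participates in overload resolution only when T is "const U", i.e. the
  // view is a const view over a mutable vector. The tag argument is never read.
  template <typename U, typename Alloc>
  ArrayRefSketch(const std::vector<U, Alloc>& v,
                 typename std::enable_if<std::is_same<T, const U>::value, tag>::type
                     t ATTRIBUTE_UNUSED = tag())
      : array_(v.data()), size_(v.size()) {}

  size_t size() const { return size_; }

 private:
  T* array_;
  size_t size_;
};

int main() {
  std::vector<int> v = {1, 2, 3};
  ArrayRefSketch<const int> ref(v);  // OK: T is const int, U is int.
  return ref.size() == 3 ? 0 : 1;
}
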
diff --git a/compiler/utils/assembler.cc b/compiler/utils/assembler.cc
index 8a1289dc17..68345129c3 100644
--- a/compiler/utils/assembler.cc
+++ b/compiler/utils/assembler.cc
@@ -125,77 +125,91 @@ Assembler* Assembler::Create(InstructionSet instruction_set) {
}
}
-void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread32(ThreadOffset<4> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
- ManagedRegister scratch) {
+void Assembler::StoreImmediateToThread64(ThreadOffset<8> dest ATTRIBUTE_UNUSED,
+ uint32_t imm ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
- FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
+void Assembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
+void Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread32(ManagedRegister dest, ThreadOffset<4> src, size_t size) {
+void Assembler::LoadFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadFromThread64(ManagedRegister dest, ThreadOffset<8> src, size_t size) {
+void Assembler::LoadFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> src ATTRIBUTE_UNUSED,
+ size_t size ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread32(ManagedRegister dest, ThreadOffset<4> offs) {
+void Assembler::LoadRawPtrFromThread32(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<4> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::LoadRawPtrFromThread64(ManagedRegister dest, ThreadOffset<8> offs) {
+void Assembler::LoadRawPtrFromThread64(ManagedRegister dest ATTRIBUTE_UNUSED,
+ ThreadOffset<8> offs ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs, ThreadOffset<4> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread32(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs, ThreadOffset<8> thr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs, FrameOffset fr_offs,
- ManagedRegister scratch) {
+void Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs ATTRIBUTE_UNUSED,
+ FrameOffset fr_offs ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread32(ThreadOffset<4> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread32(ThreadOffset<4> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
-void Assembler::CallFromThread64(ThreadOffset<8> offset, ManagedRegister scratch) {
+void Assembler::CallFromThread64(ThreadOffset<8> offset ATTRIBUTE_UNUSED,
+ ManagedRegister scratch ATTRIBUTE_UNUSED) {
UNIMPLEMENTED(FATAL);
}
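
Every parameter of these UNIMPLEMENTED(FATAL) stubs now has to be accounted for, so the names stay in place for readability and ATTRIBUTE_UNUSED silences the warning in the empty default bodies. A small sketch of the idiom outside ART; Unimplemented() and StoreImmediateToThread() are made-up stand-ins:

#include <cstdio>
#include <cstdlib>

#define ATTRIBUTE_UNUSED __attribute__((__unused__))

[[noreturn]] static void Unimplemented(const char* what) {
  std::fprintf(stderr, "UNIMPLEMENTED: %s\n", what);
  std::abort();
}

// The parameters stay named so the signature documents itself, but the
// attribute keeps -Wunused-parameter quiet in this not-yet-implemented body.
static void StoreImmediateToThread(int dest ATTRIBUTE_UNUSED,
                                   unsigned imm ATTRIBUTE_UNUSED,
                                   int scratch ATTRIBUTE_UNUSED) {
  Unimplemented(__func__);
}

int main(int argc, char** argv ATTRIBUTE_UNUSED) {
  if (argc > 1) {
    StoreImmediateToThread(0, 0u, 0);  // Aborts with the UNIMPLEMENTED message.
  }
  return 0;
}
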
diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h
index 2b0c94c9e0..e1b6d7c21d 100644
--- a/compiler/utils/assembler.h
+++ b/compiler/utils/assembler.h
@@ -366,7 +366,7 @@ class Assembler {
}
// TODO: Implement with disassembler.
- virtual void Comment(const char* format, ...) { }
+ virtual void Comment(const char* format, ...) { UNUSED(format); }
// Emit code that will create an activation on the stack
virtual void BuildFrame(size_t frame_size, ManagedRegister method_reg,
diff --git a/compiler/utils/growable_array.h b/compiler/utils/growable_array.h
index 61e420c222..fde65e79c3 100644
--- a/compiler/utils/growable_array.h
+++ b/compiler/utils/growable_array.h
@@ -19,26 +19,20 @@
#include <stdint.h>
#include <stddef.h>
-#include "arena_allocator.h"
-namespace art {
+#include "arena_object.h"
-// Type of growable list for memory tuning.
-enum OatListKind {
- kGrowableArrayMisc = 0,
- kGNumListKinds
-};
+namespace art {
// Deprecated
// TODO: Replace all uses with ArenaVector<T>.
template<typename T>
-class GrowableArray {
+class GrowableArray : public ArenaObject<kArenaAllocGrowableArray> {
public:
- GrowableArray(ArenaAllocator* arena, size_t init_length, OatListKind kind = kGrowableArrayMisc)
+ GrowableArray(ArenaAllocator* arena, size_t init_length)
: arena_(arena),
num_allocated_(init_length),
- num_used_(0),
- kind_(kind) {
+ num_used_(0) {
elem_list_ = static_cast<T*>(arena_->Alloc(sizeof(T) * init_length,
kArenaAllocGrowableArray));
}
@@ -152,16 +146,10 @@ class GrowableArray {
T* GetRawStorage() const { return elem_list_; }
- static void* operator new(size_t size, ArenaAllocator* arena) {
- return arena->Alloc(sizeof(GrowableArray<T>), kArenaAllocGrowableArray);
- }
- static void operator delete(void* p) {} // Nop.
-
private:
ArenaAllocator* const arena_;
size_t num_allocated_;
size_t num_used_;
- OatListKind kind_;
T* elem_list_;
};
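
GrowableArray now inherits its placement operator new and no-op operator delete from ArenaObject<kArenaAllocGrowableArray> instead of redefining them per class. A simplified sketch of that arrangement; Arena, ArenaObjectSketch and GrowableThing are toy stand-ins for ART's ArenaAllocator, ArenaObject and arena-allocated types:

#include <cstddef>
#include <vector>

// Tiny bump-style allocator standing in for ART's ArenaAllocator.
class Arena {
 public:
  void* Alloc(size_t size) {
    storage_.emplace_back(size);
    return storage_.back().data();
  }

 private:
  std::vector<std::vector<char>> storage_;
};

constexpr int kArenaAllocGrowableArray = 0;  // Stand-in for ART's allocation-kind enum.

// Base class that routes "new (arena) T(...)" through the arena and makes
// ordinary delete a no-op, since arena memory is released in bulk.
template <int kAllocKind>
class ArenaObjectSketch {
 public:
  static void* operator new(size_t size, Arena* arena) { return arena->Alloc(size); }
  static void operator delete(void*) {}  // Nop.
};

class GrowableThing : public ArenaObjectSketch<kArenaAllocGrowableArray> {
 public:
  explicit GrowableThing(int n) : n_(n) {}
  int n() const { return n_; }

 private:
  int n_;
};

int main() {
  Arena arena;
  GrowableThing* t = new (&arena) GrowableThing(42);  // Lives in the arena.
  return t->n() == 42 ? 0 : 1;
}
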
diff --git a/compiler/utils/scoped_arena_containers.h b/compiler/utils/scoped_arena_containers.h
index 0de7403c07..df93b273d1 100644
--- a/compiler/utils/scoped_arena_containers.h
+++ b/compiler/utils/scoped_arena_containers.h
@@ -140,12 +140,15 @@ class ScopedArenaAllocatorAdapter
const_pointer address(const_reference x) const { return &x; }
pointer allocate(size_type n, ScopedArenaAllocatorAdapter<void>::pointer hint = nullptr) {
+ UNUSED(hint);
DCHECK_LE(n, max_size());
DebugStackIndirectTopRef::CheckTop();
return reinterpret_cast<T*>(arena_stack_->Alloc(n * sizeof(T),
ArenaAllocatorAdapterKind::Kind()));
}
void deallocate(pointer p, size_type n) {
+ UNUSED(p);
+ UNUSED(n);
DebugStackIndirectTopRef::CheckTop();
}
diff --git a/compiler/utils/stack_checks.h b/compiler/utils/stack_checks.h
index ce01077808..e762f7d266 100644
--- a/compiler/utils/stack_checks.h
+++ b/compiler/utils/stack_checks.h
@@ -35,6 +35,7 @@ static constexpr size_t kSmallFrameSize = 1 * KB;
//
// A frame is considered large when it's above kLargeFrameSize.
static inline bool FrameNeedsStackCheck(size_t size, InstructionSet isa) {
+ UNUSED(isa);
return size >= kLargeFrameSize;
}
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 4f04e72f32..30dfdf048d 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -411,7 +411,6 @@ class Dex2Oat {
const std::string& android_root,
bool is_host,
File* oat_file,
- const std::string& oat_location,
TimingLogger* timings,
SafeMap<std::string, std::string>* key_value_store) {
CHECK(key_value_store != nullptr);
@@ -535,7 +534,7 @@ class Dex2Oat {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime->HasCalleeSaveMethod(type)) {
- runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(type), type);
+ runtime->SetCalleeSaveMethod(runtime->CreateCalleeSaveMethod(), type);
}
}
runtime->GetClassLinker()->FixupDexCaches(runtime->GetResolutionMethod());
@@ -1462,7 +1461,6 @@ static int dex2oat(int argc, char** argv) {
android_root,
is_host,
oat_file.get(),
- oat_location,
&timings,
key_value_store.get())) {
LOG(ERROR) << "Failed to create oat file: " << oat_location;
diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc
index f5652779d3..b78daf0d2f 100644
--- a/oatdump/oatdump.cc
+++ b/oatdump/oatdump.cc
@@ -263,10 +263,13 @@ class OatSymbolizer FINAL : public CodeOutput {
method_access_flags);
}
- void RegisterForDedup(const DexFile::ClassDef& class_def, uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
- uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void RegisterForDedup(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED,
+ uint32_t class_method_index ATTRIBUTE_UNUSED,
+ const OatFile::OatMethod& oat_method,
+ const DexFile& dex_file ATTRIBUTE_UNUSED,
+ uint32_t dex_method_idx ATTRIBUTE_UNUSED,
+ const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t method_access_flags ATTRIBUTE_UNUSED) {
state_[oat_method.GetCodeOffset()]++;
}
@@ -294,10 +297,13 @@ class OatSymbolizer FINAL : public CodeOutput {
return DedupState::kDeduplicatedFirst;
}
- void AddSymbol(const DexFile::ClassDef& class_def, uint32_t class_method_index,
- const OatFile::OatMethod& oat_method, const DexFile& dex_file,
- uint32_t dex_method_idx, const DexFile::CodeItem* code_item,
- uint32_t method_access_flags) {
+ void AddSymbol(const DexFile::ClassDef& class_def ATTRIBUTE_UNUSED,
+ uint32_t class_method_index ATTRIBUTE_UNUSED,
+ const OatFile::OatMethod& oat_method,
+ const DexFile& dex_file,
+ uint32_t dex_method_idx,
+ const DexFile::CodeItem* code_item ATTRIBUTE_UNUSED,
+ uint32_t method_access_flags ATTRIBUTE_UNUSED) {
DedupState dedup = IsDuplicated(oat_method.GetCodeOffset());
if (dedup != DedupState::kDeduplicatedOther) {
std::string pretty_name = PrettyMethod(dex_method_idx, dex_file, true);
@@ -316,7 +322,7 @@ class OatSymbolizer FINAL : public CodeOutput {
}
// Set oat data offset. Required by ElfBuilder/CodeOutput.
- void SetCodeOffset(size_t offset) {
+ void SetCodeOffset(size_t offset ATTRIBUTE_UNUSED) {
// Nothing to do.
}
diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc
index 629330b9b1..75160ca3d5 100644
--- a/patchoat/patchoat.cc
+++ b/patchoat/patchoat.cc
@@ -466,14 +466,15 @@ bool PatchOat::InHeap(mirror::Object* o) {
}
void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off,
- bool is_static_unused) const {
+ bool is_static_unused ATTRIBUTE_UNUSED) const {
mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off);
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
mirror::Object* moved_object = patcher_->RelocatedAddressOf(referent);
copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object);
}
-void PatchOat::PatchVisitor::operator() (mirror::Class* cls, mirror::Reference* ref) const {
+void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED,
+ mirror::Reference* ref) const {
MemberOffset off = mirror::Reference::ReferentOffset();
mirror::Object* referent = ref->GetReferent();
DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap.";
diff --git a/runtime/Android.mk b/runtime/Android.mk
index a2fc24a21c..4505b8e990 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -31,7 +31,6 @@ LIBART_COMMON_SRC_FILES := \
base/stringprintf.cc \
base/timing_logger.cc \
base/unix_file/fd_file.cc \
- base/unix_file/mapped_file.cc \
base/unix_file/null_file.cc \
base/unix_file/random_access_file_utils.cc \
base/unix_file/string_file.cc \
@@ -296,12 +295,16 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
arch/x86_64/registers_x86_64.h \
base/allocator.h \
base/mutex.h \
+ debugger.h \
dex_file.h \
dex_instruction.h \
+ gc/allocator/rosalloc.h \
gc/collector/gc_type.h \
+ gc/allocator_type.h \
gc/collector_type.h \
gc/space/space.h \
gc/heap.h \
+ instrumentation.h \
indirect_reference_table.h \
instruction_set.h \
invoke_type.h \
@@ -311,7 +314,10 @@ LIBART_ENUM_OPERATOR_OUT_HEADER_FILES := \
mirror/class.h \
oat.h \
object_callbacks.h \
+ profiler_options.h \
quick/inline_method_analyser.h \
+ runtime.h \
+ stack.h \
thread.h \
thread_state.h \
verifier/method_verifier.h
diff --git a/runtime/arch/arch_test.cc b/runtime/arch/arch_test.cc
index 42bf8fb124..cac500c13e 100644
--- a/runtime/arch/arch_test.cc
+++ b/runtime/arch/arch_test.cc
@@ -32,7 +32,7 @@ class ArchTest : public CommonRuntimeTest {
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
EXPECT_EQ(frame_info.FrameSizeInBytes(), save_size) << "Expected and real size differs for "
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index c5a0f6c231..b0928f8cfa 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -41,7 +41,7 @@ class StubTest : public CommonRuntimeTest {
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!runtime_->HasCalleeSaveMethod(type)) {
- runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
+ runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
}
}
}
@@ -530,18 +530,6 @@ class StubTest : public CommonRuntimeTest {
#endif
}
- // Method with 32b arg0, 32b arg1, 64b arg2
- size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
- Thread* self, mirror::ArtMethod* referrer) {
-#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
- // Just pass through.
- return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
-#else
- // TODO: Needs 4-param invoke.
- return 0;
-#endif
- }
-
static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
int32_t offset;
#ifdef __LP64__
@@ -1303,8 +1291,8 @@ TEST_F(StubTest, StringCompareTo) {
}
-static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetBooleanStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
constexpr size_t num_values = 5;
@@ -1332,8 +1320,8 @@ static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtF
std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetByteStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int8_t values[] = { -128, -64, 0, 64, 127 };
@@ -1362,7 +1350,7 @@ static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
- Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+ Thread* self, mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint8_t values[] = { 0, true, 2, 128, 0xFF };
@@ -1427,8 +1415,8 @@ static void GetSetByteInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtFi
#endif
}
-static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetCharStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
@@ -1455,8 +1443,8 @@ static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}
-static void GetSetShortStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetShortStatic(Handle<mirror::ArtField>* f, Thread* self,
+ mirror::ArtMethod* referrer, StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
@@ -1549,8 +1537,8 @@ static void GetSetShortInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtF
#endif
}
-static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet32Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
@@ -1637,8 +1625,8 @@ static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* se
}
#endif
-static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSetObjStatic(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
@@ -1702,8 +1690,8 @@ static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtFie
// TODO: Complete these tests for 32b architectures.
-static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
- mirror::ArtMethod* referrer, StubTest* test)
+static void GetSet64Static(Handle<mirror::ArtField>* f, Thread* self, mirror::ArtMethod* referrer,
+ StubTest* test)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
@@ -1724,6 +1712,7 @@ static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>
EXPECT_EQ(res, values[i]) << "Iteration " << i;
}
#else
+ UNUSED(f, self, referrer, test);
LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1760,6 +1749,7 @@ static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtFiel
EXPECT_EQ(res, static_cast<int64_t>(res2));
}
#else
+ UNUSED(obj, f, self, referrer, test);
LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
// Force-print to std::cout so it's also outside the logcat.
std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
@@ -1796,40 +1786,40 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
switch (type) {
case Primitive::Type::kPrimBoolean:
if (test_type == type) {
- GetSetBooleanStatic(&obj, &f, self, m.Get(), test);
+ GetSetBooleanStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimByte:
if (test_type == type) {
- GetSetByteStatic(&obj, &f, self, m.Get(), test);
+ GetSetByteStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimChar:
if (test_type == type) {
- GetSetCharStatic(&obj, &f, self, m.Get(), test);
+ GetSetCharStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimShort:
if (test_type == type) {
- GetSetShortStatic(&obj, &f, self, m.Get(), test);
+ GetSetShortStatic(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimInt:
if (test_type == type) {
- GetSet32Static(&obj, &f, self, m.Get(), test);
+ GetSet32Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimLong:
if (test_type == type) {
- GetSet64Static(&obj, &f, self, m.Get(), test);
+ GetSet64Static(&f, self, m.Get(), test);
}
break;
case Primitive::Type::kPrimNot:
// Don't try array.
if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
- GetSetObjStatic(&obj, &f, self, m.Get(), test);
+ GetSetObjStatic(&f, self, m.Get(), test);
}
break;
diff --git a/runtime/arch/x86/context_x86.cc b/runtime/arch/x86/context_x86.cc
index 32eec57b50..49aa3268de 100644
--- a/runtime/arch/x86/context_x86.cc
+++ b/runtime/arch/x86/context_x86.cc
@@ -72,6 +72,16 @@ bool X86Context::SetGPR(uint32_t reg, uintptr_t value) {
}
}
+bool X86Context::GetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t* val ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
+bool X86Context::SetFPR(uint32_t reg ATTRIBUTE_UNUSED, uintptr_t value ATTRIBUTE_UNUSED) {
+ LOG(FATAL) << "Floating-point registers are all caller save in X86";
+ UNREACHABLE();
+}
+
void X86Context::DoLongJump() {
#if defined(__i386__)
// Array of GPR values, filled from the context backward for the long jump pop. We add a slot at
diff --git a/runtime/arch/x86/context_x86.h b/runtime/arch/x86/context_x86.h
index a350b2500f..01c8b82c27 100644
--- a/runtime/arch/x86/context_x86.h
+++ b/runtime/arch/x86/context_x86.h
@@ -62,15 +62,9 @@ class X86Context : public Context {
bool SetGPR(uint32_t reg, uintptr_t value) OVERRIDE;
- bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool GetFPR(uint32_t reg, uintptr_t* val) OVERRIDE;
- bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE {
- LOG(FATAL) << "Floating-point registers are all caller save in X86";
- return false;
- }
+ bool SetFPR(uint32_t reg, uintptr_t value) OVERRIDE;
void SmashCallerSaves() OVERRIDE;
void DoLongJump() OVERRIDE;
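
GetFPR and SetFPR move out of line and end with UNREACHABLE() after the fatal log instead of a dead "return false", so the non-void overrides compile without a bogus return value. A standalone sketch of that pattern, with Fatal() standing in for ART's LOG(FATAL):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

// Stand-ins for LOG(FATAL) and UNREACHABLE(); the real macros do more.
[[noreturn]] static void Fatal(const char* msg) {
  std::fprintf(stderr, "FATAL: %s\n", msg);
  std::abort();
}
#define UNREACHABLE __builtin_unreachable

// No dead "return false" is needed: Fatal() never returns, and UNREACHABLE()
// tells the compiler so, keeping -Wreturn-type satisfied without dead code.
static bool GetFPR(uint32_t /* reg */, uintptr_t* /* val */) {
  Fatal("Floating-point registers are all caller save in X86");
  UNREACHABLE();
}

int main(int argc, char**) {
  if (argc > 1) {
    uintptr_t value = 0;
    GetFPR(0u, &value);  // Aborts by design.
  }
  return 0;
}
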
diff --git a/runtime/arch/x86/fault_handler_x86.cc b/runtime/arch/x86/fault_handler_x86.cc
index 9d74ef5ef8..ad962e2e11 100644
--- a/runtime/arch/x86/fault_handler_x86.cc
+++ b/runtime/arch/x86/fault_handler_x86.cc
@@ -231,7 +231,7 @@ static uint32_t GetInstructionSize(const uint8_t* pc) {
return pc - startpc;
}
-void FaultManager::HandleNestedSignal(int sig, siginfo_t* info, void* context) {
+void FaultManager::HandleNestedSignal(int, siginfo_t*, void* context) {
// For the Intel architectures we need to go to an assembly language
// stub. This is because the 32 bit call to longjmp is much different
// from the 64 bit ABI call and pushing things onto the stack inside this
@@ -284,7 +284,7 @@ void FaultManager::GetMethodAndReturnPcAndSp(siginfo_t* siginfo, void* context,
*out_return_pc = reinterpret_cast<uintptr_t>(pc + instr_size);
}
-bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
+bool NullPointerHandler::Action(int, siginfo_t*, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uint8_t* pc = reinterpret_cast<uint8_t*>(uc->CTX_EIP);
uint8_t* sp = reinterpret_cast<uint8_t*>(uc->CTX_ESP);
@@ -324,7 +324,7 @@ bool NullPointerHandler::Action(int sig, siginfo_t* info, void* context) {
// The offset from fs is Thread::ThreadSuspendTriggerOffset().
// To check for a suspend check, we examine the instructions that caused
// the fault.
-bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
+bool SuspensionHandler::Action(int, siginfo_t*, void* context) {
// These are the instructions to check for. The first one is the mov eax, fs:[xxx]
// where xxx is the offset of the suspend trigger.
#if defined(__x86_64__)
@@ -398,7 +398,7 @@ bool SuspensionHandler::Action(int sig, siginfo_t* info, void* context) {
// This is done before any frame is established in the method. The return
// address for the previous method is on the stack at ESP.
-bool StackOverflowHandler::Action(int sig, siginfo_t* info, void* context) {
+bool StackOverflowHandler::Action(int, siginfo_t* info, void* context) {
struct ucontext *uc = reinterpret_cast<struct ucontext*>(context);
uintptr_t sp = static_cast<uintptr_t>(uc->CTX_ESP);
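
For the fault-handler overrides the unused parameters simply lose their names (int, siginfo_t*) rather than being annotated, the other idiom this change uses when a name adds nothing. A small sketch of that style on an illustrative handler interface (HandlerSketch is not ART's class):

// Leaving a parameter unnamed silences -Wunused-parameter; names are kept
// only where the body actually reads the argument.
struct HandlerSketch {
  virtual ~HandlerSketch() {}
  virtual bool Action(int sig, void* info, void* context) = 0;
};

struct NullPointerHandlerSketch : HandlerSketch {
  bool Action(int /* sig */, void* /* info */, void* context) override {
    return context != nullptr;  // Only the context is inspected.
  }
};

int main() {
  NullPointerHandlerSketch handler;
  int dummy = 0;
  return handler.Action(11, nullptr, &dummy) ? 0 : 1;
}
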
diff --git a/runtime/base/allocator.cc b/runtime/base/allocator.cc
index 994e2357af..4f2fc074fb 100644
--- a/runtime/base/allocator.cc
+++ b/runtime/base/allocator.cc
@@ -30,11 +30,11 @@ class MallocAllocator FINAL : public Allocator {
explicit MallocAllocator() {}
~MallocAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
return calloc(sizeof(uint8_t), size);
}
- virtual void Free(void* p) {
+ void Free(void* p) {
free(p);
}
@@ -49,13 +49,15 @@ class NoopAllocator FINAL : public Allocator {
explicit NoopAllocator() {}
~NoopAllocator() {}
- virtual void* Alloc(size_t size) {
+ void* Alloc(size_t size) {
+ UNUSED(size);
LOG(FATAL) << "NoopAllocator::Alloc should not be called";
- return NULL;
+ UNREACHABLE();
}
- virtual void Free(void* p) {
+ void Free(void* p) {
// Noop.
+ UNUSED(p);
}
private:
diff --git a/runtime/base/allocator.h b/runtime/base/allocator.h
index 95dd407924..3ca9ebbd5c 100644
--- a/runtime/base/allocator.h
+++ b/runtime/base/allocator.h
@@ -114,6 +114,7 @@ class TrackingAllocatorImpl {
// Used internally by STL data structures.
template <class U>
TrackingAllocatorImpl(const TrackingAllocatorImpl<U, kTag>& alloc) throw() {
+ UNUSED(alloc);
}
// Used internally by STL data structures.
@@ -129,6 +130,7 @@ class TrackingAllocatorImpl {
};
pointer allocate(size_type n, const_pointer hint = 0) {
+ UNUSED(hint);
const size_t size = n * sizeof(T);
TrackedAllocators::RegisterAllocation(GetTag(), size);
return reinterpret_cast<pointer>(malloc(size));
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index febea61b8d..90cf951ca2 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -189,7 +189,19 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
#define PURE __attribute__ ((__pure__))
#define WARN_UNUSED __attribute__((warn_unused_result))
-template<typename T> void UNUSED(const T&) {}
+// A deprecated function to call to create a false use of the parameter, for example:
+// int foo(int x) { UNUSED(x); return 10; }
+// to avoid compiler warnings. Going forward we prefer ATTRIBUTE_UNUSED.
+template<typename... T> void UNUSED(const T&...) {}
+
+// An attribute to place on a parameter to a function, for example:
+// int foo(int x ATTRIBUTE_UNUSED) { return 10; }
+// to avoid compiler warnings.
+#define ATTRIBUTE_UNUSED __attribute__((__unused__))
+
+// Define that a position within code is unreachable, for example:
+// int foo () { LOG(FATAL) << "Don't call me"; UNREACHABLE(); }
+// without the UNREACHABLE a return statement would be necessary.
#define UNREACHABLE __builtin_unreachable
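
The new comments document the three helpers the rest of this change leans on: the variadic UNUSED(...) for a false use of several arguments at once (as in stub_test.cc), ATTRIBUTE_UNUSED on individual parameters, and UNREACHABLE() after fatal paths. A short self-contained demo using local copies of those definitions:

#include <cstdlib>

// Local copies for illustration; ART defines these in runtime/base/macros.h.
template <typename... T> void UNUSED(const T&...) {}
#define ATTRIBUTE_UNUSED __attribute__((__unused__))
#define UNREACHABLE __builtin_unreachable

// Old style: create a false "use" of several parameters at once.
int OldStyle(int x, int y) {
  UNUSED(x, y);
  return 10;
}

// Preferred style going forward: annotate the parameter itself.
int NewStyle(int x ATTRIBUTE_UNUSED) {
  return 10;
}

// A function that never returns normally; UNREACHABLE() removes the need
// for a dummy return value after the abort.
int NeverReturns() {
  std::abort();
  UNREACHABLE();
}

int main() {
  return OldStyle(1, 2) == NewStyle(3) ? 0 : 1;
}
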
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
diff --git a/runtime/base/unix_file/mapped_file.cc b/runtime/base/unix_file/mapped_file.cc
deleted file mode 100644
index 77f4d020a9..0000000000
--- a/runtime/base/unix_file/mapped_file.cc
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/logging.h"
-#include "base/unix_file/mapped_file.h"
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <algorithm>
-#include <string>
-
-namespace unix_file {
-
-MappedFile::~MappedFile() {
-}
-
-int MappedFile::Close() {
- if (IsMapped()) {
- Unmap();
- }
- return FdFile::Close();
-}
-
-bool MappedFile::MapReadOnly() {
- CHECK(IsOpened());
- CHECK(!IsMapped());
- struct stat st;
- int result = TEMP_FAILURE_RETRY(fstat(Fd(), &st));
- if (result == -1) {
- PLOG(::art::WARNING) << "Failed to stat file '" << GetPath() << "'";
- return false;
- }
- file_size_ = st.st_size;
- do {
- mapped_file_ = mmap(NULL, file_size_, PROT_READ, MAP_PRIVATE, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(::art::WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadOnly;
- return true;
-}
-
-bool MappedFile::MapReadWrite(int64_t file_size) {
- CHECK(IsOpened());
- CHECK(!IsMapped());
-#ifdef __linux__
- int result = TEMP_FAILURE_RETRY(ftruncate64(Fd(), file_size));
-#else
- int result = TEMP_FAILURE_RETRY(ftruncate(Fd(), file_size));
-#endif
- if (result == -1) {
- PLOG(::art::ERROR) << "Failed to truncate file '" << GetPath() << "' to size " << file_size;
- return false;
- }
- file_size_ = file_size;
- do {
- mapped_file_ =
- mmap(NULL, file_size_, PROT_READ | PROT_WRITE, MAP_SHARED, Fd(), 0);
- } while (mapped_file_ == MAP_FAILED && errno == EINTR);
- if (mapped_file_ == MAP_FAILED) {
- PLOG(::art::WARNING) << "Failed to mmap file '" << GetPath() << "' of size "
- << file_size_ << " bytes to memory";
- return false;
- }
- map_mode_ = kMapReadWrite;
- return true;
-}
-
-bool MappedFile::Unmap() {
- CHECK(IsMapped());
- int result = TEMP_FAILURE_RETRY(munmap(mapped_file_, file_size_));
- if (result == -1) {
- PLOG(::art::WARNING) << "Failed unmap file '" << GetPath() << "' of size " << file_size_;
- return false;
- } else {
- mapped_file_ = NULL;
- file_size_ = -1;
- return true;
- }
-}
-
-int64_t MappedFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
- if (IsMapped()) {
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t read_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (read_size > 0) {
- memcpy(buf, data() + offset, read_size);
- }
- return read_size;
- } else {
- return FdFile::Read(buf, byte_count, offset);
- }
-}
-
-int MappedFile::SetLength(int64_t new_length) {
- CHECK(!IsMapped());
- return FdFile::SetLength(new_length);
-}
-
-int64_t MappedFile::GetLength() const {
- if (IsMapped()) {
- return file_size_;
- } else {
- return FdFile::GetLength();
- }
-}
-
-int MappedFile::Flush() {
- int rc = IsMapped() ? TEMP_FAILURE_RETRY(msync(mapped_file_, file_size_, 0)) : FdFile::Flush();
- return rc == -1 ? -errno : 0;
-}
-
-int64_t MappedFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
- if (IsMapped()) {
- CHECK_EQ(kMapReadWrite, map_mode_);
- if (offset < 0) {
- errno = EINVAL;
- return -errno;
- }
- int64_t write_size = std::max(static_cast<int64_t>(0),
- std::min(byte_count, file_size_ - offset));
- if (write_size > 0) {
- memcpy(data() + offset, buf, write_size);
- }
- return write_size;
- } else {
- return FdFile::Write(buf, byte_count, offset);
- }
-}
-
-int64_t MappedFile::size() const {
- return GetLength();
-}
-
-bool MappedFile::IsMapped() const {
- return mapped_file_ != NULL && mapped_file_ != MAP_FAILED;
-}
-
-char* MappedFile::data() const {
- CHECK(IsMapped());
- return static_cast<char*>(mapped_file_);
-}
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/mapped_file.h b/runtime/base/unix_file/mapped_file.h
deleted file mode 100644
index 73056e9764..0000000000
--- a/runtime/base/unix_file/mapped_file.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-#define ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
-
-#include <fcntl.h>
-#include <string>
-#include "base/unix_file/fd_file.h"
-
-namespace unix_file {
-
-// Random access file which handles an mmap(2), munmap(2) pair in C++
-// RAII style. When a file is mmapped, the random access file
-// interface accesses the mmapped memory directly; otherwise, the
-// standard file I/O is used. Whenever a function fails, it returns
-// false and errno is set to the corresponding error code.
-class MappedFile : public FdFile {
- public:
- // File modes used in Open().
- enum FileMode {
-#ifdef __linux__
- kReadOnlyMode = O_RDONLY | O_LARGEFILE,
- kReadWriteMode = O_CREAT | O_RDWR | O_LARGEFILE,
-#else
- kReadOnlyMode = O_RDONLY,
- kReadWriteMode = O_CREAT | O_RDWR,
-#endif
- };
-
- MappedFile() : FdFile(), file_size_(-1), mapped_file_(NULL) {
- }
- // Creates a MappedFile using the given file descriptor. Takes ownership of
- // the file descriptor.
- explicit MappedFile(int fd) : FdFile(fd), file_size_(-1), mapped_file_(NULL) {
- }
-
- // Unmaps and closes the file if needed.
- virtual ~MappedFile();
-
- // Maps an opened file to memory in the read-only mode.
- bool MapReadOnly();
-
- // Maps an opened file to memory in the read-write mode. Before the
- // file is mapped, it is truncated to 'file_size' bytes.
- bool MapReadWrite(int64_t file_size);
-
- // Unmaps a mapped file so that, e.g., SetLength() may be invoked.
- bool Unmap();
-
- // RandomAccessFile API.
- // The functions below require that the file is open, but it doesn't
- // have to be mapped.
- virtual int Close();
- virtual int64_t Read(char* buf, int64_t byte_count, int64_t offset) const;
- // SetLength() requires that the file is not mmapped.
- virtual int SetLength(int64_t new_length);
- virtual int64_t GetLength() const;
- virtual int Flush();
- // Write() requires that, if the file is mmapped, it is mmapped in
- // the read-write mode. Writes past the end of file are discarded.
- virtual int64_t Write(const char* buf, int64_t byte_count, int64_t offset);
-
- // A convenience method equivalent to GetLength().
- int64_t size() const;
-
- // Returns true if the file has been mmapped.
- bool IsMapped() const;
-
- // Returns a pointer to the start of the memory mapping once the
- // file is successfully mapped; crashes otherwise.
- char* data() const;
-
- private:
- enum MapMode {
- kMapReadOnly = 1,
- kMapReadWrite = 2,
- };
-
- mutable int64_t file_size_; // May be updated in GetLength().
- void* mapped_file_;
- MapMode map_mode_;
-
- DISALLOW_COPY_AND_ASSIGN(MappedFile);
-};
-
-} // namespace unix_file
-
-#endif // ART_RUNTIME_BASE_UNIX_FILE_MAPPED_FILE_H_
diff --git a/runtime/base/unix_file/mapped_file_test.cc b/runtime/base/unix_file/mapped_file_test.cc
deleted file mode 100644
index 59334d45ad..0000000000
--- a/runtime/base/unix_file/mapped_file_test.cc
+++ /dev/null
@@ -1,272 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "base/unix_file/mapped_file.h"
-#include "base/logging.h"
-#include "base/unix_file/fd_file.h"
-#include "base/unix_file/random_access_file_test.h"
-#include "base/unix_file/random_access_file_utils.h"
-#include "base/unix_file/string_file.h"
-#include "gtest/gtest.h"
-
-namespace unix_file {
-
-class MappedFileTest : public RandomAccessFileTest {
- protected:
- MappedFileTest() : kContent("some content") {
- }
-
- void SetUp() {
- RandomAccessFileTest::SetUp();
-
- good_path_ = GetTmpPath("some-file.txt");
- int fd = TEMP_FAILURE_RETRY(open(good_path_.c_str(), O_CREAT|O_RDWR, 0666));
- FdFile dst(fd);
-
- StringFile src;
- src.Assign(kContent);
-
- ASSERT_TRUE(CopyFile(src, &dst));
- }
-
- void TearDown() {
- ASSERT_EQ(unlink(good_path_.c_str()), 0);
-
- RandomAccessFileTest::TearDown();
- }
-
- virtual RandomAccessFile* MakeTestFile() {
- TEMP_FAILURE_RETRY(truncate(good_path_.c_str(), 0));
- MappedFile* f = new MappedFile;
- CHECK(f->Open(good_path_, MappedFile::kReadWriteMode));
- return f;
- }
-
- const std::string kContent;
- std::string good_path_;
-};
-
-TEST_F(MappedFileTest, OkayToNotUse) {
- MappedFile file;
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
- EXPECT_FALSE(file.IsMapped());
-}
-
-TEST_F(MappedFileTest, OpenClose) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- EXPECT_EQ(0, file.Close());
- EXPECT_EQ(-1, file.Fd());
- EXPECT_FALSE(file.IsOpened());
-}
-
-TEST_F(MappedFileTest, OpenFdClose) {
- FILE* f = tmpfile();
- ASSERT_TRUE(f != NULL);
- MappedFile file(fileno(f));
- EXPECT_GE(file.Fd(), 0);
- EXPECT_TRUE(file.IsOpened());
- EXPECT_EQ(0, file.Close());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), file.size()));
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanUseAfterMapReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- EXPECT_FALSE(file.IsMapped());
- EXPECT_TRUE(file.MapReadWrite(1));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(1, file.size());
- ASSERT_TRUE(file.data());
- EXPECT_EQ(kContent[0], *file.data());
- EXPECT_EQ(0, file.Flush());
-}
-
-TEST_F(MappedFileTest, CanWriteNewData) {
- const std::string new_path(GetTmpPath("new-file.txt"));
- ASSERT_EQ(-1, unlink(new_path.c_str()));
- ASSERT_EQ(ENOENT, errno);
-
- MappedFile file;
- ASSERT_TRUE(file.Open(new_path, MappedFile::kReadWriteMode));
- EXPECT_TRUE(file.MapReadWrite(kContent.size()));
- EXPECT_TRUE(file.IsMapped());
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.size()));
- ASSERT_TRUE(file.data());
- memcpy(file.data(), kContent.c_str(), kContent.size());
- EXPECT_EQ(0, file.Close());
- EXPECT_FALSE(file.IsMapped());
-
- FdFile new_file(TEMP_FAILURE_RETRY(open(new_path.c_str(), O_RDONLY)));
- StringFile buffer;
- ASSERT_TRUE(CopyFile(new_file, &buffer));
- EXPECT_EQ(kContent, buffer.ToStringPiece());
- EXPECT_EQ(0, unlink(new_path.c_str()));
-}
-
-TEST_F(MappedFileTest, FileMustExist) {
- const std::string bad_path(GetTmpPath("does-not-exist.txt"));
- MappedFile file;
- EXPECT_FALSE(file.Open(bad_path, MappedFile::kReadOnlyMode));
- EXPECT_EQ(-1, file.Fd());
-}
-
-TEST_F(MappedFileTest, FileMustBeWritable) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, RemappingAllowedUntilSuccess) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_FALSE(file.MapReadWrite(10));
- EXPECT_FALSE(file.MapReadWrite(10));
-}
-
-TEST_F(MappedFileTest, ResizeMappedFile) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_TRUE(file.Unmap());
- EXPECT_TRUE(file.MapReadWrite(20));
- EXPECT_EQ(20, file.GetLength());
- EXPECT_EQ(0, file.Flush());
- EXPECT_TRUE(file.Unmap());
- EXPECT_EQ(0, file.Flush());
- EXPECT_EQ(0, file.SetLength(5));
- EXPECT_TRUE(file.MapReadOnly());
- EXPECT_EQ(5, file.GetLength());
-}
-
-TEST_F(MappedFileTest, ReadNotMapped) {
- TestRead();
-}
-
-TEST_F(MappedFileTest, SetLengthNotMapped) {
- TestSetLength();
-}
-
-TEST_F(MappedFileTest, WriteNotMapped) {
- TestWrite();
-}
-
-TEST_F(MappedFileTest, ReadMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, ReadMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
- TestReadContent(kContent, &file);
-}
-
-TEST_F(MappedFileTest, WriteMappedReadWrite) {
- TEMP_FAILURE_RETRY(unlink(good_path_.c_str()));
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(kContent.size()));
-
- // Can't write to a negative offset.
- EXPECT_EQ(-EINVAL, file.Write(kContent.c_str(), 0, -123));
-
- // A zero-length write is a no-op.
- EXPECT_EQ(0, file.Write(kContent.c_str(), 0, 0));
- // But the file size is as given when mapped.
- EXPECT_EQ(kContent.size(), static_cast<uint64_t>(file.GetLength()));
-
- // Data written past the end are discarded.
- EXPECT_EQ(kContent.size() - 1,
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 1)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data() + 1, kContent.size() - 1));
-
- // Data can be overwritten.
- EXPECT_EQ(kContent.size(),
- static_cast<uint64_t>(file.Write(kContent.c_str(), kContent.size(), 0)));
- EXPECT_EQ(0, memcmp(kContent.c_str(), file.data(), kContent.size()));
-}
-
-#if 0 // death tests don't work on android yet
-
-class MappedFileDeathTest : public MappedFileTest {};
-
-TEST_F(MappedFileDeathTest, MustMapBeforeUse) {
- MappedFile file;
- EXPECT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- EXPECT_DEATH(file.data(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_DEATH(file.MapReadOnly(), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, RemappingNotAllowedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_DEATH(file.MapReadWrite(10), "mapped_");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadWrite) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadWriteMode));
- ASSERT_TRUE(file.MapReadWrite(10));
- EXPECT_EQ(10, file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, SetLengthMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- EXPECT_EQ(kContent.size(), file.GetLength());
- EXPECT_DEATH(file.SetLength(0), ".*");
-}
-
-TEST_F(MappedFileDeathTest, WriteMappedReadOnly) {
- MappedFile file;
- ASSERT_TRUE(file.Open(good_path_, MappedFile::kReadOnlyMode));
- ASSERT_TRUE(file.MapReadOnly());
- char buf[10];
- EXPECT_DEATH(file.Write(buf, 0, 0), ".*");
-}
-
-#endif
-
-} // namespace unix_file
diff --git a/runtime/base/unix_file/null_file.cc b/runtime/base/unix_file/null_file.cc
index 050decb6db..322c25aa72 100644
--- a/runtime/base/unix_file/null_file.cc
+++ b/runtime/base/unix_file/null_file.cc
@@ -33,7 +33,8 @@ int NullFile::Flush() {
return 0;
}
-int64_t NullFile::Read(char* buf, int64_t byte_count, int64_t offset) const {
+int64_t NullFile::Read(char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) const {
if (offset < 0) {
return -EINVAL;
}
@@ -51,7 +52,8 @@ int64_t NullFile::GetLength() const {
return 0;
}
-int64_t NullFile::Write(const char* buf, int64_t byte_count, int64_t offset) {
+int64_t NullFile::Write(const char* buf ATTRIBUTE_UNUSED, int64_t byte_count ATTRIBUTE_UNUSED,
+ int64_t offset) {
if (offset < 0) {
return -EINVAL;
}
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index b2df09136d..ef5ccb6af9 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -512,7 +512,7 @@ class ScopedCheck {
return true;
}
- bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+ bool CheckReferenceKind(IndirectRefKind expected_kind, Thread* self, jobject obj) {
IndirectRefKind found_kind;
if (expected_kind == kLocal) {
found_kind = GetIndirectRefKind(obj);
@@ -2398,7 +2398,7 @@ class CheckJNI {
}
if (sc.Check(soa, false, "L", &result)) {
DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
- DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+ DCHECK(sc.CheckReferenceKind(kind, soa.Self(), result.L));
return result.L;
}
}
@@ -2410,7 +2410,7 @@ class CheckJNI {
ScopedCheck sc(kFlag_ExcepOkay, function_name);
JniValueType args[2] = {{.E = env}, {.L = obj}};
sc.Check(soa, true, "EL", args);
- if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+ if (sc.CheckReferenceKind(kind, soa.Self(), obj)) {
JniValueType result;
switch (kind) {
case kGlobal:
@@ -3116,7 +3116,7 @@ class CheckJNI {
static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
Primitive::Type type) {
ScopedObjectAccess soa(env);
- ScopedCheck sc(kFlag_Default, __FUNCTION__);
+ ScopedCheck sc(kFlag_Default, function_name);
JniValueType args[2] = {{.E = env}, {.z = length}};
if (sc.Check(soa, true, "Ez", args)) {
JniValueType result;
diff --git a/runtime/class_linker-inl.h b/runtime/class_linker-inl.h
index 875efbb2a0..ead3fa5139 100644
--- a/runtime/class_linker-inl.h
+++ b/runtime/class_linker-inl.h
@@ -105,8 +105,7 @@ inline mirror::Class* ClassLinker::ResolveType(uint16_t type_idx, mirror::ArtFie
}
inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
- mirror::ArtMethod* referrer,
- InvokeType type) {
+ mirror::ArtMethod* referrer) {
mirror::ArtMethod* resolved_method = referrer->GetDexCacheResolvedMethod(method_idx);
if (resolved_method == nullptr || resolved_method->IsRuntimeMethod()) {
return nullptr;
@@ -117,7 +116,7 @@ inline mirror::ArtMethod* ClassLinker::GetResolvedMethod(uint32_t method_idx,
inline mirror::ArtMethod* ClassLinker::ResolveMethod(Thread* self, uint32_t method_idx,
mirror::ArtMethod** referrer,
InvokeType type) {
- mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = GetResolvedMethod(method_idx, *referrer);
if (LIKELY(resolved_method != nullptr)) {
return resolved_method;
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index d99e373ed2..f5ac35074f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2673,8 +2673,7 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index,
- uint32_t method_index) {
+ uint32_t class_def_method_index) {
Runtime* runtime = Runtime::Current();
if (runtime->IsCompiler()) {
// The following code only applies to a non-compiler runtime.
@@ -2686,7 +2685,7 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method,
if (oat_class != nullptr) {
// Every kind of method should at least get an invoke stub from the oat_method.
// non-abstract methods also get their code pointers.
- const OatFile::OatMethod oat_method = oat_class->GetOatMethod(method_index);
+ const OatFile::OatMethod oat_method = oat_class->GetOatMethod(class_def_method_index);
oat_method.LinkMethod(method.Get());
}
@@ -2788,18 +2787,17 @@ void ClassLinker::LoadClass(Thread* self, const DexFile& dex_file,
OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
&has_oat_class);
if (has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, &oat_class);
+ LoadClassMembers(self, dex_file, class_data, klass, &oat_class);
}
}
if (!has_oat_class) {
- LoadClassMembers(self, dex_file, class_data, klass, class_loader, nullptr);
+ LoadClassMembers(self, dex_file, class_data, klass, nullptr);
}
}
void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
const uint8_t* class_data,
Handle<mirror::Class> klass,
- mirror::ClassLoader* class_loader,
const OatFile::OatClass* oat_class) {
// Load fields.
ClassDataItemIterator it(dex_file, class_data);
@@ -2876,7 +2874,7 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
return;
}
klass->SetDirectMethod(i, method.Get());
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
uint32_t it_method_index = it.GetMemberIndex();
if (last_dex_method_index == it_method_index) {
// duplicate case
@@ -2898,7 +2896,7 @@ void ClassLinker::LoadClassMembers(Thread* self, const DexFile& dex_file,
}
klass->SetVirtualMethod(i, method.Get());
DCHECK_EQ(class_def_method_index, it.NumDirectMethods() + i);
- LinkCode(method, oat_class, dex_file, it.GetMemberIndex(), class_def_method_index);
+ LinkCode(method, oat_class, class_def_method_index);
class_def_method_index++;
}
DCHECK(!it.HasNext());
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 8034d620aa..a1cae4d0ab 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -222,8 +222,7 @@ class ClassLinker {
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer,
- InvokeType type)
+ mirror::ArtMethod* GetResolvedMethod(uint32_t method_idx, mirror::ArtMethod* referrer)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::ArtMethod* ResolveMethod(Thread* self, uint32_t method_idx, mirror::ArtMethod** referrer,
InvokeType type)
@@ -506,8 +505,7 @@ class ClassLinker {
Handle<mirror::Class> klass, mirror::ClassLoader* class_loader)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadClassMembers(Thread* self, const DexFile& dex_file, const uint8_t* class_data,
- Handle<mirror::Class> klass, mirror::ClassLoader* class_loader,
- const OatFile::OatClass* oat_class)
+ Handle<mirror::Class> klass, const OatFile::OatClass* oat_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
@@ -581,7 +579,7 @@ class ClassLinker {
bool LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_static, size_t* class_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
- const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
+ uint32_t class_def_method_index)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 1ca6eb3a15..bd0dbaaf24 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -85,7 +85,7 @@ class CommonRuntimeTest : public testing::Test {
virtual void SetUp();
// Allow subclases such as CommonCompilerTest to add extra options.
- virtual void SetUpRuntimeOptions(RuntimeOptions* options) {}
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
void ClearDirectory(const char* dirpath);
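
[Editor's note] The SetUpRuntimeOptions hunk shows the same annotation on a virtual hook whose default body ignores its argument while overrides consume it. A rough sketch under the same ATTRIBUTE_UNUSED assumption; the class names carry a Sketch suffix and RuntimeOptions is simplified to a string vector purely so the example compiles on its own:

    #include <string>
    #include <vector>

    #define ATTRIBUTE_UNUSED __attribute__((__unused__))  // Stand-in, see base/macros.h.

    using RuntimeOptions = std::vector<std::string>;  // Simplified stand-in for the real type.

    class CommonRuntimeTestSketch {
     public:
      virtual ~CommonRuntimeTestSketch() {}
      // The base hook deliberately ignores the options; annotating the parameter keeps
      // -Wunused-parameter quiet without dropping it from the virtual signature.
      virtual void SetUpRuntimeOptions(RuntimeOptions* options ATTRIBUTE_UNUSED) {}
    };

    class CommonCompilerTestSketch : public CommonRuntimeTestSketch {
     public:
      // An override that actually consumes the parameter needs no annotation.
      void SetUpRuntimeOptions(RuntimeOptions* options) override {
        options->push_back("-Xcompiler-option");
      }
    };
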
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 5409d54f0a..a9663bba51 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -231,7 +231,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
virtual ~DebugInstrumentationListener() {}
void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc)
+ uint32_t dex_pc ATTRIBUTE_UNUSED)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (method->IsNative()) {
// TODO: post location events is a suspension point and native method entry stubs aren't.
@@ -254,6 +254,7 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
uint32_t dex_pc)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// We're not recorded to listen to this kind of event, so complain.
+ UNUSED(thread, this_object, method, dex_pc);
LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
<< " " << dex_pc;
}
@@ -267,16 +268,18 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
uint32_t dex_pc, mirror::ArtField* field)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread);
Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
}
- void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
- uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
+ void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
+ mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
+ const JValue& field_value)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
- void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 48e457fc6f..488ba7f728 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -189,6 +189,7 @@ class DeoptimizationRequest {
// Method for selective deoptimization.
jmethodID method_;
};
+std::ostream& operator<<(std::ostream& os, const DeoptimizationRequest::Kind& rhs);
class Dbg {
public:
@@ -493,7 +494,7 @@ class Dbg {
/*
* Debugger notification
*/
- enum {
+ enum EventFlag {
kBreakpoint = 0x01,
kSingleStep = 0x02,
kMethodEntry = 0x04,
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 10fe6bf9af..a07a5b6c96 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -1252,7 +1252,7 @@ class EncodedStaticFieldValueIterator {
template<bool kTransactionActive>
void ReadValueToField(Handle<mirror::ArtField> field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool HasNext() { return pos_ < array_size_; }
+ bool HasNext() const { return pos_ < array_size_; }
void Next();
diff --git a/runtime/dex_instruction.h b/runtime/dex_instruction.h
index 72802e4911..af5d9d00d1 100644
--- a/runtime/dex_instruction.h
+++ b/runtime/dex_instruction.h
@@ -35,7 +35,7 @@ enum {
class Instruction {
public:
// NOP-encoded switch-statement signatures.
- enum {
+ enum Signatures {
kPackedSwitchSignature = 0x0100,
kSparseSwitchSignature = 0x0200,
kArrayDataSignature = 0x0300,
@@ -79,10 +79,7 @@ class Instruction {
DISALLOW_COPY_AND_ASSIGN(ArrayDataPayload);
};
- // TODO: the code layout below is deliberate to avoid this enum being picked up by
- // generate-operator-out.py.
- enum Code
- { // NOLINT(whitespace/braces)
+ enum Code { // private marker to avoid generate-operator-out.py from processing.
#define INSTRUCTION_ENUM(opcode, cname, p, f, r, i, a, v) cname = opcode,
#include "dex_instruction_list.h"
DEX_INSTRUCTION_LIST(INSTRUCTION_ENUM)
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index f76da8edaa..1a8ca027e5 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -35,7 +35,6 @@
namespace art {
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <const bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
@@ -90,7 +89,6 @@ static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
return klass;
}
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE
static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self,
@@ -120,7 +118,6 @@ static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class*
// cannot be resolved, throw an error. If it can, use it to create an instance.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
@@ -140,11 +137,9 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
}
// Given the context of a calling Method and a resolved class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -163,11 +158,9 @@ static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
}
// Given the context of a calling Method and an initialized class, create an instance.
-// TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
template <bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type) {
DCHECK(klass != nullptr);
@@ -176,7 +169,6 @@ static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klas
}
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck>
ALWAYS_INLINE
static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
@@ -213,7 +205,6 @@ static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
// it cannot be resolved, throw an error. If it can, use it to create an array.
// When verification/compiler hasn't been able to verify access, optionally perform an access
// check.
-// TODO: Fix no thread safety analysis when GCC can handle template specialization.
template <bool kAccessCheck, bool kInstrumented>
ALWAYS_INLINE
static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
@@ -362,7 +353,7 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
mirror::Object** this_object,
mirror::ArtMethod** referrer, Thread* self) {
ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
- mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer, type);
+ mirror::ArtMethod* resolved_method = class_linker->GetResolvedMethod(method_idx, *referrer);
if (resolved_method == nullptr) {
StackHandleScope<1> hs(self);
mirror::Object* null_this = nullptr;
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index c46d8871d5..311cafa3b9 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -47,7 +47,6 @@ ALWAYS_INLINE static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-// TODO: Fix no thread safety analysis when annotalysis is smarter.
ALWAYS_INLINE static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
Thread* self, bool* slow_path)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -66,7 +65,6 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCode(uint32_t type_id
// Given the context of a calling Method and a resolved class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -74,7 +72,6 @@ ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeResolved(mirror::
// Given the context of a calling Method and an initialized class, create an instance.
template <bool kInstrumented>
ALWAYS_INLINE static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
- mirror::ArtMethod* method,
Thread* self,
gc::AllocatorType allocator_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
index f0ad6de3a1..afe769e5ec 100644
--- a/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_fillarray_entrypoints.cc
@@ -25,6 +25,7 @@ extern "C" void art_portable_fill_array_data_from_code(mirror::ArtMethod* method
mirror::Array* array,
uint32_t payload_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(dex_pc);
const DexFile::CodeItem* code_item = method->GetCodeItem();
const Instruction::ArrayDataPayload* payload =
reinterpret_cast<const Instruction::ArrayDataPayload*>(code_item->insns_ + payload_offset);
diff --git a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
index 61d66ba461..e7975f8923 100644
--- a/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_trampoline_entrypoints.cc
@@ -121,6 +121,7 @@ class PortableArgumentVisitor {
private:
static size_t ComputeArgsInRegs(MethodHelper& mh) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#if (defined(__i386__))
+ UNUSED(mh);
return 0;
#else
size_t args_in_regs = 0;
diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
index 7dbfdd5679..cccf8f3596 100644
--- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc
@@ -29,8 +29,7 @@ static constexpr bool kUseTlabFastPath = true;
#define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
- uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
- StackReference<mirror::ArtMethod>* sp) \
+ uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
@@ -59,6 +58,7 @@ extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
if (LIKELY(klass->IsInitialized())) { \
@@ -80,11 +80,12 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
} \
} \
} \
- return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeResolved<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
mirror::Class* klass, mirror::ArtMethod* method, Thread* self) \
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+ UNUSED(method); \
ScopedQuickEntrypointChecks sqec(self); \
if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
size_t byte_count = klass->GetObjectSize(); \
@@ -104,7 +105,7 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
return obj; \
} \
} \
- return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
+ return AllocObjectFromCodeInitialized<instrumented_bool>(klass, self, allocator_type); \
} \
extern "C" mirror::Object* artAllocObjectFromCodeWithAccessCheck##suffix##suffix2( \
uint32_t type_idx, mirror::ArtMethod* method, Thread* self) \
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index e0aab75ddf..5cb51789cf 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -229,7 +229,7 @@ class QuickArgumentVisitor {
gpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Gpr1Offset),
fpr_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_Fpr1Offset),
stack_args_(reinterpret_cast<uint8_t*>(sp) + kQuickCalleeSaveFrame_RefAndArgs_FrameSize
- + StackArgumentStartFromShorty(is_static, shorty, shorty_len)),
+ + sizeof(StackReference<mirror::ArtMethod>)), // Skip StackReference<ArtMethod>.
gpr_index_(0), fpr_index_(0), fpr_double_index_(0), stack_index_(0),
cur_type_(Primitive::kPrimVoid), is_split_long_or_double_(false) {
COMPILE_ASSERT(kQuickSoftFloatAbi == (kNumQuickFprArgs == 0), knum_of_quick_fpr_arg_unexpected);
@@ -409,13 +409,6 @@ class QuickArgumentVisitor {
}
}
- private:
- static size_t StackArgumentStartFromShorty(bool is_static, const char* shorty,
- uint32_t shorty_len) {
- // 'stack_args_' points to the first method's argument
- return sizeof(StackReference<mirror::ArtMethod>); // Skip StackReference<ArtMethod>.
- }
-
protected:
const bool is_static_;
const char* const shorty_;
@@ -1234,7 +1227,9 @@ class ComputeNativeCallFrameSize {
}
virtual void WalkHeader(BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize>* sm)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(sm);
+ }
void Walk(const char* shorty, uint32_t shorty_len) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
BuildNativeCallFrameStateMachine<ComputeNativeCallFrameSize> sm(this);
@@ -1366,8 +1361,7 @@ class ComputeGenericJniFrameSize FINAL : public ComputeNativeCallFrameSize {
// WARNING: After this, *sp won't be pointing to the method anymore!
uint8_t* ComputeLayout(Thread* self, StackReference<mirror::ArtMethod>** m,
- bool is_static, const char* shorty, uint32_t shorty_len,
- HandleScope** handle_scope,
+ const char* shorty, uint32_t shorty_len, HandleScope** handle_scope,
uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
Walk(shorty, shorty_len);
@@ -1441,9 +1435,9 @@ class FillNativeCall {
cur_stack_arg_++;
}
- virtual uintptr_t PushHandle(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ virtual uintptr_t PushHandle(mirror::Object*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
LOG(FATAL) << "(Non-JNI) Native call does not use handles.";
- return 0U;
+ UNREACHABLE();
}
private:
@@ -1464,7 +1458,7 @@ class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
uintptr_t* start_gpr_reg;
uint32_t* start_fpr_reg;
uintptr_t* start_stack_arg;
- bottom_of_used_area_ = fsc.ComputeLayout(self, sp, is_static, shorty, shorty_len,
+ bottom_of_used_area_ = fsc.ComputeLayout(self, sp, shorty, shorty_len,
&handle_scope_,
&start_stack_arg,
&start_gpr_reg, &start_fpr_reg);
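
[Editor's note] The PushHandle hunk above, like the zygote_space.cc hunks further down, swaps a dead "return 0U;" after a fatal log for UNREACHABLE(). A hedged sketch of why that works, assuming UNREACHABLE() boils down to __builtin_unreachable(); the logging macro and function name here are illustrative stand-ins, not the real ART macros:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Illustrative stand-ins; the real LOG(FATAL) and UNREACHABLE() macros live in
    // art/runtime/base/logging.h and art/runtime/base/macros.h.
    #define SKETCH_LOG_FATAL(msg) (std::fprintf(stderr, "%s\n", msg), std::abort())
    #define UNREACHABLE() __builtin_unreachable()

    // No dummy return value is needed after a fatal log: abort() never returns, and
    // __builtin_unreachable() tells the compiler so, avoiding missing-return warnings.
    uintptr_t PushHandleSketch() {
      SKETCH_LOG_FATAL("(Non-JNI) Native call does not use handles.");
      UNREACHABLE();
    }
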
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
index a9af75401e..85a0b99ab1 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints_test.cc
@@ -34,7 +34,7 @@ class QuickTrampolineEntrypointsTest : public CommonRuntimeTest {
t->TransitionFromSuspendedToRunnable(); // So we can create callee-save methods.
r->SetInstructionSet(isa);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(type);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, type);
t->TransitionFromRunnableToSuspended(ThreadState::kNative); // So we can shut down.
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 1365cd4bef..ee9b221808 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -165,7 +165,7 @@ TEST_F(ExceptionTest, StackTraceElement) {
std::vector<uintptr_t> fake_stack;
Runtime* r = Runtime::Current();
r->SetInstructionSet(kRuntimeISA);
- mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod(Runtime::kSaveAll);
+ mirror::ArtMethod* save_method = r->CreateCalleeSaveMethod();
r->SetCalleeSaveMethod(save_method, Runtime::kSaveAll);
QuickMethodFrameInfo frame_info = save_method->GetQuickFrameInfo();
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 4ae929b0cc..c4736847b9 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -383,7 +383,7 @@ JavaStackTraceHandler::JavaStackTraceHandler(FaultManager* manager) : FaultHandl
bool JavaStackTraceHandler::Action(int sig, siginfo_t* siginfo, void* context) {
// Make sure that we are in the generated code, but we may not have a dex pc.
-
+ UNUSED(sig);
#ifdef TEST_NESTED_SIGNAL
bool in_generated_code = true;
#else
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index 753b42deb5..0a15e9eb90 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -46,6 +46,7 @@ class ModUnionClearCardSetVisitor {
}
inline void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
cleared_cards_->insert(card);
}
@@ -62,6 +63,7 @@ class ModUnionClearCardVisitor {
}
void operator()(uint8_t* card, uint8_t expected_card, uint8_t new_card) const {
+ UNUSED(new_card);
if (expected_card == CardTable::kCardDirty) {
cleared_cards_->push_back(card);
}
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index d43dc0a39a..b16a146ea9 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -43,6 +43,7 @@ class RememberedSetCardVisitor {
: dirty_cards_(dirty_cards) {}
void operator()(uint8_t* card, uint8_t expected_value, uint8_t new_value) const {
+ UNUSED(new_value);
if (expected_value == CardTable::kCardDirty) {
dirty_cards_->insert(card);
}
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 40856fc9ef..850325a33d 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -91,7 +91,7 @@ class SimpleCounter {
public:
explicit SimpleCounter(size_t* counter) : count_(counter) {}
- void operator()(mirror::Object* obj) const {
+ void operator()(mirror::Object* obj ATTRIBUTE_UNUSED) const {
(*count_)++;
}
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index fbeba7f447..acff52d50d 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -43,7 +43,8 @@ static void art_heap_corruption(const char* function) {
}
static void art_heap_usage_error(const char* function, void* p) {
- LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p << " not expected";
+ LOG(::art::FATAL) << "Incorrect use of function '" << function << "' argument " << p
+ << " not expected";
}
#include "globals.h"
@@ -70,7 +71,9 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte
}
}
-extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocBytesAllocatedCallback(void* start ATTRIBUTE_UNUSED,
+ void* end ATTRIBUTE_UNUSED, size_t used_bytes,
+ void* arg) {
if (used_bytes == 0) {
return;
}
@@ -78,7 +81,10 @@ extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t us
*bytes_allocated += used_bytes + sizeof(size_t);
}
-extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes,
+ void* arg) {
+ UNUSED(start);
+ UNUSED(end);
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index fa531a7c60..f5e2fed9d7 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -1851,7 +1851,8 @@ void RosAlloc::Initialize() {
dedicated_full_run_->SetIsThreadLocal(true);
}
-void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
@@ -1859,7 +1860,8 @@ void RosAlloc::BytesAllocatedCallback(void* start, void* end, size_t used_bytes,
*bytes_allocated += used_bytes;
}
-void RosAlloc::ObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg) {
+void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED,
+ size_t used_bytes, void* arg) {
if (used_bytes == 0) {
return;
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index ad7f901181..a2f8342fd8 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -105,6 +105,9 @@ class RosAlloc {
rosalloc->ReleasePageRange(start, start + byte_size);
}
}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FreePageRun);
};
// Represents a run of memory slots of the same size.
@@ -256,6 +259,8 @@ class RosAlloc {
size_t MarkFreeBitMapShared(void* ptr, uint32_t* free_bit_map_base, const char* caller_name);
// Turns the bit map into a string for debugging.
static std::string BitMapToStr(uint32_t* bit_map_base, size_t num_vec);
+
+ // TODO: DISALLOW_COPY_AND_ASSIGN(Run);
};
// The magic number for a run.
@@ -446,7 +451,7 @@ class RosAlloc {
// Bracket lock names (since locks only have char* names).
std::string size_bracket_lock_names_[kNumOfSizeBrackets];
// The types of page map entries.
- enum {
+ enum PageMapKind {
kPageMapReleased = 0, // Zero and released back to the OS.
kPageMapEmpty, // Zero but probably dirty.
kPageMapRun, // The beginning of a run.
@@ -526,11 +531,15 @@ class RosAlloc {
// Release a range of pages.
size_t ReleasePageRange(uint8_t* start, uint8_t* end) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Dumps the page map for debugging.
+ std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
public:
RosAlloc(void* base, size_t capacity, size_t max_capacity,
PageReleaseMode page_release_mode,
size_t page_release_size_threshold = kDefaultPageReleaseSizeThreshold);
~RosAlloc();
+
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
@@ -540,6 +549,7 @@ class RosAlloc {
LOCKS_EXCLUDED(bulk_free_lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
+
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(void* ptr);
// Returns the size of the allocated slot for a given size.
@@ -557,6 +567,7 @@ class RosAlloc {
void InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg),
void* arg)
LOCKS_EXCLUDED(lock_);
+
// Release empty pages.
size_t ReleasePages() LOCKS_EXCLUDED(lock_);
// Returns the current footprint.
@@ -565,6 +576,7 @@ class RosAlloc {
size_t FootprintLimit() LOCKS_EXCLUDED(lock_);
// Update the current capacity.
void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
+
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
void RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
@@ -573,8 +585,7 @@ class RosAlloc {
void AssertThreadLocalRunsAreRevoked(Thread* thread);
// Assert all the thread local runs are revoked.
void AssertAllThreadLocalRunsAreRevoked() LOCKS_EXCLUDED(Locks::thread_list_lock_);
- // Dumps the page map for debugging.
- std::string DumpPageMap() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
static Run* GetDedicatedFullRun() {
return dedicated_full_run_;
}
@@ -597,7 +608,13 @@ class RosAlloc {
void Verify() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes);
+
+ private:
+ friend std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
+
+ DISALLOW_COPY_AND_ASSIGN(RosAlloc);
};
+std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs);
} // namespace allocator
} // namespace gc
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
index 938b0f1d2a..c6ebc7384c 100644
--- a/runtime/gc/allocator_type.h
+++ b/runtime/gc/allocator_type.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
#define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
+#include <ostream>
+
namespace art {
namespace gc {
@@ -29,6 +31,7 @@ enum AllocatorType {
kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
};
+std::ostream& operator<<(std::ostream& os, const AllocatorType& rhs);
} // namespace gc
} // namespace art
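
[Editor's note] allocator_type.h above, like the profiler_options.h, instrumentation.h, debugger.h, and rosalloc.h hunks elsewhere in this patch, only declares the streaming operator; the definition is produced at build time by tools/generate-operator-out.py. A rough sketch of the declare-then-generate shape, with sketch enum names and with the exact strings and formatting of the generated body treated as assumptions:

    #include <ostream>

    // Header side: the enum plus a declaration only, mirroring allocator_type.h.
    enum SketchAllocatorType {
      kSketchAllocatorTypeBumpPointer,
      kSketchAllocatorTypeTLAB,
      kSketchAllocatorTypeRosAlloc,
    };
    std::ostream& operator<<(std::ostream& os, const SketchAllocatorType& rhs);

    // Roughly the shape of the definition the generator emits into a separate .cc file;
    // the real output of generate-operator-out.py may name the values differently.
    std::ostream& operator<<(std::ostream& os, const SketchAllocatorType& rhs) {
      switch (rhs) {
        case kSketchAllocatorTypeBumpPointer: os << "kSketchAllocatorTypeBumpPointer"; break;
        case kSketchAllocatorTypeTLAB:        os << "kSketchAllocatorTypeTLAB"; break;
        case kSketchAllocatorTypeRosAlloc:    os << "kSketchAllocatorTypeRosAlloc"; break;
        default: os << "SketchAllocatorType[" << static_cast<int>(rhs) << "]"; break;
      }
      return os;
    }
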
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index ce7c75ab64..ee5a785a1a 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -29,7 +29,9 @@ class ConcurrentCopying : public GarbageCollector {
const std::string& name_prefix = "")
: GarbageCollector(heap,
name_prefix + (name_prefix.empty() ? "" : " ") +
- "concurrent copying + mark sweep") {}
+ "concurrent copying + mark sweep") {
+ UNUSED(generational);
+ }
~ConcurrentCopying() {}
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index ad3bb11d80..e3966e3081 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -658,6 +658,7 @@ class MarkStackTask : public Task {
// Scans all of the objects
virtual void Run(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+ UNUSED(self);
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc
index 4ed6abc386..5be3db712b 100644
--- a/runtime/gc/collector/sticky_mark_sweep.cc
+++ b/runtime/gc/collector/sticky_mark_sweep.cc
@@ -58,6 +58,7 @@ void StickyMarkSweep::MarkReachableObjects() {
}
void StickyMarkSweep::Sweep(bool swap_bitmaps) {
+ UNUSED(swap_bitmaps);
SweepArray(GetHeap()->GetLiveStack(), false);
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 8e080d1cdd..9fd9a2b377 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -265,14 +265,13 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
// Attempt to create 2 mem maps at or after the requested begin.
main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
- PROT_READ | PROT_WRITE, &error_str));
+ &error_str));
CHECK(main_mem_map_1.get() != nullptr) << error_str;
if (support_homogeneous_space_compaction ||
background_collector_type_ == kCollectorTypeSS ||
foreground_collector_type_ == kCollectorTypeSS) {
main_mem_map_2.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[1], main_mem_map_1->End(),
- capacity_, PROT_READ | PROT_WRITE,
- &error_str));
+ capacity_, &error_str));
CHECK(main_mem_map_2.get() != nullptr) << error_str;
}
// Create the non moving space first so that bitmaps don't take up the address range.
@@ -435,8 +434,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
}
}
-MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin, size_t capacity,
- int prot_flags, std::string* out_error_str) {
+MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
+ size_t capacity, std::string* out_error_str) {
while (true) {
MemMap* map = MemMap::MapAnonymous(name, request_begin, capacity,
PROT_READ | PROT_WRITE, true, out_error_str);
@@ -887,7 +886,7 @@ space::Space* Heap::FindSpaceFromObject(const mirror::Object* obj, bool fail_ok)
if (result != NULL) {
return result;
}
- return FindDiscontinuousSpaceFromObject(obj, true);
+ return FindDiscontinuousSpaceFromObject(obj, fail_ok);
}
space::ImageSpace* Heap::GetImageSpace() const {
@@ -1832,6 +1831,7 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const {
// Don't sweep any spaces since we probably blasted the internal accounting of the free list
// allocator.
+ UNUSED(space);
return false;
}
@@ -2239,6 +2239,7 @@ class VerifyReferenceVisitor {
void operator()(mirror::Class* klass, mirror::Reference* ref) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(klass);
if (verify_referent_) {
VerifyReference(ref, ref->GetReferent(), mirror::Reference::ReferentOffset());
}
@@ -2583,6 +2584,7 @@ bool Heap::VerifyMissingCardMarks() {
}
void Heap::SwapStacks(Thread* self) {
+ UNUSED(self);
if (kUseThreadLocalAllocationStack) {
live_stack_->AssertAllZero();
}
@@ -2711,6 +2713,7 @@ void Heap::PreGcVerification(collector::GarbageCollector* gc) {
}
void Heap::PrePauseRosAllocVerification(collector::GarbageCollector* gc) {
+ UNUSED(gc);
// TODO: Add a new runtime option for this?
if (verify_pre_gc_rosalloc_) {
RosAllocVerification(current_gc_iteration_.GetTimings(), "PreGcRosAllocVerification");
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 7b891a61ac..cf7352e75c 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -622,8 +622,7 @@ class Heap {
// Create a mem map with a preferred base address.
static MemMap* MapAnonymousPreferredAddress(const char* name, uint8_t* request_begin,
- size_t capacity, int prot_flags,
- std::string* out_error_str);
+ size_t capacity, std::string* out_error_str);
bool SupportHSpaceCompaction() const {
// Returns true if we can do hspace compaction
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index d2d95b4c7b..445c720d4c 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -314,6 +314,7 @@ static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void*
}
void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) {
+ UNUSED(failed_alloc_bytes);
Thread* self = Thread::Current();
size_t max_contiguous_allocation = 0;
// To allow the Walk/InspectAll() to exclusively-lock the mutator
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 9434bfe91c..c0c6444306 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -159,7 +159,11 @@ size_t LargeObjectMapSpace::AllocationSize(mirror::Object* obj, size_t* usable_s
MutexLock mu(Thread::Current(), lock_);
auto found = mem_maps_.find(obj);
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
- return found->second->BaseSize();
+ size_t alloc_size = found->second->BaseSize();
+ if (usable_size != nullptr) {
+ *usable_size = alloc_size;
+ }
+ return alloc_size;
}
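
[Editor's note] The LargeObjectMapSpace::AllocationSize hunk is a behavioral fix rather than a warning fix: the previously ignored usable_size out-parameter is now filled in. A small sketch of the caller/callee contract, with a made-up function name and with allocation size and usable size kept equal as in the patched code:

    #include <cassert>
    #include <cstddef>

    // Callee side: report the allocation size and, when asked, the usable size too.
    size_t AllocationSizeSketch(size_t base_size, size_t* usable_size) {
      size_t alloc_size = base_size;
      if (usable_size != nullptr) {  // Callers pass nullptr when they do not care.
        *usable_size = alloc_size;
      }
      return alloc_size;
    }

    int main() {
      size_t usable = 0;
      size_t alloc = AllocationSizeSketch(4096, &usable);
      assert(alloc == 4096 && usable == 4096);  // The out-parameter is now populated.
      return 0;
    }
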
size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index eb6fe9c32b..bc870a674a 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -44,6 +44,7 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterRecentFree(mirror::Object* ptr) OVERRIDE {
+ UNUSED(ptr);
}
ValgrindMallocSpace(const std::string& name, MemMap* mem_map, AllocatorType allocator,
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index 9de0548561..a868e6831d 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -32,6 +32,7 @@ class CountObjectsAllocated {
: objects_allocated_(objects_allocated) {}
void operator()(mirror::Object* obj) const {
+ UNUSED(obj);
++*objects_allocated_;
}
@@ -76,30 +77,29 @@ void ZygoteSpace::Dump(std::ostream& os) const {
<< ",name=\"" << GetName() << "\"]";
}
-mirror::Object* ZygoteSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
UNIMPLEMENTED(FATAL);
- return nullptr;
+ UNREACHABLE();
}
-size_t ZygoteSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
+size_t ZygoteSpace::AllocationSize(mirror::Object*, size_t*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::Free(Thread* self, mirror::Object* ptr) {
+size_t ZygoteSpace::Free(Thread*, mirror::Object*) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-size_t ZygoteSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
+size_t ZygoteSpace::FreeList(Thread*, size_t, mirror::Object**) {
UNIMPLEMENTED(FATAL);
- return 0;
+ UNREACHABLE();
}
-void ZygoteSpace::LogFragmentationAllocFailure(std::ostream& /*os*/,
- size_t /*failed_alloc_bytes*/) {
+void ZygoteSpace::LogFragmentationAllocFailure(std::ostream&, size_t) {
UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) {
diff --git a/runtime/handle_scope.h b/runtime/handle_scope.h
index c55835dd00..2c4f0f915d 100644
--- a/runtime/handle_scope.h
+++ b/runtime/handle_scope.h
@@ -100,7 +100,7 @@ class PACKED(4) HandleScope {
}
// Offset of link within HandleScope, used by generated code.
- static size_t LinkOffset(size_t pointer_size) {
+ static size_t LinkOffset(size_t pointer_size ATTRIBUTE_UNUSED) {
return 0;
}
diff --git a/runtime/instruction_set.cc b/runtime/instruction_set.cc
index 0ca32fe705..e165a75659 100644
--- a/runtime/instruction_set.cc
+++ b/runtime/instruction_set.cc
@@ -431,9 +431,8 @@ const ArmInstructionSetFeatures* ArmInstructionSetFeatures::FromHwcap() {
// A signal handler called by a fault for an illegal instruction. We record the fact in r0
// and then increment the PC in the signal context to return to the next instruction. We know the
// instruction is an sdiv (4 bytes long).
-static void bad_divide_inst_handle(int signo, siginfo_t* si, void* data) {
- UNUSED(signo);
- UNUSED(si);
+static void bad_divide_inst_handle(int signo ATTRIBUTE_UNUSED, siginfo_t* si ATTRIBUTE_UNUSED,
+ void* data) {
#if defined(__arm__)
struct ucontext *uc = (struct ucontext *)data;
struct sigcontext *sc = &uc->uc_mcontext;
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index fc3da3635b..160e8c36a3 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -595,6 +595,7 @@ void Instrumentation::ConfigureStubs(bool require_entry_exit_stubs, bool require
}
static void ResetQuickAllocEntryPointsForThread(Thread* thread, void* arg) {
+ UNUSED(arg);
thread->ResetQuickAllocEntryPointsForThread();
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index 3017bf6a38..646c7ae9c5 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -103,13 +103,13 @@ struct InstrumentationListener {
class Instrumentation {
public:
enum InstrumentationEvent {
- kMethodEntered = 1 << 0,
- kMethodExited = 1 << 1,
- kMethodUnwind = 1 << 2,
- kDexPcMoved = 1 << 3,
- kFieldRead = 1 << 4,
- kFieldWritten = 1 << 5,
- kExceptionCaught = 1 << 6,
+ kMethodEntered = 1, // 1 << 0
+ kMethodExited = 2, // 1 << 1
+ kMethodUnwind = 4, // 1 << 2
+ kDexPcMoved = 8, // 1 << 3
+ kFieldRead = 16, // 1 << 4,
+ kFieldWritten = 32, // 1 << 5
+ kExceptionCaught = 64, // 1 << 6
};
Instrumentation();
@@ -464,6 +464,7 @@ class Instrumentation {
DISALLOW_COPY_AND_ASSIGN(Instrumentation);
};
+std::ostream& operator<<(std::ostream& os, const Instrumentation::InstrumentationEvent& rhs);
// An element in the instrumentation side stack maintained in art::Thread.
struct InstrumentationStackFrame {
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 23324a6e75..89586b0982 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -355,7 +355,8 @@ void InternTable::Table::Insert(mirror::String* s) {
post_zygote_table_.insert(GcRoot<mirror::String>(s));
}
-void InternTable::Table::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags flags) {
+void InternTable::Table::VisitRoots(RootCallback* callback, void* arg,
+ VisitRootFlags flags ATTRIBUTE_UNUSED) {
for (auto& intern : pre_zygote_table_) {
const_cast<GcRoot<mirror::String>&>(intern).VisitRoot(callback, arg, 0, kRootInternedString);
}
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 9de12f2401..44e2029329 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -315,6 +315,10 @@ enum InterpreterImplKind {
kSwitchImpl, // Switch-based interpreter implementation.
kComputedGotoImplKind // Computed-goto-based interpreter implementation.
};
+std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) {
+ os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter");
+ return os;
+}
#if !defined(__clang__)
static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKind;
@@ -322,8 +326,7 @@ static constexpr InterpreterImplKind kInterpreterImplKind = kComputedGotoImplKin
// Clang 3.4 fails to build the goto interpreter implementation.
static constexpr InterpreterImplKind kInterpreterImplKind = kSwitchImpl;
template<bool do_access_check, bool transaction_active>
-JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* code_item,
- ShadowFrame& shadow_frame, JValue result_register) {
+JValue ExecuteGotoImpl(Thread*, MethodHelper&, const DexFile::CodeItem*, ShadowFrame&, JValue) {
LOG(FATAL) << "UNREACHABLE";
UNREACHABLE();
}
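
[Editor's note] InterpreterImplKind is defined locally in interpreter.cc, so its streaming operator is written by hand next to the enum, presumably rather than routed through the generated operators. A usage-style sketch with sketch names, showing what the operator buys call sites:

    #include <iostream>

    enum InterpreterImplKindSketch { kSwitchImplSketch, kComputedGotoImplKindSketch };

    // Same shape as the operator added in the hunk above, with sketch names.
    std::ostream& operator<<(std::ostream& os, const InterpreterImplKindSketch& rhs) {
      return os << ((rhs == kSwitchImplSketch) ? "Switch-based interpreter"
                                               : "Computed-goto-based interpreter");
    }

    int main() {
      // With the operator in scope, diagnostics can stream the enum directly
      // instead of first casting it to an integer type.
      InterpreterImplKindSketch kind = kSwitchImplSketch;
      std::cout << "Using " << kind << std::endl;
      return 0;
    }
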
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 7fdc18e8a0..be34bd3df2 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -1126,6 +1126,7 @@ static JdwpError TR_CurrentContendedMonitor(JdwpState*, Request* request, Expand
static JdwpError TR_Interrupt(JdwpState*, Request* request, ExpandBuf* reply)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(reply);
ObjectId thread_id = request->ReadThreadId();
return Dbg::Interrupt(thread_id);
}
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index 4f348967b6..bf72c7b619 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -17,6 +17,7 @@
#include "object_registry.h"
#include "handle_scope-inl.h"
+#include "jni_internal.h"
#include "mirror/class.h"
#include "scoped_thread_state_change.h"
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index ad06b85f36..dd66af72f7 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -1749,6 +1749,7 @@ class JNI {
}
static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
+ UNUSED(chars);
CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
ScopedObjectAccess soa(env);
gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -1777,7 +1778,7 @@ class JNI {
return bytes;
}
- static void ReleaseStringUTFChars(JNIEnv* env, jstring, const char* chars) {
+ static void ReleaseStringUTFChars(JNIEnv*, jstring, const char* chars) {
delete[] chars;
}
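
[Editor's note] ReleaseStringUTFChars above, like VMRuntime_is64Bit and the ZygoteSpace stubs, uses the third idiom for unused parameters: leave them unnamed in the definition when neither an annotation nor a macro adds anything. A tiny sketch; the struct types exist only so the example compiles on its own:

    struct SketchEnv {};     // Stand-ins for JNIEnv / jstring, purely for the sketch.
    struct SketchString {};

    // The first two parameters are dictated by the JNI-style signature but are not
    // needed to free the buffer, so they are simply left unnamed.
    void ReleaseCharsSketch(SketchEnv*, SketchString*, const char* chars) {
      delete[] chars;
    }
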
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index 13cc3b0bcb..2d5c71bb93 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -52,7 +52,7 @@ class Monitor;
*/
class LockWord {
public:
- enum {
+ enum SizeShiftsAndMasks { // private marker to avoid generate-operator-out.py from processing.
// Number of bits to encode the state, currently just fat or thin/unlocked or hash code.
kStateSize = 2,
// Number of bits to encode the thin lock owner.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index c1184716b1..51aba9c374 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -238,6 +238,9 @@ static bool CheckMapRequest(uint8_t* expected_ptr, void* actual_ptr, size_t byte
MemMap* MemMap::MapAnonymous(const char* name, uint8_t* expected_ptr, size_t byte_count, int prot,
bool low_4gb, std::string* error_msg) {
+#ifndef __LP64__
+ UNUSED(low_4gb);
+#endif
if (byte_count == 0) {
return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
}
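
[Editor's note] The MapAnonymous hunk marks low_4gb as used with UNUSED() under #ifndef __LP64__ rather than annotating the declaration, since the parameter is genuinely consumed on 64-bit builds. A compilable sketch with an illustrative stand-in for ART's variadic UNUSED helper (the real definition in base/macros.h may differ) and with the mapping logic elided:

    #include <cstddef>

    // Illustrative stand-in for art's variadic UNUSED(...) helper.
    template <typename... T>
    void UNUSED(const T&...) {}

    void* MapAnonymousSketch(size_t byte_count, bool low_4gb) {
    #ifndef __LP64__
      // 32-bit builds cannot map above 4GB anyway, so the flag only matters on LP64;
      // marking it used here keeps -Wunused-parameter quiet on that configuration.
      UNUSED(low_4gb);
    #endif
      if (byte_count == 0) {
        return nullptr;
      }
    #ifdef __LP64__
      if (low_4gb) {
        // A real implementation would constrain the mapping below 4GB here (elided).
      }
    #endif
      return nullptr;  // The actual mmap call is elided in this sketch.
    }
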
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7e1ad7868a..13f881d966 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -81,6 +81,7 @@ static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t
// 64-bit. No overflow as component_count is 32-bit and the maximum
// component size is 8.
DCHECK_LE((1U << component_size_shift), 8U);
+ UNUSED(self);
#else
// 32-bit.
DCHECK_NE(header_size, 0U);
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index d9094fc393..68fbb8b4e6 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -839,7 +839,7 @@ class MANAGED Class FINAL : public Object {
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsResolved() || IsErroneous()) << PrettyClass(this) << " status=" << GetStatus();
+ DCHECK(IsResolved() || IsErroneous());
return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index c9e60bc0b2..c451764c93 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -135,6 +135,7 @@ inline void Object::SetReadBarrierPointer(Object* rb_ptr) {
#else
LOG(FATAL) << "Unreachable";
UNREACHABLE();
+ UNUSED(rb_ptr);
#endif
}
@@ -156,6 +157,7 @@ inline bool Object::AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object*
DCHECK_EQ(new_ref.reference_, atomic_rb_ptr->LoadRelaxed());
return true;
#else
+ UNUSED(expected_rb_ptr, rb_ptr);
LOG(FATAL) << "Unreachable";
UNREACHABLE();
#endif
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 5b9209350b..27678dcbdd 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -53,6 +53,7 @@ class MonitorPool {
static void ReleaseMonitor(Thread* self, Monitor* monitor) {
#ifndef __LP64__
+ UNUSED(self);
delete monitor;
#else
GetMonitorPool()->ReleaseMonitorToPool(self, monitor);
@@ -61,6 +62,7 @@ class MonitorPool {
static void ReleaseMonitors(Thread* self, MonitorList::Monitors* monitors) {
#ifndef __LP64__
+ UNUSED(self);
STLDeleteElements(monitors);
#else
GetMonitorPool()->ReleaseMonitorsToPool(self, monitors);
@@ -85,6 +87,7 @@ class MonitorPool {
static MonitorId ComputeMonitorId(Monitor* mon, Thread* self) {
#ifndef __LP64__
+ UNUSED(self);
return MonitorIdFromMonitor(mon);
#else
return GetMonitorPool()->ComputeMonitorIdInPool(mon, self);
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index c35bb30b48..e1ceb8c864 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -166,13 +166,13 @@ static jstring VMRuntime_vmInstructionSet(JNIEnv* env, jobject) {
return env->NewStringUTF(isa_string);
}
-static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
+static jboolean VMRuntime_is64Bit(JNIEnv*, jobject) {
bool is64BitMode = (sizeof(void*) == sizeof(uint64_t));
return is64BitMode ? JNI_TRUE : JNI_FALSE;
}
static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
- return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
+ return down_cast<JNIEnvExt*>(env)->vm->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
}
static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
@@ -201,9 +201,10 @@ static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) {
Runtime::Current()->GetHeap()->RegisterNativeFree(env, static_cast<size_t>(bytes));
}
-static void VMRuntime_updateProcessState(JNIEnv* env, jobject, jint process_state) {
- Runtime::Current()->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
- Runtime::Current()->UpdateProfilerState(process_state);
+static void VMRuntime_updateProcessState(JNIEnv*, jobject, jint process_state) {
+ Runtime* runtime = Runtime::Current();
+ runtime->GetHeap()->UpdateProcessState(static_cast<gc::ProcessState>(process_state));
+ runtime->UpdateProfilerState(process_state);
}
static void VMRuntime_trimHeap(JNIEnv*, jobject) {
@@ -514,8 +515,9 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) {
* for ART.
*/
static void VMRuntime_registerAppInfo(JNIEnv* env, jclass, jstring pkgName,
- jstring appDir, jstring procName) {
- const char *pkgNameChars = env->GetStringUTFChars(pkgName, NULL);
+ jstring appDir ATTRIBUTE_UNUSED,
+ jstring procName ATTRIBUTE_UNUSED) {
+ const char *pkgNameChars = env->GetStringUTFChars(pkgName, nullptr);
std::string profileFile = StringPrintf("/data/dalvik-cache/profiles/%s", pkgNameChars);
Runtime::Current()->StartProfiler(profileFile.c_str());
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e469126206..adc7c4f410 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -100,8 +100,7 @@ static jlong ZygoteHooks_nativePreFork(JNIEnv* env, jclass) {
runtime->PreZygoteFork();
// Grab thread before fork potentially makes Thread::pthread_key_self_ unusable.
- Thread* self = Thread::Current();
- return reinterpret_cast<jlong>(self);
+ return reinterpret_cast<jlong>(ThreadForEnv(env));
}
static void ZygoteHooks_nativePostForkChild(JNIEnv* env, jclass, jlong token, jint debug_flags,
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 7f5a611d83..1f07336779 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -57,9 +57,8 @@ ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::ArtFiel
}
template<bool kAllowReferences>
-ALWAYS_INLINE inline static bool GetFieldValue(
- const ScopedFastNativeObjectAccess& soa, mirror::Object* o, mirror::ArtField* f,
- Primitive::Type field_type, JValue* value)
+ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::ArtField* f,
+ Primitive::Type field_type, JValue* value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(value->GetJ(), INT64_C(0));
switch (field_type) {
@@ -148,7 +147,7 @@ static jobject Field_get(JNIEnv* env, jobject javaField, jobject javaObj, jboole
// Get the field's value, boxing if necessary.
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
JValue value;
- if (!GetFieldValue<true>(soa, o, f, field_type, &value)) {
+ if (!GetFieldValue<true>(o, f, field_type, &value)) {
DCHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
@@ -178,13 +177,13 @@ ALWAYS_INLINE inline static JValue GetPrimitiveField(JNIEnv* env, jobject javaFi
JValue field_value;
if (field_type == kPrimitiveType) {
// This if statement should get optimized out since we only pass in valid primitive types.
- if (UNLIKELY(!GetFieldValue<false>(soa, o, f, kPrimitiveType, &field_value))) {
+ if (UNLIKELY(!GetFieldValue<false>(o, f, kPrimitiveType, &field_value))) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
return field_value;
}
- if (!GetFieldValue<false>(soa, o, f, field_type, &field_value)) {
+ if (!GetFieldValue<false>(o, f, field_type, &field_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
}
@@ -232,9 +231,8 @@ static jshort Field_getShort(JNIEnv* env, jobject javaField, jobject javaObj, jb
return GetPrimitiveField<Primitive::kPrimShort>(env, javaField, javaObj, accessible).GetS();
}
-static void SetFieldValue(ScopedFastNativeObjectAccess& soa, mirror::Object* o,
- mirror::ArtField* f, Primitive::Type field_type, bool allow_references,
- const JValue& new_value)
+static void SetFieldValue(mirror::Object* o, mirror::ArtField* f, Primitive::Type field_type,
+ bool allow_references, const JValue& new_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(f->GetDeclaringClass()->IsInitialized());
switch (field_type) {
@@ -317,7 +315,7 @@ static void Field_set(JNIEnv* env, jobject javaField, jobject javaObj, jobject j
DCHECK(soa.Self()->IsExceptionPending());
return;
}
- SetFieldValue(soa, o, f, field_prim_type, true, unboxed_value);
+ SetFieldValue(o, f, field_prim_type, true, unboxed_value);
}
template<Primitive::Type kPrimitiveType>
@@ -350,7 +348,7 @@ static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj,
}
// Write the value.
- SetFieldValue(soa, o, f, field_type, false, wide_value);
+ SetFieldValue(o, f, field_type, false, wide_value);
}
static void Field_setBoolean(JNIEnv* env, jobject javaField, jobject javaObj, jboolean z,
diff --git a/runtime/noop_compiler_callbacks.h b/runtime/noop_compiler_callbacks.h
index e9ad3531b6..300abc9f7a 100644
--- a/runtime/noop_compiler_callbacks.h
+++ b/runtime/noop_compiler_callbacks.h
@@ -26,11 +26,11 @@ class NoopCompilerCallbacks FINAL : public CompilerCallbacks {
NoopCompilerCallbacks() {}
~NoopCompilerCallbacks() {}
- bool MethodVerified(verifier::MethodVerifier* verifier) OVERRIDE {
+ bool MethodVerified(verifier::MethodVerifier* verifier ATTRIBUTE_UNUSED) OVERRIDE {
return true;
}
- void ClassRejected(ClassReference ref) OVERRIDE {}
+ void ClassRejected(ClassReference ref ATTRIBUTE_UNUSED) OVERRIDE {}
// This is only used by compilers which need to be able to run without relocation even when it
// would normally be enabled. For example the patchoat executable, and dex2oat --image, both need
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 6b64c257df..e3bd541f27 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -741,7 +741,7 @@ void ParsedOptions::Abort() {
}
void ParsedOptions::UsageMessageV(FILE* stream, const char* fmt, va_list ap) {
- hook_vfprintf_(stderr, fmt, ap);
+ hook_vfprintf_(stream, fmt, ap);
}
void ParsedOptions::UsageMessage(FILE* stream, const char* fmt, ...) {
diff --git a/runtime/profiler_options.h b/runtime/profiler_options.h
index e3ef69739b..1db2f0508c 100644
--- a/runtime/profiler_options.h
+++ b/runtime/profiler_options.h
@@ -26,6 +26,7 @@ enum ProfileDataType {
kProfilerMethod, // Method only
kProfilerBoundedStack, // Methods with Dex PC on top of the stack
};
+std::ostream& operator<<(std::ostream& os, const ProfileDataType& rhs);
class ProfilerOptions {
public:
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index c58735a94e..90c9fe7c32 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -297,7 +297,7 @@ void QuickExceptionHandler::DeoptimizeStack() {
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
class InstrumentationStackVisitor : public StackVisitor {
public:
- InstrumentationStackVisitor(Thread* self, bool is_deoptimization, size_t frame_depth)
+ InstrumentationStackVisitor(Thread* self, size_t frame_depth)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
: StackVisitor(self, nullptr),
frame_depth_(frame_depth),
@@ -332,7 +332,7 @@ class InstrumentationStackVisitor : public StackVisitor {
void QuickExceptionHandler::UpdateInstrumentationStack() {
if (method_tracing_active_) {
- InstrumentationStackVisitor visitor(self_, is_deoptimization_, handler_frame_depth_);
+ InstrumentationStackVisitor visitor(self_, handler_frame_depth_);
visitor.WalkStack(true);
size_t instrumentation_frames_to_pop = visitor.GetInstrumentationFramesToPop();
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index fd43d78835..0dc31e7bc1 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -27,9 +27,7 @@ template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
inline MirrorType* ReadBarrier::Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr) {
// Unused for now.
- UNUSED(obj);
- UNUSED(offset);
- UNUSED(ref_addr);
+ UNUSED(obj, offset, ref_addr);
const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
if (with_read_barrier && kUseBakerReadBarrier) {
// To be implemented.
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 228d2007d4..44d1bc4ad1 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -219,8 +219,7 @@ class ArgArray {
PrettyDescriptor(found_descriptor).c_str()).c_str());
}
- bool BuildArgArrayFromObjectArray(const ScopedObjectAccessAlreadyRunnable& soa,
- mirror::Object* receiver,
+ bool BuildArgArrayFromObjectArray(mirror::Object* receiver,
mirror::ObjectArray<mirror::Object>* args, MethodHelper& mh)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile::TypeList* classes = mh.GetMethod()->GetParameterTypeList();
@@ -613,7 +612,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
ArgArray arg_array(shorty, shorty_len);
StackHandleScope<1> hs(soa.Self());
MethodHelper mh(hs.NewHandle(m));
- if (!arg_array.BuildArgArrayFromObjectArray(soa, receiver, objects, mh)) {
+ if (!arg_array.BuildArgArrayFromObjectArray(receiver, objects, mh)) {
CHECK(soa.Self()->IsExceptionPending());
return nullptr;
}
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index de3e976a86..4ac9634158 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -576,8 +576,7 @@ void Runtime::StartDaemonThreads() {
VLOG(startup) << "Runtime::StartDaemonThreads exiting";
}
-static bool OpenDexFilesFromImage(const std::vector<std::string>& dex_filenames,
- const std::string& image_location,
+static bool OpenDexFilesFromImage(const std::string& image_location,
std::vector<const DexFile*>& dex_files,
size_t* failures) {
std::string system_filename;
@@ -639,8 +638,7 @@ static size_t OpenDexFiles(const std::vector<std::string>& dex_filenames,
const std::string& image_location,
std::vector<const DexFile*>& dex_files) {
size_t failure_count = 0;
- if (!image_location.empty() && OpenDexFilesFromImage(dex_filenames, image_location, dex_files,
- &failure_count)) {
+ if (!image_location.empty() && OpenDexFilesFromImage(image_location, dex_files, &failure_count)) {
return failure_count;
}
failure_count = 0;
@@ -828,7 +826,7 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
if (!HasCalleeSaveMethod(type)) {
- SetCalleeSaveMethod(CreateCalleeSaveMethod(type), type);
+ SetCalleeSaveMethod(CreateCalleeSaveMethod(), type);
}
}
} else {
@@ -1260,7 +1258,7 @@ mirror::ArtMethod* Runtime::CreateResolutionMethod() {
return method.Get();
}
-mirror::ArtMethod* Runtime::CreateCalleeSaveMethod(CalleeSaveType type) {
+mirror::ArtMethod* Runtime::CreateCalleeSaveMethod() {
Thread* self = Thread::Current();
Runtime* runtime = Runtime::Current();
ClassLinker* class_linker = runtime->GetClassLinker();
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 11db613007..3cbe1e5eb5 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -55,8 +55,8 @@ namespace mirror {
class Throwable;
} // namespace mirror
namespace verifier {
-class MethodVerifier;
-}
+ class MethodVerifier;
+} // namespace verifier
class ClassLinker;
class DexFile;
class InternTable;
@@ -379,8 +379,7 @@ class Runtime {
void SetCalleeSaveMethod(mirror::ArtMethod* method, CalleeSaveType type);
- mirror::ArtMethod* CreateCalleeSaveMethod(CalleeSaveType type)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ mirror::ArtMethod* CreateCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetStat(int kind);
@@ -655,6 +654,7 @@ class Runtime {
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
+std::ostream& operator<<(std::ostream& os, const Runtime::CalleeSaveType& rhs);
} // namespace art
diff --git a/runtime/stack.h b/runtime/stack.h
index 2f8df61099..66c840d7c3 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -53,6 +53,7 @@ enum VRegKind {
kImpreciseConstant,
kUndefined,
};
+std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
// A reference from the shadow stack to a MirrorType object within the Java heap.
template<class MirrorType>
@@ -336,9 +337,7 @@ class ShadowFrame {
}
#if defined(ART_USE_PORTABLE_COMPILER)
- enum ShadowFrameFlag {
- kHasReferenceArray = 1ul << 31
- };
+  static constexpr uint32_t kHasReferenceArray = 1ul << 31;
// TODO: make const in the portable case.
uint32_t number_of_vregs_;
#else
@@ -633,6 +632,7 @@ class StackVisitor {
}
static int GetOutVROffset(uint16_t out_num, InstructionSet isa) {
+ UNUSED(isa);
// According to the stack model, the first out is above the Method reference.
return sizeof(StackReference<mirror::ArtMethod>) + (out_num * sizeof(uint32_t));
}
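
For the GetOutVROffset hunk above, the computed offset does not depend on the instruction set, which is why isa can be marked unused. A worked sketch of the arithmetic, assuming a 4-byte StackReference<mirror::ArtMethod>; the actual size comes from the ART headers:

// Same formula as the method above, with the sizeof values spelled out.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static int OutVROffset(uint16_t out_num) {
  constexpr size_t kMethodRefBytes = 4;  // assumed sizeof(StackReference<mirror::ArtMethod>)
  return static_cast<int>(kMethodRefBytes + out_num * sizeof(uint32_t));
}

int main() {
  // Outs sit just above the method reference: 4, 8, 12, ...
  std::printf("%d %d %d\n", OutVROffset(0), OutVROffset(1), OutVROffset(2));
  return 0;
}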
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index f8c8fdbfee..e3ef4eb26e 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -220,7 +220,7 @@ static void UnsafeLogFatalForThreadSuspendAllTimeout() {
// individual thread requires polling. delay_us is the requested sleep and total_delay_us
// accumulates the total time spent sleeping for timeouts. The first sleep is just a yield,
// subsequent sleeps increase delay_us from 1ms to 500ms by doubling.
-static void ThreadSuspendSleep(Thread* self, useconds_t* delay_us, useconds_t* total_delay_us) {
+static void ThreadSuspendSleep(useconds_t* delay_us, useconds_t* total_delay_us) {
useconds_t new_delay_us = (*delay_us) * 2;
CHECK_GE(new_delay_us, *delay_us);
if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s.
@@ -285,7 +285,7 @@ size_t ThreadList::RunCheckpoint(Closure* checkpoint_function) {
useconds_t total_delay_us = 0;
do {
useconds_t delay_us = 100;
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
} while (!thread->IsSuspended());
// Shouldn't need to wait for longer than 1000 microseconds.
constexpr useconds_t kLongWaitThresholdUS = 1000;
@@ -561,7 +561,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByPeer sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
@@ -639,7 +639,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
// Release locks and come out of runnable state.
}
VLOG(threads) << "SuspendThreadByThreadId sleeping to allow thread chance to suspend";
- ThreadSuspendSleep(self, &delay_us, &total_delay_us);
+ ThreadSuspendSleep(&delay_us, &total_delay_us);
}
}
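
The comment before ThreadSuspendSleep describes a capped exponential back-off: yield on the first round, then double the delay each time while never sleeping longer than 0.5s, accumulating the total for timeout detection. A standalone sketch that mirrors the shape of that helper, not its exact code:

#include <cstdio>
#include <sched.h>
#include <unistd.h>

static void SuspendSleep(useconds_t* delay_us, useconds_t* total_delay_us) {
  useconds_t new_delay_us = (*delay_us) * 2;
  if (new_delay_us < 500000) {  // cap individual sleeps at 0.5s
    *delay_us = new_delay_us;
  }
  if (*delay_us == 0) {
    sched_yield();              // first round: just yield
    *delay_us = 500;            // then start sleeping for real
  } else {
    usleep(*delay_us);
  }
  *total_delay_us += *delay_us;
}

int main() {
  useconds_t delay_us = 0;
  useconds_t total_delay_us = 0;
  for (int i = 0; i < 5; ++i) {
    SuspendSleep(&delay_us, &total_delay_us);
    std::printf("round %d: delay=%uus total=%uus\n", i,
                static_cast<unsigned>(delay_us), static_cast<unsigned>(total_delay_us));
  }
  return 0;
}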
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index e8c9ff85db..a7f2ecdd81 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -42,14 +42,14 @@ ThreadPoolWorker::ThreadPoolWorker(ThreadPool* thread_pool, const std::string& n
}
ThreadPoolWorker::~ThreadPoolWorker() {
- CHECK_PTHREAD_CALL(pthread_join, (pthread_, NULL), "thread pool worker shutdown");
+ CHECK_PTHREAD_CALL(pthread_join, (pthread_, nullptr), "thread pool worker shutdown");
}
void ThreadPoolWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
thread_pool_->creation_barier_.Wait(self);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -58,11 +58,11 @@ void ThreadPoolWorker::Run() {
void* ThreadPoolWorker::Callback(void* arg) {
ThreadPoolWorker* worker = reinterpret_cast<ThreadPoolWorker*>(arg);
Runtime* runtime = Runtime::Current();
- CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, NULL, false));
+ CHECK(runtime->AttachCurrentThread(worker->name_.c_str(), true, nullptr, false));
// Do work until it's time to shut down.
worker->Run();
runtime->DetachCurrentThread();
- return NULL;
+ return nullptr;
}
void ThreadPool::AddTask(Thread* self, Task* task) {
@@ -137,8 +137,8 @@ Task* ThreadPool::GetTask(Thread* self) {
const size_t active_threads = thread_count - waiting_count_;
// <= since self is considered an active worker.
if (active_threads <= max_active_workers_) {
- Task* task = TryGetTaskLocked(self);
- if (task != NULL) {
+ Task* task = TryGetTaskLocked();
+ if (task != nullptr) {
return task;
}
}
@@ -157,28 +157,28 @@ Task* ThreadPool::GetTask(Thread* self) {
--waiting_count_;
}
- // We are shutting down, return NULL to tell the worker thread to stop looping.
- return NULL;
+ // We are shutting down, return nullptr to tell the worker thread to stop looping.
+ return nullptr;
}
Task* ThreadPool::TryGetTask(Thread* self) {
MutexLock mu(self, task_queue_lock_);
- return TryGetTaskLocked(self);
+ return TryGetTaskLocked();
}
-Task* ThreadPool::TryGetTaskLocked(Thread* self) {
+Task* ThreadPool::TryGetTaskLocked() {
if (started_ && !tasks_.empty()) {
Task* task = tasks_.front();
tasks_.pop_front();
return task;
}
- return NULL;
+ return nullptr;
}
void ThreadPool::Wait(Thread* self, bool do_work, bool may_hold_locks) {
if (do_work) {
- Task* task = NULL;
- while ((task = TryGetTask(self)) != NULL) {
+ Task* task = nullptr;
+ while ((task = TryGetTask(self)) != nullptr) {
task->Run(self);
task->Finalize();
}
@@ -201,17 +201,17 @@ size_t ThreadPool::GetTaskCount(Thread* self) {
WorkStealingWorker::WorkStealingWorker(ThreadPool* thread_pool, const std::string& name,
size_t stack_size)
- : ThreadPoolWorker(thread_pool, name, stack_size), task_(NULL) {}
+ : ThreadPoolWorker(thread_pool, name, stack_size), task_(nullptr) {}
void WorkStealingWorker::Run() {
Thread* self = Thread::Current();
- Task* task = NULL;
+ Task* task = nullptr;
WorkStealingThreadPool* thread_pool = down_cast<WorkStealingThreadPool*>(thread_pool_);
- while ((task = thread_pool_->GetTask(self)) != NULL) {
+ while ((task = thread_pool_->GetTask(self)) != nullptr) {
WorkStealingTask* stealing_task = down_cast<WorkStealingTask*>(task);
{
- CHECK(task_ == NULL);
+ CHECK(task_ == nullptr);
MutexLock mu(self, thread_pool->work_steal_lock_);
// Register that we are running the task
++stealing_task->ref_count_;
@@ -221,7 +221,7 @@ void WorkStealingWorker::Run() {
// Mark ourselves as not running a task so that nobody tries to steal from us.
// There is a race condition that someone starts stealing from us at this point. This is okay
// due to the reference counting.
- task_ = NULL;
+ task_ = nullptr;
bool finalize;
@@ -229,13 +229,13 @@ void WorkStealingWorker::Run() {
// all that happens when the race occurs is that we steal some work instead of processing a
// task from the queue.
while (thread_pool->GetTaskCount(self) == 0) {
- WorkStealingTask* steal_from_task = NULL;
+ WorkStealingTask* steal_from_task = nullptr;
{
MutexLock mu(self, thread_pool->work_steal_lock_);
// Try finding a task to steal from.
- steal_from_task = thread_pool->FindTaskToStealFrom(self);
- if (steal_from_task != NULL) {
+ steal_from_task = thread_pool->FindTaskToStealFrom();
+ if (steal_from_task != nullptr) {
CHECK_NE(stealing_task, steal_from_task)
<< "Attempting to steal from completed self task";
steal_from_task->ref_count_++;
@@ -244,7 +244,7 @@ void WorkStealingWorker::Run() {
}
}
- if (steal_from_task != NULL) {
+ if (steal_from_task != nullptr) {
// Task which completed earlier is going to steal some work.
stealing_task->StealFrom(self, steal_from_task);
@@ -284,7 +284,7 @@ WorkStealingThreadPool::WorkStealingThreadPool(const char* name, size_t num_thre
}
}
-WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
+WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom() {
const size_t thread_count = GetThreadCount();
for (size_t i = 0; i < thread_count; ++i) {
// TODO: Use CAS instead of lock.
@@ -301,7 +301,7 @@ WorkStealingTask* WorkStealingThreadPool::FindTaskToStealFrom(Thread* self) {
}
}
// Couldn't find something to steal.
- return NULL;
+ return nullptr;
}
WorkStealingThreadPool::~WorkStealingThreadPool() {}
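
The comments in WorkStealingWorker::Run explain why the "someone steals from us right as we finish" race is harmless: both the owning worker and any stealer hold a reference count on the task, and only whoever drops the last reference finalizes it. A simplified sketch of that idea, not ART's code:

#include <atomic>
#include <cstdio>

class StealableTask {
 public:
  void Acquire() { ref_count_.fetch_add(1, std::memory_order_relaxed); }
  void Release() {
    if (ref_count_.fetch_sub(1, std::memory_order_acq_rel) == 1) {
      Finalize();  // the last user cleans up exactly once
    }
  }

 private:
  void Finalize() { std::printf("task finalized\n"); }
  std::atomic<int> ref_count_{0};
};

int main() {
  StealableTask task;
  task.Acquire();   // owning worker registers itself
  task.Acquire();   // a stealer registers itself concurrently
  task.Release();   // owner finishes: the task stays alive for the stealer
  task.Release();   // stealer finishes: Finalize runs here
  return 0;
}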
diff --git a/runtime/thread_pool.h b/runtime/thread_pool.h
index c816c84561..d6330c8de5 100644
--- a/runtime/thread_pool.h
+++ b/runtime/thread_pool.h
@@ -101,7 +101,7 @@ class ThreadPool {
// Try to get a task, returning NULL if there is none available.
Task* TryGetTask(Thread* self);
- Task* TryGetTaskLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
+ Task* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_);
// Are we shutting down?
bool IsShuttingDown() const EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
@@ -178,7 +178,7 @@ class WorkStealingThreadPool : public ThreadPool {
size_t steal_index_;
// Find a task to steal from
- WorkStealingTask* FindTaskToStealFrom(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
+ WorkStealingTask* FindTaskToStealFrom() EXCLUSIVE_LOCKS_REQUIRED(work_steal_lock_);
friend class WorkStealingWorker;
};
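
Dropping the Thread* self parameter from the *Locked helpers is safe because their locking contract is already carried by the annotations: the public method takes the mutex, and the "Locked" variant declares that it may only be called with the lock held. A sketch of that convention with simplified stand-ins for ART's Mutex and MutexLock:

#include <deque>
#include <mutex>

// In ART these expand to Clang thread-safety attributes on its own annotated
// Mutex class; reduced to no-ops here so the sketch builds anywhere.
#define GUARDED_BY(x)
#define EXCLUSIVE_LOCKS_REQUIRED(x)

class TaskQueue {
 public:
  int* TryGetTask() {
    std::lock_guard<std::mutex> mu(task_queue_lock_);  // public entry point locks
    return TryGetTaskLocked();
  }

 private:
  int* TryGetTaskLocked() EXCLUSIVE_LOCKS_REQUIRED(task_queue_lock_) {
    if (tasks_.empty()) {
      return nullptr;          // nothing to hand out
    }
    int* task = tasks_.front();
    tasks_.pop_front();
    return task;
  }

  std::mutex task_queue_lock_;
  std::deque<int*> tasks_ GUARDED_BY(task_queue_lock_);
};

int main() {
  TaskQueue queue;
  return queue.TryGetTask() == nullptr ? 0 : 1;
}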
diff --git a/runtime/thread_state.h b/runtime/thread_state.h
index 0e47d2166b..6e5deeb0fe 100644
--- a/runtime/thread_state.h
+++ b/runtime/thread_state.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_THREAD_STATE_H_
#define ART_RUNTIME_THREAD_STATE_H_
+#include <ostream>
+
namespace art {
enum ThreadState {
@@ -43,6 +45,7 @@ enum ThreadState {
kNative, // RUNNABLE TS_RUNNING running in a JNI native method
kSuspended, // RUNNABLE TS_RUNNING suspended by GC or debugger
};
+std::ostream& operator<<(std::ostream& os, const ThreadState& rhs);
} // namespace art
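
The operator<< declared above is implemented by tools/generate-operator-out.py from the enum body. A hand-written sketch of the kind of switch the generated .cc file amounts to, with the enum shortened for illustration; the real output covers every enumerator:

#include <iostream>

enum ThreadState {
  kTerminated,
  kRunnable,
  kNative,
  kSuspended,
};

std::ostream& operator<<(std::ostream& os, const ThreadState& rhs) {
  switch (rhs) {
    case kTerminated: os << "kTerminated"; break;
    case kRunnable:   os << "kRunnable";   break;
    case kNative:     os << "kNative";     break;
    case kSuspended:  os << "kSuspended";  break;
    default:          os << "ThreadState[" << static_cast<int>(rhs) << "]"; break;
  }
  return os;
}

int main() {
  std::cout << kNative << "\n";  // prints the enumerator name instead of a bare int
  return 0;
}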
diff --git a/runtime/trace.cc b/runtime/trace.cc
index b3158a48a6..29c01e4d47 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -244,7 +244,8 @@ static void GetSample(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mu
the_trace->CompareAndUpdateStackTrace(thread, stack_trace);
}
-static void ClearThreadStackTraceAndClockBase(Thread* thread, void* arg) {
+static void ClearThreadStackTraceAndClockBase(Thread* thread,
+ void* arg ATTRIBUTE_UNUSED) {
thread->SetTraceClockBase(0);
std::vector<mirror::ArtMethod*>* stack_trace = thread->GetStackTraceSample();
thread->SetStackTraceSample(NULL);
@@ -561,27 +562,30 @@ void Trace::FinishTracing() {
void Trace::DexPcMoved(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t new_dex_pc) {
+ UNUSED(thread, this_object, method, new_dex_pc);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected dex PC event in tracing " << PrettyMethod(method) << " " << new_dex_pc;
}
-void Trace::FieldRead(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldRead(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field read event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::FieldWritten(Thread* /*thread*/, mirror::Object* this_object,
+void Trace::FieldWritten(Thread* thread, mirror::Object* this_object,
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, this_object, method, dex_pc, field, field_value);
// We're not registered to listen to this kind of event, so complain.
LOG(ERROR) << "Unexpected field write event in tracing " << PrettyMethod(method) << " " << dex_pc;
}
-void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodEntered(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -589,10 +593,9 @@ void Trace::MethodEntered(Thread* thread, mirror::Object* this_object,
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc,
- const JValue& return_value) {
- UNUSED(return_value);
+void Trace::MethodExited(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED,
+ const JValue& return_value ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -600,8 +603,8 @@ void Trace::MethodExited(Thread* thread, mirror::Object* this_object,
thread_clock_diff, wall_clock_diff);
}
-void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object,
- mirror::ArtMethod* method, uint32_t dex_pc) {
+void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method, uint32_t dex_pc ATTRIBUTE_UNUSED) {
uint32_t thread_clock_diff = 0;
uint32_t wall_clock_diff = 0;
ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
@@ -613,6 +616,7 @@ void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index b496f25c23..478066f018 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -144,7 +144,7 @@ void Transaction::RecordWeakStringRemoval(mirror::String* s) {
LogInternedString(log);
}
-void Transaction::LogInternedString(InternStringLog& log) {
+void Transaction::LogInternedString(const InternStringLog& log) {
Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
MutexLock mu(Thread::Current(), log_lock_);
intern_string_logs_.push_front(log);
@@ -384,7 +384,7 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
}
break;
default:
- LOG(FATAL) << "Unknown value kind " << field_value.kind;
+ LOG(FATAL) << "Unknown value kind " << static_cast<int>(field_value.kind);
break;
}
}
@@ -406,38 +406,38 @@ void Transaction::ObjectLog::VisitRoots(RootCallback* callback, void* arg) {
void Transaction::InternStringLog::Undo(InternTable* intern_table) {
DCHECK(intern_table != nullptr);
switch (string_op_) {
- case InternStringLog::kInsert: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->RemoveStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->RemoveWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
+ case InternStringLog::kInsert: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->RemoveStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->RemoveWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
}
- case InternStringLog::kRemove: {
- switch (string_kind_) {
- case InternStringLog::kStrongString:
- intern_table->InsertStrongFromTransaction(str_);
- break;
- case InternStringLog::kWeakString:
- intern_table->InsertWeakFromTransaction(str_);
- break;
- default:
- LOG(FATAL) << "Unknown interned string kind";
- break;
- }
- break;
+ break;
+ }
+ case InternStringLog::kRemove: {
+ switch (string_kind_) {
+ case InternStringLog::kStrongString:
+ intern_table->InsertStrongFromTransaction(str_);
+ break;
+ case InternStringLog::kWeakString:
+ intern_table->InsertWeakFromTransaction(str_);
+ break;
+ default:
+ LOG(FATAL) << "Unknown interned string kind";
+ break;
}
- default:
- LOG(FATAL) << "Unknown interned string op";
- break;
+ break;
}
+ default:
+ LOG(FATAL) << "Unknown interned string op";
+ break;
+ }
}
void Transaction::InternStringLog::VisitRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/transaction.h b/runtime/transaction.h
index 21d3c98054..566f231de6 100644
--- a/runtime/transaction.h
+++ b/runtime/transaction.h
@@ -19,6 +19,7 @@
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/value_object.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "primitive.h"
@@ -35,7 +36,7 @@ class String;
}
class InternTable;
-class Transaction {
+class Transaction FINAL {
public:
Transaction();
~Transaction();
@@ -92,7 +93,7 @@ class Transaction {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- class ObjectLog {
+ class ObjectLog : public ValueObject {
public:
void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
@@ -119,7 +120,7 @@ class Transaction {
k64Bits,
kReference
};
- struct FieldValue {
+ struct FieldValue : public ValueObject {
// TODO use JValue instead ?
uint64_t value;
FieldValueKind kind;
@@ -134,7 +135,7 @@ class Transaction {
std::map<uint32_t, FieldValue> field_values_;
};
- class ArrayLog {
+ class ArrayLog : public ValueObject {
public:
void LogValue(size_t index, uint64_t value);
@@ -153,7 +154,7 @@ class Transaction {
std::map<size_t, uint64_t> array_values_;
};
- class InternStringLog {
+ class InternStringLog : public ValueObject {
public:
enum StringKind {
kStrongString,
@@ -175,11 +176,11 @@ class Transaction {
private:
mirror::String* str_;
- StringKind string_kind_;
- StringOp string_op_;
+ const StringKind string_kind_;
+ const StringOp string_op_;
};
- void LogInternedString(InternStringLog& log)
+ void LogInternedString(const InternStringLog& log)
EXCLUSIVE_LOCKS_REQUIRED(Locks::intern_table_lock_)
LOCKS_EXCLUDED(log_lock_);
diff --git a/runtime/utils.h b/runtime/utils.h
index b7daa64d90..39011e29c7 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -467,15 +467,12 @@ class VoidFunctor {
template <typename A, typename B>
inline void operator() (A a, B b) const {
- UNUSED(a);
- UNUSED(b);
+ UNUSED(a, b);
}
template <typename A, typename B, typename C>
inline void operator() (A a, B b, C c) const {
- UNUSED(a);
- UNUSED(b);
- UNUSED(c);
+ UNUSED(a, b, c);
}
};
diff --git a/test/004-JniTest/jni_test.cc b/test/004-JniTest/jni_test.cc
index 6fc4484731..c2877be02e 100644
--- a/test/004-JniTest/jni_test.cc
+++ b/test/004-JniTest/jni_test.cc
@@ -172,7 +172,7 @@ extern "C" void JNICALL Java_Main_testZeroLengthByteBuffers(JNIEnv* env, jclass)
constexpr size_t kByteReturnSize = 7;
jbyte byte_returns[kByteReturnSize] = { 0, 1, 2, 127, -1, -2, -128 };
-extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv* env, jclass klass, jbyte b1, jbyte b2,
+extern "C" jbyte JNICALL Java_Main_byteMethod(JNIEnv*, jclass, jbyte b1, jbyte b2,
jbyte b3, jbyte b4, jbyte b5, jbyte b6,
jbyte b7, jbyte b8, jbyte b9, jbyte b10) {
// We use b1 to drive the output.
@@ -197,7 +197,7 @@ jshort short_returns[kShortReturnSize] = { 0, 1, 2, 127, 32767, -1, -2, -128,
static_cast<jshort>(0x8000) };
// The weird static_cast is because short int is only guaranteed down to -32767, not Java's -32768.
-extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshort s1, jshort s2,
+extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv*, jclass, jshort s1, jshort s2,
jshort s3, jshort s4, jshort s5, jshort s6,
jshort s7, jshort s8, jshort s9, jshort s10) {
// We use s1 to drive the output.
@@ -217,7 +217,7 @@ extern "C" jshort JNICALL Java_Main_shortMethod(JNIEnv* env, jclass klass, jshor
return short_returns[s1];
}
-extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, jboolean b1,
+extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv*, jclass, jboolean b1,
jboolean b2, jboolean b3, jboolean b4,
jboolean b5, jboolean b6, jboolean b7,
jboolean b8, jboolean b9, jboolean b10) {
@@ -239,7 +239,7 @@ extern "C" jboolean JNICALL Java_Main_booleanMethod(JNIEnv* env, jclass klass, j
constexpr size_t kCharReturnSize = 8;
jchar char_returns[kCharReturnSize] = { 0, 1, 2, 127, 255, 256, 15000, 34000 };
-extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv* env, jclass klacc, jchar c1, jchar c2,
+extern "C" jchar JNICALL Java_Main_charMethod(JNIEnv*, jclass, jchar c1, jchar c2,
jchar c3, jchar c4, jchar c5, jchar c6, jchar c7,
jchar c8, jchar c9, jchar c10) {
// We use c1 to drive the output.
@@ -312,7 +312,7 @@ static void testShallowGetCallingClassLoader(JNIEnv* env) {
}
// http://b/16867274
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv* env,
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetCallingClassLoader(JNIEnv*,
jclass) {
PthreadHelper(&testShallowGetCallingClassLoader);
}
@@ -350,7 +350,7 @@ static void testShallowGetStackClass2(JNIEnv* env) {
// ourselves.
}
-extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv* env, jclass) {
+extern "C" JNIEXPORT void JNICALL Java_Main_nativeTestShallowGetStackClass2(JNIEnv*, jclass) {
PthreadHelper(&testShallowGetStackClass2);
}
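
The comment in the shortMethod test notes that short int is only guaranteed down to -32767, so Java's -32768 has to be produced through an explicit cast of 0x8000 (an int literal that does not fit in a signed 16-bit short). A tiny illustration, assuming the usual two's-complement target:

#include <cstdint>
#include <cstdio>

int main() {
  int16_t s = static_cast<int16_t>(0x8000);  // stand-in for static_cast<jshort>(0x8000)
  std::printf("%d\n", s);                    // prints -32768 on two's-complement targets
  return 0;
}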
diff --git a/test/004-SignalTest/signaltest.cc b/test/004-SignalTest/signaltest.cc
index a6d9b668cc..31371f6d22 100644
--- a/test/004-SignalTest/signaltest.cc
+++ b/test/004-SignalTest/signaltest.cc
@@ -14,14 +14,14 @@
* limitations under the License.
*/
+#include <jni.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
+#include <sys/ucontext.h>
#include <unistd.h>
-#include "jni.h"
-
-#include <sys/ucontext.h>
+#include "base/macros.h"
static int signal_count;
static const int kMaxSignal = 2;
@@ -47,7 +47,8 @@ static const int kMaxSignal = 2;
#endif
#endif
-static void signalhandler(int sig, siginfo_t* info, void* context) {
+static void signalhandler(int sig ATTRIBUTE_UNUSED, siginfo_t* info ATTRIBUTE_UNUSED,
+ void* context) {
printf("signal caught\n");
++signal_count;
if (signal_count > kMaxSignal) {
diff --git a/test/115-native-bridge/nativebridge.cc b/test/115-native-bridge/nativebridge.cc
index 23145e3216..6bcc1f5413 100644
--- a/test/115-native-bridge/nativebridge.cc
+++ b/test/115-native-bridge/nativebridge.cc
@@ -18,13 +18,14 @@
#include <algorithm>
#include <dlfcn.h>
+#include <jni.h>
#include <vector>
-#include "jni.h"
#include "stdio.h"
#include "unistd.h"
#include "sys/stat.h"
+#include "base/macros.h"
#include "nativebridge/native_bridge.h"
struct NativeBridgeMethod {
@@ -209,7 +210,8 @@ static NativeBridgeMethod* find_native_bridge_method(const char *name) {
// NativeBridgeCallbacks implementations
extern "C" bool native_bridge_initialize(const android::NativeBridgeRuntimeCallbacks* art_cbs,
- const char* app_code_cache_dir, const char* isa) {
+ const char* app_code_cache_dir,
+ const char* isa ATTRIBUTE_UNUSED) {
struct stat st;
if ((app_code_cache_dir != nullptr)
&& (stat(app_code_cache_dir, &st) == 0)
@@ -248,7 +250,7 @@ extern "C" void* native_bridge_loadLibrary(const char* libpath, int flag) {
}
extern "C" void* native_bridge_getTrampoline(void* handle, const char* name, const char* shorty,
- uint32_t len) {
+ uint32_t len ATTRIBUTE_UNUSED) {
printf("Getting trampoline for %s with shorty %s.\n", name, shorty);
// The name here is actually the JNI name, so we can directly do the lookup.
diff --git a/test/116-nodex2oat/nodex2oat.cc b/test/116-nodex2oat/nodex2oat.cc
index 04cac45f8e..564d58d251 100644
--- a/test/116-nodex2oat/nodex2oat.cc
+++ b/test/116-nodex2oat/nodex2oat.cc
@@ -38,7 +38,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasOat(JNIEnv*, jclass cls) {
return NoDex2OatTest::hasOat(cls);
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isDex2OatEnabled(JNIEnv*, jclass) {
return Runtime::Current()->IsDex2OatEnabled();
}
diff --git a/test/118-noimage-dex2oat/noimage-dex2oat.cc b/test/118-noimage-dex2oat/noimage-dex2oat.cc
index 7340d9e7f7..c49a13e8f4 100644
--- a/test/118-noimage-dex2oat/noimage-dex2oat.cc
+++ b/test/118-noimage-dex2oat/noimage-dex2oat.cc
@@ -34,11 +34,11 @@ class NoDex2OatTest {
}
};
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasImage(JNIEnv*, jclass) {
return Runtime::Current()->GetHeap()->HasImageSpace();
}
-extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass cls) {
+extern "C" JNIEXPORT jboolean JNICALL Java_Main_isImageDex2OatEnabled(JNIEnv*, jclass) {
return Runtime::Current()->IsImageDex2OatEnabled();
}
diff --git a/tools/generate-operator-out.py b/tools/generate-operator-out.py
index f666ad154b..2b57222049 100755
--- a/tools/generate-operator-out.py
+++ b/tools/generate-operator-out.py
@@ -39,6 +39,7 @@ def Confused(filename, line_number, line):
def ProcessFile(filename):
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
in_enum = False
+ is_enum_private = False
is_enum_class = False
line_number = 0
@@ -57,15 +58,16 @@ def ProcessFile(filename):
# Except when it's private
if m.group(3) is not None:
- continue
-
- is_enum_class = m.group(1) is not None
- enum_name = m.group(2)
- if len(enclosing_classes) > 0:
- enum_name = '::'.join(enclosing_classes) + '::' + enum_name
- _ENUMS[enum_name] = []
- _NAMESPACES[enum_name] = '::'.join(namespaces)
- _ENUM_CLASSES[enum_name] = is_enum_class
+ is_enum_private = True
+ else:
+ is_enum_private = False
+ is_enum_class = m.group(1) is not None
+ enum_name = m.group(2)
+ if len(enclosing_classes) > 0:
+ enum_name = '::'.join(enclosing_classes) + '::' + enum_name
+ _ENUMS[enum_name] = []
+ _NAMESPACES[enum_name] = '::'.join(namespaces)
+ _ENUM_CLASSES[enum_name] = is_enum_class
in_enum = True
continue
@@ -80,11 +82,11 @@ def ProcessFile(filename):
continue
# Is this the start or end of an enclosing class or struct?
- m = re.compile(r'^(?:class|struct)(?: MANAGED)? (\S+).* \{').search(raw_line)
+ m = re.compile(r'^\s*(?:class|struct)(?: MANAGED)?(?: PACKED\([0-9]\))? (\S+).* \{').search(raw_line)
if m:
enclosing_classes.append(m.group(1))
continue
- m = re.compile(r'^\};').search(raw_line)
+ m = re.compile(r'^\s*\}( .*)?;').search(raw_line)
if m:
enclosing_classes = enclosing_classes[0:len(enclosing_classes) - 1]
continue
@@ -99,6 +101,9 @@ def ProcessFile(filename):
in_enum = False
continue
+ if is_enum_private:
+ continue
+
# The only useful thing in comments is the <<alternate text>> syntax for
# overriding the default enum value names. Pull that out...
enum_text = None
@@ -146,6 +151,7 @@ def ProcessFile(filename):
# There shouldn't be anything left.
if len(rest):
+ sys.stderr.write('%s\n' % (rest))
Confused(filename, line_number, raw_line)
if len(enclosing_classes) > 0: