author     Andreas Gampe <agampe@google.com>    2014-11-03 21:36:10 -0800
committer  Andreas Gampe <agampe@google.com>    2014-11-04 18:40:08 -0800
commit     277ccbd200ea43590dfc06a93ae184a765327ad0
tree       d89712e93da5fb2748989353c9ee071102cf3f33
parent     ad17d41841ba1fb177fb0bf175ec0e9f5e1412b3
ART: More warnings
Enable -Wno-conversion-null, -Wredundant-decls and -Wshadow in general,
and -Wunused-but-set-parameter for GCC builds.
Change-Id: I81bbdd762213444673c65d85edae594a523836e5
150 files changed, 1493 insertions, 682 deletions
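The bulk of the 150 touched files are mechanical renames that keep the tree warning-clean under the new flags: constructor parameters of (often function-local) slow-path classes gain an _in suffix, nested loop counters and iterators are renamed (i -> j, it -> inst_it), and the third-party vixl headers are wrapped in #pragma GCC diagnostic push / ignored "-Wshadow" / pop. The sketch below is illustrative only, not code from the ART tree; the names (Immediate, AddSlowPath, Scale) merely mirror the patterns in the diff, and it assumes a GCC 4.8-era -Wshadow. It shows the kinds of constructs the new flags complain about and the rename pattern applied throughout the change.

// Illustrative sketch only -- not from the ART tree.
// Built with something like:
//   g++ -std=gnu++11 -Wshadow -Wunused-but-set-parameter -c warning_example.cc

class Immediate {
 public:
  // Naming this parameter "value" collides with the value() accessor under GCC's
  // -Wshadow, so the diff renames such parameters with an _in suffix.
  explicit Immediate(int value_in) : value_(value_in) {}
  int value() const { return value_; }

 private:
  int value_;
};

void AddSlowPath(int branch) {
  // Function-local class, as used for the Mir2Lir slow paths: a constructor
  // parameter also named "branch" shadows the enclosing function's parameter,
  // hence the branch -> branch_in renames in gen_common.cc and friends.
  class SlowPath {
   public:
    explicit SlowPath(int branch_in) : branch_(branch_in) {}
    int branch_;
  };
  SlowPath path(branch);
  static_cast<void>(path);
}

int Scale(int factor) {
  factor = 2;  // Written but never read again: -Wunused-but-set-parameter (GCC only).
  return 42;
}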
diff --git a/build/Android.common_build.mk b/build/Android.common_build.mk
index 84d77f8ab4..7e58f5c2bb 100644
--- a/build/Android.common_build.mk
+++ b/build/Android.common_build.mk
@@ -156,15 +156,36 @@ art_clang_cflags += -Wimplicit-fallthrough
 # Enable float equality warnings.
 art_clang_cflags += -Wfloat-equal
 
+# Enable warning of converting ints to void*.
+art_clang_cflags += -Wint-to-void-pointer-cast
+
+# GCC-only warnings.
+art_gcc_cflags := -Wunused-but-set-parameter
+# Suggest const: too many false positives, but good for a trial run.
+#   -Wsuggest-attribute=const
+# Useless casts: too many, as we need to be 32/64 agnostic, but the compiler knows.
+#   -Wuseless-cast
+# Zero-as-null: Have to convert all NULL and "diagnostic ignore" all includes like libnativehelper
+# that are still stuck pre-C++11.
+#   -Wzero-as-null-pointer-constant \
+# Suggest final: Have to move to a more recent GCC.
+#   -Wsuggest-final-types
+
+
 ifeq ($(ART_HOST_CLANG),true)
   ART_HOST_CFLAGS += $(art_clang_cflags)
+else
+  ART_HOST_CFLAGS += $(art_gcc_cflags)
 endif
 ifeq ($(ART_TARGET_CLANG),true)
   ART_TARGET_CFLAGS += $(art_clang_cflags)
+else
+  ART_TARGET_CFLAGS += $(art_gcc_cflags)
 endif
 
-# Clear local variable now its use has ended.
+# Clear local variables now their use has ended.
 art_clang_cflags :=
+art_gcc_cflags :=
 
 ART_CPP_EXTENSION := .cc
 
@@ -187,9 +208,16 @@ art_cflags := \
   -Wstrict-aliasing \
   -fstrict-aliasing \
   -Wunreachable-code \
+  -Wno-conversion-null \
+  -Wredundant-decls \
+  -Wshadow \
   -fvisibility=protected \
   $(art_default_gc_type_cflags)
 
+# Missing declarations: too many at the moment, as we use "extern" quite a bit.
+#   -Wmissing-declarations \
+
+
 ifeq ($(ART_SMALL_MODE),true)
   art_cflags += -DART_SMALL_MODE=1
 endif
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index cdae8d2d24..0361cd18cc 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -162,7 +162,7 @@ class SrcMap FINAL : public std::vector<SrcMapElem> {
     }
     this->resize(i + 1);
 
-    for (size_t i = size(); --i >= 1; ) {
+    for (i = size(); --i >= 1; ) {
       (*this)[i].from_ -= (*this)[i-1].from_;
       (*this)[i].to_ -= (*this)[i-1].to_;
     }
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index a7d93538d6..c1ce2ac016 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -787,9 +787,9 @@ void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& en
   if (same_version) {
     // Find the first non-null values.
     for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
-      auto it = (lvn->*map_ptr).find(key);
-      if (it != (lvn->*map_ptr).end()) {
-        cmp_values = &it->second;
+      auto value = (lvn->*map_ptr).find(key);
+      if (value != (lvn->*map_ptr).end()) {
+        cmp_values = &value->second;
         break;
       }
     }
@@ -799,21 +799,21 @@ void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& en
     // field version and the values' memory_version_before_stores, last_stored_value
     // and store_loc_set are identical.
for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) { - auto it = (lvn->*map_ptr).find(key); - if (it == (lvn->*map_ptr).end()) { + auto value = (lvn->*map_ptr).find(key); + if (value == (lvn->*map_ptr).end()) { if (cmp_values->memory_version_before_stores != kNoValue) { same_version = false; break; } - } else if (cmp_values->last_stored_value != it->second.last_stored_value || - cmp_values->memory_version_before_stores != it->second.memory_version_before_stores || - cmp_values->store_loc_set != it->second.store_loc_set) { + } else if (cmp_values->last_stored_value != value->second.last_stored_value || + cmp_values->memory_version_before_stores != value->second.memory_version_before_stores || + cmp_values->store_loc_set != value->second.store_loc_set) { same_version = false; break; - } else if (it->second.last_load_memory_version != kNoValue) { + } else if (value->second.last_load_memory_version != kNoValue) { DCHECK(load_memory_version_for_same_version == kNoValue || - load_memory_version_for_same_version == it->second.last_load_memory_version); - load_memory_version_for_same_version = it->second.last_load_memory_version; + load_memory_version_for_same_version == value->second.last_load_memory_version); + load_memory_version_for_same_version = value->second.last_load_memory_version; } } } @@ -828,12 +828,12 @@ void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& en if (!cmp_values->load_value_map.empty()) { my_values->load_value_map = cmp_values->load_value_map; for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) { - auto it = (lvn->*map_ptr).find(key); - if (it == (lvn->*map_ptr).end() || it->second.load_value_map.empty()) { + auto value = (lvn->*map_ptr).find(key); + if (value == (lvn->*map_ptr).end() || value->second.load_value_map.empty()) { my_values->load_value_map.clear(); break; } - InPlaceIntersectMaps(&my_values->load_value_map, it->second.load_value_map); + InPlaceIntersectMaps(&my_values->load_value_map, value->second.load_value_map); if (my_values->load_value_map.empty()) { break; } @@ -847,20 +847,20 @@ void LocalValueNumbering::MergeAliasingValues(const typename Map::value_type& en // Calculate the locations that have been either read from or written to in each incoming LVN. bool first_lvn = true; for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) { - auto it = (lvn->*map_ptr).find(key); - if (it == (lvn->*map_ptr).end()) { + auto value = (lvn->*map_ptr).find(key); + if (value == (lvn->*map_ptr).end()) { my_values->load_value_map.clear(); break; } if (first_lvn) { first_lvn = false; // Copy the first LVN's locations. Values will be overwritten later. - my_values->load_value_map = it->second.load_value_map; - for (uint16_t location : it->second.store_loc_set) { + my_values->load_value_map = value->second.load_value_map; + for (uint16_t location : value->second.store_loc_set) { my_values->load_value_map.Put(location, 0u); } } else { - IntersectAliasingValueLocations(my_values, &it->second); + IntersectAliasingValueLocations(my_values, &value->second); } } // Calculate merged values for the intersection. 
diff --git a/compiler/dex/quick/arm/utility_arm.cc b/compiler/dex/quick/arm/utility_arm.cc index 448e80f715..a1a5ad1d1f 100644 --- a/compiler/dex/quick/arm/utility_arm.cc +++ b/compiler/dex/quick/arm/utility_arm.cc @@ -464,7 +464,6 @@ LIR* ArmMir2Lir::OpRegRegReg(OpKind op, RegStorage r_dest, RegStorage r_src1, Re } LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, int value) { - LIR* res; bool neg = (value < 0); int32_t abs_value = (neg) ? -value : value; ArmOpcode opcode = kThumbBkpt; @@ -590,6 +589,7 @@ LIR* ArmMir2Lir::OpRegRegImm(OpKind op, RegStorage r_dest, RegStorage r_src1, in } else { RegStorage r_scratch = AllocTemp(); LoadConstant(r_scratch, value); + LIR* res; if (EncodingMap[alt_opcode].flags & IS_QUAD_OP) res = NewLIR4(alt_opcode, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), 0); else diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc index 85f502ced6..da7ac87325 100644 --- a/compiler/dex/quick/arm64/assemble_arm64.cc +++ b/compiler/dex/quick/arm64/assemble_arm64.cc @@ -928,14 +928,13 @@ void Arm64Mir2Lir::AssembleLIR() { // Check if branch offset can be encoded in tbz/tbnz. if (!IS_SIGNED_IMM14(delta >> 2)) { DexOffset dalvik_offset = lir->dalvik_offset; - int16_t opcode = lir->opcode; - LIR* target = lir->target; + LIR* targetLIR = lir->target; // "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)". offset_adjustment -= lir->flags.size; - int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]); - DCHECK_NE(imm, -1); + int32_t encodedImm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]); + DCHECK_NE(encodedImm, -1); lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl; - lir->operands[1] = imm; + lir->operands[1] = encodedImm; lir->target = nullptr; lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup; lir->flags.size = EncodingMap[kA64Tst2rl].size; @@ -944,7 +943,7 @@ void Arm64Mir2Lir::AssembleLIR() { opcode = UNWIDE(opcode); DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht); LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct, - opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target); + opcode == kA64Tbz3rht ? 
kArmCondEq : kArmCondNe, 0, 0, 0, 0, targetLIR); InsertLIRAfter(lir, new_lir); new_lir->offset = lir->offset + lir->flags.size; new_lir->flags.generation = generation; diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 9410f7e83b..c5aa27c324 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -66,8 +66,8 @@ void Mir2Lir::GenDivZeroCheck(RegStorage reg) { void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) { class DivZeroCheckSlowPath : public Mir2Lir::LIRSlowPath { public: - DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch) - : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch) { + DivZeroCheckSlowPath(Mir2Lir* m2l, LIR* branch_in) + : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in) { } void Compile() OVERRIDE { @@ -84,9 +84,10 @@ void Mir2Lir::AddDivZeroCheckSlowPath(LIR* branch) { void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) { class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { public: - ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, RegStorage index, RegStorage length) - : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), - index_(index), length_(length) { + ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, RegStorage index_in, + RegStorage length_in) + : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in), + index_(index_in), length_(length_in) { } void Compile() OVERRIDE { @@ -108,9 +109,9 @@ void Mir2Lir::GenArrayBoundsCheck(RegStorage index, RegStorage length) { void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) { class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { public: - ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, int index, RegStorage length) - : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), - index_(index), length_(length) { + ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, int index_in, RegStorage length_in) + : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in), + index_(index_in), length_(length_in) { } void Compile() OVERRIDE { @@ -461,7 +462,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { // Set up the loop counter (known to be > 0) LoadConstant(r_idx, elems - 1); // Generate the copy loop. 
Going backwards for convenience - LIR* target = NewLIR0(kPseudoTargetLabel); + LIR* loop_head_target = NewLIR0(kPseudoTargetLabel); // Copy next element { ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); @@ -471,7 +472,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) { } StoreBaseIndexed(r_dst, r_idx, r_val, 2, k32); FreeTemp(r_val); - OpDecAndBranch(kCondGe, r_idx, target); + OpDecAndBranch(kCondGe, r_idx, loop_head_target); if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) { // Restore the target pointer OpRegRegImm(kOpAdd, ref_reg, r_dst, @@ -955,7 +956,6 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { RegLocation rl_method = LoadCurrMethod(); CheckRegLocation(rl_method); RegStorage res_reg = AllocTempRef(); - RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); if (!cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, type_idx)) { @@ -965,6 +965,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { RegLocation rl_result = GetReturn(kRefReg); StoreValue(rl_dest, rl_result); } else { + RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true); // We're don't need access checks, load type from dex cache int32_t dex_cache_offset = mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(); @@ -981,10 +982,10 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) { // Object to generate the slow path for class resolution. class SlowPath : public LIRSlowPath { public: - SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx, - const RegLocation& rl_method, const RegLocation& rl_result) : - LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx), - rl_method_(rl_method), rl_result_(rl_result) { + SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in, + const RegLocation& rl_method_in, const RegLocation& rl_result_in) : + LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in), + type_idx_(type_idx_in), rl_method_(rl_method_in), rl_result_(rl_result_in) { } void Compile() { @@ -1045,9 +1046,10 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) { // Object to generate the slow path for string resolution. class SlowPath : public LIRSlowPath { public: - SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, RegStorage r_method, int32_t string_idx) : - LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), - r_method_(r_method), string_idx_(string_idx) { + SlowPath(Mir2Lir* m2l, LIR* fromfast_in, LIR* cont_in, RegStorage r_method_in, + int32_t string_idx_in) : + LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast_in, cont_in), + r_method_(r_method_in), string_idx_(string_idx_in) { } void Compile() { @@ -1225,10 +1227,10 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know class InitTypeSlowPath : public Mir2Lir::LIRSlowPath { public: - InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx, - RegLocation rl_src) - : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx), - rl_src_(rl_src) { + InitTypeSlowPath(Mir2Lir* m2l, LIR* branch, LIR* cont, uint32_t type_idx_in, + RegLocation rl_src_in) + : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, cont), type_idx_(type_idx_in), + rl_src_(rl_src_in) { } void Compile() OVERRIDE { @@ -1370,10 +1372,10 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Slow path to initialize the type. Executed if the type is NULL. 
class SlowPath : public LIRSlowPath { public: - SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont, const int type_idx, - const RegStorage class_reg) : - LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont), type_idx_(type_idx), - class_reg_(class_reg) { + SlowPath(Mir2Lir* m2l, LIR* fromfast, LIR* cont_in, const int type_idx_in, + const RegStorage class_reg_in) : + LIRSlowPath(m2l, m2l->GetCurrentDexPc(), fromfast, cont_in), + type_idx_(type_idx_in), class_reg_(class_reg_in) { } void Compile() { diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index c7449c8eae..4cb12f1dc9 100755 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -44,8 +44,8 @@ typedef mirror::ObjectArray<mirror::Object> ObjArray; void Mir2Lir::AddIntrinsicSlowPath(CallInfo* info, LIR* branch, LIR* resume) { class IntrinsicSlowPathPath : public Mir2Lir::LIRSlowPath { public: - IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info, LIR* branch, LIR* resume = nullptr) - : LIRSlowPath(m2l, info->offset, branch, resume), info_(info) { + IntrinsicSlowPathPath(Mir2Lir* m2l, CallInfo* info_in, LIR* branch_in, LIR* resume_in) + : LIRSlowPath(m2l, info_in->offset, branch_in, resume_in), info_(info_in) { } void Compile() { @@ -790,13 +790,13 @@ int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info, if (rl_arg.reg.IsPair()) { reg = rl_arg.reg.GetHigh(); } else { - RegisterInfo* info = GetRegInfo(rl_arg.reg); - info = info->FindMatchingView(RegisterInfo::kHighSingleStorageMask); - if (info == nullptr) { + RegisterInfo* reg_info = GetRegInfo(rl_arg.reg); + reg_info = reg_info->FindMatchingView(RegisterInfo::kHighSingleStorageMask); + if (reg_info == nullptr) { // NOTE: For hard float convention we won't split arguments across reg/mem. UNIMPLEMENTED(FATAL) << "Needs hard float api."; } - reg = info->GetReg(); + reg = reg_info->GetReg(); } } else { // kArg2 & rArg3 can safely be used here diff --git a/compiler/dex/quick/mips/assemble_mips.cc b/compiler/dex/quick/mips/assemble_mips.cc index 310e1e980b..ca71c3010a 100644 --- a/compiler/dex/quick/mips/assemble_mips.cc +++ b/compiler/dex/quick/mips/assemble_mips.cc @@ -696,12 +696,12 @@ AssemblerStatus MipsMir2Lir::AssembleInstructions(CodeOffset start_addr) { // TUNING: replace with proper delay slot handling if (encoder->size == 8) { DCHECK(!IsPseudoLirOp(lir->opcode)); - const MipsEncodingMap *encoder = &EncodingMap[kMipsNop]; - uint32_t bits = encoder->skeleton; - code_buffer_.push_back(bits & 0xff); - code_buffer_.push_back((bits >> 8) & 0xff); - code_buffer_.push_back((bits >> 16) & 0xff); - code_buffer_.push_back((bits >> 24) & 0xff); + const MipsEncodingMap *encoder2 = &EncodingMap[kMipsNop]; + uint32_t bits2 = encoder2->skeleton; + code_buffer_.push_back(bits2 & 0xff); + code_buffer_.push_back((bits2 >> 8) & 0xff); + code_buffer_.push_back((bits2 >> 16) & 0xff); + code_buffer_.push_back((bits2 >> 24) & 0xff); } } return res; diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index 7229318331..26465a5568 100755 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -1362,10 +1362,10 @@ void X86Mir2Lir::GenArrayBoundsCheck(RegStorage index, int len_offset) { class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { public: - ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, - RegStorage index, RegStorage array_base, int32_t len_offset) - : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), - index_(index), array_base_(array_base), len_offset_(len_offset) { + 
ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, + RegStorage index_in, RegStorage array_base_in, int32_t len_offset_in) + : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in), + index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) { } void Compile() OVERRIDE { @@ -1410,10 +1410,10 @@ void X86Mir2Lir::GenArrayBoundsCheck(int32_t index, int32_t len_offset) { class ArrayBoundsCheckSlowPath : public Mir2Lir::LIRSlowPath { public: - ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch, - int32_t index, RegStorage array_base, int32_t len_offset) - : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch), - index_(index), array_base_(array_base), len_offset_(len_offset) { + ArrayBoundsCheckSlowPath(Mir2Lir* m2l, LIR* branch_in, + int32_t index_in, RegStorage array_base_in, int32_t len_offset_in) + : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch_in), + index_(index_in), array_base_(array_base_in), len_offset_(len_offset_in) { } void Compile() OVERRIDE { diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc index 9616d8fa71..270a4e5007 100755 --- a/compiler/dex/quick/x86/target_x86.cc +++ b/compiler/dex/quick/x86/target_x86.cc @@ -2703,7 +2703,7 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state, bool src_is_8b_aligned = (current_src_offset & 0x7) == 0; bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0; - ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg); + ScopedMemRefType mem_ref_type2(this, ResourceMask::kDalvikReg); if (src_is_16b_aligned) { ld1 = OpMovRegMem(temp, rs_rX86_SP, current_src_offset, kMovA128FP); } else if (src_is_8b_aligned) { diff --git a/compiler/dex/ssa_transformation.cc b/compiler/dex/ssa_transformation.cc index 412f85d5dd..d3d76badd0 100644 --- a/compiler/dex/ssa_transformation.cc +++ b/compiler/dex/ssa_transformation.cc @@ -539,8 +539,7 @@ bool MIRGraph::InsertPhiNodeOperands(BasicBlock* bb) { for (BasicBlockId pred_id : bb->predecessors) { BasicBlock* pred_bb = GetBasicBlock(pred_id); DCHECK(pred_bb != nullptr); - int ssa_reg = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg]; - uses[idx] = ssa_reg; + uses[idx] = pred_bb->data_flow_info->vreg_to_ssa_map_exit[v_reg]; incoming[idx] = pred_id; idx++; } diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index 05785a89fa..aab94c000f 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -718,9 +718,9 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) for (const std::pair<uint16_t, const DexFile*>& exception_type : unresolved_exception_types) { uint16_t exception_type_idx = exception_type.first; const DexFile* dex_file = exception_type.second; - StackHandleScope<2> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(*dex_file))); - Handle<mirror::Class> klass(hs.NewHandle( + StackHandleScope<2> hs2(self); + Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->FindDexCache(*dex_file))); + Handle<mirror::Class> klass(hs2.NewHandle( class_linker->ResolveType(*dex_file, exception_type_idx, dex_cache, NullHandle<mirror::ClassLoader>()))); if (klass.Get() == nullptr) { @@ -757,13 +757,13 @@ static void MaybeAddToImageClasses(Handle<mirror::Class> c, std::set<std::string } VLOG(compiler) << "Adding " << descriptor << " to image classes"; for (size_t i = 0; i < klass->NumDirectInterfaces(); ++i) { - StackHandleScope<1> hs(self); - MaybeAddToImageClasses(hs.NewHandle(mirror::Class::GetDirectInterface(self, klass, 
i)), + StackHandleScope<1> hs2(self); + MaybeAddToImageClasses(hs2.NewHandle(mirror::Class::GetDirectInterface(self, klass, i)), image_classes); } if (klass->IsArrayClass()) { - StackHandleScope<1> hs(self); - MaybeAddToImageClasses(hs.NewHandle(klass->GetComponentType()), image_classes); + StackHandleScope<1> hs2(self); + MaybeAddToImageClasses(hs2.NewHandle(klass->GetComponentType()), image_classes); } klass.Assign(klass->GetSuperClass()); } diff --git a/compiler/driver/compiler_driver_test.cc b/compiler/driver/compiler_driver_test.cc index 9ae9bd400a..5a0ec2fa57 100644 --- a/compiler/driver/compiler_driver_test.cc +++ b/compiler/driver/compiler_driver_test.cc @@ -86,11 +86,11 @@ class CompilerDriverTest : public CommonCompilerTest { hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader))); mirror::Class* c = class_linker->FindClass(soa.Self(), descriptor, loader); CHECK(c != NULL); - for (size_t i = 0; i < c->NumDirectMethods(); i++) { - MakeExecutable(c->GetDirectMethod(i)); + for (size_t j = 0; j < c->NumDirectMethods(); j++) { + MakeExecutable(c->GetDirectMethod(j)); } - for (size_t i = 0; i < c->NumVirtualMethods(); i++) { - MakeExecutable(c->GetVirtualMethod(i)); + for (size_t j = 0; j < c->NumVirtualMethods(); j++) { + MakeExecutable(c->GetVirtualMethod(j)); } } } diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index 113204635c..27554423ca 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -785,9 +785,9 @@ jint Java_MyClassNatives_nativeUpCall(JNIEnv* env, jobject thisObj, jint i) { EXPECT_EQ(11, trace_array->GetLength()); // Check stack trace entries have expected values - for (int32_t i = 0; i < trace_array->GetLength(); ++i) { - EXPECT_EQ(-2, trace_array->Get(i)->GetLineNumber()); - mirror::StackTraceElement* ste = trace_array->Get(i); + for (int32_t j = 0; j < trace_array->GetLength(); ++j) { + EXPECT_EQ(-2, trace_array->Get(j)->GetLineNumber()); + mirror::StackTraceElement* ste = trace_array->Get(j); EXPECT_STREQ("MyClassNatives.java", ste->GetFileName()->ToModifiedUtf8().c_str()); EXPECT_STREQ("MyClassNatives", ste->GetDeclaringClass()->ToModifiedUtf8().c_str()); EXPECT_STREQ("fooI", ste->GetMethodName()->ToModifiedUtf8().c_str()); diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 4cb7d9cdd6..97b7cc90dd 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -113,8 +113,8 @@ TEST_F(OatTest, WriteRead) { timer_.get(), "")); jobject class_loader = nullptr; if (kCompile) { - TimingLogger timings("OatTest::WriteRead", false, false); - compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings); + TimingLogger timings2("OatTest::WriteRead", false, false); + compiler_driver_->CompileAll(class_loader, class_linker->GetBootClassPath(), &timings2); } ScratchFile tmp; @@ -174,12 +174,12 @@ TEST_F(OatTest, WriteRead) { oat_class.GetType()) << descriptor; size_t method_index = 0; - for (size_t i = 0; i < klass->NumDirectMethods(); i++, method_index++) { - CheckMethod(klass->GetDirectMethod(i), + for (size_t j = 0; j < klass->NumDirectMethods(); j++, method_index++) { + CheckMethod(klass->GetDirectMethod(j), oat_class.GetOatMethod(method_index), dex_file); } - for (size_t i = 0; i < num_virtual_methods; i++, method_index++) { - CheckMethod(klass->GetVirtualMethod(i), + for (size_t j = 0; j < num_virtual_methods; j++, method_index++) { + CheckMethod(klass->GetVirtualMethod(j), oat_class.GetOatMethod(method_index), dex_file); } } diff --git a/compiler/oat_writer.cc 
b/compiler/oat_writer.cc index 6138411b11..659c3328fc 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -1294,9 +1294,9 @@ size_t OatWriter::InitOatClasses(size_t offset) { // Update oat_dex_files_. auto oat_class_it = oat_classes_.begin(); for (OatDexFile* oat_dex_file : oat_dex_files_) { - for (uint32_t& offset : oat_dex_file->methods_offsets_) { + for (uint32_t& method_offset : oat_dex_file->methods_offsets_) { DCHECK(oat_class_it != oat_classes_.end()); - offset = (*oat_class_it)->offset_; + method_offset = (*oat_class_it)->offset_; ++oat_class_it; } oat_dex_file->UpdateChecksum(oat_header_); diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 0cec4b404a..5513c62d17 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -1561,7 +1561,6 @@ void LocationsBuilderARM::VisitCompare(HCompare* compare) { } void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) { - Label greater, done; LocationSummary* locations = compare->GetLocations(); switch (compare->InputAt(0)->GetType()) { case Primitive::kPrimLong: { diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 6ac7a31d3f..90d7c35975 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -760,16 +760,16 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) { // the comparison and its condition as the branch condition. Register lhs = InputRegisterAt(condition, 0); Operand rhs = InputOperandAt(condition, 1); - Condition cond = ARM64Condition(condition->GetCondition()); - if ((cond == eq || cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) { - if (cond == eq) { + Condition arm64_cond = ARM64Condition(condition->GetCondition()); + if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) { + if (arm64_cond == eq) { __ Cbz(lhs, true_target); } else { __ Cbnz(lhs, true_target); } } else { __ Cmp(lhs, rhs); - __ B(cond, true_target); + __ B(arm64_cond, true_target); } } diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index ac328c319c..ff852512c5 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1507,7 +1507,6 @@ void LocationsBuilderX86::VisitCompare(HCompare* compare) { } void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) { - Label greater, done; LocationSummary* locations = compare->GetLocations(); switch (compare->InputAt(0)->GetType()) { case Primitive::kPrimLong: { diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc index 803a09b733..68fcb25036 100644 --- a/compiler/optimizing/codegen_test.cc +++ b/compiler/optimizing/codegen_test.cc @@ -373,9 +373,9 @@ TEST(CodegenTest, NonMaterializedCondition) { PrepareForRegisterAllocation(graph).Run(); ASSERT_FALSE(equal->NeedsMaterialization()); - auto hook_before_codegen = [](HGraph* graph) { - HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0); - HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + auto hook_before_codegen = [](HGraph* graph_in) { + HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0); + HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena()); block->InsertInstructionBefore(move, block->GetLastInstruction()); }; @@ -463,9 +463,9 @@ TEST(CodegenTest, 
MaterializedCondition1) { HReturn ret(&cmp_lt); code_block->AddInstruction(&ret); - auto hook_before_codegen = [](HGraph* graph) { - HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0); - HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + auto hook_before_codegen = [](HGraph* graph_in) { + HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0); + HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena()); block->InsertInstructionBefore(move, block->GetLastInstruction()); }; @@ -533,9 +533,9 @@ TEST(CodegenTest, MaterializedCondition2) { HReturn ret_ge(&cst_ge); if_false_block->AddInstruction(&ret_ge); - auto hook_before_codegen = [](HGraph* graph) { - HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0); - HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena()); + auto hook_before_codegen = [](HGraph* graph_in) { + HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0); + HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena()); block->InsertInstructionBefore(move, block->GetLastInstruction()); }; diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc index 10a7e46299..fca9933872 100644 --- a/compiler/optimizing/constant_folding.cc +++ b/compiler/optimizing/constant_folding.cc @@ -28,9 +28,9 @@ void HConstantFolding::Run() { // Traverse this block's instructions in (forward) order and // replace the ones that can be statically evaluated by a // compile-time counterpart. - for (HInstructionIterator it(block->GetInstructions()); - !it.Done(); it.Advance()) { - HInstruction* inst = it.Current(); + for (HInstructionIterator inst_it(block->GetInstructions()); + !inst_it.Done(); inst_it.Advance()) { + HInstruction* inst = inst_it.Current(); if (inst->IsBinaryOperation()) { // Constant folding: replace `op(a, b)' with a constant at // compile time if `a' and `b' are both constants. diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc index 027b3d4ff3..25168b5b0c 100644 --- a/compiler/optimizing/gvn.cc +++ b/compiler/optimizing/gvn.cc @@ -54,8 +54,9 @@ void GlobalValueNumberer::ComputeSideEffects() { SideEffects effects = SideEffects::None(); // Update `effects` with the side effects of all instructions in this block. - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* instruction = it.Current(); + for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done(); + inst_it.Advance()) { + HInstruction* instruction = inst_it.Current(); effects = effects.Union(instruction->GetSideEffects()); if (effects.HasAllSideEffects()) { break; diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc index c71d93ebe5..1e93ece2ef 100644 --- a/compiler/optimizing/parallel_move_resolver.cc +++ b/compiler/optimizing/parallel_move_resolver.cc @@ -130,13 +130,13 @@ void ParallelMoveResolver::PerformMove(size_t index) { // this move's source or destination needs to have their source // changed to reflect the state of affairs after the swap. 
Location source = move->GetSource(); - Location destination = move->GetDestination(); + Location swap_destination = move->GetDestination(); move->Eliminate(); for (size_t i = 0; i < moves_.Size(); ++i) { const MoveOperands& other_move = *moves_.Get(i); if (other_move.Blocks(source)) { - moves_.Get(i)->SetSource(destination); - } else if (other_move.Blocks(destination)) { + moves_.Get(i)->SetSource(swap_destination); + } else if (other_move.Blocks(swap_destination)) { moves_.Get(i)->SetSource(source); } } diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index 2387141a39..35d56f3ccb 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -23,8 +23,9 @@ void PrepareForRegisterAllocation::Run() { for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); // No need to visit the phis. - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - it.Current()->Accept(this); + for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done(); + inst_it.Advance()) { + inst_it.Current()->Accept(this); } } } diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index c98b82a6ed..2a9c88506d 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -126,11 +126,12 @@ void RegisterAllocator::AllocateRegistersInternal() { // is the one with the lowest start position. for (HLinearPostOrderIterator it(liveness_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); - for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - ProcessInstruction(it.Current()); + for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done(); + back_it.Advance()) { + ProcessInstruction(back_it.Current()); } - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - ProcessInstruction(it.Current()); + for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + ProcessInstruction(inst_it.Current()); } } @@ -1201,8 +1202,8 @@ void RegisterAllocator::Resolve() { // Resolve phi inputs. Order does not matter. 
for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) { HBasicBlock* current = it.Current(); - for (HInstructionIterator it(current->GetPhis()); !it.Done(); it.Advance()) { - HInstruction* phi = it.Current(); + for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + HInstruction* phi = inst_it.Current(); for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) { HBasicBlock* predecessor = current->GetPredecessors().Get(i); DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u); diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index a0cc8a94ee..e83c528fab 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -109,8 +109,8 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) { HPhi* phi = new (GetGraph()->GetArena()) HPhi( GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid); for (size_t i = 0; i < block->GetPredecessors().Size(); i++) { - HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(i), local); - phi->SetRawInputAt(i, value); + HInstruction* pred_value = ValueOfLocal(block->GetPredecessors().Get(i), local); + phi->SetRawInputAt(i, pred_value); } block->AddPhi(phi); value = phi; diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc index 97bc7f3dfd..0085b27c58 100644 --- a/compiler/optimizing/ssa_liveness_analysis.cc +++ b/compiler/optimizing/ssa_liveness_analysis.cc @@ -107,8 +107,8 @@ void SsaLivenessAnalysis::NumberInstructions() { HBasicBlock* block = it.Current(); block->SetLifetimeStart(lifetime_position); - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); + for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + HInstruction* current = inst_it.Current(); current->Accept(location_builder); LocationSummary* locations = current->GetLocations(); if (locations != nullptr && locations->Out().IsValid()) { @@ -124,8 +124,9 @@ void SsaLivenessAnalysis::NumberInstructions() { // Add a null marker to notify we are starting a block. 
instructions_from_lifetime_position_.Add(nullptr); - for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); + for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done(); + inst_it.Advance()) { + HInstruction* current = inst_it.Current(); current->Accept(codegen_->GetLocationBuilder()); LocationSummary* locations = current->GetLocations(); if (locations != nullptr && locations->Out().IsValid()) { @@ -178,8 +179,8 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { HBasicBlock* successor = block->GetSuccessors().Get(i); live_in->Union(GetLiveInSet(*successor)); size_t phi_input_index = successor->GetPredecessorIndexOf(block); - for (HInstructionIterator it(successor->GetPhis()); !it.Done(); it.Advance()) { - HInstruction* phi = it.Current(); + for (HInstructionIterator inst_it(successor->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + HInstruction* phi = inst_it.Current(); HInstruction* input = phi->InputAt(phi_input_index); input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block); // A phi input whose last user is the phi dies at the end of the predecessor block, @@ -195,8 +196,9 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { current->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd()); } - for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); + for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done(); + back_it.Advance()) { + HInstruction* current = back_it.Current(); if (current->HasSsaIndex()) { // Kill the instruction and shorten its interval. kill->SetBit(current->GetSsaIndex()); @@ -230,8 +232,8 @@ void SsaLivenessAnalysis::ComputeLiveRanges() { } // Kill phis defined in this block. - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - HInstruction* current = it.Current(); + for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + HInstruction* current = inst_it.Current(); if (current->HasSsaIndex()) { kill->SetBit(current->GetSsaIndex()); live_in->ClearBit(current->GetSsaIndex()); diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc index 4eda0f3757..56979e1c6a 100644 --- a/compiler/optimizing/ssa_phi_elimination.cc +++ b/compiler/optimizing/ssa_phi_elimination.cc @@ -22,10 +22,10 @@ void SsaDeadPhiElimination::Run() { // Add to the worklist phis referenced by non-phi instructions. 
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - HPhi* phi = it.Current()->AsPhi(); - for (HUseIterator<HInstruction> it(phi->GetUses()); !it.Done(); it.Advance()) { - HUseListNode<HInstruction>* current = it.Current(); + for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + HPhi* phi = inst_it.Current()->AsPhi(); + for (HUseIterator<HInstruction> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) { + HUseListNode<HInstruction>* current = use_it.Current(); HInstruction* user = current->GetUser(); if (!user->IsPhi()) { worklist_.Add(phi); @@ -61,8 +61,9 @@ void SsaDeadPhiElimination::Run() { next = current->GetNext(); if (current->AsPhi()->IsDead()) { if (current->HasUses()) { - for (HUseIterator<HInstruction> it(current->GetUses()); !it.Done(); it.Advance()) { - HUseListNode<HInstruction>* user_node = it.Current(); + for (HUseIterator<HInstruction> use_it(current->GetUses()); !use_it.Done(); + use_it.Advance()) { + HUseListNode<HInstruction>* user_node = use_it.Current(); HInstruction* user = user_node->GetUser(); DCHECK(user->IsLoopHeaderPhi()); DCHECK(user->AsPhi()->IsDead()); @@ -72,8 +73,9 @@ void SsaDeadPhiElimination::Run() { } } if (current->HasEnvironmentUses()) { - for (HUseIterator<HEnvironment> it(current->GetEnvUses()); !it.Done(); it.Advance()) { - HUseListNode<HEnvironment>* user_node = it.Current(); + for (HUseIterator<HEnvironment> use_it(current->GetEnvUses()); !use_it.Done(); + use_it.Advance()) { + HUseListNode<HEnvironment>* user_node = use_it.Current(); HEnvironment* user = user_node->GetUser(); user->SetRawEnvAt(user_node->GetIndex(), nullptr); current->RemoveEnvironmentUser(user, user_node->GetIndex()); @@ -90,8 +92,8 @@ void SsaRedundantPhiElimination::Run() { // Add all phis in the worklist. for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); - for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) { - worklist_.Add(it.Current()->AsPhi()); + for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) { + worklist_.Add(inst_it.Current()->AsPhi()); } } diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index 5f74c33643..9cfa71c13f 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -167,33 +167,33 @@ class StackMapStream : public ValueObject { } // Set the register map. 
- MemoryRegion region = dex_register_maps_region.Subregion( + MemoryRegion register_region = dex_register_maps_region.Subregion( next_dex_register_map_offset, DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize()); - next_dex_register_map_offset += region.size(); - DexRegisterMap dex_register_map(region); - stack_map.SetDexRegisterMapOffset(region.start() - memory_start); + next_dex_register_map_offset += register_region.size(); + DexRegisterMap dex_register_map(register_region); + stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start); - for (size_t i = 0; i < entry.num_dex_registers; ++i) { + for (size_t j = 0; j < entry.num_dex_registers; ++j) { DexRegisterEntry register_entry = - dex_register_maps_.Get(i + entry.dex_register_maps_start_index); - dex_register_map.SetRegisterInfo(i, register_entry.kind, register_entry.value); + dex_register_maps_.Get(j + entry.dex_register_maps_start_index); + dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value); } // Set the inlining info. if (entry.inlining_depth != 0) { - MemoryRegion region = inline_infos_region.Subregion( + MemoryRegion inline_region = inline_infos_region.Subregion( next_inline_info_offset, InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize()); - next_inline_info_offset += region.size(); - InlineInfo inline_info(region); + next_inline_info_offset += inline_region.size(); + InlineInfo inline_info(inline_region); - stack_map.SetInlineDescriptorOffset(region.start() - memory_start); + stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start); inline_info.SetDepth(entry.inlining_depth); - for (size_t i = 0; i < entry.inlining_depth; ++i) { - InlineInfoEntry inline_entry = inline_infos_.Get(i + entry.inline_infos_start_index); - inline_info.SetMethodReferenceIndexAtDepth(i, inline_entry.method_index); + for (size_t j = 0; j < entry.inlining_depth; ++j) { + InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index); + inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index); } } else { stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo); diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc index e9788f91ba..591d461244 100644 --- a/compiler/utils/arm/assembler_arm.cc +++ b/compiler/utils/arm/assembler_arm.cc @@ -301,11 +301,11 @@ uint32_t Address::vencoding() const { CHECK(IsAbsoluteUint(10, offset)); // In the range -1020 to +1020. CHECK_ALIGNED(offset, 2); // Multiple of 4. CHECK((am_ == Offset) || (am_ == NegOffset)); - uint32_t vencoding = (encoding & (0xf << kRnShift)) | (offset >> 2); + uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2); if (am_ == Offset) { - vencoding |= 1 << 23; + vencoding_value |= 1 << 23; } - return vencoding; + return vencoding_value; } diff --git a/compiler/utils/arm/assembler_thumb2.cc b/compiler/utils/arm/assembler_thumb2.cc index fd2613a89e..71d6e7ead8 100644 --- a/compiler/utils/arm/assembler_thumb2.cc +++ b/compiler/utils/arm/assembler_thumb2.cc @@ -2121,8 +2121,8 @@ void Thumb2Assembler::Bind(Label* label) { branch->ResetSize(Branch::k16Bit); // Now add a compare instruction in the place the branch was. 
- int16_t cmp = B13 | B11 | static_cast<int16_t>(branch->GetRegister()) << 8; - buffer_.Store<int16_t>(branch_location, cmp); + buffer_.Store<int16_t>(branch_location, + B13 | B11 | static_cast<int16_t>(branch->GetRegister()) << 8); // Since have moved made a hole in the code we need to reload the // current pc. diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h index 1b1d121725..a69be2599e 100644 --- a/compiler/utils/arm64/assembler_arm64.h +++ b/compiler/utils/arm64/assembler_arm64.h @@ -27,8 +27,13 @@ #include "utils/assembler.h" #include "offsets.h" #include "utils.h" + +// TODO: make vixl clean wrt -Wshadow. +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" #include "a64/macro-assembler-a64.h" #include "a64/disasm-a64.h" +#pragma GCC diagnostic pop namespace art { namespace arm64 { diff --git a/compiler/utils/array_ref.h b/compiler/utils/array_ref.h index c137e46804..1a7f2e8c02 100644 --- a/compiler/utils/array_ref.h +++ b/compiler/utils/array_ref.h @@ -73,8 +73,8 @@ class ArrayRef { : array_(array), size_(size) { } - constexpr ArrayRef(T* array, size_t size) - : array_(array), size_(size) { + constexpr ArrayRef(T* array_in, size_t size_in) + : array_(array_in), size_(size_in) { } template <typename Alloc> diff --git a/compiler/utils/assembler.h b/compiler/utils/assembler.h index e1b6d7c21d..ad7e98d906 100644 --- a/compiler/utils/assembler.h +++ b/compiler/utils/assembler.h @@ -56,9 +56,9 @@ namespace x86_64 { class ExternalLabel { public: - ExternalLabel(const char* name, uintptr_t address) - : name_(name), address_(address) { - DCHECK(name != nullptr); + ExternalLabel(const char* name_in, uintptr_t address_in) + : name_(name_in), address_(address_in) { + DCHECK(name_in != nullptr); } const char* name() const { return name_; } @@ -140,10 +140,10 @@ class AssemblerFixup { int position_; AssemblerFixup* previous() const { return previous_; } - void set_previous(AssemblerFixup* previous) { previous_ = previous; } + void set_previous(AssemblerFixup* previous_in) { previous_ = previous_in; } int position() const { return position_; } - void set_position(int position) { position_ = position; } + void set_position(int position_in) { position_ = position_in; } friend class AssemblerBuffer; }; diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index b5bf31bbd6..de4e6de878 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -31,7 +31,7 @@ namespace x86 { class Immediate : public ValueObject { public: - explicit Immediate(int32_t value) : value_(value) {} + explicit Immediate(int32_t value_in) : value_(value_in) {} int32_t value() const { return value_; } @@ -90,16 +90,16 @@ class Operand : public ValueObject { // Operand can be sub classed (e.g: Address). 
Operand() : length_(0) { } - void SetModRM(int mod, Register rm) { - CHECK_EQ(mod & ~3, 0); - encoding_[0] = (mod << 6) | rm; + void SetModRM(int mod_in, Register rm_in) { + CHECK_EQ(mod_in & ~3, 0); + encoding_[0] = (mod_in << 6) | rm_in; length_ = 1; } - void SetSIB(ScaleFactor scale, Register index, Register base) { + void SetSIB(ScaleFactor scale_in, Register index_in, Register base_in) { CHECK_EQ(length_, 1); - CHECK_EQ(scale & ~3, 0); - encoding_[1] = (scale << 6) | (index << 3) | base; + CHECK_EQ(scale_in & ~3, 0); + encoding_[1] = (scale_in << 6) | (index_in << 3) | base_in; length_ = 2; } @@ -122,10 +122,10 @@ class Operand : public ValueObject { explicit Operand(Register reg) { SetModRM(3, reg); } // Get the operand encoding byte at the given index. - uint8_t encoding_at(int index) const { - CHECK_GE(index, 0); - CHECK_LT(index, length_); - return encoding_[index]; + uint8_t encoding_at(int index_in) const { + CHECK_GE(index_in, 0); + CHECK_LT(index_in, length_); + return encoding_[index_in]; } friend class X86Assembler; @@ -134,57 +134,57 @@ class Operand : public ValueObject { class Address : public Operand { public: - Address(Register base, int32_t disp) { - Init(base, disp); + Address(Register base_in, int32_t disp) { + Init(base_in, disp); } - Address(Register base, Offset disp) { - Init(base, disp.Int32Value()); + Address(Register base_in, Offset disp) { + Init(base_in, disp.Int32Value()); } - Address(Register base, FrameOffset disp) { - CHECK_EQ(base, ESP); + Address(Register base_in, FrameOffset disp) { + CHECK_EQ(base_in, ESP); Init(ESP, disp.Int32Value()); } - Address(Register base, MemberOffset disp) { - Init(base, disp.Int32Value()); + Address(Register base_in, MemberOffset disp) { + Init(base_in, disp.Int32Value()); } - void Init(Register base, int32_t disp) { - if (disp == 0 && base != EBP) { - SetModRM(0, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); + void Init(Register base_in, int32_t disp) { + if (disp == 0 && base_in != EBP) { + SetModRM(0, base_in); + if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in); } else if (disp >= -128 && disp <= 127) { - SetModRM(1, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); + SetModRM(1, base_in); + if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in); SetDisp8(disp); } else { - SetModRM(2, base); - if (base == ESP) SetSIB(TIMES_1, ESP, base); + SetModRM(2, base_in); + if (base_in == ESP) SetSIB(TIMES_1, ESP, base_in); SetDisp32(disp); } } - Address(Register index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index, ESP); // Illegal addressing mode. + Address(Register index_in, ScaleFactor scale_in, int32_t disp) { + CHECK_NE(index_in, ESP); // Illegal addressing mode. SetModRM(0, ESP); - SetSIB(scale, index, EBP); + SetSIB(scale_in, index_in, EBP); SetDisp32(disp); } - Address(Register base, Register index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index, ESP); // Illegal addressing mode. - if (disp == 0 && base != EBP) { + Address(Register base_in, Register index_in, ScaleFactor scale_in, int32_t disp) { + CHECK_NE(index_in, ESP); // Illegal addressing mode. 
+ if (disp == 0 && base_in != EBP) { SetModRM(0, ESP); - SetSIB(scale, index, base); + SetSIB(scale_in, index_in, base_in); } else if (disp >= -128 && disp <= 127) { SetModRM(1, ESP); - SetSIB(scale, index, base); + SetSIB(scale_in, index_in, base_in); SetDisp8(disp); } else { SetModRM(2, ESP); - SetSIB(scale, index, base); + SetSIB(scale_in, index_in, base_in); SetDisp32(disp); } } diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 2de3ce53f6..5b16f0891c 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -38,7 +38,7 @@ namespace x86_64 { // conversion rules in expressions regarding negation, especially size_t on 32b. class Immediate : public ValueObject { public: - explicit Immediate(int64_t value) : value_(value) {} + explicit Immediate(int64_t value_in) : value_(value_in) {} int64_t value() const { return value_; } @@ -105,26 +105,26 @@ class Operand : public ValueObject { // Operand can be sub classed (e.g: Address). Operand() : rex_(0), length_(0) { } - void SetModRM(uint8_t mod, CpuRegister rm) { - CHECK_EQ(mod & ~3, 0); - if (rm.NeedsRex()) { + void SetModRM(uint8_t mod_in, CpuRegister rm_in) { + CHECK_EQ(mod_in & ~3, 0); + if (rm_in.NeedsRex()) { rex_ |= 0x41; // REX.000B } - encoding_[0] = (mod << 6) | rm.LowBits(); + encoding_[0] = (mod_in << 6) | rm_in.LowBits(); length_ = 1; } - void SetSIB(ScaleFactor scale, CpuRegister index, CpuRegister base) { + void SetSIB(ScaleFactor scale_in, CpuRegister index_in, CpuRegister base_in) { CHECK_EQ(length_, 1); - CHECK_EQ(scale & ~3, 0); - if (base.NeedsRex()) { + CHECK_EQ(scale_in & ~3, 0); + if (base_in.NeedsRex()) { rex_ |= 0x41; // REX.000B } - if (index.NeedsRex()) { + if (index_in.NeedsRex()) { rex_ |= 0x42; // REX.00X0 } - encoding_[1] = (scale << 6) | (static_cast<uint8_t>(index.LowBits()) << 3) | - static_cast<uint8_t>(base.LowBits()); + encoding_[1] = (scale_in << 6) | (static_cast<uint8_t>(index_in.LowBits()) << 3) | + static_cast<uint8_t>(base_in.LowBits()); length_ = 2; } @@ -148,10 +148,10 @@ class Operand : public ValueObject { explicit Operand(CpuRegister reg) : rex_(0), length_(0) { SetModRM(3, reg); } // Get the operand encoding byte at the given index. 
- uint8_t encoding_at(int index) const { - CHECK_GE(index, 0); - CHECK_LT(index, length_); - return encoding_[index]; + uint8_t encoding_at(int index_in) const { + CHECK_GE(index_in, 0); + CHECK_LT(index_in, length_); + return encoding_[index_in]; } friend class X86_64Assembler; @@ -160,64 +160,64 @@ class Operand : public ValueObject { class Address : public Operand { public: - Address(CpuRegister base, int32_t disp) { - Init(base, disp); + Address(CpuRegister base_in, int32_t disp) { + Init(base_in, disp); } - Address(CpuRegister base, Offset disp) { - Init(base, disp.Int32Value()); + Address(CpuRegister base_in, Offset disp) { + Init(base_in, disp.Int32Value()); } - Address(CpuRegister base, FrameOffset disp) { - CHECK_EQ(base.AsRegister(), RSP); + Address(CpuRegister base_in, FrameOffset disp) { + CHECK_EQ(base_in.AsRegister(), RSP); Init(CpuRegister(RSP), disp.Int32Value()); } - Address(CpuRegister base, MemberOffset disp) { - Init(base, disp.Int32Value()); + Address(CpuRegister base_in, MemberOffset disp) { + Init(base_in, disp.Int32Value()); } - void Init(CpuRegister base, int32_t disp) { - if (disp == 0 && base.AsRegister() != RBP) { - SetModRM(0, base); - if (base.AsRegister() == RSP) { - SetSIB(TIMES_1, CpuRegister(RSP), base); + void Init(CpuRegister base_in, int32_t disp) { + if (disp == 0 && base_in.AsRegister() != RBP) { + SetModRM(0, base_in); + if (base_in.AsRegister() == RSP) { + SetSIB(TIMES_1, CpuRegister(RSP), base_in); } } else if (disp >= -128 && disp <= 127) { - SetModRM(1, base); - if (base.AsRegister() == RSP) { - SetSIB(TIMES_1, CpuRegister(RSP), base); + SetModRM(1, base_in); + if (base_in.AsRegister() == RSP) { + SetSIB(TIMES_1, CpuRegister(RSP), base_in); } SetDisp8(disp); } else { - SetModRM(2, base); - if (base.AsRegister() == RSP) { - SetSIB(TIMES_1, CpuRegister(RSP), base); + SetModRM(2, base_in); + if (base_in.AsRegister() == RSP) { + SetSIB(TIMES_1, CpuRegister(RSP), base_in); } SetDisp32(disp); } } - Address(CpuRegister index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index.AsRegister(), RSP); // Illegal addressing mode. + Address(CpuRegister index_in, ScaleFactor scale_in, int32_t disp) { + CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode. SetModRM(0, CpuRegister(RSP)); - SetSIB(scale, index, CpuRegister(RBP)); + SetSIB(scale_in, index_in, CpuRegister(RBP)); SetDisp32(disp); } - Address(CpuRegister base, CpuRegister index, ScaleFactor scale, int32_t disp) { - CHECK_NE(index.AsRegister(), RSP); // Illegal addressing mode. - if (disp == 0 && base.AsRegister() != RBP) { + Address(CpuRegister base_in, CpuRegister index_in, ScaleFactor scale_in, int32_t disp) { + CHECK_NE(index_in.AsRegister(), RSP); // Illegal addressing mode. 
+ if (disp == 0 && base_in.AsRegister() != RBP) { SetModRM(0, CpuRegister(RSP)); - SetSIB(scale, index, base); + SetSIB(scale_in, index_in, base_in); } else if (disp >= -128 && disp <= 127) { SetModRM(1, CpuRegister(RSP)); - SetSIB(scale, index, base); + SetSIB(scale_in, index_in, base_in); SetDisp8(disp); } else { SetModRM(2, CpuRegister(RSP)); - SetSIB(scale, index, base); + SetSIB(scale_in, index_in, base_in); SetDisp32(disp); } } diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 30dfdf048d..7770588ec2 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -1065,8 +1065,8 @@ static int dex2oat(int argc, char** argv) { } else if (option == "--print-all-passes") { PassDriverMEOpts::SetPrintAllPasses(); } else if (option.starts_with("--dump-cfg-passes=")) { - std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data(); - PassDriverMEOpts::SetDumpPassList(dump_passes); + std::string dump_passes_string = option.substr(strlen("--dump-cfg-passes=")).data(); + PassDriverMEOpts::SetDumpPassList(dump_passes_string); } else if (option == "--print-pass-options") { print_pass_options = true; } else if (option.starts_with("--pass-options=")) { @@ -1337,7 +1337,6 @@ static int dex2oat(int argc, char** argv) { // If --image-classes was specified, calculate the full list of classes to include in the image std::unique_ptr<std::set<std::string>> image_classes(nullptr); if (image_classes_filename != nullptr) { - std::string error_msg; if (image_classes_zip_filename != nullptr) { image_classes.reset(dex2oat->ReadImageClassesFromZip(image_classes_zip_filename, image_classes_filename, @@ -1360,7 +1359,6 @@ static int dex2oat(int argc, char** argv) { } else { if (dex_filenames.empty()) { ATRACE_BEGIN("Opening zip archive from file descriptor"); - std::string error_msg; std::unique_ptr<ZipArchive> zip_archive(ZipArchive::OpenFromFd(zip_fd, zip_location.c_str(), &error_msg)); if (zip_archive.get() == nullptr) { diff --git a/disassembler/disassembler_arm.cc b/disassembler/disassembler_arm.cc index ee652b34f7..9243b1a86a 100644 --- a/disassembler/disassembler_arm.cc +++ b/disassembler/disassembler_arm.cc @@ -125,8 +125,10 @@ static const char* kThumbReverseOperations[] = { }; struct ArmRegister { - explicit ArmRegister(uint32_t r) : r(r) { CHECK_LE(r, 15U); } - ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { CHECK_LE(r, 15U); } + explicit ArmRegister(uint32_t r_in) : r(r_in) { CHECK_LE(r_in, 15U); } + ArmRegister(uint32_t instruction, uint32_t at_bit) : r((instruction >> at_bit) & 0xf) { + CHECK_LE(r, 15U); + } uint32_t r; }; std::ostream& operator<<(std::ostream& os, const ArmRegister& r) { @@ -390,7 +392,7 @@ uint32_t VFPExpand32(uint32_t imm8) { return (bit_a << 31) | ((1 << 30) - (bit_b << 25)) | (slice << 19); } -uint64_t VFPExpand64(uint32_t imm8) { +static uint64_t VFPExpand64(uint32_t imm8) { CHECK_EQ(imm8 & 0xffu, imm8); uint64_t bit_a = (imm8 >> 7) & 1; uint64_t bit_b = (imm8 >> 6) & 1; @@ -398,45 +400,6 @@ uint64_t VFPExpand64(uint32_t imm8) { return (bit_a << 31) | ((UINT64_C(1) << 62) - (bit_b << 54)) | (slice << 48); } -uint64_t AdvSIMDExpand(uint32_t op, uint32_t cmode, uint32_t imm8) { - CHECK_EQ(op & 1, op); - CHECK_EQ(cmode & 0xf, cmode); - CHECK_EQ(imm8 & 0xff, imm8); - int32_t cmode321 = cmode >> 1; - if (imm8 == 0 && cmode321 != 0 && cmode321 != 4 && cmode321 != 7) { - return INT64_C(0x00000000deadbeef); // UNPREDICTABLE - } - uint64_t imm = imm8; - switch (cmode321) { - case 3: imm <<= 8; FALLTHROUGH_INTENDED; - case 2: 
imm <<= 8; FALLTHROUGH_INTENDED; - case 1: imm <<= 8; FALLTHROUGH_INTENDED; - case 0: return static_cast<int64_t>((imm << 32) | imm); - case 5: imm <<= 8; FALLTHROUGH_INTENDED; - case 4: return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm); - case 6: - imm = ((imm + 1u) << ((cmode & 1) != 0 ? 16 : 8)) - 1u; // Add 8 or 16 ones. - return static_cast<int64_t>((imm << 32) | imm); - default: - CHECK_EQ(cmode321, 7); - if ((cmode & 1) == 0 && op == 0) { - imm = (imm << 8) | imm; - return static_cast<int64_t>((imm << 48) | (imm << 32) | (imm << 16) | imm); - } else if ((cmode & 1) == 0 && op != 0) { - for (int i = 1; i != 8; ++i) { - imm |= ((imm >> i) & UINT64_C(1)) << (i * 8); - } - imm = imm & ~UINT64_C(0xfe); - return static_cast<int64_t>((imm << 8) - imm); - } else if ((cmode & 1) != 0 && op == 0) { - imm = static_cast<uint32_t>(VFPExpand32(imm8)); - return static_cast<int64_t>((imm << 32) | imm); - } else { - return INT64_C(0xdeadbeef00000000); // UNDEFINED - } - } -} - size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) { uint32_t instr = (ReadU16(instr_ptr) << 16) | ReadU16(instr_ptr + 2); // |111|1 1|1000000|0000|1111110000000000| @@ -1359,8 +1322,6 @@ size_t DisassemblerArm::DumpThumb32(std::ostream& os, const uint8_t* instr_ptr) } } else { // STR Rt, [Rn, Rm, LSL #imm2] - 111 11 000 010 0 nnnn tttt 000000iimmmm - ArmRegister Rn(instr, 16); - ArmRegister Rt(instr, 12); ArmRegister Rm(instr, 0); uint32_t imm2 = (instr >> 4) & 3; opcode << "str.w"; diff --git a/disassembler/disassembler_arm64.h b/disassembler/disassembler_arm64.h index ad20c70222..e56fe4f08a 100644 --- a/disassembler/disassembler_arm64.h +++ b/disassembler/disassembler_arm64.h @@ -19,8 +19,11 @@ #include "disassembler.h" +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" #include "a64/decoder-a64.h" #include "a64/disasm-a64.h" +#pragma GCC diagnostic pop namespace art { namespace arm64 { diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index b78daf0d2f..dca048fb85 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -1296,26 +1296,26 @@ class ImageDumper { std::ostream indent2_os(&indent2_filter); mirror::ObjectArray<mirror::Object>* image_root_object_array = image_root_object->AsObjectArray<mirror::Object>(); - for (int i = 0; i < image_root_object_array->GetLength(); i++) { - mirror::Object* value = image_root_object_array->Get(i); + for (int j = 0; j < image_root_object_array->GetLength(); j++) { + mirror::Object* value = image_root_object_array->Get(j); size_t run = 0; - for (int32_t j = i + 1; j < image_root_object_array->GetLength(); j++) { - if (value == image_root_object_array->Get(j)) { + for (int32_t k = j + 1; k < image_root_object_array->GetLength(); k++) { + if (value == image_root_object_array->Get(k)) { run++; } else { break; } } if (run == 0) { - indent2_os << StringPrintf("%d: ", i); + indent2_os << StringPrintf("%d: ", j); } else { - indent2_os << StringPrintf("%d to %zd: ", i, i + run); - i = i + run; + indent2_os << StringPrintf("%d to %zd: ", j, j + run); + j = j + run; } if (value != nullptr) { PrettyObjectValue(indent2_os, value->GetClass(), value); } else { - indent2_os << i << ": null\n"; + indent2_os << j << ": null\n"; } } } @@ -1747,20 +1747,20 @@ class ImageDumper { dex_instruction_bytes(0) {} struct SizeAndCount { - SizeAndCount(size_t bytes, size_t count) : bytes(bytes), count(count) {} + SizeAndCount(size_t bytes_in, size_t count_in) : bytes(bytes_in), count(count_in) {} size_t bytes; size_t count; }; typedef 
SafeMap<std::string, SizeAndCount> SizeAndCountTable; SizeAndCountTable sizes_and_counts; - void Update(const char* descriptor, size_t object_bytes) { + void Update(const char* descriptor, size_t object_bytes_in) { SizeAndCountTable::iterator it = sizes_and_counts.find(descriptor); if (it != sizes_and_counts.end()) { - it->second.bytes += object_bytes; + it->second.bytes += object_bytes_in; it->second.count += 1; } else { - sizes_and_counts.Put(descriptor, SizeAndCount(object_bytes, 1)); + sizes_and_counts.Put(descriptor, SizeAndCount(object_bytes_in, 1)); } } diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index 24e9b1d3e4..d7d13c2223 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -21,7 +21,6 @@ #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" -#include "entrypoints/runtime_asm_entrypoints.h" #include "interpreter/interpreter.h" namespace art { @@ -124,9 +123,12 @@ extern "C" void art_quick_throw_no_such_method(int32_t method_idx); extern "C" void art_quick_throw_null_pointer_exception(); extern "C" void art_quick_throw_stack_overflow(void*); -// Generic JNI downcall +// Generic JNI downcall. extern "C" void art_quick_generic_jni_trampoline(mirror::ArtMethod*); +// JNI resolution. +extern "C" void* art_jni_dlsym_lookup_stub(JNIEnv*, jobject); + void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, PortableEntryPoints* ppoints, QuickEntryPoints* qpoints) { // Interpreter diff --git a/runtime/arch/memcmp16.cc b/runtime/arch/memcmp16.cc index 5a3e73eebc..813df2f1d7 100644 --- a/runtime/arch/memcmp16.cc +++ b/runtime/arch/memcmp16.cc @@ -19,6 +19,7 @@ // This linked against by assembly stubs, only. #pragma GCC diagnostic ignored "-Wunused-function" +int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count); int32_t memcmp16_generic_static(const uint16_t* s0, const uint16_t* s1, size_t count) { for (size_t i = 0; i < count; i++) { if (s0[i] != s1[i]) { diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index b0928f8cfa..0fcd297497 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -732,17 +732,17 @@ static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS { EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state); } } else { - bool lock; // Whether to lock or unlock in this step. + bool take_lock; // Whether to lock or unlock in this step. if (counts[index] == 0) { - lock = true; + take_lock = true; } else if (counts[index] == kThinLockLoops) { - lock = false; + take_lock = false; } else { // Randomly. 
- lock = r.next() % 2 == 0; + take_lock = r.next() % 2 == 0; } - if (lock) { + if (take_lock) { test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object, self); counts[index]++; @@ -1779,8 +1779,8 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields())); int32_t num_fields = fields->GetLength(); for (int32_t i = 0; i < num_fields; ++i) { - StackHandleScope<1> hs(self); - Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i))); + StackHandleScope<1> hs2(self); + Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i))); Primitive::Type type = f->GetTypeAsPrimitiveType(); switch (type) { @@ -1834,8 +1834,8 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields())); int32_t num_fields = fields->GetLength(); for (int32_t i = 0; i < num_fields; ++i) { - StackHandleScope<1> hs(self); - Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i))); + StackHandleScope<1> hs2(self); + Handle<mirror::ArtField> f(hs2.NewHandle(fields->Get(i))); Primitive::Type type = f->GetTypeAsPrimitiveType(); switch (type) { diff --git a/runtime/base/macros.h b/runtime/base/macros.h index 90cf951ca2..88b99a183a 100644 --- a/runtime/base/macros.h +++ b/runtime/base/macros.h @@ -41,28 +41,6 @@ #define FINAL #endif -// The COMPILE_ASSERT macro can be used to verify that a compile time -// expression is true. For example, you could use it to verify the -// size of a static array: -// -// COMPILE_ASSERT(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES, -// content_type_names_incorrect_size); -// -// or to make sure a struct is smaller than a certain size: -// -// COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large); -// -// The second argument to the macro is the name of the variable. If -// the expression is false, most compilers will issue a warning/error -// containing the name of the variable. - -template <bool> -struct CompileAssert { -}; - -#define COMPILE_ASSERT(expr, msg) \ - typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1] // NOLINT - // Declare a friend relationship in a class with a test. Used rather that FRIEND_TEST to avoid // globally importing gtest/gtest.h into the main ART header files. #define ART_FRIEND_TEST(test_set_name, individual_test)\ diff --git a/runtime/base/stringpiece.h b/runtime/base/stringpiece.h index b8de308057..d793bb6153 100644 --- a/runtime/base/stringpiece.h +++ b/runtime/base/stringpiece.h @@ -67,8 +67,8 @@ class StringPiece { ptr_ = nullptr; length_ = 0; } - void set(const char* data, size_type len) { - ptr_ = data; + void set(const char* data_in, size_type len) { + ptr_ = data_in; length_ = len; } void set(const char* str) { @@ -79,8 +79,8 @@ class StringPiece { length_ = 0; } } - void set(const void* data, size_type len) { - ptr_ = reinterpret_cast<const char*>(data); + void set(const void* data_in, size_type len) { + ptr_ = reinterpret_cast<const char*>(data_in); length_ = len; } diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc index ef5ccb6af9..fe5b765a3a 100644 --- a/runtime/check_jni.cc +++ b/runtime/check_jni.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "jni_internal.h" +#include "check_jni.h" #include <sys/mman.h> #include <zlib.h> @@ -27,6 +27,7 @@ #include "field_helper.h" #include "gc/space/space.h" #include "java_vm_ext.h" +#include "jni_internal.h" #include "mirror/art_field-inl.h" #include "mirror/art_method-inl.h" #include "mirror/class-inl.h" diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 08efb70ede..eeb65f94e1 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -173,7 +173,7 @@ struct FieldGapsComparator { typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps; // Adds largest aligned gaps to queue of gaps. -void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) { +static void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) { DCHECK(gaps != nullptr); uint32_t current_offset = gap_start; @@ -817,7 +817,6 @@ static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file, if (oat_dex_file == nullptr) { if (i == 0 && generated) { - std::string error_msg; error_msg = StringPrintf("\nFailed to find dex file '%s' (checksum 0x%x) in generated out " " file'%s'", dex_location, next_location_checksum, oat_file->GetLocation().c_str()); @@ -1193,11 +1192,11 @@ bool ClassLinker::VerifyOatAndDexFileChecksums(const OatFile* oat_file, if (oat_dex_file == nullptr) { *error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x", oat_file->GetLocation().c_str(), dex_location, dex_location_checksum); - for (const OatFile::OatDexFile* oat_dex_file : oat_file->GetOatDexFiles()) { + for (const OatFile::OatDexFile* oat_dex_file_in : oat_file->GetOatDexFiles()) { *error_msg += StringPrintf("\noat file '%s' contains contents for '%s' with checksum 0x%x", oat_file->GetLocation().c_str(), - oat_dex_file->GetDexFileLocation().c_str(), - oat_dex_file->GetDexFileLocationChecksum()); + oat_dex_file_in->GetDexFileLocation().c_str(), + oat_dex_file_in->GetDexFileLocationChecksum()); } return false; } @@ -1673,8 +1672,8 @@ void ClassLinker::InitFromImage() { CHECK_EQ(oat_file.GetOatHeader().GetDexFileCount(), static_cast<uint32_t>(dex_caches->GetLength())); for (int32_t i = 0; i < dex_caches->GetLength(); i++) { - StackHandleScope<1> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(dex_caches->Get(i))); + StackHandleScope<1> hs2(self); + Handle<mirror::DexCache> dex_cache(hs2.NewHandle(dex_caches->Get(i))); const std::string& dex_file_location(dex_cache->GetLocation()->ToModifiedUtf8()); const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(dex_file_location.c_str(), nullptr); @@ -2041,8 +2040,8 @@ mirror::Class* ClassLinker::EnsureResolved(Thread* self, const char* descriptor, typedef std::pair<const DexFile*, const DexFile::ClassDef*> ClassPathEntry; // Search a collection of DexFiles for a descriptor -ClassPathEntry FindInClassPath(const char* descriptor, - const std::vector<const DexFile*>& class_path) { +static ClassPathEntry FindInClassPath(const char* descriptor, + const std::vector<const DexFile*>& class_path) { for (size_t i = 0; i != class_path.size(); ++i) { const DexFile* dex_file = class_path[i]; const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor); @@ -2118,12 +2117,12 @@ mirror::Class* ClassLinker::FindClassInPathClassLoader(ScopedObjectAccessAlready LOG(WARNING) << "Null DexFile::mCookie for " << descriptor; break; } - for (const DexFile* dex_file : *dex_files) { - const DexFile::ClassDef* dex_class_def = dex_file->FindClassDef(descriptor); + 
for (const DexFile* cp_dex_file : *dex_files) { + const DexFile::ClassDef* dex_class_def = cp_dex_file->FindClassDef(descriptor); if (dex_class_def != nullptr) { - RegisterDexFile(*dex_file); + RegisterDexFile(*cp_dex_file); mirror::Class* klass = - DefineClass(self, descriptor, class_loader, *dex_file, *dex_class_def); + DefineClass(self, descriptor, class_loader, *cp_dex_file, *dex_class_def); if (klass == nullptr) { CHECK(self->IsExceptionPending()) << descriptor; self->ClearException(); @@ -2206,9 +2205,9 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor, } } else { ScopedObjectAccessUnchecked soa(self); - mirror::Class* klass = FindClassInPathClassLoader(soa, self, descriptor, class_loader); - if (klass != nullptr) { - return klass; + mirror::Class* cp_klass = FindClassInPathClassLoader(soa, self, descriptor, class_loader); + if (cp_klass != nullptr) { + return cp_klass; } ScopedLocalRef<jobject> class_loader_object(soa.Env(), soa.AddLocalReference<jobject>(class_loader.Get())); @@ -2453,17 +2452,18 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method // by search for its position in the declared virtual methods. oat_method_index = declaring_class->NumDirectMethods(); size_t end = declaring_class->NumVirtualMethods(); - bool found = false; + bool found_virtual = false; for (size_t i = 0; i < end; i++) { // Check method index instead of identity in case of duplicate method definitions. if (method->GetDexMethodIndex() == declaring_class->GetVirtualMethod(i)->GetDexMethodIndex()) { - found = true; + found_virtual = true; break; } oat_method_index++; } - CHECK(found) << "Didn't find oat method index for virtual method: " << PrettyMethod(method); + CHECK(found_virtual) << "Didn't find oat method index for virtual method: " + << PrettyMethod(method); } DCHECK_EQ(oat_method_index, GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(), @@ -2472,10 +2472,9 @@ const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(), declaring_class->GetDexClassDefIndex(), found); - if (!found) { + if (!(*found)) { return OatFile::OatMethod::Invalid(); } - *found = true; return oat_class.GetOatMethod(oat_method_index); } @@ -3213,9 +3212,9 @@ mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descripto new_class->SetClassLoader(component_type->GetClassLoader()); new_class->SetStatus(mirror::Class::kStatusLoaded, self); { - StackHandleScope<mirror::Class::kImtSize> hs(self, - Runtime::Current()->GetImtUnimplementedMethod()); - new_class->PopulateEmbeddedImtAndVTable(&hs); + StackHandleScope<mirror::Class::kImtSize> hs2(self, + Runtime::Current()->GetImtUnimplementedMethod()); + new_class->PopulateEmbeddedImtAndVTable(&hs2); } new_class->SetStatus(mirror::Class::kStatusInitialized, self); // don't need to set new_class->SetObjectSize(..) 
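
Editor's note: the hs -> hs2 renames in the class_linker.cc hunks above all have the same cause. A nested StackHandleScope was declared with the same name as the one already live in the enclosing scope, which the newly enabled -Wshadow rejects. A minimal, self-contained sketch of that shape, using a hypothetical ScopeGuard stand-in rather than the real StackHandleScope:

#include <cstdio>

// Stand-in for StackHandleScope: an RAII object that is typically named once per scope.
struct ScopeGuard {
  explicit ScopeGuard(int id) : id_(id) { std::printf("enter scope %d\n", id_); }
  ~ScopeGuard() { std::printf("leave scope %d\n", id_); }
  int id_;
};

int main() {
  ScopeGuard hs(1);          // outer scope object
  for (int i = 0; i < 3; ++i) {
    // Declaring another 'ScopeGuard hs(2);' here would shadow the outer 'hs'
    // and fail under -Wshadow -Werror; renaming the inner object avoids that
    // while keeping both scopes alive.
    ScopeGuard hs2(2);
    (void)hs2;
  }
  return 0;
}

Compiling the shadowing variant with g++ -Wshadow -Werror fails, while the renamed form is accepted; the hunks above apply the same rename rather than restructuring the scopes.
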
@@ -3343,8 +3342,8 @@ mirror::Class* ClassLinker::UpdateClass(const char* descriptor, mirror::Class* k for (auto it = class_table_.lower_bound(hash), end = class_table_.end(); it != end && it->first == hash; ++it) { - mirror::Class* klass = it->second.Read(); - if (klass == existing) { + mirror::Class* klass_from_table = it->second.Read(); + if (klass_from_table == existing) { class_table_.erase(it); break; } @@ -3560,7 +3559,7 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) { Handle<mirror::Class> super(hs.NewHandle(klass->GetSuperClass())); if (super.Get() != nullptr) { // Acquire lock to prevent races on verifying the super class. - ObjectLock<mirror::Class> lock(self, super); + ObjectLock<mirror::Class> super_lock(self, super); if (!super->IsVerified() && !super->IsErroneous()) { VerifyClass(self, super); @@ -3865,10 +3864,10 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& klass->SetVirtualMethods(virtuals); } for (size_t i = 0; i < num_virtual_methods; ++i) { - StackHandleScope<1> hs(self); + StackHandleScope<1> hs2(self); mirror::ObjectArray<mirror::ArtMethod>* decoded_methods = soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods); - Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i))); + Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i))); mirror::ArtMethod* clone = CreateProxyMethod(self, klass, prototype); if (UNLIKELY(clone == nullptr)) { CHECK(self->IsExceptionPending()); // OOME. @@ -3917,11 +3916,11 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& CHECK(klass->GetIFields() == nullptr); CheckProxyConstructor(klass->GetDirectMethod(0)); for (size_t i = 0; i < num_virtual_methods; ++i) { - StackHandleScope<2> hs(self); + StackHandleScope<2> hs2(self); mirror::ObjectArray<mirror::ArtMethod>* decoded_methods = soa.Decode<mirror::ObjectArray<mirror::ArtMethod>*>(methods); - Handle<mirror::ArtMethod> prototype(hs.NewHandle(decoded_methods->Get(i))); - Handle<mirror::ArtMethod> virtual_method(hs.NewHandle(klass->GetVirtualMethod(i))); + Handle<mirror::ArtMethod> prototype(hs2.NewHandle(decoded_methods->Get(i))); + Handle<mirror::ArtMethod> virtual_method(hs2.NewHandle(klass->GetVirtualMethod(i))); CheckProxyMethod(virtual_method, prototype); } @@ -4238,8 +4237,8 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, DCHECK(field_it.HasNextStaticField()); CHECK(can_init_statics); for ( ; value_it.HasNext(); value_it.Next(), field_it.Next()) { - StackHandleScope<1> hs(self); - Handle<mirror::ArtField> field(hs.NewHandle( + StackHandleScope<1> hs2(self); + Handle<mirror::ArtField> field(hs2.NewHandle( ResolveField(dex_file, field_it.GetMemberIndex(), dex_cache, class_loader, true))); if (Runtime::Current()->IsActiveTransaction()) { value_it.ReadValueToField<true>(field); @@ -5033,7 +5032,7 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass self->AllowThreadSuspension(); size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods(); if (num_methods > 0) { - StackHandleScope<2> hs(self); + StackHandleScope<2> hs2(self); const bool is_super = i < super_ifcount; const bool super_interface = is_super && extend_super_iftable; Handle<mirror::ObjectArray<mirror::ArtMethod>> method_array; @@ -5043,13 +5042,13 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass DCHECK(if_table != nullptr); DCHECK(if_table->GetMethodArray(i) != nullptr); // If we are 
working on a super interface, try extending the existing method array. - method_array = hs.NewHandle(if_table->GetMethodArray(i)->Clone(self)-> + method_array = hs2.NewHandle(if_table->GetMethodArray(i)->Clone(self)-> AsObjectArray<mirror::ArtMethod>()); // We are overwriting a super class interface, try to only virtual methods instead of the // whole vtable. - input_array = hs.NewHandle(klass->GetVirtualMethods()); + input_array = hs2.NewHandle(klass->GetVirtualMethods()); } else { - method_array = hs.NewHandle(AllocArtMethodArray(self, num_methods)); + method_array = hs2.NewHandle(AllocArtMethodArray(self, num_methods)); // A new interface, we need the whole vtable incase a new interface method is implemented // in the whole superclass. input_array = vtable; @@ -5172,9 +5171,9 @@ bool ClassLinker::LinkInterfaceMethods(Thread* self, Handle<mirror::Class> klass } if (kIsDebugBuild) { - mirror::ObjectArray<mirror::ArtMethod>* vtable = klass->GetVTableDuringLinking(); - for (int i = 0; i < vtable->GetLength(); ++i) { - CHECK(vtable->GetWithoutChecks(i) != nullptr); + mirror::ObjectArray<mirror::ArtMethod>* check_vtable = klass->GetVTableDuringLinking(); + for (int i = 0; i < check_vtable->GetLength(); ++i) { + CHECK(check_vtable->GetWithoutChecks(i) != nullptr); } } @@ -5320,7 +5319,7 @@ bool ClassLinker::LinkFields(Thread* self, Handle<mirror::Class> klass, bool is_ << " class=" << PrettyClass(klass.Get()) << " field=" << PrettyField(field) << " offset=" - << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset())); + << field->GetField32(mirror::ArtField::OffsetOffset()); } Primitive::Type type = field->GetTypeAsPrimitiveType(); bool is_primitive = type != Primitive::kPrimNot; diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index 88e6265df5..70807da22e 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -374,8 +374,8 @@ struct CheckOffset { template <typename T> struct CheckOffsets { - CheckOffsets(bool is_static, const char* class_descriptor) - : is_static(is_static), class_descriptor(class_descriptor) {} + CheckOffsets(bool is_static_in, const char* class_descriptor_in) + : is_static(is_static_in), class_descriptor(class_descriptor_in) {} bool is_static; std::string class_descriptor; std::vector<CheckOffset> offsets; diff --git a/runtime/debugger.cc b/runtime/debugger.cc index a9663bba51..584743b9c7 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -1146,7 +1146,7 @@ void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) { // the primitive types). // Returns a newly-allocated buffer full of RefTypeId values. 
struct ClassListCreator { - explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes(classes) { + explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) { } static bool Visit(mirror::Class* c, void* arg) { @@ -1386,7 +1386,6 @@ JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int c mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>(); for (int i = 0; i < count; ++i) { JDWP::ObjectId id = request->ReadObjectId(); - JDWP::JdwpError error; mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error); if (error != JDWP::ERR_NONE) { return error; @@ -2291,8 +2290,8 @@ void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct CountStackDepthVisitor : public StackVisitor { - explicit CountStackDepthVisitor(Thread* thread) - : StackVisitor(thread, nullptr), depth(0) {} + explicit CountStackDepthVisitor(Thread* thread_in) + : StackVisitor(thread_in, nullptr), depth(0) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses // annotalysis. @@ -2330,10 +2329,11 @@ JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_fram size_t frame_count, JDWP::ExpandBuf* buf) { class GetFrameVisitor : public StackVisitor { public: - GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) + GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in, + JDWP::ExpandBuf* buf_in) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : StackVisitor(thread, nullptr), depth_(0), - start_frame_(start_frame), frame_count_(frame_count), buf_(buf) { + start_frame_(start_frame_in), frame_count_(frame_count_in), buf_(buf_in) { expandBufAdd4BE(buf_, frame_count_); } @@ -2453,9 +2453,9 @@ void Dbg::SuspendSelf() { } struct GetThisVisitor : public StackVisitor { - GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) + GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id) {} + : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id_in) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses // annotalysis. 
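
Editor's note: the _in and _cb suffixes in the debugger.cc visitor constructors above address a second flavor of the same warning. A constructor parameter that reuses the name of the member it initializes, or of a variable in an enclosing scope, is flagged, at least by GCC's -Wshadow. A small sketch, not ART code, of the before/after shape:

#include <cstdint>

struct FrameVisitorSketch {
  // Before: 'explicit FrameVisitorSketch(uint64_t frame_id) : frame_id(frame_id) {}'
  // warns under GCC's -Wshadow because the parameter hides the member of the same name.
  explicit FrameVisitorSketch(uint64_t frame_id_in) : frame_id(frame_id_in) {}
  uint64_t frame_id;
};

int main() {
  FrameVisitorSketch v(7);
  return v.frame_id == 7 ? 0 : 1;
}

Renaming the parameter rather than the field keeps the member name stable for every other reader of the class, which is why the suffix consistently lands on the constructor argument in these hunks.
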
@@ -3421,15 +3421,15 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize // struct DebugCallbackContext { - explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number, + explicit DebugCallbackContext(SingleStepControl* single_step_control_cb, int32_t line_number_cb, const DexFile::CodeItem* code_item) - : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item), - last_pc_valid(false), last_pc(0) { + : single_step_control_(single_step_control_cb), line_number_(line_number_cb), + code_item_(code_item), last_pc_valid(false), last_pc(0) { } - static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) { + static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) { DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context); - if (static_cast<int32_t>(line_number) == context->line_number_) { + if (static_cast<int32_t>(line_number_cb) == context->line_number_) { if (!context->last_pc_valid) { // Everything from this address until the next line change is ours. context->last_pc = address; @@ -4484,9 +4484,9 @@ void Dbg::SetAllocTrackingEnabled(bool enable) { } struct AllocRecordStackVisitor : public StackVisitor { - AllocRecordStackVisitor(Thread* thread, AllocRecord* record) + AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - : StackVisitor(thread, nullptr), record(record), depth(0) {} + : StackVisitor(thread, nullptr), record(record_in), depth(0) {} // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses // annotalysis. diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc index 761441eae6..16bc33f76a 100644 --- a/runtime/dex_file.cc +++ b/runtime/dex_file.cc @@ -39,7 +39,6 @@ #include "mirror/string.h" #include "os.h" #include "safe_map.h" -#include "ScopedFd.h" #include "handle_scope-inl.h" #include "thread.h" #include "utf-inl.h" @@ -47,6 +46,11 @@ #include "well_known_classes.h" #include "zip_archive.h" +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#include "ScopedFd.h" +#pragma GCC diagnostic pop + namespace art { const uint8_t DexFile::kDexMagic[] = { 'd', 'e', 'x', '\n' }; @@ -454,8 +458,8 @@ const DexFile::ClassDef* DexFile::FindClassDef(const char* descriptor) const { index = new Index(num_class_defs); for (uint32_t i = 0; i < num_class_defs; ++i) { const ClassDef& class_def = GetClassDef(i); - const char* descriptor = GetClassDescriptor(class_def); - index->insert(std::make_pair(descriptor, &class_def)); + const char* class_descriptor = GetClassDescriptor(class_def); + index->insert(std::make_pair(class_descriptor, &class_def)); } // Sanity check the index still doesn't exist, only 1 thread should build it. CHECK(class_def_index_.LoadSequentiallyConsistent() == nullptr); diff --git a/runtime/elf_file.cc b/runtime/elf_file.cc index fb53271157..b6cf921f2c 100644 --- a/runtime/elf_file.cc +++ b/runtime/elf_file.cc @@ -60,6 +60,7 @@ extern "C" { // GDB will place breakpoint into this function. // To prevent GCC from inlining or removing it we place noinline attribute // and inline assembler statement inside. 
+ void __attribute__((noinline)) __jit_debug_register_code(); void __attribute__((noinline)) __jit_debug_register_code() { __asm__(""); } @@ -2396,22 +2397,22 @@ bool ElfFileImpl<Elf_Ehdr, Elf_Phdr, Elf_Shdr, Elf_Word, Elf_Shdr* sh = GetSectionHeader(i); CHECK(sh != nullptr); if (sh->sh_type == SHT_REL) { - for (uint32_t i = 0; i < GetRelNum(*sh); i++) { - Elf_Rel& rel = GetRel(*sh, i); + for (uint32_t j = 0; j < GetRelNum(*sh); j++) { + Elf_Rel& rel = GetRel(*sh, j); if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Rel[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), i, + GetFile().GetPath().c_str(), j, static_cast<uint64_t>(rel.r_offset), static_cast<uint64_t>(rel.r_offset + base_address)); } rel.r_offset += base_address; } } else if (sh->sh_type == SHT_RELA) { - for (uint32_t i = 0; i < GetRelaNum(*sh); i++) { - Elf_Rela& rela = GetRela(*sh, i); + for (uint32_t j = 0; j < GetRelaNum(*sh); j++) { + Elf_Rela& rela = GetRela(*sh, j); if (DEBUG_FIXUP) { LOG(INFO) << StringPrintf("In %s moving Elf_Rela[%d] from 0x%" PRIx64 " to 0x%" PRIx64, - GetFile().GetPath().c_str(), i, + GetFile().GetPath().c_str(), j, static_cast<uint64_t>(rela.r_offset), static_cast<uint64_t>(rela.r_offset + base_address)); } diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 2cebd6e7a2..4f61707040 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -521,8 +521,8 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa MethodHelper mh(hs.NewHandle(method)); if (mh.Get()->IsStatic() && !mh.Get()->GetDeclaringClass()->IsInitialized()) { // Ensure static method's class is initialized. 
- StackHandleScope<1> hs(self); - Handle<mirror::Class> h_class(hs.NewHandle(mh.Get()->GetDeclaringClass())); + StackHandleScope<1> hs2(self); + Handle<mirror::Class> h_class(hs2.NewHandle(mh.Get()->GetDeclaringClass())); if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_class, true, true)) { DCHECK(Thread::Current()->IsExceptionPending()) << PrettyMethod(mh.Get()); self->PopManagedStackFragment(fragment); diff --git a/runtime/gc/accounting/heap_bitmap-inl.h b/runtime/gc/accounting/heap_bitmap-inl.h index c67542f484..34c15c7f8b 100644 --- a/runtime/gc/accounting/heap_bitmap-inl.h +++ b/runtime/gc/accounting/heap_bitmap-inl.h @@ -40,9 +40,9 @@ inline bool HeapBitmap::Test(const mirror::Object* obj) { if (LIKELY(bitmap != nullptr)) { return bitmap->Test(obj); } - for (const auto& bitmap : large_object_bitmaps_) { - if (LIKELY(bitmap->HasAddress(obj))) { - return bitmap->Test(obj); + for (const auto& lo_bitmap : large_object_bitmaps_) { + if (LIKELY(lo_bitmap->HasAddress(obj))) { + return lo_bitmap->Test(obj); } } LOG(FATAL) << "Invalid object " << obj; @@ -55,9 +55,9 @@ inline void HeapBitmap::Clear(const mirror::Object* obj) { bitmap->Clear(obj); return; } - for (const auto& bitmap : large_object_bitmaps_) { - if (LIKELY(bitmap->HasAddress(obj))) { - bitmap->Clear(obj); + for (const auto& lo_bitmap : large_object_bitmaps_) { + if (LIKELY(lo_bitmap->HasAddress(obj))) { + lo_bitmap->Clear(obj); } } LOG(FATAL) << "Invalid object " << obj; @@ -70,9 +70,9 @@ inline bool HeapBitmap::Set(const mirror::Object* obj, const LargeObjectSetVisit return bitmap->Set(obj); } visitor(obj); - for (const auto& bitmap : large_object_bitmaps_) { - if (LIKELY(bitmap->HasAddress(obj))) { - return bitmap->Set(obj); + for (const auto& lo_bitmap : large_object_bitmaps_) { + if (LIKELY(lo_bitmap->HasAddress(obj))) { + return lo_bitmap->Set(obj); } } LOG(FATAL) << "Invalid object " << obj; @@ -87,9 +87,9 @@ inline bool HeapBitmap::AtomicTestAndSet(const mirror::Object* obj, return bitmap->AtomicTestAndSet(obj); } visitor(obj); - for (const auto& bitmap : large_object_bitmaps_) { - if (LIKELY(bitmap->HasAddress(obj))) { - return bitmap->AtomicTestAndSet(obj); + for (const auto& lo_bitmap : large_object_bitmaps_) { + if (LIKELY(lo_bitmap->HasAddress(obj))) { + return lo_bitmap->AtomicTestAndSet(obj); } } LOG(FATAL) << "Invalid object " << obj; diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc index acff52d50d..8558f96730 100644 --- a/runtime/gc/allocator/dlmalloc.cc +++ b/runtime/gc/allocator/dlmalloc.cc @@ -19,8 +19,8 @@ #include "base/logging.h" // ART specific morecore implementation defined in space.cc. +static void* art_heap_morecore(void* m, intptr_t increment); #define MORECORE(x) art_heap_morecore(m, x) -extern "C" void* art_heap_morecore(void* m, intptr_t increment); // Custom heap error handling. #define PROCEED_ON_ERROR 0 @@ -31,12 +31,16 @@ static void art_heap_usage_error(const char* function, void* p); // Ugly inclusion of C file so that ART specific #defines configure dlmalloc for our use for // mspaces (regular dlmalloc is still declared in bionic). 
+#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wredundant-decls" #pragma GCC diagnostic ignored "-Wempty-body" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "../../../bionic/libc/upstream-dlmalloc/malloc.c" -#pragma GCC diagnostic warning "-Wstrict-aliasing" -#pragma GCC diagnostic warning "-Wempty-body" +#pragma GCC diagnostic pop +static void* art_heap_morecore(void* m, intptr_t increment) { + return ::art::gc::allocator::ArtDlMallocMoreCore(m, increment); +} static void art_heap_corruption(const char* function) { LOG(::art::FATAL) << "Corrupt heap detected in: " << function; diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h index c7ecbc83ce..0e91a4372c 100644 --- a/runtime/gc/allocator/dlmalloc.h +++ b/runtime/gc/allocator/dlmalloc.h @@ -17,6 +17,8 @@ #ifndef ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_ #define ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_ +#include <cstdint> + // Configure dlmalloc for mspaces. // Avoid a collision with one used in llvm. #undef HAVE_MMAP @@ -28,7 +30,10 @@ #define ONLY_MSPACES 1 #define MALLOC_INSPECT_ALL 1 +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wredundant-decls" #include "../../bionic/libc/upstream-dlmalloc/malloc.h" +#pragma GCC diagnostic pop #ifdef HAVE_ANDROID_OS // Define dlmalloc routines from bionic that cannot be included directly because of redefining @@ -47,4 +52,16 @@ extern "C" void DlmallocMadviseCallback(void* start, void* end, size_t used_byte extern "C" void DlmallocBytesAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg); extern "C" void DlmallocObjectsAllocatedCallback(void* start, void* end, size_t used_bytes, void* arg); +namespace art { +namespace gc { +namespace allocator { + +// Callback from dlmalloc when it needs to increase the footprint. Must be implemented somewhere +// else (currently dlmalloc_space.cc). +void* ArtDlMallocMoreCore(void* mspace, intptr_t increment); + +} // namespace allocator +} // namespace gc +} // namespace art + #endif // ART_RUNTIME_GC_ALLOCATOR_DLMALLOC_H_ diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc index f5e2fed9d7..f9d6a512ce 100644 --- a/runtime/gc/allocator/rosalloc.cc +++ b/runtime/gc/allocator/rosalloc.cc @@ -31,8 +31,6 @@ namespace art { namespace gc { namespace allocator { -extern "C" void* art_heap_rosalloc_morecore(RosAlloc* rosalloc, intptr_t increment); - static constexpr bool kUsePrefetchDuringAllocRun = true; static constexpr bool kPrefetchNewRunDataByZeroing = false; static constexpr size_t kPrefetchStride = 64; @@ -179,7 +177,7 @@ void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type page_map_size_ = new_num_of_pages; DCHECK_LE(page_map_size_, max_page_map_size_); free_page_run_size_map_.resize(new_num_of_pages); - art_heap_rosalloc_morecore(this, increment); + ArtRosAllocMoreCore(this, increment); if (last_free_page_run_size > 0) { // There was a free page run at the end. Expand its size. 
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this)); @@ -745,7 +743,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) { const size_t idx = run->size_bracket_idx_; const size_t bracket_size = bracketSizes[idx]; bool run_was_full = false; - MutexLock mu(self, *size_bracket_locks_[idx]); + MutexLock brackets_mu(self, *size_bracket_locks_[idx]); if (kIsDebugBuild) { run_was_full = run->IsFull(); } @@ -785,7 +783,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) { DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end()); run->ZeroHeader(); { - MutexLock mu(self, lock_); + MutexLock lock_mu(self, lock_); FreePages(self, run, true); } } else { @@ -1243,7 +1241,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { run->to_be_bulk_freed_ = false; #endif size_t idx = run->size_bracket_idx_; - MutexLock mu(self, *size_bracket_locks_[idx]); + MutexLock brackets_mu(self, *size_bracket_locks_[idx]); if (run->IsThreadLocal()) { DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets); DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end()); @@ -1303,7 +1301,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { } if (!run_was_current) { run->ZeroHeader(); - MutexLock mu(self, lock_); + MutexLock lock_mu(self, lock_); FreePages(self, run, true); } } else { @@ -1521,7 +1519,7 @@ bool RosAlloc::Trim() { page_map_size_ = new_num_of_pages; free_page_run_size_map_.resize(new_num_of_pages); DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages); - art_heap_rosalloc_morecore(this, -(static_cast<intptr_t>(decrement))); + ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement))); if (kTraceRosAlloc) { LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from " << footprint_ << " to " << new_footprint; @@ -1737,14 +1735,14 @@ void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) { void RosAlloc::AssertAllThreadLocalRunsAreRevoked() { if (kIsDebugBuild) { Thread* self = Thread::Current(); - MutexLock mu(self, *Locks::runtime_shutdown_lock_); - MutexLock mu2(self, *Locks::thread_list_lock_); + MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_); + MutexLock thread_list_mu(self, *Locks::thread_list_lock_); std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); for (Thread* t : thread_list) { AssertThreadLocalRunsAreRevoked(t); } for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) { - MutexLock mu(self, *size_bracket_locks_[idx]); + MutexLock brackets_mu(self, *size_bracket_locks_[idx]); CHECK_EQ(current_runs_[idx], dedicated_full_run_); } } @@ -1873,11 +1871,11 @@ void RosAlloc::Verify() { Thread* self = Thread::Current(); CHECK(Locks::mutator_lock_->IsExclusiveHeld(self)) << "The mutator locks isn't exclusively locked at " << __PRETTY_FUNCTION__; - MutexLock mu(self, *Locks::thread_list_lock_); + MutexLock thread_list_mu(self, *Locks::thread_list_lock_); ReaderMutexLock wmu(self, bulk_free_lock_); std::vector<Run*> runs; { - MutexLock mu(self, lock_); + MutexLock lock_mu(self, lock_); size_t pm_end = page_map_size_; size_t i = 0; while (i < pm_end) { @@ -1968,7 +1966,7 @@ void RosAlloc::Verify() { std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList(); for (Thread* thread : threads) { for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) { - MutexLock mu(self, *size_bracket_locks_[i]); + MutexLock brackets_mu(self, *size_bracket_locks_[i]); Run* thread_local_run = 
reinterpret_cast<Run*>(thread->GetRosAllocRun(i)); CHECK(thread_local_run != nullptr); CHECK(thread_local_run->IsThreadLocal()); @@ -1977,7 +1975,7 @@ void RosAlloc::Verify() { } } for (size_t i = 0; i < kNumOfSizeBrackets; i++) { - MutexLock mu(self, *size_bracket_locks_[i]); + MutexLock brackets_mu(self, *size_bracket_locks_[i]); Run* current_run = current_runs_[i]; CHECK(current_run != nullptr); if (current_run != dedicated_full_run_) { diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h index a2f8342fd8..2a0bf10d90 100644 --- a/runtime/gc/allocator/rosalloc.h +++ b/runtime/gc/allocator/rosalloc.h @@ -616,6 +616,10 @@ class RosAlloc { }; std::ostream& operator<<(std::ostream& os, const RosAlloc::PageMapKind& rhs); +// Callback from rosalloc when it needs to increase the footprint. Must be implemented somewhere +// else (currently rosalloc_space.cc). +void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment); + } // namespace allocator } // namespace gc } // namespace art diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc index 6691b0f4fc..b2482acaf5 100644 --- a/runtime/gc/collector/mark_compact.cc +++ b/runtime/gc/collector/mark_compact.cc @@ -239,7 +239,7 @@ void MarkCompact::UpdateAndMarkModUnion() { accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); if (table != nullptr) { // TODO: Improve naming. - TimingLogger::ScopedTiming t( + TimingLogger::ScopedTiming t2( space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" : "UpdateAndMarkImageModUnionTable", GetTimings()); table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this); @@ -348,7 +348,7 @@ void MarkCompact::UpdateReferences() { accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space); if (table != nullptr) { // TODO: Improve naming. - TimingLogger::ScopedTiming t( + TimingLogger::ScopedTiming t2( space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" : "UpdateImageModUnionTableReferences", GetTimings()); @@ -538,7 +538,7 @@ void MarkCompact::Sweep(bool swap_bitmaps) { if (!ShouldSweepSpace(alloc_space)) { continue; } - TimingLogger::ScopedTiming t( + TimingLogger::ScopedTiming t2( alloc_space->IsZygoteSpace() ? 
"SweepZygoteSpace" : "SweepAllocSpace", GetTimings()); RecordFree(alloc_space->Sweep(swap_bitmaps)); } diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index e3966e3081..6ad44e6a75 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -667,10 +667,10 @@ class MarkStackTask : public Task { Object* obj = nullptr; if (kUseMarkStackPrefetch) { while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) { - Object* obj = mark_stack_[--mark_stack_pos_]; - DCHECK(obj != nullptr); - __builtin_prefetch(obj); - prefetch_fifo.push_back(obj); + Object* mark_stack_obj = mark_stack_[--mark_stack_pos_]; + DCHECK(mark_stack_obj != nullptr); + __builtin_prefetch(mark_stack_obj); + prefetch_fifo.push_back(mark_stack_obj); } if (UNLIKELY(prefetch_fifo.empty())) { break; @@ -928,7 +928,7 @@ void MarkSweep::ReMarkRoots() { kVisitRootFlagStopLoggingNewRoots | kVisitRootFlagClearRootLog)); if (kVerifyRootsMarked) { - TimingLogger::ScopedTiming t("(Paused)VerifyRoots", GetTimings()); + TimingLogger::ScopedTiming t2("(Paused)VerifyRoots", GetTimings()); Runtime::Current()->VisitRoots(VerifyRootMarked, this); } } @@ -1057,7 +1057,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma // if needed. if (!mark_bitmap->Test(obj)) { if (chunk_free_pos >= kSweepArrayChunkFreeSize) { - TimingLogger::ScopedTiming t("FreeList", GetTimings()); + TimingLogger::ScopedTiming t2("FreeList", GetTimings()); freed.objects += chunk_free_pos; freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); chunk_free_pos = 0; @@ -1069,7 +1069,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma } } if (chunk_free_pos > 0) { - TimingLogger::ScopedTiming t("FreeList", GetTimings()); + TimingLogger::ScopedTiming t2("FreeList", GetTimings()); freed.objects += chunk_free_pos; freed.bytes += alloc_space->FreeList(self, chunk_free_pos, chunk_free_buffer); chunk_free_pos = 0; @@ -1099,10 +1099,10 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma } } { - TimingLogger::ScopedTiming t("RecordFree", GetTimings()); + TimingLogger::ScopedTiming t2("RecordFree", GetTimings()); RecordFree(freed); RecordFreeLOS(freed_los); - t.NewTiming("ResetStack"); + t2.NewTiming("ResetStack"); allocations->Reset(); } sweep_array_free_buffer_mem_map_->MadviseDontNeedAndZero(); @@ -1218,10 +1218,10 @@ void MarkSweep::ProcessMarkStack(bool paused) { Object* obj = NULL; if (kUseMarkStackPrefetch) { while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) { - Object* obj = mark_stack_->PopBack(); - DCHECK(obj != NULL); - __builtin_prefetch(obj); - prefetch_fifo.push_back(obj); + Object* mark_stack_obj = mark_stack_->PopBack(); + DCHECK(mark_stack_obj != NULL); + __builtin_prefetch(mark_stack_obj); + prefetch_fifo.push_back(mark_stack_obj); } if (prefetch_fifo.empty()) { break; diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index e141b6f4ab..cb9f111058 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -224,7 +224,7 @@ void SemiSpace::MarkingPhase() { // Need to do this before the checkpoint since we don't want any threads to add references to // the live stack during the recursive mark. 
if (kUseThreadLocalAllocationStack) { - TimingLogger::ScopedTiming t("RevokeAllThreadLocalAllocationStacks", GetTimings()); + TimingLogger::ScopedTiming t2("RevokeAllThreadLocalAllocationStacks", GetTimings()); heap_->RevokeAllThreadLocalAllocationStacks(self_); } heap_->SwapStacks(self_); @@ -368,7 +368,7 @@ void SemiSpace::MarkReachableObjects() { CHECK_EQ(is_large_object_space_immune_, collect_from_space_only_); space::LargeObjectSpace* los = GetHeap()->GetLargeObjectsSpace(); if (is_large_object_space_immune_ && los != nullptr) { - TimingLogger::ScopedTiming t("VisitLargeObjects", GetTimings()); + TimingLogger::ScopedTiming t2("VisitLargeObjects", GetTimings()); DCHECK(collect_from_space_only_); // Delay copying the live set to the marked set until here from // BindBitmaps() as the large objects on the allocation stack may diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 9fd9a2b377..06cd326d84 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -599,8 +599,8 @@ void Heap::DumpObject(std::ostream& stream, mirror::Object* obj) { } } // Unprotect all the spaces. - for (const auto& space : continuous_spaces_) { - mprotect(space->Begin(), space->Capacity(), PROT_READ | PROT_WRITE); + for (const auto& con_space : continuous_spaces_) { + mprotect(con_space->Begin(), con_space->Capacity(), PROT_READ | PROT_WRITE); } stream << "Object " << obj; if (space != nullptr) { @@ -1266,12 +1266,12 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat continue; } // Attempt to run the collector, if we succeed, re-try the allocation. - const bool gc_ran = + const bool plan_gc_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false) != collector::kGcTypeNone; if (was_default_allocator && allocator != GetCurrentAllocator()) { return nullptr; } - if (gc_ran) { + if (plan_gc_ran) { // Did we free sufficient memory for the allocation to succeed? mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated, usable_size); @@ -1532,7 +1532,7 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() { ScopedThreadStateChange tsc(self, kWaitingPerformingGc); Locks::mutator_lock_->AssertNotHeld(self); { - ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); + ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); MutexLock mu(self, *gc_complete_lock_); // Ensure there is only one GC at a time. WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self); @@ -1604,7 +1604,7 @@ void Heap::TransitionCollector(CollectorType collector_type) { // compacting_gc_disable_count_, this should rarely occurs). for (;;) { { - ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); + ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); MutexLock mu(self, *gc_complete_lock_); // Ensure there is only one GC at a time. WaitForGcToCompleteLocked(kGcCauseCollectorTransition, self); @@ -2079,7 +2079,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus bool compacting_gc; { gc_complete_lock_->AssertNotHeld(self); - ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); + ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete); MutexLock mu(self, *gc_complete_lock_); // Ensure there is only one GC at a time. WaitForGcToCompleteLocked(gc_cause, self); @@ -2646,15 +2646,15 @@ void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets) { if (table != nullptr) { const char* name = space->IsZygoteSpace() ? 
"ZygoteModUnionClearCards" : "ImageModUnionClearCards"; - TimingLogger::ScopedTiming t(name, timings); + TimingLogger::ScopedTiming t2(name, timings); table->ClearCards(); } else if (use_rem_sets && rem_set != nullptr) { DCHECK(collector::SemiSpace::kUseRememberedSet && collector_type_ == kCollectorTypeGSS) << static_cast<int>(collector_type_); - TimingLogger::ScopedTiming t("AllocSpaceRemSetClearCards", timings); + TimingLogger::ScopedTiming t2("AllocSpaceRemSetClearCards", timings); rem_set->ClearCards(); } else if (space->GetType() != space::kSpaceTypeBumpPointerSpace) { - TimingLogger::ScopedTiming t("AllocSpaceClearCards", timings); + TimingLogger::ScopedTiming t2("AllocSpaceClearCards", timings); // No mod union table for the AllocSpace. Age the cards so that the GC knows that these cards // were dirty before the GC started. // TODO: Need to use atomic for the case where aged(cleaning thread) -> dirty(other thread) @@ -2676,7 +2676,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { TimingLogger* const timings = current_gc_iteration_.GetTimings(); TimingLogger::ScopedTiming t(__FUNCTION__, timings); if (verify_pre_gc_heap_) { - TimingLogger::ScopedTiming t("(Paused)PreGcVerifyHeapReferences", timings); + TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyHeapReferences", timings); ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); size_t failures = VerifyHeapReferences(); if (failures > 0) { @@ -2686,7 +2686,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { } // Check that all objects which reference things in the live stack are on dirty cards. if (verify_missing_card_marks_) { - TimingLogger::ScopedTiming t("(Paused)PreGcVerifyMissingCardMarks", timings); + TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyMissingCardMarks", timings); ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); SwapStacks(self); // Sort the live stack so that we can quickly binary search it later. @@ -2695,7 +2695,7 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) { SwapStacks(self); } if (verify_mod_union_table_) { - TimingLogger::ScopedTiming t("(Paused)PreGcVerifyModUnionTables", timings); + TimingLogger::ScopedTiming t2("(Paused)PreGcVerifyModUnionTables", timings); ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_); for (const auto& table_pair : mod_union_tables_) { accounting::ModUnionTable* mod_union_table = table_pair.second; @@ -2727,7 +2727,7 @@ void Heap::PreSweepingGcVerification(collector::GarbageCollector* gc) { // Called before sweeping occurs since we want to make sure we are not going so reclaim any // reachable objects. if (verify_pre_sweeping_heap_) { - TimingLogger::ScopedTiming t("(Paused)PostSweepingVerifyHeapReferences", timings); + TimingLogger::ScopedTiming t2("(Paused)PostSweepingVerifyHeapReferences", timings); CHECK_NE(self->GetState(), kRunnable); WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); // Swapping bound bitmaps does nothing. 
@@ -2760,7 +2760,7 @@ void Heap::PostGcVerificationPaused(collector::GarbageCollector* gc) { RosAllocVerification(timings, "(Paused)PostGcRosAllocVerification"); } if (verify_post_gc_heap_) { - TimingLogger::ScopedTiming t("(Paused)PostGcVerifyHeapReferences", timings); + TimingLogger::ScopedTiming t2("(Paused)PostGcVerifyHeapReferences", timings); ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); size_t failures = VerifyHeapReferences(); if (failures > 0) { diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc index 3106b4c913..73196b20a2 100644 --- a/runtime/gc/heap_test.cc +++ b/runtime/gc/heap_test.cc @@ -48,8 +48,8 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) { Handle<mirror::Class> c( hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;"))); for (size_t i = 0; i < 1024; ++i) { - StackHandleScope<1> hs(soa.Self()); - Handle<mirror::ObjectArray<mirror::Object>> array(hs.NewHandle( + StackHandleScope<1> hs2(soa.Self()); + Handle<mirror::ObjectArray<mirror::Object>> array(hs2.NewHandle( mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048))); for (size_t j = 0; j < 2048; ++j) { mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!"); diff --git a/runtime/gc/reference_processor.cc b/runtime/gc/reference_processor.cc index bfaa2bb0cc..012f9f91f5 100644 --- a/runtime/gc/reference_processor.cc +++ b/runtime/gc/reference_processor.cc @@ -143,7 +143,7 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg); { - TimingLogger::ScopedTiming t(concurrent ? "EnqueueFinalizerReferences" : + TimingLogger::ScopedTiming t2(concurrent ? "EnqueueFinalizerReferences" : "(Paused)EnqueueFinalizerReferences", timings); if (concurrent) { StartPreservingReferences(self); diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc index 0a55b52c08..04b09e9969 100644 --- a/runtime/gc/space/bump_pointer_space.cc +++ b/runtime/gc/space/bump_pointer_space.cc @@ -188,11 +188,11 @@ void BumpPointerSpace::Walk(ObjectCallback* callback, void* arg) { size_t block_size = header->size_; pos += sizeof(BlockHeader); // Skip the header so that we know where the objects mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos); - const mirror::Object* end = reinterpret_cast<const mirror::Object*>(pos + block_size); - CHECK_LE(reinterpret_cast<const uint8_t*>(end), End()); + const mirror::Object* end_obj = reinterpret_cast<const mirror::Object*>(pos + block_size); + CHECK_LE(reinterpret_cast<const uint8_t*>(end_obj), End()); // We don't know how many objects are allocated in the current block. When we hit a null class // assume its the end. TODO: Have a thread update the header when it flushes the block? 
- while (obj < end && obj->GetClass() != nullptr) { + while (obj < end_obj && obj->GetClass() != nullptr) { callback(obj, arg); obj = GetNextObject(obj); } diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index 445c720d4c..3072c23bf3 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -213,27 +213,6 @@ size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p } } -// Callback from dlmalloc when it needs to increase the footprint -extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) { - Heap* heap = Runtime::Current()->GetHeap(); - DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace(); - // Support for multiple DlMalloc provided by a slow path. - if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) { - dlmalloc_space = nullptr; - for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) { - if (space->IsDlMallocSpace()) { - DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace(); - if (cur_dlmalloc_space->GetMspace() == mspace) { - dlmalloc_space = cur_dlmalloc_space; - break; - } - } - } - CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace; - } - return dlmalloc_space->MoreCore(increment); -} - size_t DlMallocSpace::Trim() { MutexLock mu(Thread::Current(), lock_); // Trim to release memory at the end of the space. @@ -330,5 +309,31 @@ void DlMallocSpace::LogFragmentationAllocFailure(std::ostream& os, size_t failed } } // namespace space + +namespace allocator { + +// Implement the dlmalloc morecore callback. +void* ArtDlMallocMoreCore(void* mspace, intptr_t increment) { + Heap* heap = Runtime::Current()->GetHeap(); + ::art::gc::space::DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace(); + // Support for multiple DlMalloc provided by a slow path. + if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) { + dlmalloc_space = nullptr; + for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) { + if (space->IsDlMallocSpace()) { + ::art::gc::space::DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace(); + if (cur_dlmalloc_space->GetMspace() == mspace) { + dlmalloc_space = cur_dlmalloc_space; + break; + } + } + } + CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace; + } + return dlmalloc_space->MoreCore(increment); +} + +} // namespace allocator + } // namespace gc } // namespace art diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc index 161eba9c1d..ff8b570a02 100644 --- a/runtime/gc/space/rosalloc_space.cc +++ b/runtime/gc/space/rosalloc_space.cc @@ -228,15 +228,6 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p return bytes_freed; } -// Callback from rosalloc when it needs to increase the footprint -extern "C" void* art_heap_rosalloc_morecore(allocator::RosAlloc* rosalloc, intptr_t increment) { - Heap* heap = Runtime::Current()->GetHeap(); - RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc); - DCHECK(rosalloc_space != nullptr); - DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc); - return rosalloc_space->MoreCore(increment); -} - size_t RosAllocSpace::Trim() { VLOG(heap) << "RosAllocSpace::Trim() "; { @@ -367,5 +358,19 @@ void RosAllocSpace::Clear() { } } // namespace space + +namespace allocator { + +// Callback from rosalloc when it needs to increase the footprint. 
+void* ArtRosAllocMoreCore(allocator::RosAlloc* rosalloc, intptr_t increment) { + Heap* heap = Runtime::Current()->GetHeap(); + art::gc::space::RosAllocSpace* rosalloc_space = heap->GetRosAllocSpace(rosalloc); + DCHECK(rosalloc_space != nullptr); + DCHECK_EQ(rosalloc_space->GetRosAlloc(), rosalloc); + return rosalloc_space->MoreCore(increment); +} + +} // namespace allocator + } // namespace gc } // namespace art diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc index b233805e4c..486d79ad1b 100644 --- a/runtime/gc/space/space.cc +++ b/runtime/gc/space/space.cc @@ -133,8 +133,8 @@ void ContinuousMemMapAllocSpace::SwapBitmaps() { mark_bitmap_->SetName(temp_name); } -AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps, space::Space* space) - : swap_bitmaps(swap_bitmaps), space(space), self(Thread::Current()) { +AllocSpace::SweepCallbackContext::SweepCallbackContext(bool swap_bitmaps_in, space::Space* space_in) + : swap_bitmaps(swap_bitmaps_in), space(space_in), self(Thread::Current()) { } } // namespace space diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h index 529fa0c05f..84a3e80636 100644 --- a/runtime/instruction_set.h +++ b/runtime/instruction_set.h @@ -370,7 +370,8 @@ static inline constexpr TwoWordReturn GetTwoWordFailureValue() { // Use the lower 32b for the method pointer and the upper 32b for the code pointer. static inline TwoWordReturn GetTwoWordSuccessValue(uintptr_t hi, uintptr_t lo) { - uint32_t lo32 = static_cast<uint32_t>(lo); + static_assert(sizeof(uint32_t) == sizeof(uintptr_t), "Unexpected size difference"); + uint32_t lo32 = lo; uint64_t hi64 = static_cast<uint64_t>(hi); return ((hi64 << 32) | lo32); } diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc index 160e8c36a3..003e1601ce 100644 --- a/runtime/instrumentation.cc +++ b/runtime/instrumentation.cc @@ -177,8 +177,9 @@ void Instrumentation::InstallStubsForMethod(mirror::ArtMethod* method) { static void InstrumentationInstallStack(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct InstallStackVisitor : public StackVisitor { - InstallStackVisitor(Thread* thread, Context* context, uintptr_t instrumentation_exit_pc) - : StackVisitor(thread, context), instrumentation_stack_(thread->GetInstrumentationStack()), + InstallStackVisitor(Thread* thread_in, Context* context, uintptr_t instrumentation_exit_pc) + : StackVisitor(thread_in, context), + instrumentation_stack_(thread_in->GetInstrumentationStack()), instrumentation_exit_pc_(instrumentation_exit_pc), reached_existing_instrumentation_frames_(false), instrumentation_stack_depth_(0), last_return_pc_(0) { @@ -316,12 +317,12 @@ static void InstrumentationInstallStack(Thread* thread, void* arg) static void InstrumentationRestoreStack(Thread* thread, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { struct RestoreStackVisitor : public StackVisitor { - RestoreStackVisitor(Thread* thread, uintptr_t instrumentation_exit_pc, + RestoreStackVisitor(Thread* thread_in, uintptr_t instrumentation_exit_pc, Instrumentation* instrumentation) - : StackVisitor(thread, NULL), thread_(thread), + : StackVisitor(thread_in, NULL), thread_(thread_in), instrumentation_exit_pc_(instrumentation_exit_pc), instrumentation_(instrumentation), - instrumentation_stack_(thread->GetInstrumentationStack()), + instrumentation_stack_(thread_in->GetInstrumentationStack()), frames_removed_(0) {} virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { diff --git a/runtime/intern_table.cc 
b/runtime/intern_table.cc index 89586b0982..95186c6ad6 100644 --- a/runtime/intern_table.cc +++ b/runtime/intern_table.cc @@ -192,9 +192,9 @@ mirror::String* InternTable::LookupStringFromImage(mirror::String* s) const DexFile::StringId* string_id = dex_file->FindStringId(utf8.c_str()); if (string_id != NULL) { uint32_t string_idx = dex_file->GetIndexForStringId(*string_id); - mirror::String* image = dex_cache->GetResolvedString(string_idx); - if (image != NULL) { - return image; + mirror::String* image_string = dex_cache->GetResolvedString(string_idx); + if (image_string != NULL) { + return image_string; } } } diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 44e2029329..18de133147 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -315,7 +315,7 @@ enum InterpreterImplKind { kSwitchImpl, // Switch-based interpreter implementation. kComputedGotoImplKind // Computed-goto-based interpreter implementation. }; -std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) { +static std::ostream& operator<<(std::ostream& os, const InterpreterImplKind& rhs) { os << ((rhs == kSwitchImpl) ? "Switch-based interpreter" : "Computed-goto-based interpreter"); return os; } diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index 5c77b96a5f..eb80c307f6 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -855,12 +855,12 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh, // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail // going the reflective Dex way. Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass(); - String* name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString(); + String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString(); ArtField* found = NULL; ObjectArray<ArtField>* fields = klass->GetIFields(); for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) { ArtField* f = fields->Get(i); - if (name->Equals(f->GetName())) { + if (name2->Equals(f->GetName())) { found = f; } } @@ -868,14 +868,14 @@ static void UnstartedRuntimeInvoke(Thread* self, MethodHelper* mh, fields = klass->GetSFields(); for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) { ArtField* f = fields->Get(i); - if (name->Equals(f->GetName())) { + if (name2->Equals(f->GetName())) { found = f; } } } CHECK(found != NULL) << "Failed to find field in Class.getDeclaredField in un-started runtime. name=" - << name->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass); + << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass); // TODO: getDeclaredField calls GetType once the field is found to ensure a // NoClassDefFoundError is thrown if the field's type cannot be resolved. 
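Aside: the interpreter.cc change above gives a file-local operator<< internal linkage with static, presumably to satisfy the stricter declaration-related warnings this change set enables or trials (see -Wredundant-decls and the commented-out -Wmissing-declarations in the build flags). A helper that is never declared in a header can be kept static so it needs no separate declaration and emits no external symbol. A small standalone sketch, with an invented enum standing in for InterpreterImplKind:

#include <iostream>

enum class Impl { kSwitch, kGoto };

// Internal linkage: only this translation unit sees the overload, so no
// header declaration is required.
static std::ostream& operator<<(std::ostream& os, Impl impl) {
  return os << (impl == Impl::kSwitch ? "switch" : "goto");
}

int main() {
  std::cout << Impl::kSwitch << "\n";
  return 0;
}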
Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass(); diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc index fe91bb6d47..df7d068922 100644 --- a/runtime/jdwp/jdwp_adb.cc +++ b/runtime/jdwp/jdwp_adb.cc @@ -84,13 +84,13 @@ struct JdwpAdbState : public JdwpNetStateBase { shutting_down_ = true; int control_sock = this->control_sock_; - int clientSock = this->clientSock; + int local_clientSock = this->clientSock; /* clear these out so it doesn't wake up and try to reuse them */ this->control_sock_ = this->clientSock = -1; - if (clientSock != -1) { - shutdown(clientSock, SHUT_RDWR); + if (local_clientSock != -1) { + shutdown(local_clientSock, SHUT_RDWR); } if (control_sock != -1) { diff --git a/runtime/jdwp/jdwp_bits.h b/runtime/jdwp/jdwp_bits.h index 9f80cbe307..f9cf9ca0d9 100644 --- a/runtime/jdwp/jdwp_bits.h +++ b/runtime/jdwp/jdwp_bits.h @@ -68,7 +68,7 @@ static inline void AppendUtf16BE(std::vector<uint8_t>& bytes, const uint16_t* ch // @deprecated static inline void Set1(uint8_t* buf, uint8_t val) { - *buf = (uint8_t)(val); + *buf = val; } // @deprecated diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc index d1229b28a8..44f713ca96 100644 --- a/runtime/jdwp/jdwp_event.cc +++ b/runtime/jdwp/jdwp_event.cc @@ -138,7 +138,7 @@ static bool NeedsFullDeoptimization(JdwpEventKind eventKind) { } } -uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) { +static uint32_t GetInstrumentationEventFor(JdwpEventKind eventKind) { switch (eventKind) { case EK_BREAKPOINT: case EK_SINGLE_STEP: diff --git a/runtime/jdwp/jdwp_socket.cc b/runtime/jdwp/jdwp_socket.cc index 4a80957b11..e8c08561d0 100644 --- a/runtime/jdwp/jdwp_socket.cc +++ b/runtime/jdwp/jdwp_socket.cc @@ -170,20 +170,20 @@ static JdwpSocketState* SocketStartup(JdwpState* state, uint16_t port, bool prob * for an open port.) */ void JdwpSocketState::Shutdown() { - int listenSock = this->listenSock; - int clientSock = this->clientSock; + int local_listenSock = this->listenSock; + int local_clientSock = this->clientSock; /* clear these out so it doesn't wake up and try to reuse them */ this->listenSock = this->clientSock = -1; /* "shutdown" dislodges blocking read() and accept() calls */ - if (listenSock != -1) { - shutdown(listenSock, SHUT_RDWR); - close(listenSock); + if (local_listenSock != -1) { + shutdown(local_listenSock, SHUT_RDWR); + close(local_listenSock); } - if (clientSock != -1) { - shutdown(clientSock, SHUT_RDWR); - close(clientSock); + if (local_clientSock != -1) { + shutdown(local_clientSock, SHUT_RDWR); + close(local_clientSock); } WakePipe(); diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc index bf72c7b619..91239944fe 100644 --- a/runtime/jdwp/object_registry.cc +++ b/runtime/jdwp/object_registry.cc @@ -214,10 +214,10 @@ void ObjectRegistry::DisposeObject(JDWP::ObjectId id, uint32_t reference_count) // Erase the object from the maps. Note object may be null if it's // a weak ref and the GC has cleared it. 
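Aside: in the JDWP hunks above, the locals that snapshot clientSock and listenSock used the same names as the members they copy, which -Wshadow now reports; the fix is a distinct local_ prefix. A standalone sketch of the pattern (Connection and fd are invented for illustration):

#include <unistd.h>

class Connection {
 public:
  void Shutdown() {
    // int fd = this->fd;      // -Wshadow: local 'fd' shadows the member
    int local_fd = this->fd;   // distinct name, as with local_clientSock above
    this->fd = -1;             // clear the member so it is not reused
    if (local_fd != -1) {
      close(local_fd);
    }
  }

 private:
  int fd = -1;
};

int main() {
  Connection c;
  c.Shutdown();
  return 0;
}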
int32_t hash_code = entry->identity_hash_code; - for (auto it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end(); - it != end && it->first == hash_code; ++it) { - if (entry == it->second) { - object_to_entry_.erase(it); + for (auto inner_it = object_to_entry_.lower_bound(hash_code), end = object_to_entry_.end(); + inner_it != end && inner_it->first == hash_code; ++inner_it) { + if (entry == inner_it->second) { + object_to_entry_.erase(inner_it); break; } } diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc index 180e3d7865..b2d3835405 100644 --- a/runtime/jni_env_ext.cc +++ b/runtime/jni_env_ext.cc @@ -28,9 +28,9 @@ static constexpr size_t kMonitorsMax = 4096; // Arbitrary sanity check. static constexpr size_t kLocalsInitial = 64; // Arbitrary. -JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm) - : self(self), - vm(vm), +JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in) + : self(self_in), + vm(vm_in), local_ref_cookie(IRT_FIRST_SEGMENT), locals(kLocalsInitial, kLocalsMax, kLocal), check_jni(false), diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index dd66af72f7..67e52cbef9 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -196,8 +196,8 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con // Failed to find type from the signature of the field. DCHECK(soa.Self()->IsExceptionPending()); ThrowLocation throw_location; - StackHandleScope<1> hs(soa.Self()); - Handle<mirror::Throwable> cause(hs.NewHandle(soa.Self()->GetException(&throw_location))); + StackHandleScope<1> hs2(soa.Self()); + Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException(&throw_location))); soa.Self()->ClearException(); std::string temp; soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;", diff --git a/runtime/lock_word-inl.h b/runtime/lock_word-inl.h index cf6f83cb62..c52578f8a2 100644 --- a/runtime/lock_word-inl.h +++ b/runtime/lock_word-inl.h @@ -34,13 +34,13 @@ inline uint32_t LockWord::ThinLockCount() const { inline Monitor* LockWord::FatLockMonitor() const { DCHECK_EQ(GetState(), kFatLocked); - MonitorId mon_id = static_cast<MonitorId>(value_ & ~(kStateMask << kStateShift)); + MonitorId mon_id = value_ & ~(kStateMask << kStateShift); return MonitorPool::MonitorFromMonitorId(mon_id); } inline size_t LockWord::ForwardingAddress() const { DCHECK_EQ(GetState(), kForwardingAddress); - return static_cast<size_t>(value_ << kStateSize); + return value_ << kStateSize; } inline LockWord::LockWord() : value_(0) { diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index 51aba9c374..8303f845a8 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -28,7 +28,12 @@ #endif #include "base/stringprintf.h" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" #include "ScopedFd.h" +#pragma GCC diagnostic pop + #include "thread-inl.h" #include "utils.h" diff --git a/runtime/mem_map.h b/runtime/mem_map.h index df1222c39d..9b003aa66c 100644 --- a/runtime/mem_map.h +++ b/runtime/mem_map.h @@ -175,6 +175,7 @@ class MemMap { friend class MemMapTest; // To allow access to base_begin_ and base_size_. 
}; std::ostream& operator<<(std::ostream& os, const MemMap& mem_map); +std::ostream& operator<<(std::ostream& os, const MemMap::Maps& mem_maps); } // namespace art diff --git a/runtime/memory_region.h b/runtime/memory_region.h index 4eb6d47537..b3820be26c 100644 --- a/runtime/memory_region.h +++ b/runtime/memory_region.h @@ -32,7 +32,7 @@ namespace art { class MemoryRegion FINAL : public ValueObject { public: MemoryRegion() : pointer_(nullptr), size_(0) {} - MemoryRegion(void* pointer, uintptr_t size) : pointer_(pointer), size_(size) {} + MemoryRegion(void* pointer_in, uintptr_t size_in) : pointer_(pointer_in), size_(size_in) {} void* pointer() const { return pointer_; } size_t size() const { return size_; } @@ -78,10 +78,10 @@ class MemoryRegion FINAL : public ValueObject { void CopyFrom(size_t offset, const MemoryRegion& from) const; // Compute a sub memory region based on an existing one. - MemoryRegion Subregion(uintptr_t offset, uintptr_t size) const { - CHECK_GE(this->size(), size); - CHECK_LE(offset, this->size() - size); - return MemoryRegion(reinterpret_cast<void*>(start() + offset), size); + MemoryRegion Subregion(uintptr_t offset, uintptr_t size_in) const { + CHECK_GE(this->size(), size_in); + CHECK_LE(offset, this->size() - size_in); + return MemoryRegion(reinterpret_cast<void*>(start() + offset), size_in); } // Compute an extended memory region based on an existing one. diff --git a/runtime/mirror/array.cc b/runtime/mirror/array.cc index 636be3346a..b92f01797a 100644 --- a/runtime/mirror/array.cc +++ b/runtime/mirror/array.cc @@ -58,8 +58,8 @@ static Array* RecursiveCreateMultiArray(Thread* self, if (current_dimension + 1 < dimensions->GetLength()) { // Create a new sub-array in every element of the array. for (int32_t i = 0; i < array_length; i++) { - StackHandleScope<1> hs(self); - Handle<mirror::Class> h_component_type(hs.NewHandle(array_class->GetComponentType())); + StackHandleScope<1> hs2(self); + Handle<mirror::Class> h_component_type(hs2.NewHandle(array_class->GetComponentType())); Array* sub_array = RecursiveCreateMultiArray(self, h_component_type, current_dimension + 1, dimensions); if (UNLIKELY(sub_array == nullptr)) { diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index 61bfe41ae6..566505911b 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -628,8 +628,8 @@ ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const String HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k)); // Is this field in any of this class' interfaces? for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) { - StackHandleScope<1> hs(self); - Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i))); + StackHandleScope<1> hs2(self); + Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i))); f = FindStaticField(self, interface, name, type); if (f != nullptr) { return f; @@ -652,8 +652,8 @@ ArtField* Class::FindStaticField(Thread* self, Handle<Class> klass, const DexCac HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k)); // Is this field in any of this class' interfaces? 
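Aside: the mem_map.cc hunk above (and the dalvik_system_DexFile.cc and dalvik_system_VMRuntime.cc hunks further down) wraps includes of libnativehelper headers in GCC diagnostic pragmas, so -Wshadow stays on for ART's own code while being suppressed for headers the project cannot edit. A self-contained sketch of the push/ignored/pop pattern; the shadowing function below merely stands in for such a header:

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
// In the real change an #include such as "ScopedFd.h" sits here; anything in
// this region is exempt from -Wshadow.
static int global_value = 1;
static int Shadowing() {
  int global_value = 2;   // would normally warn, but the warning is suppressed
  return global_value;
}
#pragma GCC diagnostic pop
// From here on, -Wshadow is back in effect.

int main() { return Shadowing() - global_value - 1; }   // exits with 0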
for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) { - StackHandleScope<1> hs(self); - Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i))); + StackHandleScope<1> hs2(self); + Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i))); f = FindStaticField(self, interface, dex_cache, dex_field_idx); if (f != nullptr) { return f; @@ -680,8 +680,8 @@ ArtField* Class::FindField(Thread* self, Handle<Class> klass, const StringPiece& StackHandleScope<1> hs(self); HandleWrapper<mirror::Class> h_k(hs.NewHandleWrapper(&k)); for (uint32_t i = 0; i < h_k->NumDirectInterfaces(); ++i) { - StackHandleScope<1> hs(self); - Handle<mirror::Class> interface(hs.NewHandle(GetDirectInterface(self, h_k, i))); + StackHandleScope<1> hs2(self); + Handle<mirror::Class> interface(hs2.NewHandle(GetDirectInterface(self, h_k, i))); f = interface->FindStaticField(self, interface, name, type); if (f != nullptr) { return f; diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 5020ced396..0439428983 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -543,7 +543,7 @@ void Monitor::Notify(Thread* self) { thread->SetWaitNext(nullptr); // Check to see if the thread is still waiting. - MutexLock mu(self, *thread->GetWaitMutex()); + MutexLock wait_mu(self, *thread->GetWaitMutex()); if (thread->GetWaitMonitor() != nullptr) { thread->GetWaitConditionVariable()->Signal(self); return; @@ -992,12 +992,12 @@ void Monitor::VisitLocks(StackVisitor* stack_visitor, void (*callback)(mirror::O for (size_t i = 0; i < monitor_enter_dex_pcs.size(); ++i) { // The verifier works in terms of the dex pcs of the monitor-enter instructions. // We want the registers used by those instructions (so we can read the values out of them). - uint32_t dex_pc = monitor_enter_dex_pcs[i]; - uint16_t monitor_enter_instruction = code_item->insns_[dex_pc]; + uint32_t monitor_dex_pc = monitor_enter_dex_pcs[i]; + uint16_t monitor_enter_instruction = code_item->insns_[monitor_dex_pc]; // Quick sanity check. if ((monitor_enter_instruction & 0xff) != Instruction::MONITOR_ENTER) { - LOG(FATAL) << "expected monitor-enter @" << dex_pc << "; was " + LOG(FATAL) << "expected monitor-enter @" << monitor_dex_pc << "; was " << reinterpret_cast<void*>(monitor_enter_instruction); } diff --git a/runtime/monitor_test.cc b/runtime/monitor_test.cc index 704e0410a5..adc7848ae0 100644 --- a/runtime/monitor_test.cc +++ b/runtime/monitor_test.cc @@ -341,8 +341,7 @@ static void CommonWaitSetup(MonitorTest* test, ClassLinker* class_linker, uint64 // Wake the watchdog. { - Thread* self = Thread::Current(); - ScopedObjectAccess soa(self); + ScopedObjectAccess soa(Thread::Current()); test->watchdog_object_.Get()->MonitorEnter(self); // Lock the object. test->watchdog_object_.Get()->NotifyAll(self); // Wake up waiting parties. diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc index 2d038cfc46..012e03e051 100644 --- a/runtime/native/dalvik_system_DexFile.cc +++ b/runtime/native/dalvik_system_DexFile.cc @@ -14,6 +14,8 @@ * limitations under the License. 
*/ +#include "dalvik_system_DexFile.h" + #include <algorithm> #include <set> #include <fcntl.h> @@ -43,13 +45,17 @@ #include "profiler.h" #include "runtime.h" #include "scoped_thread_state_change.h" -#include "ScopedFd.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" #include "utils.h" #include "well_known_classes.h" #include "zip_archive.h" +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" +#include "ScopedFd.h" +#pragma GCC diagnostic pop + namespace art { // A smart pointer that provides read-only access to a Java string's UTF chars. diff --git a/runtime/native/dalvik_system_DexFile.h b/runtime/native/dalvik_system_DexFile.h new file mode 100644 index 0000000000..487df05b34 --- /dev/null +++ b/runtime/native/dalvik_system_DexFile.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_ +#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_ + +#include <jni.h> + +namespace art { + +void register_dalvik_system_DexFile(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_DEXFILE_H_ diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index ceff2065ba..6c82eb22bd 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "dalvik_system_VMDebug.h" + #include <string.h> #include <unistd.h> diff --git a/runtime/native/dalvik_system_VMDebug.h b/runtime/native/dalvik_system_VMDebug.h new file mode 100644 index 0000000000..b7eb8a8379 --- /dev/null +++ b/runtime/native/dalvik_system_VMDebug.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_ +#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_ + +#include <jni.h> + +namespace art { + +void register_dalvik_system_VMDebug(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMDEBUG_H_ diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index e1ceb8c864..fdba43ed8b 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -14,6 +14,8 @@ * limitations under the License. 
*/ +#include "dalvik_system_VMRuntime.h" + #include <limits.h> #include "ScopedUtfChars.h" @@ -38,7 +40,11 @@ #include "scoped_thread_state_change.h" #include "thread.h" #include "thread_list.h" + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wshadow" #include "toStringArray.h" +#pragma GCC diagnostic pop namespace art { @@ -386,26 +392,26 @@ static void PreloadDexCachesStatsFilled(DexCacheStats* filled) const DexFile* dex_file = boot_class_path[i]; CHECK(dex_file != NULL); mirror::DexCache* dex_cache = linker->FindDexCache(*dex_file); - for (size_t i = 0; i < dex_cache->NumStrings(); i++) { - mirror::String* string = dex_cache->GetResolvedString(i); + for (size_t j = 0; j < dex_cache->NumStrings(); j++) { + mirror::String* string = dex_cache->GetResolvedString(j); if (string != NULL) { filled->num_strings++; } } - for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { - mirror::Class* klass = dex_cache->GetResolvedType(i); + for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) { + mirror::Class* klass = dex_cache->GetResolvedType(j); if (klass != NULL) { filled->num_types++; } } - for (size_t i = 0; i < dex_cache->NumResolvedFields(); i++) { - mirror::ArtField* field = dex_cache->GetResolvedField(i); + for (size_t j = 0; j < dex_cache->NumResolvedFields(); j++) { + mirror::ArtField* field = dex_cache->GetResolvedField(j); if (field != NULL) { filled->num_fields++; } } - for (size_t i = 0; i < dex_cache->NumResolvedMethods(); i++) { - mirror::ArtMethod* method = dex_cache->GetResolvedMethod(i); + for (size_t j = 0; j < dex_cache->NumResolvedMethods(); j++) { + mirror::ArtMethod* method = dex_cache->GetResolvedMethod(j); if (method != NULL) { filled->num_methods++; } @@ -450,14 +456,14 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) { Handle<mirror::DexCache> dex_cache(hs.NewHandle(linker->FindDexCache(*dex_file))); if (kPreloadDexCachesStrings) { - for (size_t i = 0; i < dex_cache->NumStrings(); i++) { - PreloadDexCachesResolveString(dex_cache, i, strings); + for (size_t j = 0; j < dex_cache->NumStrings(); j++) { + PreloadDexCachesResolveString(dex_cache, j, strings); } } if (kPreloadDexCachesTypes) { - for (size_t i = 0; i < dex_cache->NumResolvedTypes(); i++) { - PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), i); + for (size_t j = 0; j < dex_cache->NumResolvedTypes(); j++) { + PreloadDexCachesResolveType(soa.Self(), dex_cache.Get(), j); } } diff --git a/runtime/native/dalvik_system_VMRuntime.h b/runtime/native/dalvik_system_VMRuntime.h new file mode 100644 index 0000000000..795caa5ec6 --- /dev/null +++ b/runtime/native/dalvik_system_VMRuntime.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_ +#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_ + +#include <jni.h> + +namespace art { + +void register_dalvik_system_VMRuntime(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMRUNTIME_H_ diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc index eef1c46c1f..e396dad5c3 100644 --- a/runtime/native/dalvik_system_VMStack.cc +++ b/runtime/native/dalvik_system_VMStack.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "dalvik_system_VMStack.h" + #include "jni_internal.h" #include "nth_caller_visitor.h" #include "mirror/art_method-inl.h" @@ -87,8 +89,10 @@ static jobject VMStack_getCallingClassLoader(JNIEnv* env, jclass) { static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject javaBootstrap, jobject javaSystem) { struct ClosestUserClassLoaderVisitor : public StackVisitor { - ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap, mirror::Object* system) - : StackVisitor(thread, NULL), bootstrap(bootstrap), system(system), class_loader(NULL) {} + ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap_in, + mirror::Object* system_in) + : StackVisitor(thread, NULL), bootstrap(bootstrap_in), system(system_in), + class_loader(NULL) {} bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { DCHECK(class_loader == NULL); diff --git a/runtime/native/dalvik_system_VMStack.h b/runtime/native/dalvik_system_VMStack.h new file mode 100644 index 0000000000..5638f99ec1 --- /dev/null +++ b/runtime/native/dalvik_system_VMStack.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_ +#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_ + +#include <jni.h> + +namespace art { + +void register_dalvik_system_VMStack(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_VMSTACK_H_ diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc index adc7c4f410..09669543a8 100644 --- a/runtime/native/dalvik_system_ZygoteHooks.cc +++ b/runtime/native/dalvik_system_ZygoteHooks.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "dalvik_system_ZygoteHooks.h" + #include <stdlib.h> #include "debugger.h" diff --git a/runtime/native/dalvik_system_ZygoteHooks.h b/runtime/native/dalvik_system_ZygoteHooks.h new file mode 100644 index 0000000000..ca0658d318 --- /dev/null +++ b/runtime/native/dalvik_system_ZygoteHooks.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_ +#define ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_ + +#include <jni.h> + +namespace art { + +void register_dalvik_system_ZygoteHooks(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_DALVIK_SYSTEM_ZYGOTEHOOKS_H_ diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index b11cbdfb92..1ea75f386f 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_Class.h" + #include "class_linker.h" #include "dex_file-inl.h" #include "jni_internal.h" diff --git a/runtime/native/java_lang_Class.h b/runtime/native/java_lang_Class.h new file mode 100644 index 0000000000..8f769c39e9 --- /dev/null +++ b/runtime/native/java_lang_Class.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_Class(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_CLASS_H_ diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc index c1c6c26047..27eae46236 100644 --- a/runtime/native/java_lang_DexCache.cc +++ b/runtime/native/java_lang_DexCache.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_DexCache.h" + #include "dex_file.h" #include "jni_internal.h" #include "mirror/dex_cache.h" diff --git a/runtime/native/java_lang_DexCache.h b/runtime/native/java_lang_DexCache.h new file mode 100644 index 0000000000..b1c1f5e72c --- /dev/null +++ b/runtime/native/java_lang_DexCache.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_DexCache(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_ diff --git a/runtime/native/java_lang_Object.cc b/runtime/native/java_lang_Object.cc index 4768f48d9c..49cacdf156 100644 --- a/runtime/native/java_lang_Object.cc +++ b/runtime/native/java_lang_Object.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_Object.h" + #include "jni_internal.h" #include "mirror/object-inl.h" #include "scoped_fast_native_object_access.h" diff --git a/runtime/native/java_lang_Object.h b/runtime/native/java_lang_Object.h new file mode 100644 index 0000000000..c860571904 --- /dev/null +++ b/runtime/native/java_lang_Object.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_Object(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_OBJECT_H_ diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc index f9a1cee2d8..dc0cb7bad6 100644 --- a/runtime/native/java_lang_Runtime.cc +++ b/runtime/native/java_lang_Runtime.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_Runtime.h" + #include <dlfcn.h> #include <limits.h> #include <unistd.h> diff --git a/runtime/native/java_lang_Runtime.h b/runtime/native/java_lang_Runtime.h new file mode 100644 index 0000000000..ceda06bde9 --- /dev/null +++ b/runtime/native/java_lang_Runtime.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_Runtime(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_RUNTIME_H_ diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc index d6b47ebc87..4ea2546e30 100644 --- a/runtime/native/java_lang_String.cc +++ b/runtime/native/java_lang_String.cc @@ -14,6 +14,8 @@ * limitations under the License. 
*/ +#include "java_lang_String.h" + #include "common_throws.h" #include "jni_internal.h" #include "mirror/string-inl.h" diff --git a/runtime/native/java_lang_String.h b/runtime/native/java_lang_String.h new file mode 100644 index 0000000000..357eb3daf9 --- /dev/null +++ b/runtime/native/java_lang_String.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_String(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_STRING_H_ diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc index 43681a70fd..f79be56aeb 100644 --- a/runtime/native/java_lang_System.cc +++ b/runtime/native/java_lang_System.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_System.h" + #include "common_throws.h" #include "gc/accounting/card_table-inl.h" #include "jni_internal.h" diff --git a/runtime/native/java_lang_System.h b/runtime/native/java_lang_System.h new file mode 100644 index 0000000000..e371fa5db4 --- /dev/null +++ b/runtime/native/java_lang_System.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_System(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_SYSTEM_H_ diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc index c0c72651de..0722a2485d 100644 --- a/runtime/native/java_lang_Thread.cc +++ b/runtime/native/java_lang_Thread.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_Thread.h" + #include "common_throws.h" #include "debugger.h" #include "jni_internal.h" diff --git a/runtime/native/java_lang_Thread.h b/runtime/native/java_lang_Thread.h new file mode 100644 index 0000000000..7700ce29a8 --- /dev/null +++ b/runtime/native/java_lang_Thread.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_Thread(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_THREAD_H_ diff --git a/runtime/native/java_lang_Throwable.cc b/runtime/native/java_lang_Throwable.cc index 3ed4cfe100..cb8a86918f 100644 --- a/runtime/native/java_lang_Throwable.cc +++ b/runtime/native/java_lang_Throwable.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_Throwable.h" + #include "jni_internal.h" #include "scoped_fast_native_object_access.h" #include "thread.h" diff --git a/runtime/native/java_lang_Throwable.h b/runtime/native/java_lang_Throwable.h new file mode 100644 index 0000000000..f9aea84abe --- /dev/null +++ b/runtime/native/java_lang_Throwable.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_Throwable(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_THROWABLE_H_ diff --git a/runtime/native/java_lang_VMClassLoader.cc b/runtime/native/java_lang_VMClassLoader.cc index f6a46bde88..45563d299c 100644 --- a/runtime/native/java_lang_VMClassLoader.cc +++ b/runtime/native/java_lang_VMClassLoader.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_VMClassLoader.h" + #include "class_linker.h" #include "jni_internal.h" #include "mirror/class_loader.h" diff --git a/runtime/native/java_lang_VMClassLoader.h b/runtime/native/java_lang_VMClassLoader.h new file mode 100644 index 0000000000..bf8d94f5a9 --- /dev/null +++ b/runtime/native/java_lang_VMClassLoader.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_VMClassLoader(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_VMCLASSLOADER_H_ diff --git a/runtime/native/java_lang_ref_FinalizerReference.cc b/runtime/native/java_lang_ref_FinalizerReference.cc index ad48ec0a11..0532c359a0 100644 --- a/runtime/native/java_lang_ref_FinalizerReference.cc +++ b/runtime/native/java_lang_ref_FinalizerReference.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_ref_FinalizerReference.h" + #include "gc/heap.h" #include "gc/reference_processor.h" #include "jni_internal.h" diff --git a/runtime/native/java_lang_ref_FinalizerReference.h b/runtime/native/java_lang_ref_FinalizerReference.h new file mode 100644 index 0000000000..848a7aded6 --- /dev/null +++ b/runtime/native/java_lang_ref_FinalizerReference.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_ref_FinalizerReference(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REF_FINALIZERREFERENCE_H_ diff --git a/runtime/native/java_lang_ref_Reference.cc b/runtime/native/java_lang_ref_Reference.cc index 4f04d60232..d2320591b1 100644 --- a/runtime/native/java_lang_ref_Reference.cc +++ b/runtime/native/java_lang_ref_Reference.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_ref_Reference.h" + #include "gc/heap.h" #include "gc/reference_processor.h" #include "jni_internal.h" diff --git a/runtime/native/java_lang_ref_Reference.h b/runtime/native/java_lang_ref_Reference.h new file mode 100644 index 0000000000..0cbf11646d --- /dev/null +++ b/runtime/native/java_lang_ref_Reference.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_ref_Reference(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REF_REFERENCE_H_ diff --git a/runtime/native/java_lang_reflect_Array.cc b/runtime/native/java_lang_reflect_Array.cc index 763a6645b9..1ffcbdfd5e 100644 --- a/runtime/native/java_lang_reflect_Array.cc +++ b/runtime/native/java_lang_reflect_Array.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_reflect_Array.h" + #include "class_linker-inl.h" #include "common_throws.h" #include "dex_file-inl.h" diff --git a/runtime/native/java_lang_reflect_Array.h b/runtime/native/java_lang_reflect_Array.h new file mode 100644 index 0000000000..805bf7992a --- /dev/null +++ b/runtime/native/java_lang_reflect_Array.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_reflect_Array(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_ARRAY_H_ diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc index 0542aeb98a..3121a90d09 100644 --- a/runtime/native/java_lang_reflect_Constructor.cc +++ b/runtime/native/java_lang_reflect_Constructor.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_reflect_Constructor.h" + #include "class_linker.h" #include "jni_internal.h" #include "mirror/art_method.h" diff --git a/runtime/native/java_lang_reflect_Constructor.h b/runtime/native/java_lang_reflect_Constructor.h new file mode 100644 index 0000000000..7baae978e6 --- /dev/null +++ b/runtime/native/java_lang_reflect_Constructor.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_reflect_Constructor(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_CONSTRUCTOR_H_ diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index 1f07336779..a0426205c2 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_reflect_Field.h" + #include "class_linker.h" #include "class_linker-inl.h" #include "common_throws.h" diff --git a/runtime/native/java_lang_reflect_Field.h b/runtime/native/java_lang_reflect_Field.h new file mode 100644 index 0000000000..1739711de8 --- /dev/null +++ b/runtime/native/java_lang_reflect_Field.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_reflect_Field(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_FIELD_H_ diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index f029b16746..9859746563 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ b/runtime/native/java_lang_reflect_Method.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_reflect_Method.h" + #include "class_linker.h" #include "jni_internal.h" #include "mirror/art_method.h" diff --git a/runtime/native/java_lang_reflect_Method.h b/runtime/native/java_lang_reflect_Method.h new file mode 100644 index 0000000000..3a93cd05d9 --- /dev/null +++ b/runtime/native/java_lang_reflect_Method.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_reflect_Method(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_METHOD_H_ diff --git a/runtime/native/java_lang_reflect_Proxy.cc b/runtime/native/java_lang_reflect_Proxy.cc index 07d670d51a..baf8b24207 100644 --- a/runtime/native/java_lang_reflect_Proxy.cc +++ b/runtime/native/java_lang_reflect_Proxy.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_lang_reflect_Proxy.h" + #include "class_linker.h" #include "jni_internal.h" #include "mirror/class_loader.h" diff --git a/runtime/native/java_lang_reflect_Proxy.h b/runtime/native/java_lang_reflect_Proxy.h new file mode 100644 index 0000000000..e25f0f76b6 --- /dev/null +++ b/runtime/native/java_lang_reflect_Proxy.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_ +#define ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_ + +#include <jni.h> + +namespace art { + +void register_java_lang_reflect_Proxy(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_LANG_REFLECT_PROXY_H_ diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc index bf92e1230d..04f0ba0c19 100644 --- a/runtime/native/java_util_concurrent_atomic_AtomicLong.cc +++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "java_util_concurrent_atomic_AtomicLong.h" + #include "atomic.h" #include "jni_internal.h" diff --git a/runtime/native/java_util_concurrent_atomic_AtomicLong.h b/runtime/native/java_util_concurrent_atomic_AtomicLong.h new file mode 100644 index 0000000000..990dc861ff --- /dev/null +++ b/runtime/native/java_util_concurrent_atomic_AtomicLong.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_ +#define ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_ + +#include <jni.h> + +namespace art { + +void register_java_util_concurrent_atomic_AtomicLong(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_JAVA_UTIL_CONCURRENT_ATOMIC_ATOMICLONG_H_ diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc index 8b2aecbbb1..0ab29799a0 100644 --- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc +++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "org_apache_harmony_dalvik_ddmc_DdmServer.h" + #include "base/logging.h" #include "debugger.h" #include "jni_internal.h" diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h new file mode 100644 index 0000000000..9a4645c1aa --- /dev/null +++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_ +#define ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_ + +#include <jni.h> + +namespace art { + +void register_org_apache_harmony_dalvik_ddmc_DdmServer(JNIEnv* env); + +} // namespace art + +#endif // ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMSERVER_H_ diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc index 45ef9ae727..b74430f237 100644 --- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc +++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc @@ -14,6 +14,8 @@ * limitations under the License. */ +#include "org_apache_harmony_dalvik_ddmc_DdmVmInternal.h" + #include "base/logging.h" #include "base/mutex.h" #include "debugger.h" diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h new file mode 100644 index 0000000000..736e4c8793 --- /dev/null +++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h @@ -0,0 +1,28 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h
new file mode 100644
index 0000000000..736e4c8793
--- /dev/null
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
+#define ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_ORG_APACHE_HARMONY_DALVIK_DDMC_DDMVMINTERNAL_H_
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index 65dece04e6..17ebdff996 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include "sun_misc_Unsafe.h"
+
 #include "gc/accounting/card_table-inl.h"
 #include "jni_internal.h"
 #include "mirror/array.h"
diff --git a/runtime/native/sun_misc_Unsafe.h b/runtime/native/sun_misc_Unsafe.h
new file mode 100644
index 0000000000..93194f4fad
--- /dev/null
+++ b/runtime/native/sun_misc_Unsafe.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
+#define ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
+
+#include <jni.h>
+
+namespace art {
+
+void register_sun_misc_Unsafe(JNIEnv* env);
+
+} // namespace art
+
+#endif // ART_RUNTIME_NATIVE_SUN_MISC_UNSAFE_H_
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index f598e27d7b..59922b8c07 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -25,7 +25,7 @@
 namespace art {
 
-const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
+static const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
   ScopedObjectAccess soa(env);
   StackHandleScope<1> scope(soa.Self());
   mirror::ArtMethod* m = soa.DecodeMethod(mid);
@@ -33,7 +33,7 @@ const char* GetMethodShorty(JNIEnv* env, jmethodID mid) {
   return mh.GetShorty();
 }
 
-uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
+static uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
   if (clazz == nullptr)
     return 0;
@@ -56,8 +56,8 @@ uint32_t GetNativeMethodCount(JNIEnv* env, jclass clazz) {
   return native_method_count;
 }
 
-uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
-                          uint32_t method_count) {
+static uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
+                                 uint32_t method_count) {
   if ((clazz == nullptr) || (methods == nullptr)) {
     return 0;
   }
@@ -121,6 +121,8 @@ void PreInitializeNativeBridge(std::string dir) {
     LOG(WARNING) << "Could not create mount namespace.";
   }
   android::PreInitializeNativeBridge(dir.c_str(), GetInstructionSetString(kRuntimeISA));
+#else
+  UNUSED(dir);
 #endif
 }
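Two of the GCC-oriented cleanups show up in native_bridge_art_interface.cc: helpers that are only reached through the function-pointer table in this file become static, and PreInitializeNativeBridge marks its parameter with UNUSED in the configuration whose body does nothing with it. A self-contained illustration of the second idiom; the macro and platform guard below are stand-ins, not ART's actual definitions:

    #include <cstdio>
    #include <string>

    // Stand-in for the UNUSED(...) macro used in the hunk above.
    #define EXAMPLE_UNUSED(x) ((void)(x))

    static void PreInitializeExample(const std::string& dir) {
    #if defined(EXAMPLE_TARGET_ANDROID)  // hypothetical platform guard
      std::printf("preparing code cache under %s\n", dir.c_str());
    #else
      EXAMPLE_UNUSED(dir);  // parameter is intentionally unused off-device
    #endif
    }

    int main() {
      PreInitializeExample("/data/dalvik-cache");
      return 0;
    }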
diff --git a/runtime/nth_caller_visitor.h b/runtime/nth_caller_visitor.h
index 374a80ea28..a851f21e29 100644
--- a/runtime/nth_caller_visitor.h
+++ b/runtime/nth_caller_visitor.h
@@ -26,9 +26,9 @@ class Thread;
 
 // Walks up the stack 'n' callers, when used with Thread::WalkStack.
 struct NthCallerVisitor : public StackVisitor {
-  NthCallerVisitor(Thread* thread, size_t n, bool include_runtime_and_upcalls = false)
-      : StackVisitor(thread, NULL), n(n), include_runtime_and_upcalls_(include_runtime_and_upcalls),
-        count(0), caller(NULL) {}
+  NthCallerVisitor(Thread* thread, size_t n_in, bool include_runtime_and_upcalls = false)
+      : StackVisitor(thread, NULL), n(n_in),
+        include_runtime_and_upcalls_(include_runtime_and_upcalls), count(0), caller(NULL) {}
 
   bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ArtMethod* m = GetMethod();
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index e3bd541f27..1a97c357fa 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -504,35 +504,35 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
     } else if (StartsWith(option, "-verbose:")) {
       std::vector<std::string> verbose_options;
       Split(option.substr(strlen("-verbose:")), ',', &verbose_options);
-      for (size_t i = 0; i < verbose_options.size(); ++i) {
-        if (verbose_options[i] == "class") {
+      for (size_t j = 0; j < verbose_options.size(); ++j) {
+        if (verbose_options[j] == "class") {
           gLogVerbosity.class_linker = true;
-        } else if (verbose_options[i] == "compiler") {
+        } else if (verbose_options[j] == "compiler") {
           gLogVerbosity.compiler = true;
-        } else if (verbose_options[i] == "gc") {
+        } else if (verbose_options[j] == "gc") {
           gLogVerbosity.gc = true;
-        } else if (verbose_options[i] == "heap") {
+        } else if (verbose_options[j] == "heap") {
           gLogVerbosity.heap = true;
-        } else if (verbose_options[i] == "jdwp") {
+        } else if (verbose_options[j] == "jdwp") {
           gLogVerbosity.jdwp = true;
-        } else if (verbose_options[i] == "jni") {
+        } else if (verbose_options[j] == "jni") {
           gLogVerbosity.jni = true;
-        } else if (verbose_options[i] == "monitor") {
+        } else if (verbose_options[j] == "monitor") {
           gLogVerbosity.monitor = true;
-        } else if (verbose_options[i] == "profiler") {
+        } else if (verbose_options[j] == "profiler") {
           gLogVerbosity.profiler = true;
-        } else if (verbose_options[i] == "signals") {
+        } else if (verbose_options[j] == "signals") {
           gLogVerbosity.signals = true;
-        } else if (verbose_options[i] == "startup") {
+        } else if (verbose_options[j] == "startup") {
           gLogVerbosity.startup = true;
-        } else if (verbose_options[i] == "third-party-jni") {
+        } else if (verbose_options[j] == "third-party-jni") {
           gLogVerbosity.third_party_jni = true;
-        } else if (verbose_options[i] == "threads") {
+        } else if (verbose_options[j] == "threads") {
           gLogVerbosity.threads = true;
-        } else if (verbose_options[i] == "verifier") {
+        } else if (verbose_options[j] == "verifier") {
           gLogVerbosity.verifier = true;
         } else {
-          Usage("Unknown -verbose option %s\n", verbose_options[i].c_str());
+          Usage("Unknown -verbose option %s\n", verbose_options[j].c_str());
           return false;
         }
       }
diff --git a/runtime/reflection_test.cc b/runtime/reflection_test.cc
index f8e0f47130..eca1800c16 100644
--- a/runtime/reflection_test.cc
+++ b/runtime/reflection_test.cc
@@ -115,8 +115,8 @@ class ReflectionTest : public CommonCompilerTest {
       *receiver = nullptr;
     } else {
       // Ensure class is initialized before allocating object
-      StackHandleScope<1> hs(self);
-      Handle<mirror::Class> h_class(hs.NewHandle(c));
+      StackHandleScope<1> hs2(self);
+      Handle<mirror::Class> h_class(hs2.NewHandle(c));
       bool initialized = class_linker_->EnsureInitialized(self, h_class, true, true);
       CHECK(initialized);
       *receiver = c->AllocObject(self);
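The _in suffixes here, and in the later stack.cc and thread.cc hunks, are -Wshadow fixes: a constructor parameter that reuses a member's name shadows that member for the rest of the constructor, which GCC reports once the warning is enabled. Renaming the parameter keeps the member names and the initializer list unchanged. A minimal reproduction with hypothetical names (compile with -Wshadow to see the difference):

    // g++ -Wshadow -c shadow_example.cc
    struct CallerVisitor {
      // Before: 'explicit CallerVisitor(unsigned n)' -- the parameter 'n' shadows
      // the member 'n', so 'n' in the constructor body silently means the parameter.
      explicit CallerVisitor(unsigned n_in) : n(n_in), count(0) {}

      unsigned n;
      unsigned count;
    };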
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 4ac9634158..1cda29bcd2 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -69,6 +69,31 @@
 #include "mirror/throwable.h"
 #include "monitor.h"
 #include "native_bridge_art_interface.h"
+#include "native/dalvik_system_DexFile.h"
+#include "native/dalvik_system_VMDebug.h"
+#include "native/dalvik_system_VMRuntime.h"
+#include "native/dalvik_system_VMStack.h"
+#include "native/dalvik_system_ZygoteHooks.h"
+#include "native/java_lang_Class.h"
+#include "native/java_lang_DexCache.h"
+#include "native/java_lang_Object.h"
+#include "native/java_lang_ref_FinalizerReference.h"
+#include "native/java_lang_reflect_Array.h"
+#include "native/java_lang_reflect_Constructor.h"
+#include "native/java_lang_reflect_Field.h"
+#include "native/java_lang_reflect_Method.h"
+#include "native/java_lang_reflect_Proxy.h"
+#include "native/java_lang_ref_Reference.h"
+#include "native/java_lang_Runtime.h"
+#include "native/java_lang_String.h"
+#include "native/java_lang_System.h"
+#include "native/java_lang_Thread.h"
+#include "native/java_lang_Throwable.h"
+#include "native/java_lang_VMClassLoader.h"
+#include "native/java_util_concurrent_atomic_AtomicLong.h"
+#include "native/org_apache_harmony_dalvik_ddmc_DdmServer.h"
+#include "native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.h"
+#include "native/sun_misc_Unsafe.h"
 #include "parsed_options.h"
 #include "oat_file.h"
 #include "os.h"
@@ -344,7 +369,7 @@ bool Runtime::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
   return true;
 }
 
-jobject CreateSystemClassLoader() {
+static jobject CreateSystemClassLoader() {
   if (Runtime::Current()->UseCompileTimeClassPath()) {
     return NULL;
   }
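CreateSystemClassLoader, like the GetMethodShorty group earlier, is only called from its own translation unit, so it gains internal linkage rather than a header declaration; this is also the direction the commented-out -Wmissing-declarations note in Android.common_build.mk points. A stand-alone illustration of the same choice, with a hypothetical helper that is not from the patch:

    // A file-local helper: 'static' documents that nothing outside this
    // translation unit calls it, and no external declaration is expected.
    static int Square(int x) {
      return x * x;
    }

    int main() {
      return Square(3) == 9 ? 0 : 1;
    }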
"/bin/patchoatd" : "/bin/patchoat"); + return patchoat_executable; } std::string Runtime::GetCompilerExecutable() const { @@ -969,34 +994,31 @@ jobject Runtime::GetSystemClassLoader() const { } void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) { -#define REGISTER(FN) extern void FN(JNIEnv*); FN(env) - // Register Throwable first so that registration of other native methods can throw exceptions - REGISTER(register_java_lang_Throwable); - REGISTER(register_dalvik_system_DexFile); - REGISTER(register_dalvik_system_VMDebug); - REGISTER(register_dalvik_system_VMRuntime); - REGISTER(register_dalvik_system_VMStack); - REGISTER(register_dalvik_system_ZygoteHooks); - REGISTER(register_java_lang_Class); - REGISTER(register_java_lang_DexCache); - REGISTER(register_java_lang_Object); - REGISTER(register_java_lang_Runtime); - REGISTER(register_java_lang_String); - REGISTER(register_java_lang_System); - REGISTER(register_java_lang_Thread); - REGISTER(register_java_lang_VMClassLoader); - REGISTER(register_java_lang_ref_FinalizerReference); - REGISTER(register_java_lang_ref_Reference); - REGISTER(register_java_lang_reflect_Array); - REGISTER(register_java_lang_reflect_Constructor); - REGISTER(register_java_lang_reflect_Field); - REGISTER(register_java_lang_reflect_Method); - REGISTER(register_java_lang_reflect_Proxy); - REGISTER(register_java_util_concurrent_atomic_AtomicLong); - REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmServer); - REGISTER(register_org_apache_harmony_dalvik_ddmc_DdmVmInternal); - REGISTER(register_sun_misc_Unsafe); -#undef REGISTER + register_dalvik_system_DexFile(env); + register_dalvik_system_VMDebug(env); + register_dalvik_system_VMRuntime(env); + register_dalvik_system_VMStack(env); + register_dalvik_system_ZygoteHooks(env); + register_java_lang_Class(env); + register_java_lang_DexCache(env); + register_java_lang_Object(env); + register_java_lang_ref_FinalizerReference(env); + register_java_lang_reflect_Array(env); + register_java_lang_reflect_Constructor(env); + register_java_lang_reflect_Field(env); + register_java_lang_reflect_Method(env); + register_java_lang_reflect_Proxy(env); + register_java_lang_ref_Reference(env); + register_java_lang_Runtime(env); + register_java_lang_String(env); + register_java_lang_System(env); + register_java_lang_Thread(env); + register_java_lang_Throwable(env); + register_java_lang_VMClassLoader(env); + register_java_util_concurrent_atomic_AtomicLong(env); + register_org_apache_harmony_dalvik_ddmc_DdmServer(env); + register_org_apache_harmony_dalvik_ddmc_DdmVmInternal(env); + register_sun_misc_Unsafe(env); } void Runtime::DumpForSigQuit(std::ostream& os) { diff --git a/runtime/stack.cc b/runtime/stack.cc index 0adf0313ff..44086096f0 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -409,8 +409,8 @@ void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) { size_t StackVisitor::ComputeNumFrames(Thread* thread) { struct NumFramesVisitor : public StackVisitor { - explicit NumFramesVisitor(Thread* thread) - : StackVisitor(thread, NULL), frames(0) {} + explicit NumFramesVisitor(Thread* thread_in) + : StackVisitor(thread_in, NULL), frames(0) {} bool VisitFrame() OVERRIDE { frames++; @@ -461,8 +461,8 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32 void StackVisitor::DescribeStack(Thread* thread) { struct DescribeStackVisitor : public StackVisitor { - explicit DescribeStackVisitor(Thread* thread) - : StackVisitor(thread, NULL) {} + explicit DescribeStackVisitor(Thread* thread_in) + : 
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 0adf0313ff..44086096f0 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -409,8 +409,8 @@ void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
 
 size_t StackVisitor::ComputeNumFrames(Thread* thread) {
   struct NumFramesVisitor : public StackVisitor {
-    explicit NumFramesVisitor(Thread* thread)
-        : StackVisitor(thread, NULL), frames(0) {}
+    explicit NumFramesVisitor(Thread* thread_in)
+        : StackVisitor(thread_in, NULL), frames(0) {}
 
     bool VisitFrame() OVERRIDE {
       frames++;
@@ -461,8 +461,8 @@ bool StackVisitor::GetNextMethodAndDexPc(mirror::ArtMethod** next_method, uint32
 
 void StackVisitor::DescribeStack(Thread* thread) {
   struct DescribeStackVisitor : public StackVisitor {
-    explicit DescribeStackVisitor(Thread* thread)
-        : StackVisitor(thread, NULL) {}
+    explicit DescribeStackVisitor(Thread* thread_in)
+        : StackVisitor(thread_in, NULL) {}
 
     bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
       LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
diff --git a/runtime/thread.cc b/runtime/thread.cc
index da82c766f0..7d24562389 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -837,10 +837,11 @@ void Thread::DumpState(std::ostream& os) const {
 }
 
 struct StackDumpVisitor : public StackVisitor {
-  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
+  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
-        last_method(nullptr), last_line_number(0), repetition_count(0), frame_count(0) {
+      : StackVisitor(thread_in, context), os(os_in), thread(thread_in),
+        can_allocate(can_allocate_in), last_method(nullptr), last_line_number(0),
+        repetition_count(0), frame_count(0) {
   }
 
   virtual ~StackDumpVisitor() {
@@ -2151,7 +2152,6 @@ class ReferenceMapVisitor : public StackVisitor {
       const VmapTable vmap_table(m->GetVmapTable(code_pointer));
       QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
       // For all dex registers in the bitmap
-      StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
       DCHECK(cur_quick_frame != nullptr);
       for (size_t reg = 0; reg < num_regs; ++reg) {
         // Does this register hold a reference?
diff --git a/runtime/thread.h b/runtime/thread.h
index c243413fed..89aee04e5d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -1193,7 +1193,6 @@ class ScopedAssertNoThreadSuspension {
 };
 
 std::ostream& operator<<(std::ostream& os, const Thread& thread);
-std::ostream& operator<<(std::ostream& os, const ThreadState& state);
 
 }  // namespace art
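The one-line deletion in thread.h is a -Wredundant-decls fix: the stream operator for ThreadState is already declared alongside the ThreadState definition (presumably in thread_state.h), so the copy in thread.h repeated a declaration that was already in scope for every includer. The warning is easy to reproduce with illustrative types:

    // g++ -Wredundant-decls -c redundant_example.cc
    #include <iosfwd>

    enum class ExampleState { kRunnable, kSleeping };

    std::ostream& operator<<(std::ostream& os, const ExampleState& state);
    // Declaring it again adds nothing and is what -Wredundant-decls reports:
    std::ostream& operator<<(std::ostream& os, const ExampleState& state);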
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index e3ef4eb26e..2181e29259 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -515,7 +515,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
     // than request thread suspension, to avoid potential cycles in threads requesting each other
     // suspend.
     ScopedObjectAccess soa(self);
-    MutexLock mu(self, *Locks::thread_list_lock_);
+    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
     thread = Thread::FromManagedThread(soa, peer);
     if (thread == nullptr) {
       ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
@@ -528,7 +528,7 @@ Thread* ThreadList::SuspendThreadByPeer(jobject peer, bool request_suspension,
     }
     VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
     {
-      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
       if (request_suspension) {
         thread->ModifySuspendCount(self, +1, debug_suspension);
         request_suspension = false;
@@ -588,7 +588,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
     // than request thread suspension, to avoid potential cycles in threads requesting each other
     // suspend.
     ScopedObjectAccess soa(self);
-    MutexLock mu(self, *Locks::thread_list_lock_);
+    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
     Thread* thread = nullptr;
     for (const auto& it : list_) {
       if (it->GetThreadId() == thread_id) {
@@ -606,7 +606,7 @@ Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id, bool debug_suspe
     VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
     DCHECK(Contains(thread));
     {
-      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
       if (suspended_thread == nullptr) {
         thread->ModifySuspendCount(self, +1, debug_suspension);
         suspended_thread = thread;
@@ -662,9 +662,9 @@ void ThreadList::SuspendAllForDebugger() {
   VLOG(threads) << *self << " SuspendAllForDebugger starting...";
 
   {
-    MutexLock mu(self, *Locks::thread_list_lock_);
+    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
     {
-      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
       // Update global suspend all state for attaching threads.
       DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
       ++suspend_all_count_;
@@ -769,9 +769,9 @@ void ThreadList::ResumeAllForDebugger() {
   Locks::mutator_lock_->AssertNotExclusiveHeld(self);
 
   {
-    MutexLock mu(self, *Locks::thread_list_lock_);
+    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
     {
-      MutexLock mu(self, *Locks::thread_suspend_count_lock_);
+      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
       // Update global suspend all state for attaching threads.
       DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
       needs_resume = (debug_suspend_all_count_ > 0);
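In the thread_list.cc hunks both nested guards were named mu, so the inner MutexLock shadowed the outer one; under -Wshadow that is flagged like any other nested redefinition, and the new names also say which lock each guard holds. A minimal reproduction with standard-library stand-ins for ART's Mutex and MutexLock:

    // g++ -Wshadow -c nested_guard_example.cc
    #include <mutex>

    std::mutex thread_list_lock;
    std::mutex suspend_count_lock;

    void Example() {
      std::lock_guard<std::mutex> thread_list_mu(thread_list_lock);
      {
        // Naming this guard 'thread_list_mu' as well would shadow the outer
        // guard, which is the pattern -Wshadow flagged in the original code.
        std::lock_guard<std::mutex> suspend_count_mu(suspend_count_lock);
      }
    }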
diff --git a/runtime/thread_pool.cc b/runtime/thread_pool.cc
index a7f2ecdd81..587eb320cd 100644
--- a/runtime/thread_pool.cc
+++ b/runtime/thread_pool.cc
@@ -89,8 +89,9 @@ ThreadPool::ThreadPool(const char* name, size_t num_threads)
       max_active_workers_(num_threads) {
   Thread* self = Thread::Current();
   while (GetThreadCount() < num_threads) {
-    const std::string name = StringPrintf("%s worker thread %zu", name_.c_str(), GetThreadCount());
-    threads_.push_back(new ThreadPoolWorker(this, name, ThreadPoolWorker::kDefaultStackSize));
+    const std::string worker_name = StringPrintf("%s worker thread %zu", name_.c_str(),
+                                                 GetThreadCount());
+    threads_.push_back(new ThreadPoolWorker(this, worker_name, ThreadPoolWorker::kDefaultStackSize));
   }
   // Wait for all of the threads to attach.
   creation_barier_.Wait(self);
@@ -279,8 +280,9 @@ WorkStealingThreadPool::WorkStealingThreadPool(const char* name, size_t num_thre
       work_steal_lock_("work stealing lock"),
       steal_index_(0) {
   while (GetThreadCount() < num_threads) {
-    const std::string name = StringPrintf("Work stealing worker %zu", GetThreadCount());
-    threads_.push_back(new WorkStealingWorker(this, name, ThreadPoolWorker::kDefaultStackSize));
+    const std::string worker_name = StringPrintf("Work stealing worker %zu", GetThreadCount());
+    threads_.push_back(new WorkStealingWorker(this, worker_name,
+                                              ThreadPoolWorker::kDefaultStackSize));
   }
 }
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 9c94f6cb01..11c610bf7f 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1109,8 +1109,8 @@ std::string GetSchedulerGroupName(pid_t tid) {
     Split(cgroup_lines[i], ':', &cgroup_fields);
     std::vector<std::string> cgroups;
     Split(cgroup_fields[1], ',', &cgroups);
-    for (size_t i = 0; i < cgroups.size(); ++i) {
-      if (cgroups[i] == "cpu") {
+    for (size_t j = 0; j < cgroups.size(); ++j) {
+      if (cgroups[j] == "cpu") {
        return cgroup_fields[2].substr(1);  // Skip the leading slash.
      }
    }
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 7380a50a5a..2be47d17d0 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -1191,11 +1191,6 @@ std::ostream& MethodVerifier::DumpFailures(std::ostream& os) {
   return os;
 }
 
-extern "C" void MethodVerifierGdbDump(MethodVerifier* v)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  v->Dump(std::cerr);
-}
-
 void MethodVerifier::Dump(std::ostream& os) {
   if (code_item_ == nullptr) {
     os << "Native method\n";
diff --git a/sigchainlib/sigchain_dummy.cc b/sigchainlib/sigchain_dummy.cc
index 0d62a7abad..76779ab29d 100644
--- a/sigchainlib/sigchain_dummy.cc
+++ b/sigchainlib/sigchain_dummy.cc
@@ -41,6 +41,9 @@ static void log(const char* format, ...) {
   va_end(ap);
 }
 
+namespace art {
+
+
 extern "C" void ClaimSignalChain(int signal ATTRIBUTE_UNUSED,
                                  struct sigaction* oldaction ATTRIBUTE_UNUSED) {
   log("ClaimSignalChain is not exported by the main executable.");
@@ -69,3 +72,5 @@ extern "C" void EnsureFrontOfChain(int signal ATTRIBUTE_UNUSED,
   log("EnsureFrontOfChain is not exported by the main executable.");
   abort();
 }
+
+}  // namespace art
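The sigchain stubs also show the counterpart to the newly enabled -Wunused-but-set-parameter: parameters a stub never reads are tagged ATTRIBUTE_UNUSED rather than removed, keeping the signatures identical to the real sigchain implementation. A self-contained sketch of the idiom, with a local macro standing in for ART's attribute wrapper:

    #include <cstdio>
    #include <cstdlib>

    struct sigaction;  // forward declaration is enough for a pointer parameter

    // Stand-in for ATTRIBUTE_UNUSED as used in sigchain_dummy.cc above.
    #if defined(__GNUC__)
    #define EXAMPLE_ATTRIBUTE_UNUSED __attribute__((__unused__))
    #else
    #define EXAMPLE_ATTRIBUTE_UNUSED
    #endif

    extern "C" void ClaimSignalChainStub(int signal EXAMPLE_ATTRIBUTE_UNUSED,
                                         struct sigaction* oldaction EXAMPLE_ATTRIBUTE_UNUSED) {
      // The stub ignores its arguments; the attribute records that this is
      // deliberate and keeps unused-parameter warnings quiet.
      std::fprintf(stderr, "ClaimSignalChain is not exported by the main executable.\n");
      std::abort();
    }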