author    | Jeff Hao <jeffhao@google.com> | 2014-01-15 13:49:50 -0800
committer | Jeff Hao <jeffhao@google.com> | 2015-04-27 18:54:52 -0700
commit    | 848f70a3d73833fc1bf3032a9ff6812e429661d9 (patch)
tree      | b0349b3a40aab5a915af491b100659a5ca9fbbf6 /compiler/optimizing
parent    | d14438f0c5071962be7fab572b54687d32d9d087 (diff)
download  | android_art-848f70a3d73833fc1bf3032a9ff6812e429661d9.tar.gz
          | android_art-848f70a3d73833fc1bf3032a9ff6812e429661d9.tar.bz2
          | android_art-848f70a3d73833fc1bf3032a9ff6812e429661d9.zip
Replace String CharArray with internal uint16_t array.
Summary of high-level changes:
- Adds compiler inliner support to identify string init methods.
- Adds compiler support (Quick & Optimizing) with a new invoke code path
  that calls the method off the thread pointer.
- Adds thread entrypoints for all string init methods.
- Adds a map to the verifier that records when the receiver of a string init
  has been copied to other registers; used by the compiler and interpreter.
Change-Id: I797b992a8feb566f9ad73060011ab6f51eb7ce01
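Conceptually, the change compiles the dex new-instance / String.<init> pair as a single static factory call: the allocation becomes a null constant, the constructor invoke becomes a static invoke dispatched through an entrypoint stored on the Thread object, and the factory's return value is written back into every register the verifier saw holding the original receiver. A minimal, self-contained Java sketch of the source-level idea follows; the StringFactory class below is a hypothetical stand-in for libcore's factory, not ART code:

```java
// Hypothetical stand-in for the string factory; the real one lives in libcore
// and allocates the uint16_t-backed String directly inside the runtime.
final class StringFactory {
  static String newStringFromChars(char[] data) {
    return new String(data);  // placeholder body, for the sketch only
  }
}

class StringInitExample {
  // Source form: new-instance of String followed by an invoke of String.<init>.
  static String before(char[] data) {
    return new String(data);
  }

  // Compiled form after this change (conceptually): the new-instance is
  // replaced by a null constant, and the <init> call becomes a static factory
  // invoke whose result replaces the receiver wherever it had been copied.
  static String after(char[] data) {
    return StringFactory.newStringFromChars(data);
  }
}
```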
Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/builder.cc               | 58
-rw-r--r-- | compiler/optimizing/code_generator_arm.cc    | 31
-rw-r--r-- | compiler/optimizing/code_generator_arm64.cc  | 30
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc    | 25
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 24
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.h  | 2
-rw-r--r-- | compiler/optimizing/intrinsics.cc            | 13
-rw-r--r-- | compiler/optimizing/intrinsics_arm.cc        | 91
-rw-r--r-- | compiler/optimizing/intrinsics_arm64.cc      | 95
-rw-r--r-- | compiler/optimizing/intrinsics_list.h        | 4
-rw-r--r-- | compiler/optimizing/intrinsics_x86.cc        | 93
-rw-r--r-- | compiler/optimizing/intrinsics_x86_64.cc     | 87
-rw-r--r-- | compiler/optimizing/nodes.h                  | 9
13 files changed, 467 insertions, 95 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 96e08fd24c..a883bd0149 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -19,6 +19,7 @@
 #include "art_field-inl.h"
 #include "base/logging.h"
 #include "class_linker.h"
+#include "dex/verified_method.h"
 #include "dex_file-inl.h"
 #include "dex_instruction-inl.h"
 #include "dex/verified_method.h"
@@ -612,6 +613,16 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
       HInvokeStaticOrDirect::ClinitCheckRequirement::kImplicit;
   // Potential class initialization check, in the case of a static method call.
   HClinitCheck* clinit_check = nullptr;
+  // Replace calls to String.<init> with StringFactory.
+  int32_t string_init_offset = 0;
+  bool is_string_init = compiler_driver_->IsStringInit(method_idx, dex_file_, &string_init_offset);
+  if (is_string_init) {
+    return_type = Primitive::kPrimNot;
+    is_instance_call = false;
+    number_of_arguments--;
+    invoke_type = kStatic;
+    optimized_invoke_type = kStatic;
+  }

   HInvoke* invoke = nullptr;
@@ -698,7 +709,8 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
     invoke = new (arena_) HInvokeStaticOrDirect(
         arena_, number_of_arguments, return_type, dex_pc, target_method.dex_method_index,
-        is_recursive, invoke_type, optimized_invoke_type, clinit_check_requirement);
+        is_recursive, string_init_offset, invoke_type, optimized_invoke_type,
+        clinit_check_requirement);
   }

   size_t start_index = 0;
@@ -714,6 +726,9 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
   uint32_t descriptor_index = 1;
   uint32_t argument_index = start_index;
+  if (is_string_init) {
+    start_index = 1;
+  }
   for (size_t i = start_index; i < number_of_vreg_arguments; i++, argument_index++) {
     Primitive::Type type = Primitive::GetType(descriptor[descriptor_index++]);
     bool is_wide = (type == Primitive::kPrimLong) || (type == Primitive::kPrimDouble);
@@ -740,6 +755,28 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
   DCHECK_EQ(argument_index, number_of_arguments);
   current_block_->AddInstruction(invoke);
   latest_result_ = invoke;
+
+  // Add move-result for StringFactory method.
+  if (is_string_init) {
+    uint32_t orig_this_reg = is_range ? register_index : args[0];
+    const VerifiedMethod* verified_method =
+        compiler_driver_->GetVerifiedMethod(dex_file_, dex_compilation_unit_->GetDexMethodIndex());
+    if (verified_method == nullptr) {
+      LOG(WARNING) << "No verified method for method calling String.<init>: "
+                   << PrettyMethod(dex_compilation_unit_->GetDexMethodIndex(), *dex_file_);
+      return false;
+    }
+    const SafeMap<uint32_t, std::set<uint32_t>>& string_init_map =
+        verified_method->GetStringInitPcRegMap();
+    auto map_it = string_init_map.find(dex_pc);
+    if (map_it != string_init_map.end()) {
+      std::set<uint32_t> reg_set = map_it->second;
+      for (auto set_it = reg_set.begin(); set_it != reg_set.end(); ++set_it) {
+        UpdateLocal(*set_it, invoke);
+      }
+    }
+    UpdateLocal(orig_this_reg, invoke);
+  }
   return true;
 }
@@ -1916,12 +1953,19 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
     case Instruction::NEW_INSTANCE: {
       uint16_t type_index = instruction.VRegB_21c();
-      QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
-          ? kQuickAllocObjectWithAccessCheck
-          : kQuickAllocObject;
-
-      current_block_->AddInstruction(new (arena_) HNewInstance(dex_pc, type_index, entrypoint));
-      UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+      if (compiler_driver_->IsStringTypeIndex(type_index, dex_file_)) {
+        // Turn new-instance of string into a const 0.
+        int32_t register_index = instruction.VRegA();
+        HNullConstant* constant = graph_->GetNullConstant();
+        UpdateLocal(register_index, constant);
+      } else {
+        QuickEntrypointEnum entrypoint = NeedsAccessCheck(type_index)
+            ? kQuickAllocObjectWithAccessCheck
+            : kQuickAllocObject;
+
+        current_block_->AddInstruction(new (arena_) HNewInstance(dex_pc, type_index, entrypoint));
+        UpdateLocal(instruction.VRegA(), current_block_->GetLastInstruction());
+      }
       break;
     }
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 01748a9f5c..bcdfccdd16 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -4071,15 +4071,9 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
   //
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
-  // temp = method;
-  LoadCurrentMethod(temp);
-  if (!invoke->IsRecursive()) {
-    // temp = temp->dex_cache_resolved_methods_;
-    __ LoadFromOffset(
-        kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
-    // temp = temp[index_in_cache]
-    __ LoadFromOffset(
-        kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+  if (invoke->IsStringInit()) {
+    // temp = thread->string_init_entrypoint
+    __ LoadFromOffset(kLoadWord, temp, TR, invoke->GetStringInitOffset());
     // LR = temp[offset_of_quick_compiled_code]
     __ LoadFromOffset(kLoadWord, LR, temp,
                       mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
@@ -4087,7 +4081,24 @@ void CodeGeneratorARM::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
     // LR()
     __ blx(LR);
   } else {
-    __ bl(GetFrameEntryLabel());
+    // temp = method;
+    LoadCurrentMethod(temp);
+    if (!invoke->IsRecursive()) {
+      // temp = temp->dex_cache_resolved_methods_;
+      __ LoadFromOffset(
+          kLoadWord, temp, temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+      // temp = temp[index_in_cache]
+      __ LoadFromOffset(
+          kLoadWord, temp, temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex()));
+      // LR = temp[offset_of_quick_compiled_code]
+      __ LoadFromOffset(kLoadWord, LR, temp,
+                        mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+                            kArmWordSize).Int32Value());
+      // LR()
+      __ blx(LR);
+    } else {
+      __ bl(GetFrameEntryLabel());
+    }
   }

   DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index dada4ce5bd..0d963d73b4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -2006,20 +2006,30 @@ void CodeGeneratorARM64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invok
   //
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
-  // temp = method;
-  LoadCurrentMethod(temp);
-  if (!invoke->IsRecursive()) {
-    // temp = temp->dex_cache_resolved_methods_;
-    __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
-    // temp = temp[index_in_cache];
-    __ Ldr(temp, HeapOperand(temp, index_in_cache));
-    // lr = temp->entry_point_from_quick_compiled_code_;
+  if (invoke->IsStringInit()) {
+    // temp = thread->string_init_entrypoint
+    __ Ldr(temp, HeapOperand(tr, invoke->GetStringInitOffset()));
+    // LR = temp->entry_point_from_quick_compiled_code_;
     __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
         kArm64WordSize)));
-    // lr();
+    // lr()
     __ Blr(lr);
   } else {
-    __ Bl(&frame_entry_label_);
+    // temp = method;
+    LoadCurrentMethod(temp);
+    if (!invoke->IsRecursive()) {
+      // temp = temp->dex_cache_resolved_methods_;
+      __ Ldr(temp, HeapOperand(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset()));
+      // temp = temp[index_in_cache];
+      __ Ldr(temp, HeapOperand(temp, index_in_cache));
+      // lr = temp->entry_point_from_quick_compiled_code_;
+      __ Ldr(lr, HeapOperand(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+          kArm64WordSize)));
+      // lr();
+      __ Blr(lr);
+    } else {
+      __ Bl(&frame_entry_label_);
+    }
   }

   DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 04999bedb0..a037040ae4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -3114,18 +3114,27 @@ void CodeGeneratorX86::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invoke,
   // 3) app -> app
   //
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
-  // temp = method;
-  LoadCurrentMethod(temp);
-  if (!invoke->IsRecursive()) {
-    // temp = temp->dex_cache_resolved_methods_;
-    __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
-    // temp = temp[index_in_cache]
-    __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+
+  if (invoke->IsStringInit()) {
+    // temp = thread->string_init_entrypoint
+    __ fs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
     // (temp + offset_of_quick_compiled_code)()
     __ call(Address(
         temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
   } else {
-    __ call(GetFrameEntryLabel());
+    // temp = method;
+    LoadCurrentMethod(temp);
+    if (!invoke->IsRecursive()) {
+      // temp = temp->dex_cache_resolved_methods_;
+      __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value()));
+      // temp = temp[index_in_cache]
+      __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+      // (temp + offset_of_quick_compiled_code)()
+      __ call(Address(temp,
+          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kX86WordSize).Int32Value()));
+    } else {
+      __ call(GetFrameEntryLabel());
+    }
   }

   DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5ce932928b..f175283885 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -366,18 +366,26 @@ void CodeGeneratorX86_64::GenerateStaticOrDirectCall(HInvokeStaticOrDirect* invo
   //
   // Currently we implement the app -> app logic, which looks up in the resolve cache.
-  // temp = method;
-  LoadCurrentMethod(temp);
-  if (!invoke->IsRecursive()) {
-    // temp = temp->dex_cache_resolved_methods_;
-    __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
-    // temp = temp[index_in_cache]
-    __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+  if (invoke->IsStringInit()) {
+    // temp = thread->string_init_entrypoint
+    __ gs()->movl(temp, Address::Absolute(invoke->GetStringInitOffset()));
     // (temp + offset_of_quick_compiled_code)()
     __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
         kX86_64WordSize).SizeValue()));
   } else {
-    __ call(&frame_entry_label_);
+    // temp = method;
+    LoadCurrentMethod(temp);
+    if (!invoke->IsRecursive()) {
+      // temp = temp->dex_cache_resolved_methods_;
+      __ movl(temp, Address(temp, mirror::ArtMethod::DexCacheResolvedMethodsOffset().SizeValue()));
+      // temp = temp[index_in_cache]
+      __ movl(temp, Address(temp, CodeGenerator::GetCacheOffset(invoke->GetDexMethodIndex())));
+      // (temp + offset_of_quick_compiled_code)()
+      __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+          kX86_64WordSize).SizeValue()));
+    } else {
+      __ call(&frame_entry_label_);
+    }
   }

   DCHECK(!IsLeafMethod());
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 6cdc82262c..e3fd5d7ec1 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -37,7 +37,7 @@ static constexpr FloatRegister kParameterFloatRegisters[] =
 static constexpr size_t kParameterCoreRegistersLength = arraysize(kParameterCoreRegisters);
 static constexpr size_t kParameterFloatRegistersLength = arraysize(kParameterFloatRegisters);

-static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX };
+static constexpr Register kRuntimeParameterCoreRegisters[] = { RDI, RSI, RDX, RCX };
 static constexpr size_t kRuntimeParameterCoreRegistersLength =
     arraysize(kRuntimeParameterCoreRegisters);

 static constexpr FloatRegister kRuntimeParameterFpuRegisters[] = { XMM0, XMM1 };
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 20aa45f197..5d3db5c6f2 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -186,6 +186,8 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
       return Intrinsics::kStringCharAt;
     case kIntrinsicCompareTo:
       return Intrinsics::kStringCompareTo;
+    case kIntrinsicGetCharsNoCheck:
+      return Intrinsics::kStringGetCharsNoCheck;
    case kIntrinsicIsEmptyOrLength:
      // The inliner can handle these two cases - and this is the preferred approach
      // since after inlining the call is no longer visible (as opposed to waiting
@@ -194,6 +196,12 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
     case kIntrinsicIndexOf:
       return ((method.d.data & kIntrinsicFlagBase0) == 0) ?
           Intrinsics::kStringIndexOfAfter : Intrinsics::kStringIndexOf;
+    case kIntrinsicNewStringFromBytes:
+      return Intrinsics::kStringNewStringFromBytes;
+    case kIntrinsicNewStringFromChars:
+      return Intrinsics::kStringNewStringFromChars;
+    case kIntrinsicNewStringFromString:
+      return Intrinsics::kStringNewStringFromString;

     case kIntrinsicCas:
       switch (GetType(method.d.data, false)) {
@@ -280,6 +288,11 @@ static Intrinsics GetIntrinsic(InlineMethod method) {
     case kInlineOpIPut:
       return Intrinsics::kNone;

+    // String init cases, not intrinsics.
+
+    case kInlineStringInit:
+      return Intrinsics::kNone;
+
     // No default case to make the compiler warn on missing cases.
   }
   return Intrinsics::kNone;
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index abdf04ebb1..27d2d43e17 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -48,7 +48,7 @@ static void MoveFromReturnRegister(Location trg, Primitive::Type type, CodeGener
   DCHECK_NE(type, Primitive::kPrimVoid);

-  if (Primitive::IsIntegralType(type)) {
+  if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
     if (type == Primitive::kPrimLong) {
       Register trg_reg_lo = trg.AsRegisterPairLow<Register>();
       Register trg_reg_hi = trg.AsRegisterPairHigh<Register>();
@@ -810,10 +810,6 @@ void IntrinsicCodeGeneratorARM::VisitStringCharAt(HInvoke* invoke) {
   const MemberOffset value_offset = mirror::String::ValueOffset();
   // Location of count
   const MemberOffset count_offset = mirror::String::CountOffset();
-  // Starting offset within data array
-  const MemberOffset offset_offset = mirror::String::OffsetOffset();
-  // Start of char data with array_
-  const MemberOffset data_offset = mirror::Array::DataOffset(sizeof(uint16_t));

   Register obj = locations->InAt(0).AsRegister<Register>();  // String object pointer.
   Register idx = locations->InAt(1).AsRegister<Register>();  // Index of character.
@@ -835,15 +831,10 @@ void IntrinsicCodeGeneratorARM::VisitStringCharAt(HInvoke* invoke) {
   __ cmp(idx, ShifterOperand(temp));
   __ b(slow_path->GetEntryLabel(), CS);

-  // Index computation.
-  __ ldr(temp, Address(obj, offset_offset.Int32Value()));       // temp := str.offset.
-  __ ldr(array_temp, Address(obj, value_offset.Int32Value()));  // array_temp := str.offset.
-  __ add(temp, temp, ShifterOperand(idx));
-  DCHECK_EQ(data_offset.Int32Value() % 2, 0);  // We'll compensate by shifting.
-  __ add(temp, temp, ShifterOperand(data_offset.Int32Value() / 2));
+  __ add(array_temp, obj, ShifterOperand(value_offset.Int32Value()));  // array_temp := str.value.

   // Load the value.
-  __ ldrh(out, Address(array_temp, temp, LSL, 1));  // out := array_temp[temp].
+  __ ldrh(out, Address(array_temp, idx, LSL, 1));   // out := array_temp[idx].

   __ Bind(slow_path->GetExitLabel());
 }
@@ -878,6 +869,81 @@ void IntrinsicCodeGeneratorARM::VisitStringCompareTo(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }

+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+  locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register byte_array = locations->InAt(0).AsRegister<Register>();
+  __ cmp(byte_array, ShifterOperand(0));
+  SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ b(slow_path->GetEntryLabel(), EQ);
+
+  __ LoadFromOffset(
+      kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromBytes).Int32Value());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ blx(LR);
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromChars(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromChars(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+
+  __ LoadFromOffset(
+      kLoadWord, LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromChars).Int32Value());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ blx(LR);
+}
+
+void IntrinsicLocationsBuilderARM::VisitStringNewStringFromString(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetOut(Location::RegisterLocation(R0));
+}
+
+void IntrinsicCodeGeneratorARM::VisitStringNewStringFromString(HInvoke* invoke) {
+  ArmAssembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+  __ cmp(string_to_copy, ShifterOperand(0));
+  SlowPathCodeARM* slow_path = new (GetAllocator()) IntrinsicSlowPathARM(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ b(slow_path->GetEntryLabel(), EQ);
+
+  __ LoadFromOffset(kLoadWord,
+      LR, TR, QUICK_ENTRYPOINT_OFFSET(kArmWordSize, pAllocStringFromString).Int32Value());
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ blx(LR);
+  __ Bind(slow_path->GetExitLabel());
+}
+
 // Unimplemented intrinsics.

 #define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -907,6 +973,7 @@ UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(StringIndexOf)
 UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)

 }  // namespace arm
 }  // namespace art
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 7a753b2da9..4f008e7537 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -75,7 +75,7 @@ static void MoveFromReturnRegister(Location trg,
   DCHECK_NE(type, Primitive::kPrimVoid);

-  if (Primitive::IsIntegralType(type)) {
+  if (Primitive::IsIntegralType(type) || type == Primitive::kPrimNot) {
     Register trg_reg = RegisterFrom(trg, type);
     Register res_reg = RegisterFrom(ARM64ReturnLocation(type), type);
     __ Mov(trg_reg, res_reg, kDiscardForSameWReg);
@@ -953,10 +953,6 @@ void IntrinsicCodeGeneratorARM64::VisitStringCharAt(HInvoke* invoke) {
   const MemberOffset value_offset = mirror::String::ValueOffset();
   // Location of count
   const MemberOffset count_offset = mirror::String::CountOffset();
-  // Starting offset within data array
-  const MemberOffset offset_offset = mirror::String::OffsetOffset();
-  // Start of char data with array_
-  const MemberOffset data_offset = mirror::Array::DataOffset(sizeof(uint16_t));

   Register obj = WRegisterFrom(locations->InAt(0));  // String object pointer.
   Register idx = WRegisterFrom(locations->InAt(1));  // Index of character.
@@ -979,21 +975,15 @@ void IntrinsicCodeGeneratorARM64::VisitStringCharAt(HInvoke* invoke) {
   __ Cmp(idx, temp);
   __ B(hs, slow_path->GetEntryLabel());

-  // Index computation.
-  __ Ldr(temp, HeapOperand(obj, offset_offset));       // temp := str.offset.
-  __ Ldr(array_temp, HeapOperand(obj, value_offset));  // array_temp := str.offset.
-  __ Add(temp, temp, idx);
-  DCHECK_EQ(data_offset.Int32Value() % 2, 0);          // We'll compensate by shifting.
-  __ Add(temp, temp, Operand(data_offset.Int32Value() / 2));
+  __ Add(array_temp, obj, Operand(value_offset.Int32Value()));  // array_temp := str.value.

   // Load the value.
-  __ Ldrh(out, MemOperand(array_temp.X(), temp, UXTW, 1));  // out := array_temp[temp].
+  __ Ldrh(out, MemOperand(array_temp.X(), idx, UXTW, 1));   // out := array_temp[idx].

   __ Bind(slow_path->GetExitLabel());
 }

 void IntrinsicLocationsBuilderARM64::VisitStringCompareTo(HInvoke* invoke) {
-  // The inputs plus one temp.
   LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                             LocationSummary::kCall,
                                                             kIntrinsified);
@@ -1022,6 +1012,84 @@ void IntrinsicCodeGeneratorARM64::VisitStringCompareTo(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }

+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->SetInAt(3, LocationFrom(calling_convention.GetRegisterAt(3)));
+  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  vixl::MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register byte_array = WRegisterFrom(locations->InAt(0));
+  __ Cmp(byte_array, 0);
+  SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ B(eq, slow_path->GetEntryLabel());
+
+  __ Ldr(lr,
+      MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromBytes).Int32Value()));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Blr(lr);
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromChars(HInvoke* invoke) {
+  vixl::MacroAssembler* masm = GetVIXLAssembler();
+
+  __ Ldr(lr,
+      MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromChars).Int32Value()));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Blr(lr);
+}
+
+void IntrinsicLocationsBuilderARM64::VisitStringNewStringFromString(HInvoke* invoke) {
+  // The inputs plus one temp.
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, LocationFrom(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, LocationFrom(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, LocationFrom(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(calling_convention.GetReturnLocation(Primitive::kPrimNot));
+}
+
+void IntrinsicCodeGeneratorARM64::VisitStringNewStringFromString(HInvoke* invoke) {
+  vixl::MacroAssembler* masm = GetVIXLAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register string_to_copy = WRegisterFrom(locations->InAt(0));
+  __ Cmp(string_to_copy, 0);
+  SlowPathCodeARM64* slow_path = new (GetAllocator()) IntrinsicSlowPathARM64(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ B(eq, slow_path->GetEntryLabel());
+
+  __ Ldr(lr,
+      MemOperand(tr, QUICK_ENTRYPOINT_OFFSET(kArm64WordSize, pAllocStringFromString).Int32Value()));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Blr(lr);
+  __ Bind(slow_path->GetExitLabel());
+}
+
 // Unimplemented intrinsics.

 #define UNIMPLEMENTED_INTRINSIC(Name) \
@@ -1034,6 +1102,7 @@ UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
 UNIMPLEMENTED_INTRINSIC(StringIndexOf)
 UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
 UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)

 }  // namespace arm64
 }  // namespace art
diff --git a/compiler/optimizing/intrinsics_list.h b/compiler/optimizing/intrinsics_list.h
index 10f6e1d6c7..2c9248f52c 100644
--- a/compiler/optimizing/intrinsics_list.h
+++ b/compiler/optimizing/intrinsics_list.h
@@ -60,8 +60,12 @@
   V(MemoryPokeShortNative, kStatic) \
   V(StringCharAt, kDirect) \
   V(StringCompareTo, kDirect) \
+  V(StringGetCharsNoCheck, kDirect) \
   V(StringIndexOf, kDirect) \
   V(StringIndexOfAfter, kDirect) \
+  V(StringNewStringFromBytes, kStatic) \
+  V(StringNewStringFromChars, kStatic) \
+  V(StringNewStringFromString, kStatic) \
   V(UnsafeCASInt, kDirect) \
   V(UnsafeCASLong, kDirect) \
   V(UnsafeCASObject, kDirect) \
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 7275edb695..b3e821cfbe 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -910,23 +910,18 @@ void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
   const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
   // Location of count
   const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-  // Starting offset within data array
-  const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
-  // Start of char data with array_
-  const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

   Register obj = locations->InAt(0).AsRegister<Register>();
   Register idx = locations->InAt(1).AsRegister<Register>();
   Register out = locations->Out().AsRegister<Register>();
-  Location temp_loc = locations->GetTemp(0);
-  Register temp = temp_loc.AsRegister<Register>();

   // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
   //       the cost.
   // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
   //       we will not optimize the code for constants (which would save a register).

-  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke, temp);
+  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+      invoke, locations->GetTemp(0).AsRegister<Register>());
   codegen_->AddSlowPath(slow_path);

   X86Assembler* assembler = GetAssembler();
@@ -935,12 +930,8 @@ void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   __ j(kAboveEqual, slow_path->GetEntryLabel());

-  // Get the actual element.
-  __ movl(temp, idx);                          // temp := idx.
-  __ addl(temp, Address(obj, offset_offset));  // temp := offset + idx.
-  __ movl(out, Address(obj, value_offset));    // obj := obj.array.
-  // out = out[2*temp].
-  __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+  // out = out[2*idx].
+  __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));

   __ Bind(slow_path->GetExitLabel());
 }
@@ -976,6 +967,81 @@ void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }

+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+  locations->SetOut(Location::RegisterLocation(EAX));
+  // Needs to be EAX for the invoke.
+  locations->AddTemp(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  X86Assembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register byte_array = locations->InAt(0).AsRegister<Register>();
+  __ testl(byte_array, byte_array);
+  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+      invoke, locations->GetTemp(0).AsRegister<Register>());
+  codegen_->AddSlowPath(slow_path);
+  __ j(kEqual, slow_path->GetEntryLabel());
+
+  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
+  X86Assembler* assembler = GetAssembler();
+
+  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetOut(Location::RegisterLocation(EAX));
+  // Needs to be EAX for the invoke.
+  locations->AddTemp(Location::RegisterLocation(EAX));
+}
+
+void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) {
+  X86Assembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  Register string_to_copy = locations->InAt(0).AsRegister<Register>();
+  __ testl(string_to_copy, string_to_copy);
+  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(
+      invoke, locations->GetTemp(0).AsRegister<Register>());
+  codegen_->AddSlowPath(slow_path);
+  __ j(kEqual, slow_path->GetEntryLabel());
+
+  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Bind(slow_path->GetExitLabel());
+}
+
 static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
   Register address = locations->InAt(0).AsRegisterPairLow<Register>();
   Location out_loc = locations->Out();
@@ -1536,6 +1602,7 @@ void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED)
 }

 UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(StringIndexOf)
 UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 35daaf60bb..5779b9cb9f 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -824,16 +824,10 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCharAt(HInvoke* invoke) {
   const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
   // Location of count
   const int32_t count_offset = mirror::String::CountOffset().Int32Value();
-  // Starting offset within data array
-  const int32_t offset_offset = mirror::String::OffsetOffset().Int32Value();
-  // Start of char data with array_
-  const int32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

   CpuRegister obj = locations->InAt(0).AsRegister<CpuRegister>();
   CpuRegister idx = locations->InAt(1).AsRegister<CpuRegister>();
   CpuRegister out = locations->Out().AsRegister<CpuRegister>();
-  Location temp_loc = locations->GetTemp(0);
-  CpuRegister temp = temp_loc.AsRegister<CpuRegister>();

   // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
   //       the cost.
@@ -849,12 +843,8 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCharAt(HInvoke* invoke) {
   codegen_->MaybeRecordImplicitNullCheck(invoke);
   __ j(kAboveEqual, slow_path->GetEntryLabel());

-  // Get the actual element.
-  __ movl(temp, idx);                          // temp := idx.
-  __ addl(temp, Address(obj, offset_offset));  // temp := offset + idx.
-  __ movl(out, Address(obj, value_offset));    // obj := obj.array.
-  // out = out[2*temp].
-  __ movzxw(out, Address(out, temp, ScaleFactor::TIMES_2, data_offset));
+  // out = out[2*idx].
+  __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));

   __ Bind(slow_path->GetExitLabel());
 }
@@ -887,6 +877,78 @@ void IntrinsicCodeGeneratorX86_64::VisitStringCompareTo(HInvoke* invoke) {
   __ Bind(slow_path->GetExitLabel());
 }

+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
+  locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromBytes(HInvoke* invoke) {
+  X86_64Assembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  CpuRegister byte_array = locations->InAt(0).AsRegister<CpuRegister>();
+  __ testl(byte_array, byte_array);
+  SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ j(kEqual, slow_path->GetEntryLabel());
+
+  __ gs()->call(Address::Absolute(
+      QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromBytes), true));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
+  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
+  locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromChars(HInvoke* invoke) {
+  X86_64Assembler* assembler = GetAssembler();
+
+  __ gs()->call(Address::Absolute(
+      QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromChars), true));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+}
+
+void IntrinsicLocationsBuilderX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
+  LocationSummary* locations = new (arena_) LocationSummary(invoke,
+                                                            LocationSummary::kCall,
+                                                            kIntrinsified);
+  InvokeRuntimeCallingConvention calling_convention;
+  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+  locations->SetOut(Location::RegisterLocation(RAX));
+}
+
+void IntrinsicCodeGeneratorX86_64::VisitStringNewStringFromString(HInvoke* invoke) {
+  X86_64Assembler* assembler = GetAssembler();
+  LocationSummary* locations = invoke->GetLocations();
+
+  CpuRegister string_to_copy = locations->InAt(0).AsRegister<CpuRegister>();
+  __ testl(string_to_copy, string_to_copy);
+  SlowPathCodeX86_64* slow_path = new (GetAllocator()) IntrinsicSlowPathX86_64(invoke);
+  codegen_->AddSlowPath(slow_path);
+  __ j(kEqual, slow_path->GetEntryLabel());
+
+  __ gs()->call(Address::Absolute(
+      QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pAllocStringFromString), true));
+  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
+  __ Bind(slow_path->GetExitLabel());
+}
+
 static void GenPeek(LocationSummary* locations, Primitive::Type size, X86_64Assembler* assembler) {
   CpuRegister address = locations->InAt(0).AsRegister<CpuRegister>();
   CpuRegister out = locations->Out().AsRegister<CpuRegister>();  // == address, here for clarity.
@@ -1390,6 +1452,7 @@ void IntrinsicLocationsBuilderX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UN
 void IntrinsicCodeGeneratorX86_64::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) { \
 }

+UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
 UNIMPLEMENTED_INTRINSIC(StringIndexOf)
 UNIMPLEMENTED_INTRINSIC(StringIndexOfAfter)
 UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 0533bff0b3..9e8df04cfb 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -2292,6 +2292,7 @@ class HInvokeStaticOrDirect : public HInvoke {
                         uint32_t dex_pc,
                         uint32_t dex_method_index,
                         bool is_recursive,
+                        int32_t string_init_offset,
                         InvokeType original_invoke_type,
                         InvokeType invoke_type,
                         ClinitCheckRequirement clinit_check_requirement)
@@ -2299,7 +2300,8 @@ class HInvokeStaticOrDirect : public HInvoke {
         original_invoke_type_(original_invoke_type),
         invoke_type_(invoke_type),
         is_recursive_(is_recursive),
-        clinit_check_requirement_(clinit_check_requirement) {}
+        clinit_check_requirement_(clinit_check_requirement),
+        string_init_offset_(string_init_offset) {}

   bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
     UNUSED(obj);
@@ -2312,6 +2314,8 @@ class HInvokeStaticOrDirect : public HInvoke {
   InvokeType GetInvokeType() const { return invoke_type_; }
   bool IsRecursive() const { return is_recursive_; }
   bool NeedsDexCache() const OVERRIDE { return !IsRecursive(); }
+  bool IsStringInit() const { return string_init_offset_ != 0; }
+  int32_t GetStringInitOffset() const { return string_init_offset_; }

   // Is this instruction a call to a static method?
   bool IsStatic() const {
@@ -2367,6 +2371,9 @@ class HInvokeStaticOrDirect : public HInvoke {
   const InvokeType invoke_type_;
   const bool is_recursive_;
   ClinitCheckRequirement clinit_check_requirement_;
+  // Thread entrypoint offset for string init method if this is a string init invoke.
+  // Note that there are multiple string init methods, each having its own offset.
+  int32_t string_init_offset_;

   DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
 };