| author | Sebastien Hertz <shertz@google.com> | 2015-03-24 14:12:35 +0000 |
|---|---|---|
| committer | Gerrit Code Review <noreply-gerritcodereview@google.com> | 2015-03-24 14:12:35 +0000 |
| commit | af87659f462ac650009fce295097cae3dabce171 (patch) | |
| tree | f203c9eed1543f6393175b2431ac6b43deb1b43c | |
| parent | 94ead7673f90a8199d926fe161d7d021202f0aa7 (diff) | |
| parent | eb07669e9784ccb41d75df180727e57fc4520e28 (diff) | |
Merge "JDWP: Optimized single step during debugging"
| -rw-r--r-- | runtime/debugger.cc | 155 |
| -rw-r--r-- | runtime/debugger.h | 78 |
| -rw-r--r-- | runtime/entrypoints/quick/quick_trampoline_entrypoints.cc | 36 |
| -rw-r--r-- | runtime/instrumentation.cc | 3 |
| -rw-r--r-- | runtime/interpreter/interpreter_common.cc | 11 |
| -rw-r--r-- | runtime/jdwp/jdwp_event.cc | 12 |
| -rw-r--r-- | runtime/jdwp/jdwp_handler.cc | 3 |
| -rw-r--r-- | runtime/jdwp/jdwp_main.cc | 2 |
| -rw-r--r-- | runtime/mirror/art_method.cc | 4 |
9 files changed, 237 insertions, 67 deletions
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 6296cf5c68..7144577acb 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -307,7 +307,6 @@ static JDWP::JdwpOptions gJdwpOptions;
 // Runtime JDWP state.
 static JDWP::JdwpState* gJdwpState = nullptr;
 static bool gDebuggerConnected;  // debugger or DDMS is connected.
-static bool gDebuggerActive;     // debugger is making requests.
 static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.
 
 static bool gDdmThreadNotification = false;
@@ -319,6 +318,7 @@ static Dbg::HpsgWhat gDdmHpsgWhat;
 static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
 static Dbg::HpsgWhat gDdmNhsgWhat;
 
+bool Dbg::gDebuggerActive = false;
 ObjectRegistry* Dbg::gRegistry = nullptr;
 
 // Recent allocation tracking.
@@ -331,7 +331,6 @@ Dbg::TypeCache Dbg::type_cache_;
 // Deoptimization support.
 std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
 size_t Dbg::full_deoptimization_event_count_ = 0;
-size_t Dbg::delayed_full_undeoptimization_count_ = 0;
 
 // Instrumentation event reference counters.
 size_t Dbg::dex_pc_change_event_ref_count_ = 0;
@@ -620,7 +619,7 @@ void Dbg::GoActive() {
   // Enable all debugging features, including scans for breakpoints.
   // This is a no-op if we're already active.
   // Only called from the JDWP handler thread.
-  if (gDebuggerActive) {
+  if (IsDebuggerActive()) {
     return;
   }
 
@@ -634,7 +633,6 @@ void Dbg::GoActive() {
     MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
     CHECK_EQ(deoptimization_requests_.size(), 0U);
     CHECK_EQ(full_deoptimization_event_count_, 0U);
-    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
     CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
     CHECK_EQ(method_enter_event_ref_count_, 0U);
     CHECK_EQ(method_exit_event_ref_count_, 0U);
@@ -673,7 +671,7 @@ void Dbg::Disconnected() {
   ThreadState old_state = self->SetStateUnsafe(kRunnable);
 
   // Debugger may not be active at this point.
-  if (gDebuggerActive) {
+  if (IsDebuggerActive()) {
     {
       // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
       // This prevents us from having any pending deoptimization request when the debugger attaches
@@ -681,7 +679,6 @@ void Dbg::Disconnected() {
       MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
       deoptimization_requests_.clear();
       full_deoptimization_event_count_ = 0U;
-      delayed_full_undeoptimization_count_ = 0U;
     }
     if (instrumentation_events_ != 0) {
       runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
@@ -704,10 +701,6 @@ void Dbg::Disconnected() {
   gDebuggerConnected = false;
 }
 
-bool Dbg::IsDebuggerActive() {
-  return gDebuggerActive;
-}
-
 void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
   CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
   gJdwpOptions = jdwp_options;
@@ -3020,29 +3013,6 @@ void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
   }
 }
 
-void Dbg::DelayFullUndeoptimization() {
-  if (RequiresDeoptimization()) {
-    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
-    ++delayed_full_undeoptimization_count_;
-    DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
-  }
-}
-
-void Dbg::ProcessDelayedFullUndeoptimizations() {
-  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
-  {
-    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
-    while (delayed_full_undeoptimization_count_ > 0) {
-      DeoptimizationRequest req;
-      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
-      req.SetMethod(nullptr);
-      RequestDeoptimizationLocked(req);
-      --delayed_full_undeoptimization_count_;
-    }
-  }
-  ManageDeoptimization();
-}
-
 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
   if (req.GetKind() == DeoptimizationRequest::kNothing) {
     // Nothing to do.
@@ -3352,6 +3322,125 @@ void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequ
   }
 }
 
+bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m) {
+  const SingleStepControl* const ssc = thread->GetSingleStepControl();
+  if (ssc == nullptr) {
+    // If we are not single-stepping, then we don't have to force interpreter.
+    return false;
+  }
+  if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
+    // If we are in interpreter only mode, then we don't have to force interpreter.
+    return false;
+  }
+
+  if (!m->IsNative() && !m->IsProxyMethod()) {
+    // If we want to step into a method, then we have to force interpreter on that call.
+    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
+  instrumentation::Instrumentation* const instrumentation =
+      Runtime::Current()->GetInstrumentation();
+  // If we are in interpreter only mode, then we don't have to force interpreter.
+  if (instrumentation->InterpretOnly()) {
+    return false;
+  }
+  // We can only interpret pure Java method.
+  if (m->IsNative() || m->IsProxyMethod()) {
+    return false;
+  }
+  const SingleStepControl* const ssc = thread->GetSingleStepControl();
+  if (ssc != nullptr) {
+    // If we want to step into a method, then we have to force interpreter on that call.
+    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
+      return true;
+    }
+    // If we are stepping out from a static initializer, by issuing a step
+    // in or step over, that was implicitly invoked by calling a static method,
+    // then we need to step into that method. Having a lower stack depth than
+    // the one the single step control has indicates that the step originates
+    // from the static initializer.
+    if (ssc->GetStepDepth() != JDWP::SD_OUT &&
+        ssc->GetStackDepth() > GetStackDepth(thread)) {
+      return true;
+    }
+  }
+  // There are cases where we have to force interpreter on deoptimized methods,
+  // because in some cases the call will not be performed by invoking an entry
+  // point that has been replaced by the deoptimization, but instead by directly
+  // invoking the compiled code of the method, for example.
+  return instrumentation->IsDeoptimized(m);
+}
+
+bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m) {
+  // The upcall can be nullptr and in that case we don't need to do anything.
+  if (m == nullptr) {
+    return false;
+  }
+  instrumentation::Instrumentation* const instrumentation =
+      Runtime::Current()->GetInstrumentation();
+  // If we are in interpreter only mode, then we don't have to force interpreter.
+  if (instrumentation->InterpretOnly()) {
+    return false;
+  }
+  // We can only interpret pure Java method.
+  if (m->IsNative() || m->IsProxyMethod()) {
+    return false;
+  }
+  const SingleStepControl* const ssc = thread->GetSingleStepControl();
+  if (ssc != nullptr) {
+    // If we are stepping out from a static initializer, by issuing a step
+    // out, that was implicitly invoked by calling a static method, then we
+    // need to step into the caller of that method. Having a lower stack
+    // depth than the one the single step control has indicates that the
+    // step originates from the static initializer.
+    if (ssc->GetStepDepth() == JDWP::SD_OUT &&
+        ssc->GetStackDepth() > GetStackDepth(thread)) {
+      return true;
+    }
+  }
+  // If we are returning from a static intializer, that was implicitly
+  // invoked by calling a static method and the caller is deoptimized,
+  // then we have to deoptimize the stack without forcing interpreter
+  // on the static method that was called originally. This problem can
+  // be solved easily by forcing instrumentation on the called method,
+  // because the instrumentation exit hook will recognise the need of
+  // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
+  return instrumentation->IsDeoptimized(m);
+}
+
+bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m) {
+  // The upcall can be nullptr and in that case we don't need to do anything.
+  if (m == nullptr) {
+    return false;
+  }
+  instrumentation::Instrumentation* const instrumentation =
+      Runtime::Current()->GetInstrumentation();
+  // If we are in interpreter only mode, then we don't have to force interpreter.
+  if (instrumentation->InterpretOnly()) {
+    return false;
+  }
+  // We can only interpret pure Java method.
+  if (m->IsNative() || m->IsProxyMethod()) {
+    return false;
+  }
+  const SingleStepControl* const ssc = thread->GetSingleStepControl();
+  if (ssc != nullptr) {
+    // The debugger is not interested in what is happening under the level
+    // of the step, thus we only force interpreter when we are not below of
+    // the step.
+    if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
+      return true;
+    }
+  }
+  // We have to require stack deoptimization if the upcall is deoptimized.
+  return instrumentation->IsDeoptimized(m);
+}
+
 // Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
 // cause suspension if the thread is the current thread.
 class ScopedThreadSuspension {
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 01c9d5dbab..d015294eac 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -243,7 +243,9 @@ class Dbg {
 
   // Returns true if we're actually debugging with a real debugger, false if it's
   // just DDMS (or nothing at all).
-  static bool IsDebuggerActive();
+  static bool IsDebuggerActive() {
+    return gDebuggerActive;
+  }
 
   // Configures JDWP with parsed command-line options.
   static void ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options);
@@ -543,13 +545,6 @@ class Dbg {
      LOCKS_EXCLUDED(Locks::deoptimization_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Support delayed full undeoptimization requests. This is currently only used for single-step
-  // events.
-  static void DelayFullUndeoptimization() LOCKS_EXCLUDED(Locks::deoptimization_lock_);
-  static void ProcessDelayedFullUndeoptimizations()
-      LOCKS_EXCLUDED(Locks::deoptimization_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Manage deoptimization after updating JDWP events list. Suspends all threads, processes each
   // request and finally resumes all threads.
   static void ManageDeoptimization()
@@ -564,6 +559,53 @@ class Dbg {
       LOCKS_EXCLUDED(Locks::breakpoint_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  /*
+   * Forced interpreter checkers for single-step and continue support.
+   */
+
+  // Indicates whether we need to force the use of interpreter to invoke a method.
+  // This allows to single-step or continue into the called method.
+  static bool IsForcedInterpreterNeededForCalling(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (!IsDebuggerActive()) {
+      return false;
+    }
+    return IsForcedInterpreterNeededForCallingImpl(thread, m);
+  }
+
+  // Indicates whether we need to force the use of interpreter entrypoint when calling a
+  // method through the resolution trampoline. This allows to single-step or continue into
+  // the called method.
+  static bool IsForcedInterpreterNeededForResolution(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (!IsDebuggerActive()) {
+      return false;
+    }
+    return IsForcedInterpreterNeededForResolutionImpl(thread, m);
+  }
+
+  // Indicates whether we need to force the use of instrumentation entrypoint when calling
+  // a method through the resolution trampoline. This allows to deoptimize the stack for
+  // debugging when we returned from the called method.
+  static bool IsForcedInstrumentationNeededForResolution(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (!IsDebuggerActive()) {
+      return false;
+    }
+    return IsForcedInstrumentationNeededForResolutionImpl(thread, m);
+  }
+
+  // Indicates whether we need to force the use of interpreter when returning from the
+  // interpreter into the runtime. This allows to deoptimize the stack and continue
+  // execution with interpreter for debugging.
+  static bool IsForcedInterpreterNeededForUpcall(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (!IsDebuggerActive()) {
+      return false;
+    }
+    return IsForcedInterpreterNeededForUpcallImpl(thread, m);
+  }
+
   // Single-stepping.
   static JDWP::JdwpError ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize size,
                                        JDWP::JdwpStepDepth depth)
@@ -690,11 +732,27 @@ class Dbg {
      EXCLUSIVE_LOCKS_REQUIRED(Locks::deoptimization_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  static bool IsForcedInterpreterNeededForCallingImpl(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool IsForcedInterpreterNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static bool IsForcedInterpreterNeededForUpcallImpl(Thread* thread, mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   static AllocRecord* recent_allocation_records_ PT_GUARDED_BY(Locks::alloc_tracker_lock_);
   static size_t alloc_record_max_ GUARDED_BY(Locks::alloc_tracker_lock_);
   static size_t alloc_record_head_ GUARDED_BY(Locks::alloc_tracker_lock_);
   static size_t alloc_record_count_ GUARDED_BY(Locks::alloc_tracker_lock_);
 
+  // Indicates whether the debugger is making requests.
+  static bool gDebuggerActive;
+
+  // The registry mapping objects to JDWP ids.
   static ObjectRegistry* gRegistry;
 
   // Deoptimization requests to be processed each time the event list is updated. This is used when
@@ -709,10 +767,6 @@ class Dbg {
   // undeoptimize when the last event is unregistered (when the counter is set to 0).
   static size_t full_deoptimization_event_count_ GUARDED_BY(Locks::deoptimization_lock_);
 
-  // Count the number of full undeoptimization requests delayed to next resume or end of debug
-  // session.
-  static size_t delayed_full_undeoptimization_count_ GUARDED_BY(Locks::deoptimization_lock_);
-
   static size_t* GetReferenceCounterForEvent(uint32_t instrumentation_event);
 
   // Weak global type cache, TODO improve this.
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 70ee04246a..5eb97d880c 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -30,6 +30,7 @@
 #include "mirror/object_array-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
+#include "debugger.h"
 
 namespace art {
 
@@ -639,6 +640,14 @@ extern "C" uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Threa
   JValue result = interpreter::EnterInterpreterFromEntryPoint(self, code_item, shadow_frame);
   // Pop transition.
   self->PopManagedStackFragment(fragment);
+
+  // Request a stack deoptimization if needed
+  mirror::ArtMethod* caller = QuickArgumentVisitor::GetCallingMethod(sp);
+  if (UNLIKELY(Dbg::IsForcedInterpreterNeededForUpcall(self, caller))) {
+    self->SetException(Thread::GetDeoptimizationException());
+    self->SetDeoptimizationReturnValue(result);
+  }
+
   // No need to restore the args since the method has already been run by the interpreter.
   return result.GetJ();
 }
@@ -950,14 +959,37 @@ extern "C" const void* artQuickResolutionTrampoline(mirror::ArtMethod* called,
       called->GetDexCache()->SetResolvedMethod(called_dex_method_idx, called);
     }
   }
+
   // Ensure that the called method's class is initialized.
   StackHandleScope<1> hs(soa.Self());
   Handle<mirror::Class> called_class(hs.NewHandle(called->GetDeclaringClass()));
   linker->EnsureInitialized(soa.Self(), called_class, true, true);
   if (LIKELY(called_class->IsInitialized())) {
-    code = called->GetEntryPointFromQuickCompiledCode();
+    if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+      // If we are single-stepping or the called method is deoptimized (by a
+      // breakpoint, for example), then we have to execute the called method
+      // with the interpreter.
+      code = GetQuickToInterpreterBridge();
+    } else if (UNLIKELY(Dbg::IsForcedInstrumentationNeededForResolution(self, caller))) {
+      // If the caller is deoptimized (by a breakpoint, for example), we have to
+      // continue its execution with interpreter when returning from the called
+      // method. Because we do not want to execute the called method with the
+      // interpreter, we wrap its execution into the instrumentation stubs.
+      // When the called method returns, it will execute the instrumentation
+      // exit hook that will determine the need of the interpreter with a call
+      // to Dbg::IsForcedInterpreterNeededForUpcall and deoptimize the stack if
+      // it is needed.
+      code = GetQuickInstrumentationEntryPoint();
+    } else {
+      code = called->GetEntryPointFromQuickCompiledCode();
+    }
   } else if (called_class->IsInitializing()) {
-    if (invoke_type == kStatic) {
+    if (UNLIKELY(Dbg::IsForcedInterpreterNeededForResolution(self, called))) {
+      // If we are single-stepping or the called method is deoptimized (by a
+      // breakpoint, for example), then we have to execute the called method
+      // with the interpreter.
+      code = GetQuickToInterpreterBridge();
+    } else if (invoke_type == kStatic) {
       // Class is still initializing, go to oat and grab code (trampoline must be left in place
       // until class is initialized to stop races between threads).
       code = linker->GetQuickOatCodeFor(called);
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index b53b8cddb4..9adb4ac991 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1030,7 +1030,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self, uintpt
   NthCallerVisitor visitor(self, 1, true);
   visitor.WalkStack(true);
   bool deoptimize = (visitor.caller != nullptr) &&
-                    (interpreter_stubs_installed_ || IsDeoptimized(visitor.caller));
+                    (interpreter_stubs_installed_ || IsDeoptimized(visitor.caller) ||
+                    Dbg::IsForcedInterpreterNeededForUpcall(self, visitor.caller));
   if (deoptimize) {
     if (kVerboseInstrumentation) {
       LOG(INFO) << StringPrintf("Deoptimizing %s by returning from %s with result %#" PRIx64 " in ",
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 26ab602dc1..a3ab026ba8 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -18,6 +18,7 @@
 
 #include <cmath>
 
+#include "debugger.h"
 #include "mirror/array-inl.h"
 #include "unstarted_runtime.h"
 
@@ -616,8 +617,14 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
                  << PrettyMethod(new_shadow_frame->GetMethod());
       UNREACHABLE();
     }
-    (new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter())(self, code_item,
-                                                                    new_shadow_frame, result);
+    // Force the use of interpreter when it is required by the debugger.
+    mirror::EntryPointFromInterpreter* entry;
+    if (UNLIKELY(Dbg::IsForcedInterpreterNeededForCalling(self, new_shadow_frame->GetMethod()))) {
+      entry = &art::artInterpreterToInterpreterBridge;
+    } else {
+      entry = new_shadow_frame->GetMethod()->GetEntryPointFromInterpreter();
+    }
+    entry(self, code_item, new_shadow_frame, result);
   } else {
     UnstartedRuntimeInvoke(self, code_item, new_shadow_frame, result, first_dest_reg);
   }
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index 4bf7142692..c9a4483188 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -133,7 +133,6 @@ static bool NeedsFullDeoptimization(JdwpEventKind eventKind) {
     case EK_METHOD_ENTRY:
     case EK_METHOD_EXIT:
     case EK_METHOD_EXIT_WITH_RETURN_VALUE:
-    case EK_SINGLE_STEP:
     case EK_FIELD_ACCESS:
     case EK_FIELD_MODIFICATION:
       return true;
@@ -278,16 +277,7 @@ void JdwpState::UnregisterEvent(JdwpEvent* pEvent) {
       Dbg::UnconfigureStep(pMod->step.threadId);
     }
   }
-  if (pEvent->eventKind == EK_SINGLE_STEP) {
-    // Special case for single-steps where we want to avoid the slow pattern deoptimize/undeoptimize
-    // loop between each single-step. In a IDE, this would happens each time the user click on the
-    // "single-step" button. Here we delay the full undeoptimization to the next resume
-    // (VM.Resume or ThreadReference.Resume) or the end of the debugging session (VM.Dispose or
-    // runtime shutdown).
-    // Therefore, in a singles-stepping sequence, only the first single-step will trigger a full
-    // deoptimization and only the last single-step will trigger a full undeoptimization.
-    Dbg::DelayFullUndeoptimization();
-  } else if (NeedsFullDeoptimization(pEvent->eventKind)) {
+  if (NeedsFullDeoptimization(pEvent->eventKind)) {
     CHECK_EQ(req.GetKind(), DeoptimizationRequest::kNothing);
     CHECK(req.Method() == nullptr);
     req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index c7083dcedc..add1394f2d 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -295,7 +295,6 @@ static JdwpError VM_Suspend(JdwpState*, Request*, ExpandBuf*)
  */
 static JdwpError VM_Resume(JdwpState*, Request*, ExpandBuf*)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  Dbg::ProcessDelayedFullUndeoptimizations();
   Dbg::ResumeVM();
   return ERR_NONE;
 }
@@ -989,8 +988,6 @@ static JdwpError TR_Resume(JdwpState*, Request* request, ExpandBuf*)
     return ERR_NONE;
   }
 
-  Dbg::ProcessDelayedFullUndeoptimizations();
-
   Dbg::ResumeThread(thread_id);
   return ERR_NONE;
 }
diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc
index 3d69796fb1..e2b88a5e79 100644
--- a/runtime/jdwp/jdwp_main.cc
+++ b/runtime/jdwp/jdwp_main.cc
@@ -322,8 +322,6 @@ void JdwpState::ResetState() {
     CHECK(event_list_ == nullptr);
   }
 
-  Dbg::ProcessDelayedFullUndeoptimizations();
-
   /*
    * Should not have one of these in progress. If the debugger went away
    * mid-request, though, we could see this.
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index ffee59edc0..c1f7594578 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -401,7 +401,9 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
   Runtime* runtime = Runtime::Current();
 
   // Call the invoke stub, passing everything as arguments.
-  if (UNLIKELY(!runtime->IsStarted())) {
+  // If the runtime is not yet started or it is required by the debugger, then perform the
+  // Invocation by the interpreter.
+  if (UNLIKELY(!runtime->IsStarted() || Dbg::IsForcedInterpreterNeededForCalling(self, this))) {
     if (IsStatic()) {
       art::interpreter::EnterInterpreterFromInvoke(self, this, nullptr, args, result);
     } else {
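The recurring pattern in this change is that every new query the runtime makes on the hot path (`Dbg::IsForcedInterpreterNeededForCalling`, `...ForResolution`, `...ForUpcall`, `IsForcedInstrumentationNeededForResolution`) is a cheap inline wrapper that bails out through `IsDebuggerActive()` (now just a read of `Dbg::gDebuggerActive`) and only falls through to the slower `*Impl()` check, which inspects the thread's single-step control and stack depth, while a debugger is attached. Below is a minimal, self-contained sketch of that guard pattern; the names and the simplified "am I single-stepping?" condition are illustrative stand-ins, not ART's real APIs.

```cpp
#include <iostream>

namespace sketch {

// Stand-ins for Dbg::gDebuggerActive and thread->GetSingleStepControl() != nullptr.
bool g_debugger_active = false;
bool g_single_stepping = false;

// Slow path: in ART this would examine the SingleStepControl, the step depth and the
// current stack depth before deciding to force the interpreter.
bool IsForcedInterpreterNeededForCallingImpl() {
  return g_single_stepping;
}

// Fast path, inlined at every call site: when no debugger is attached the check costs a
// single branch, so compiled code keeps running at full speed.
inline bool IsForcedInterpreterNeededForCalling() {
  if (!g_debugger_active) {
    return false;
  }
  return IsForcedInterpreterNeededForCallingImpl();
}

}  // namespace sketch

int main() {
  std::cout << sketch::IsForcedInterpreterNeededForCalling() << "\n";  // 0: no debugger attached
  sketch::g_debugger_active = true;
  sketch::g_single_stepping = true;
  std::cout << sketch::IsForcedInterpreterNeededForCalling() << "\n";  // 1: stepping, force interpreter
  return 0;
}
```

This is why the commit can drop the delayed-undeoptimization machinery: instead of deoptimizing the whole world on every single-step event, the per-call checks above route only the frames the debugger actually cares about through the interpreter or the instrumentation stubs.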
