-rw-r--r--  compiler/dex/arena_allocator.cc                              |   1
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc                          |   6
-rw-r--r--  compiler/dex/portable/mir_to_gbc.cc                          |   1
-rw-r--r--  compiler/dex/quick/dex_file_method_inliner.cc                |   1
-rw-r--r--  compiler/driver/compiler_driver.cc                           |   2
-rw-r--r--  compiler/image_writer.cc                                     |   4
-rw-r--r--  runtime/base/macros.h                                        |   7
-rw-r--r--  runtime/base/timing_logger.cc                                |   2
-rw-r--r--  runtime/class_linker.cc                                      |   2
-rw-r--r--  runtime/debugger.cc                                          |  21
-rw-r--r--  runtime/entrypoints/entrypoint_utils.h                       |  18
-rw-r--r--  runtime/entrypoints/portable/portable_invoke_entrypoints.cc  |  16
-rw-r--r--  runtime/entrypoints/quick/quick_invoke_entrypoints.cc        |  18
-rw-r--r--  runtime/gc/collector/mark_sweep.cc                           |   2
-rw-r--r--  runtime/gc/collector/semi_space.cc                           |   2
-rw-r--r--  runtime/interpreter/interpreter.cc                           |  72
-rw-r--r--  runtime/interpreter/interpreter_common.h                     |  52
-rw-r--r--  runtime/jdwp/object_registry.cc                              |  15
-rw-r--r--  runtime/verifier/method_verifier.h                           |   5

19 files changed, 134 insertions(+), 113 deletions(-)
diff --git a/compiler/dex/arena_allocator.cc b/compiler/dex/arena_allocator.cc
index 95e44b3e0d..132831c3ef 100644
--- a/compiler/dex/arena_allocator.cc
+++ b/compiler/dex/arena_allocator.cc
@@ -28,6 +28,7 @@ namespace art {
 static constexpr bool kUseMemMap = false;
 static constexpr bool kUseMemSet = true && kUseMemMap;
 static constexpr size_t kValgrindRedZoneBytes = 8;
+constexpr size_t Arena::kDefaultSize;
 
 static const char* alloc_names[ArenaAllocator::kNumAllocKinds] = {
   "Misc       ",
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index abafbc5830..3368132a0e 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -52,12 +52,6 @@ class DexCompiler {
     return *unit_.GetDexFile();
   }
 
-  // TODO: since the whole compilation pipeline uses a "const DexFile", we need
-  // to "unconst" here. The DEX-to-DEX compiler should work on a non-const DexFile.
-  DexFile& GetModifiableDexFile() {
-    return *const_cast<DexFile*>(unit_.GetDexFile());
-  }
-
   bool PerformOptimizations() const {
     return dex_to_dex_compilation_level_ >= kOptimize;
   }
diff --git a/compiler/dex/portable/mir_to_gbc.cc b/compiler/dex/portable/mir_to_gbc.cc
index e5b4876f08..70b660b7ae 100644
--- a/compiler/dex/portable/mir_to_gbc.cc
+++ b/compiler/dex/portable/mir_to_gbc.cc
@@ -1660,7 +1660,6 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
   uint16_t arg_reg = cu_->num_regs;
 
   ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
-  ::llvm::Function::arg_iterator arg_end(func_->arg_end());
 
   const char* shorty = cu_->shorty;
   uint32_t shorty_size = strlen(shorty);
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index fb471abfa9..b21e37e13d 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -22,6 +22,7 @@
 
 namespace art {
 
+const uint32_t DexFileMethodInliner::kIndexUnresolved;
 const char* DexFileMethodInliner::kClassCacheNames[] = {
     "Z",  // kClassCacheBoolean
     "B",  // kClassCacheByte
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index 8e666dd2a8..43ed28c762 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1430,7 +1430,7 @@ class ParallelCompilationManager {
    private:
     ParallelCompilationManager* const manager_;
     const size_t end_;
-    const Callback* const callback_;
+    Callback* const callback_;
   };
 
   AtomicInteger index_;
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 90e2c65c89..02654ad55a 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -630,10 +630,10 @@ void ImageWriter::FixupMethod(const ArtMethod* orig, ArtMethod* copy) {
     copy->SetEntryPointFromCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_));
 #endif
     copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
-        (GetOatAddress(interpreter_to_interpreter_bridge_offset_)));
+        (const_cast<byte*>(GetOatAddress(interpreter_to_interpreter_bridge_offset_))));
   } else {
     copy->SetEntryPointFromInterpreter(reinterpret_cast<EntryPointFromInterpreter*>
-        (GetOatAddress(interpreter_to_compiled_code_bridge_offset_)));
+        (const_cast<byte*>(GetOatAddress(interpreter_to_compiled_code_bridge_offset_))));
     // Use original code if it exists. Otherwise, set the code pointer to the resolution
     // trampoline.
     const byte* code = GetOatAddress(orig->GetOatCodeOffset());
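A note on the one-line definitions added above (Arena::kDefaultSize, DexFileMethodInliner::kIndexUnresolved) and below (the CumulativeLogger bucket counts in timing_logger.cc): in C++11, an in-class initializer for a static constexpr member is only a declaration, so any odr-use of the member, such as binding it to a const reference, still needs a namespace-scope definition. GCC tends to optimize the reference away; clang's stricter codegen surfaces the missing definition as a link error. A minimal self-contained sketch of the rule, with hypothetical names and an assumed initializer value:

    #include <algorithm>
    #include <cstddef>

    struct Arena {
      // Declaration with initializer; not a definition in C++11.
      static constexpr size_t kDefaultSize = 128 * 1024;  // value assumed for illustration
    };

    // Out-of-class definition, required once kDefaultSize is odr-used.
    constexpr size_t Arena::kDefaultSize;

    size_t ClampToDefault(size_t requested) {
      // std::max takes const references, which odr-uses kDefaultSize;
      // without the definition above, this can fail to link under clang.
      return std::max(Arena::kDefaultSize, requested);
    }

(Since C++17, static constexpr members are implicitly inline and the extra definition is redundant; this code predates that.)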
diff --git a/runtime/base/macros.h b/runtime/base/macros.h
index 00a530a206..cf7029a63f 100644
--- a/runtime/base/macros.h
+++ b/runtime/base/macros.h
@@ -140,6 +140,13 @@ char (&ArraySizeHelper(T (&array)[N]))[N];
 #define ALWAYS_INLINE __attribute__ ((always_inline))
 #endif
 
+#ifdef __clang__
+/* clang doesn't like attributes on lambda functions */
+#define ALWAYS_INLINE_LAMBDA
+#else
+#define ALWAYS_INLINE_LAMBDA ALWAYS_INLINE
+#endif
+
 #if defined (__APPLE__)
 #define HOT_ATTR
 #define COLD_ATTR
diff --git a/runtime/base/timing_logger.cc b/runtime/base/timing_logger.cc
index bb32b2da88..fe18f66032 100644
--- a/runtime/base/timing_logger.cc
+++ b/runtime/base/timing_logger.cc
@@ -31,6 +31,8 @@
 
 namespace art {
 
+constexpr size_t CumulativeLogger::kLowMemoryBucketCount;
+constexpr size_t CumulativeLogger::kDefaultBucketCount;
 CumulativeLogger::CumulativeLogger(const std::string& name)
     : name_(name),
       lock_name_("CumulativeLoggerLock" + name),
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index a98673d0af..643c183d1f 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -2489,6 +2489,8 @@ void ClassLinker::VerifyClass(const SirtRef<mirror::Class>& klass) {
     if (cause.get() != nullptr) {
       self->GetException(nullptr)->SetCause(cause.get());
     }
+    ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
+    verifier::MethodVerifier::AddRejectedClass(ref);
     klass->SetStatus(mirror::Class::kStatusError, self);
     return;
   }
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index bb44db87af..bcf72671dd 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -796,18 +796,37 @@ JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_c
 
 JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
+  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
+    return JDWP::ERR_INVALID_OBJECT;
+  }
   gRegistry->DisableCollection(object_id);
   return JDWP::ERR_NONE;
 }
 
 JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
+  // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI
+  // also ignores these cases and never return an error. However it's not obvious why this command
+  // should behave differently from DisableCollection and IsCollected commands. So let's be more
+  // strict and return an error if this happens.
+  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
+    return JDWP::ERR_INVALID_OBJECT;
+  }
   gRegistry->EnableCollection(object_id);
   return JDWP::ERR_NONE;
 }
 
 JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
+  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
+  // the RI seems to ignore this and does not return any error in this case. Let's comply with
+  // JDWP specs here.
+  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
+    return JDWP::ERR_INVALID_OBJECT;
+  }
   is_collected = gRegistry->IsCollected(object_id);
   return JDWP::ERR_NONE;
 }
@@ -2713,7 +2732,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
       if (argument == ObjectRegistry::kInvalidObject) {
         return JDWP::ERR_INVALID_OBJECT;
       }
-      if (!argument->InstanceOf(parameter_type)) {
+      if (argument != NULL && !argument->InstanceOf(parameter_type)) {
         return JDWP::ERR_ILLEGAL_ARGUMENT;
       }
 
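The three collection commands above now share the same lookup-and-validate prologue: gRegistry->Get() yields NULL, the ObjectRegistry::kInvalidObject sentinel, or a live object, and only the last case may proceed (the registry itself now CHECKs instead of silently returning; see object_registry.cc below). Purely as an illustration of that contract, the repeated prologue could be condensed into a helper; ValidateObjectId is hypothetical and not part of this change:

    static JDWP::JdwpError ValidateObjectId(JDWP::ObjectId object_id)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
      if (o == NULL || o == ObjectRegistry::kInvalidObject) {
        // Unregistered or stale id: report it rather than touching the registry.
        return JDWP::ERR_INVALID_OBJECT;
      }
      return JDWP::ERR_NONE;
    }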
diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h
index bfdbd74e58..a60446caba 100644
--- a/runtime/entrypoints/entrypoint_utils.h
+++ b/runtime/entrypoints/entrypoint_utils.h
@@ -262,9 +262,9 @@ static inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, const mirr
 // Explicit template declarations of FindFieldFromCode for all field access types.
 #define EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, _access_check) \
 template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
-static mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
-                                                                 const mirror::ArtMethod* referrer, \
-                                                                 Thread* self, size_t expected_size) \
+mirror::ArtField* FindFieldFromCode<_type, _access_check>(uint32_t field_idx, \
+                                                          const mirror::ArtMethod* referrer, \
+                                                          Thread* self, size_t expected_size) \
 
 #define EXPLICIT_FIND_FIELD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
   EXPLICIT_FIND_FIELD_FROM_CODE_TEMPLATE_DECL(_type, false); \
@@ -393,12 +393,12 @@ static inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx, mirror:
 }
 
 // Explicit template declarations of FindMethodFromCode for all invoke types.
-#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)                \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                      \
-  static mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
-                                                                     mirror::Object* this_object, \
-                                                                     mirror::ArtMethod* referrer, \
-                                                                     Thread* self)
+#define EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, _access_check)         \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE               \
+  mirror::ArtMethod* FindMethodFromCode<_type, _access_check>(uint32_t method_idx, \
+                                                              mirror::Object* this_object, \
+                                                              mirror::ArtMethod* referrer, \
+                                                              Thread* self)
 #define EXPLICIT_FIND_METHOD_FROM_CODE_TYPED_TEMPLATE_DECL(_type) \
   EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, false); \
   EXPLICIT_FIND_METHOD_FROM_CODE_TEMPLATE_DECL(_type, true)
diff --git a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
index e2a0cc2008..47ccbb126e 100644
--- a/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
+++ b/runtime/entrypoints/portable/portable_invoke_entrypoints.cc
@@ -22,8 +22,8 @@
 namespace art {
 
 template<InvokeType type, bool access_check>
-static mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
-                                           mirror::ArtMethod* caller_method, Thread* thread) {
+mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object* this_object,
+                                    mirror::ArtMethod* caller_method, Thread* thread) {
   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
                                              access_check, type);
   if (UNLIKELY(method == NULL)) {
@@ -46,12 +46,12 @@ static mirror::ArtMethod* FindMethodHelper(uint32_t method_idx, mirror::Object*
 }
 
 // Explicit template declarations of FindMethodHelper for all invoke types.
-#define EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, _access_check)                 \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                                  \
-  static mirror::ArtMethod* FindMethodHelper<_type, _access_check>(uint32_t method_idx, \
-                                                                   mirror::Object* this_object, \
-                                                                   mirror::ArtMethod* caller_method, \
-                                                                   Thread* thread)
+#define EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, _access_check)          \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                           \
+  mirror::ArtMethod* FindMethodHelper<_type, _access_check>(uint32_t method_idx, \
+                                                            mirror::Object* this_object, \
+                                                            mirror::ArtMethod* caller_method, \
+                                                            Thread* thread)
 #define EXPLICIT_FIND_METHOD_HELPER_TYPED_TEMPLATE_DECL(_type) \
   EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, false); \
   EXPLICIT_FIND_METHOD_HELPER_TEMPLATE_DECL(_type, true)
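The recurring edit in the two files above, and in the quick entry points and interpreter headers below, is dropping `static` from function templates that are explicitly instantiated. `static` gives the template internal linkage, and C++11 forbids a storage-class-specifier in an explicit instantiation, so clang rejects the old form that GCC accepted; external linkage also lets one instantiation serve every translation unit. A self-contained sketch of the accepted form, with hypothetical names:

    // No `static`: the instantiated symbols get external linkage.
    template <bool kAccessCheck>
    int Lookup(int idx) {
      return kAccessCheck ? idx : -idx;
    }

    // Explicit instantiation definitions, one per template argument.
    template int Lookup<false>(int idx);
    template int Lookup<true>(int idx);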
diff --git a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
index b852a3292e..5a1b3e84ad 100644
--- a/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_invoke_entrypoints.cc
@@ -142,9 +142,9 @@ extern "C" uint64_t artInvokeInterfaceTrampoline(mirror::ArtMethod* interface_me
 }
 
 template<InvokeType type, bool access_check>
-static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
-                                mirror::ArtMethod* caller_method,
-                                Thread* self, mirror::ArtMethod** sp) {
+uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object,
+                         mirror::ArtMethod* caller_method,
+                         Thread* self, mirror::ArtMethod** sp) {
   mirror::ArtMethod* method = FindMethodFast(method_idx, this_object, caller_method,
                                              access_check, type);
   if (UNLIKELY(method == NULL)) {
@@ -174,12 +174,12 @@ static uint64_t artInvokeCommon(uint32_t method_idx, mirror::Object* this_object
 }
 
 // Explicit template declarations of artInvokeCommon for all invoke types.
-#define EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, _access_check)       \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                       \
-  static uint64_t artInvokeCommon<_type, _access_check>(uint32_t method_idx, \
-                                                        mirror::Object* this_object, \
-                                                        mirror::ArtMethod* caller_method, \
-                                                        Thread* self, mirror::ArtMethod** sp)
+#define EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, _access_check) \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)                 \
+  uint64_t artInvokeCommon<_type, _access_check>(uint32_t method_idx,  \
+                                                 mirror::Object* this_object, \
+                                                 mirror::ArtMethod* caller_method, \
+                                                 Thread* self, mirror::ArtMethod** sp)
 
 #define EXPLICIT_ART_INVOKE_COMMON_TYPED_TEMPLATE_DECL(_type) \
   EXPLICIT_ART_INVOKE_COMMON_TEMPLATE_DECL(_type, false); \
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 0697a6515b..28cc510806 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -669,7 +669,7 @@ class MarkStackTask : public Task {
     MarkSweep* mark_sweep = chunk_task_->mark_sweep_;
     mark_sweep->ScanObjectVisit(obj,
         [mark_sweep, this](Object* /* obj */, Object* ref, const MemberOffset& /* offset */,
-            bool /* is_static */) ALWAYS_INLINE {
+            bool /* is_static */) ALWAYS_INLINE_LAMBDA {
       if (ref != nullptr && mark_sweep->MarkObjectParallel(ref)) {
         if (kUseFinger) {
           android_memory_barrier();
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index b75b493d49..923560e260 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -558,7 +558,7 @@ void SemiSpace::ScanObject(Object* obj) {
   DCHECK(obj != NULL);
   DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
   MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
-     bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+     bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
     mirror::Object* new_address = MarkObject(ref);
     if (new_address != ref) {
       DCHECK(new_address != nullptr);
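The two GC hunks above are the consumers of the ALWAYS_INLINE_LAMBDA macro introduced in macros.h: clang rejects __attribute__((always_inline)) on a lambda, so the macro expands to nothing under clang and to ALWAYS_INLINE under GCC. A reduced sketch of the usage pattern, with a hypothetical Visit function and visitor:

    #ifdef __clang__
    #define ALWAYS_INLINE_LAMBDA
    #else
    #define ALWAYS_INLINE_LAMBDA __attribute__((always_inline))
    #endif

    template <typename Visitor>
    void Visit(int* begin, int* end, const Visitor& visitor) {
      for (int* it = begin; it != end; ++it) {
        visitor(*it);  // under GCC the attribute forces this call to inline
      }
    }

    int Sum(int* begin, int* end) {
      int total = 0;
      Visit(begin, end, [&total](int value) ALWAYS_INLINE_LAMBDA {
        total += value;
      });
      return total;
    }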
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 02c90123fc..f574a0f2cf 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -90,8 +90,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
   ScopedObjectAccessUnchecked soa(self);
   if (method->IsStatic()) {
     if (shorty == "L") {
-      typedef jobject (fnptr)(JNIEnv*, jclass);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jobject (fntype)(JNIEnv*, jclass);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       jobject jresult;
@@ -101,36 +101,36 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       }
       result->SetL(soa.Decode<Object*>(jresult));
     } else if (shorty == "V") {
-      typedef void (fnptr)(JNIEnv*, jclass);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef void (fntype)(JNIEnv*, jclass);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get());
     } else if (shorty == "Z") {
-      typedef jboolean (fnptr)(JNIEnv*, jclass);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jboolean (fntype)(JNIEnv*, jclass);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get()));
     } else if (shorty == "BI") {
-      typedef jbyte (fnptr)(JNIEnv*, jclass, jint);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jbyte (fntype)(JNIEnv*, jclass, jint);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetB(fn(soa.Env(), klass.get(), args[0]));
     } else if (shorty == "II") {
-      typedef jint (fnptr)(JNIEnv*, jclass, jint);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jint (fntype)(JNIEnv*, jclass, jint);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), args[0]));
     } else if (shorty == "LL") {
-      typedef jobject (fnptr)(JNIEnv*, jclass, jobject);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jobject (fntype)(JNIEnv*, jclass, jobject);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -142,15 +142,15 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       }
       result->SetL(soa.Decode<Object*>(jresult));
     } else if (shorty == "IIZ") {
-      typedef jint (fnptr)(JNIEnv*, jclass, jint, jboolean);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), args[0], args[1]));
     } else if (shorty == "ILI") {
-      typedef jint (fnptr)(JNIEnv*, jclass, jobject, jint);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jint (fntype)(JNIEnv*, jclass, jobject, jint);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -158,22 +158,22 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       ScopedThreadStateChange tsc(self, kNative);
       result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1]));
     } else if (shorty == "SIZ") {
-      typedef jshort (fnptr)(JNIEnv*, jclass, jint, jboolean);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       result->SetS(fn(soa.Env(), klass.get(), args[0], args[1]));
     } else if (shorty == "VIZ") {
-      typedef void (fnptr)(JNIEnv*, jclass, jint, jboolean);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef void (fntype)(JNIEnv*, jclass, jint, jboolean);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], args[1]);
     } else if (shorty == "ZLL") {
-      typedef jboolean (fnptr)(JNIEnv*, jclass, jobject, jobject);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -183,8 +183,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get()));
     } else if (shorty == "ZILL") {
-      typedef jboolean (fnptr)(JNIEnv*, jclass, jint, jobject, jobject);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -194,8 +194,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       ScopedThreadStateChange tsc(self, kNative);
       result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get()));
     } else if (shorty == "VILII") {
-      typedef void (fnptr)(JNIEnv*, jclass, jint, jobject, jint, jint);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg1(soa.Env(),
@@ -203,8 +203,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]);
     } else if (shorty == "VLILII") {
-      typedef void (fnptr)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jclass> klass(soa.Env(),
                                    soa.AddLocalReference<jclass>(method->GetDeclaringClass()));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -219,8 +219,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
     }
   } else {
     if (shorty == "L") {
-      typedef jobject (fnptr)(JNIEnv*, jobject);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jobject (fntype)(JNIEnv*, jobject);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       jobject jresult;
@@ -230,15 +230,15 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       }
       result->SetL(soa.Decode<Object*>(jresult));
     } else if (shorty == "V") {
-      typedef void (fnptr)(JNIEnv*, jobject);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef void (fntype)(JNIEnv*, jobject);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedThreadStateChange tsc(self, kNative);
       fn(soa.Env(), rcvr.get());
     } else if (shorty == "LL") {
-      typedef jobject (fnptr)(JNIEnv*, jobject, jobject);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jobject (fntype)(JNIEnv*, jobject, jobject);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedLocalRef<jobject> arg0(soa.Env(),
@@ -251,8 +251,8 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s
       result->SetL(soa.Decode<Object*>(jresult));
       ScopedThreadStateChange tsc(self, kNative);
     } else if (shorty == "III") {
-      typedef jint (fnptr)(JNIEnv*, jobject, jint, jint);
-      const fnptr* fn = reinterpret_cast<const fnptr*>(method->GetNativeMethod());
+      typedef jint (fntype)(JNIEnv*, jobject, jint, jint);
+      fntype* const fn = reinterpret_cast<fntype*>(const_cast<void*>(method->GetNativeMethod()));
       ScopedLocalRef<jobject> rcvr(soa.Env(),
                                    soa.AddLocalReference<jobject>(receiver));
       ScopedThreadStateChange tsc(self, kNative);
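All of the interpreter.cc churn above is one mechanical fix. `fnptr` named a function *type*, so `const fnptr*` declared a pointer to a const-qualified function, which is ill-formed and rejected by clang where GCC silently ignored the qualifier. The rewrite moves the constness onto the pointer itself (`fntype* const`) and const_casts the entry point, since GetNativeMethod() returns a pointer to const. A standalone sketch; the `native_method` parameter stands in for that return value:

    #include <jni.h>

    typedef jobject (fntype)(JNIEnv*, jclass);  // fntype is a function type

    jobject CallStatic(JNIEnv* env, jclass klass, const void* native_method) {
      // const fntype* fn = ...;   // ill-formed: const-qualified function type
      fntype* const fn =           // OK: const pointer to function
          reinterpret_cast<fntype*>(const_cast<void*>(native_method));
      return fn(env, klass);
    }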
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 3b8d50bc25..a9b8909b2b 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -557,11 +557,11 @@ static inline bool IsBackwardBranch(int32_t branch_offset) {
 }
 
 // Explicitly instantiate all DoInvoke functions.
-#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check)                        \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                         \
-  static bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
-                                                    const Instruction* inst, uint16_t inst_data, \
-                                                    JValue* result)
+#define EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, _is_range, _do_check)                 \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                  \
+  bool DoInvoke<_type, _is_range, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
+                                             const Instruction* inst, uint16_t inst_data, \
+                                             JValue* result)
 
 #define EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(_type) \
   EXPLICIT_DO_INVOKE_TEMPLATE_DECL(_type, false, false); \
@@ -578,10 +578,10 @@ EXPLICIT_DO_INVOKE_ALL_TEMPLATE_DECL(kInterface);  // invoke-interface/range.
 #undef EXPLICIT_DO_INVOKE_TEMPLATE_DECL
 
 // Explicitly instantiate all DoFieldGet functions.
-#define EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
-  static bool DoFieldGet<_find_type, _field_type, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
-                                                             const Instruction* inst, uint16_t inst_data)
+#define EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+  bool DoFieldGet<_find_type, _field_type, _do_check>(Thread* self, ShadowFrame& shadow_frame, \
+                                                      const Instruction* inst, uint16_t inst_data)
 
 #define EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(_find_type, _field_type) \
   EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL(_find_type, _field_type, false); \
@@ -609,10 +609,10 @@ EXPLICIT_DO_FIELD_GET_ALL_TEMPLATE_DECL(StaticObjectRead, Primitive::kPrimNot);
 #undef EXPLICIT_DO_FIELD_GET_TEMPLATE_DECL
 
 // Explicitly instantiate all DoFieldPut functions.
-#define EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
-  static bool DoFieldPut<_find_type, _field_type, _do_check>(Thread* self, const ShadowFrame& shadow_frame, \
-                                                             const Instruction* inst, uint16_t inst_data)
+#define EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, _do_check) \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE \
+  bool DoFieldPut<_find_type, _field_type, _do_check>(Thread* self, const ShadowFrame& shadow_frame, \
+                                                      const Instruction* inst, uint16_t inst_data)
 
 #define EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(_find_type, _field_type) \
   EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL(_find_type, _field_type, false); \
@@ -640,21 +640,21 @@ EXPLICIT_DO_FIELD_PUT_ALL_TEMPLATE_DECL(StaticObjectWrite, Primitive::kPrimNot);
 #undef EXPLICIT_DO_FIELD_PUT_TEMPLATE_DECL
 
 // Explicitly instantiate all DoInvokeVirtualQuick functions.
-#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range)                      \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                   \
-  static bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
-                                              const Instruction* inst, uint16_t inst_data, \
-                                              JValue* result)
+#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range)               \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE            \
+  bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
+                                       const Instruction* inst, uint16_t inst_data, \
+                                       JValue* result)
 
 EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(false);  // invoke-virtual-quick.
 EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(true);   // invoke-virtual-quick-range.
 #undef EXPLICIT_INSTANTIATION_DO_INVOKE_VIRTUAL_QUICK
 
 // Explicitly instantiate all DoIGetQuick functions.
-#define EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(_field_type)                                  \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                       \
-  static bool DoIGetQuick<_field_type>(ShadowFrame& shadow_frame, const Instruction* inst, \
-                                       uint16_t inst_data)
+#define EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(_field_type)                           \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                \
+  bool DoIGetQuick<_field_type>(ShadowFrame& shadow_frame, const Instruction* inst, \
+                                uint16_t inst_data)
 
 EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimInt);   // iget-quick.
 EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimLong);  // iget-wide-quick.
@@ -662,10 +662,10 @@ EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL(Primitive::kPrimNot);    // iget-object-qui
 #undef EXPLICIT_DO_IGET_QUICK_TEMPLATE_DECL
 
 // Explicitly instantiate all DoIPutQuick functions.
-#define EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type)                                        \
-  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                             \
-  static bool DoIPutQuick<_field_type>(const ShadowFrame& shadow_frame, const Instruction* inst, \
-                                       uint16_t inst_data)
+#define EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type)                                 \
+  template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE                      \
+  bool DoIPutQuick<_field_type>(const ShadowFrame& shadow_frame, const Instruction* inst, \
+                                uint16_t inst_data)
 
 EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(Primitive::kPrimInt);   // iget-quick.
 EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(Primitive::kPrimLong);  // iget-wide-quick.
diff --git a/runtime/jdwp/object_registry.cc b/runtime/jdwp/object_registry.cc
index be2219c311..369eddd428 100644
--- a/runtime/jdwp/object_registry.cc
+++ b/runtime/jdwp/object_registry.cc
@@ -118,6 +118,9 @@ mirror::Object* ObjectRegistry::InternalGet(JDWP::ObjectId id) {
 }
 
 jobject ObjectRegistry::GetJObject(JDWP::ObjectId id) {
+  if (id == 0) {
+    return NULL;
+  }
   Thread* self = Thread::Current();
   MutexLock mu(self, lock_);
   id_iterator it = id_to_entry_.find(id);
@@ -130,9 +133,7 @@ void ObjectRegistry::DisableCollection(JDWP::ObjectId id) {
   Thread* self = Thread::Current();
   MutexLock mu(self, lock_);
   id_iterator it = id_to_entry_.find(id);
-  if (it == id_to_entry_.end()) {
-    return;
-  }
+  CHECK(it != id_to_entry_.end());
   Promote(*(it->second));
 }
 
@@ -140,9 +141,7 @@ void ObjectRegistry::EnableCollection(JDWP::ObjectId id) {
   Thread* self = Thread::Current();
   MutexLock mu(self, lock_);
   id_iterator it = id_to_entry_.find(id);
-  if (it == id_to_entry_.end()) {
-    return;
-  }
+  CHECK(it != id_to_entry_.end());
   Demote(*(it->second));
 }
 
@@ -172,9 +171,7 @@ bool ObjectRegistry::IsCollected(JDWP::ObjectId id) {
   Thread* self = Thread::Current();
   MutexLock mu(self, lock_);
   id_iterator it = id_to_entry_.find(id);
-  if (it == id_to_entry_.end()) {
-    return true;  // TODO: can we report that this was an invalid id?
-  }
+  CHECK(it != id_to_entry_.end());
   ObjectRegistryEntry& entry = *(it->second);
   if (entry.jni_reference_type == JNIWeakGlobalRefType) {
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index 6b5747b2f1..dffda966aa 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -212,6 +212,8 @@ class MethodVerifier {
   static void Init() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   static void Shutdown();
 
+  static void AddRejectedClass(ClassReference ref)
+      LOCKS_EXCLUDED(rejected_classes_lock_);
   static bool IsClassRejected(ClassReference ref)
       LOCKS_EXCLUDED(rejected_classes_lock_);
 
@@ -662,9 +664,6 @@ class MethodVerifier {
   static ReaderWriterMutex* rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
   static RejectedClassesTable* rejected_classes_ GUARDED_BY(rejected_classes_lock_);
 
-  static void AddRejectedClass(ClassReference ref)
-      LOCKS_EXCLUDED(rejected_classes_lock_);
-
   RegTypeCache reg_types_;
 
   PcToRegisterLineTable reg_table_;