author     Ian Rogers <irogers@google.com>   2014-04-28 16:47:08 -0700
committer  Ian Rogers <irogers@google.com>   2014-04-29 14:36:28 -0700
commit     b0fa5dc7769c1e054032f39de0a3f6d6dd06f8cf
tree       839d13ebfa7170967dd9b4abd434b7abda53da99
parent     948740c1938860df055ddc801f20fd1707331e38
Force inlining on trivial accessors.
Make volatility for GetFieldObject a template parameter. Move some
trivial mirror::String routines to a -inl.h.

Bug: 14285442
Change-Id: Ie23b11d4f18cb15a62c3bbb42837a8aaf6b68f92
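To make the mechanics concrete, here is a minimal sketch of the accessor shape this change moves to. It is an illustration only, with invented names and simplified signatures (no MemberOffset, verify flags, or transaction handling): volatility becomes a compile-time template parameter, with a ...Volatile wrapper for the rare volatile case, so the common non-volatile accessor is trivial enough to force-inline.

    #include <cstddef>
    #include <cstdint>

    // Sketch only, not ART's real Object; the point is the shape of the API.
    class Object {
     public:
      // Volatility is a template parameter, so each instantiation folds the
      // branch away at compile time instead of testing a bool at runtime.
      template <bool kIsVolatile = false>
      int32_t GetField32(size_t offset) const {
        const int32_t* addr = reinterpret_cast<const int32_t*>(raw_ + offset);
        if (kIsVolatile) {
          return __atomic_load_n(addr, __ATOMIC_SEQ_CST);  // GCC/Clang builtin
        }
        return *addr;
      }

      // Convenience entry point matching the GetField32Volatile calls below.
      int32_t GetField32Volatile(size_t offset) const {
        return GetField32<true>(offset);
      }

     private:
      uint8_t raw_[128];  // stand-in for the object's field storage
    };

Call sites that previously passed false simply drop the argument, which accounts for most of the churn in this diff.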
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/check_jni.cc                              1
-rw-r--r--  runtime/class_linker.cc                           4
-rw-r--r--  runtime/class_linker.h                            5
-rw-r--r--  runtime/debugger.cc                               1
-rw-r--r--  runtime/dex_file.cc                               1
-rw-r--r--  runtime/dex_file.h                               11
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc          8
-rw-r--r--  runtime/gc/allocator_type.h                      36
-rw-r--r--  runtime/gc/collector/mark_sweep.cc                4
-rw-r--r--  runtime/gc/collector/mark_sweep.h                 2
-rw-r--r--  runtime/gc/collector/semi_space.cc                4
-rw-r--r--  runtime/gc/collector/semi_space.h                 5
-rw-r--r--  runtime/gc/heap.cc                               13
-rw-r--r--  runtime/gc/heap.h                                11
-rw-r--r--  runtime/gc/space/space-inl.h                      1
-rw-r--r--  runtime/interpreter/interpreter.cc                5
-rw-r--r--  runtime/interpreter/interpreter_common.h         18
-rw-r--r--  runtime/interpreter/interpreter_switch_impl.cc   28
-rw-r--r--  runtime/jni_internal.cc                           1
-rw-r--r--  runtime/mirror/array-inl.h                       50
-rw-r--r--  runtime/mirror/array.h                           63
-rw-r--r--  runtime/mirror/art_field-inl.h                   48
-rw-r--r--  runtime/mirror/art_field.cc                       2
-rw-r--r--  runtime/mirror/art_field.h                        8
-rw-r--r--  runtime/mirror/art_method-inl.h                  19
-rw-r--r--  runtime/mirror/art_method.cc                      8
-rw-r--r--  runtime/mirror/art_method.h                     102
-rw-r--r--  runtime/mirror/class-inl.h                       98
-rw-r--r--  runtime/mirror/class.cc                          16
-rw-r--r--  runtime/mirror/class.h                           85
-rw-r--r--  runtime/mirror/class_loader.h                     3
-rw-r--r--  runtime/mirror/dex_cache.cc                      12
-rw-r--r--  runtime/mirror/dex_cache.h                       43
-rw-r--r--  runtime/mirror/iftable.h                          1
-rw-r--r--  runtime/mirror/object-inl.h                     139
-rw-r--r--  runtime/mirror/object.cc                          4
-rw-r--r--  runtime/mirror/object.h                         100
-rw-r--r--  runtime/mirror/object_array-inl.h                16
-rw-r--r--  runtime/mirror/object_array.h                    14
-rw-r--r--  runtime/mirror/object_reference.h                 2
-rw-r--r--  runtime/mirror/object_test.cc                     1
-rw-r--r--  runtime/mirror/proxy.h                            8
-rw-r--r--  runtime/mirror/reference-inl.h                    4
-rw-r--r--  runtime/mirror/reference.h                       14
-rw-r--r--  runtime/mirror/stack_trace_element.cc             8
-rw-r--r--  runtime/mirror/stack_trace_element.h             12
-rw-r--r--  runtime/mirror/string-inl.h                      67
-rw-r--r--  runtime/mirror/string.cc                         55
-rw-r--r--  runtime/mirror/string.h                          18
-rw-r--r--  runtime/mirror/throwable.cc                      18
-rw-r--r--  runtime/mirror/throwable.h                       16
-rw-r--r--  runtime/monitor.h                                10
-rw-r--r--  runtime/monitor_pool.h                            3
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc           1
-rw-r--r--  runtime/native/java_lang_String.cc                2
-rw-r--r--  runtime/native/sun_misc_Unsafe.cc                30
-rw-r--r--  runtime/oat_file.h                                2
-rw-r--r--  runtime/reference_table.cc                        2
-rw-r--r--  runtime/stack_indirect_reference_table.h          1
-rw-r--r--  runtime/thread.cc                                 3
-rw-r--r--  runtime/transaction.cc                           26
61 files changed, 740 insertions, 553 deletions
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index b52941b258..3df050e74f 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -29,6 +29,7 @@
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
#include "runtime.h"
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 338133c22a..583e5e5c92 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -4053,8 +4053,8 @@ bool ClassLinker::LinkFields(const SirtRef<mirror::Class>& klass, bool is_static
LOG(INFO) << "LinkFields: " << (is_static ? "static" : "instance")
<< " class=" << PrettyClass(klass.get())
<< " field=" << PrettyField(field)
- << " offset=" << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()),
- false);
+ << " offset="
+ << field->GetField32(MemberOffset(mirror::ArtField::OffsetOffset()));
}
FieldHelper fh(field);
Primitive::Type type = fh.GetTypeAsPrimitiveType();
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 9771318d49..a23add09a8 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -251,7 +251,7 @@ class ClassLinker {
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool IsDexFileRegistered(const DexFile& dex_file) const
- LOCKS_EXCLUDED(dex_lock_);
+ LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void FixupDexCaches(mirror::ArtMethod* resolution_method) const
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -462,7 +462,8 @@ class ClassLinker {
void RegisterDexFileLocked(const DexFile& dex_file, const SirtRef<mirror::DexCache>& dex_cache)
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- bool IsDexFileRegisteredLocked(const DexFile& dex_file) const SHARED_LOCKS_REQUIRED(dex_lock_);
+ bool IsDexFileRegisteredLocked(const DexFile& dex_file) const
+ SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
bool InitializeClass(const SirtRef<mirror::Class>& klass, bool can_run_clinit,
bool can_init_parents)
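A note on the annotations being tightened here: SHARED_LOCKS_REQUIRED, EXCLUSIVE_LOCKS_REQUIRED, and LOCKS_EXCLUDED are ART macros over Clang's -Wthread-safety attributes. The definitions below are an approximation (the real ones live in runtime/base/macros.h); with them, the compiler statically checks that, for example, IsDexFileRegistered is only called with the mutator lock held.

    // Approximate definitions (see art/runtime/base/macros.h for the real ones).
    // Under Clang they expand to thread-safety attributes; elsewhere to nothing.
    #if defined(__clang__)
    #define SHARED_LOCKS_REQUIRED(...)    __attribute__((shared_locks_required(__VA_ARGS__)))
    #define EXCLUSIVE_LOCKS_REQUIRED(...) __attribute__((exclusive_locks_required(__VA_ARGS__)))
    #define LOCKS_EXCLUDED(...)           __attribute__((locks_excluded(__VA_ARGS__)))
    #else
    #define SHARED_LOCKS_REQUIRED(...)
    #define EXCLUSIVE_LOCKS_REQUIRED(...)
    #define LOCKS_EXCLUDED(...)
    #endif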
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 07d3a2a5b8..1efd2e0db7 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -36,6 +36,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
#include "quick/inline_method_analyser.h"
diff --git a/runtime/dex_file.cc b/runtime/dex_file.cc
index e5bc19c0cc..6adfc1fdc1 100644
--- a/runtime/dex_file.cc
+++ b/runtime/dex_file.cc
@@ -293,7 +293,6 @@ DexFile::DexFile(const byte* base, size_t size,
location_(location),
location_checksum_(location_checksum),
mem_map_(mem_map),
- modification_lock("DEX modification lock"),
header_(reinterpret_cast<const Header*>(base)),
string_ids_(reinterpret_cast<const StringId*>(base + header_->string_ids_off_)),
type_ids_(reinterpret_cast<const TypeId*>(base + header_->type_ids_off_)),
diff --git a/runtime/dex_file.h b/runtime/dex_file.h
index 70baeed366..c782ab120c 100644
--- a/runtime/dex_file.h
+++ b/runtime/dex_file.h
@@ -21,7 +21,7 @@
#include <vector>
#include "base/logging.h"
-#include "base/mutex.h"
+#include "base/mutex.h" // For Locks::mutator_lock_.
#include "globals.h"
#include "invoke_type.h"
#include "jni.h"
@@ -386,10 +386,6 @@ class DexFile {
return *header_;
}
- Mutex& GetModificationLock() {
- return modification_lock;
- }
-
// Decode the dex magic version
uint32_t GetVersion() const;
@@ -877,11 +873,6 @@ class DexFile {
// Manages the underlying memory allocation.
UniquePtr<MemMap> mem_map_;
- // The DEX-to-DEX compiler uses this lock to ensure thread safety when
- // enabling write access to a read-only DEX file.
- // TODO: move to Locks::dex_file_modification_lock.
- Mutex modification_lock;
-
// Points to the header section.
const Header* const header_;
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index d744deeb3e..7cddaf46cb 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -101,7 +101,7 @@ class ModUnionScanImageRootVisitor {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(root != NULL);
ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_);
- root->VisitReferences<kMovingClasses>(ref_visitor);
+ root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
}
private:
@@ -153,7 +153,7 @@ class ModUnionReferenceVisitor {
// We don't have an early exit since we use the visitor pattern, an early
// exit should significantly speed this up.
AddToReferenceArrayVisitor visitor(mod_union_table_, references_);
- obj->VisitReferences<kMovingClasses>(visitor);
+ obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}
private:
ModUnionTableReferenceCache* const mod_union_table_;
@@ -171,7 +171,7 @@ class CheckReferenceVisitor {
// Extra parameters are required since we use this same visitor signature for checking objects.
void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mod_union_table_->ShouldAddReference(ref) &&
references_.find(ref) == references_.end()) {
Heap* heap = mod_union_table_->GetHeap();
@@ -205,7 +205,7 @@ class ModUnionCheckReferences {
void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
Locks::heap_bitmap_lock_->AssertSharedHeld(Thread::Current());
CheckReferenceVisitor visitor(mod_union_table_, references_);
- obj->VisitReferences<kMovingClasses>(visitor);
+ obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}
private:
diff --git a/runtime/gc/allocator_type.h b/runtime/gc/allocator_type.h
new file mode 100644
index 0000000000..938b0f1d2a
--- /dev/null
+++ b/runtime/gc/allocator_type.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
+#define ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
+
+namespace art {
+namespace gc {
+
+// Different types of allocators.
+enum AllocatorType {
+ kAllocatorTypeBumpPointer, // Use BumpPointer allocator, has entrypoints.
+ kAllocatorTypeTLAB, // Use TLAB allocator, has entrypoints.
+ kAllocatorTypeRosAlloc, // Use RosAlloc allocator, has entrypoints.
+ kAllocatorTypeDlMalloc, // Use dlmalloc allocator, has entrypoints.
+ kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
+ kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
+};
+
+} // namespace gc
+} // namespace art
+
+#endif // ART_RUNTIME_GC_ALLOCATOR_TYPE_H_
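The point of the new header is include-graph hygiene: code that only needs to name the enum can include this small file instead of all of gc/heap.h (mirror/array.h below swaps its gc/heap.h, runtime.h, and thread.h includes for it). A hypothetical client, for illustration:

    // hypothetical_client.h -- illustration only, not a file in this change.
    #include <cstddef>
    #include "gc/allocator_type.h"  // small, self-contained enum header

    namespace art {
    namespace mirror {
    class Object;  // a forward declaration suffices for pointer parameters
    }  // namespace mirror

    // Naming gc::AllocatorType in a signature no longer drags in gc/heap.h.
    mirror::Object* AllocateWith(gc::AllocatorType allocator_type, size_t bytes);

    }  // namespace art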
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 9cd740e9a6..e225d5a569 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -596,7 +596,7 @@ class MarkStackTask : public Task {
void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
if (kUseFinger) {
android_memory_barrier();
@@ -1190,7 +1190,7 @@ class MarkObjectVisitor {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
Locks::heap_bitmap_lock_->AssertExclusiveHeld(Thread::Current());
}
- mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset, false));
+ mark_sweep_->MarkObject(obj->GetFieldObject<mirror::Object>(offset));
}
private:
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index 0c5a0da1fc..bfc70d187d 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -22,7 +22,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
-#include "gc/accounting/space_bitmap.h"
+#include "gc/accounting/heap_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 65bbbd2bee..0b2601992d 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -352,7 +352,7 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (from_space_->HasAddress(ref)) {
Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
LOG(FATAL) << ref << " found in from space";
@@ -365,7 +365,7 @@ class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
- obj->VisitReferences<kMovingClasses>(visitor);
+ obj->VisitReferences<kMovingClasses>(visitor, VoidFunctor());
}
class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index d468561b1b..9b6df16ec5 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -21,7 +21,7 @@
#include "base/macros.h"
#include "base/mutex.h"
#include "garbage_collector.h"
-#include "gc/accounting/space_bitmap.h"
+#include "gc/accounting/heap_bitmap.h"
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
@@ -191,7 +191,8 @@ class SemiSpace : public GarbageCollector {
void ProcessMarkStack()
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const;
+ inline mirror::Object* GetForwardingAddressInFromSpace(mirror::Object* obj) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Revoke all the thread-local buffers.
void RevokeAllThreadLocalBuffers();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b57fc69f42..4d074f1f4b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1136,8 +1136,7 @@ void Heap::VerifyObjectBody(mirror::Object* obj) {
return;
}
CHECK(IsAligned<kObjectAlignment>(obj)) << "Object isn't aligned: " << obj;
- mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(
- mirror::Object::ClassOffset(), false);
+ mirror::Class* c = obj->GetFieldObject<mirror::Class, kVerifyNone>(mirror::Object::ClassOffset());
CHECK(c != nullptr) << "Null class in object " << obj;
CHECK(IsAligned<kObjectAlignment>(c)) << "Class " << c << " not aligned in object " << obj;
CHECK(VerifyClassClass(c));
@@ -1378,13 +1377,13 @@ class ReferringObjectsFinder {
// TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for
// annotalysis on visitors.
void operator()(mirror::Object* o) const NO_THREAD_SAFETY_ANALYSIS {
- o->VisitReferences<true>(*this);
+ o->VisitReferences<true>(*this, VoidFunctor());
}
// For Object::VisitReferences.
void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref == object_ && (max_count_ == 0 || referring_objects_.size() < max_count_)) {
referring_objects_.push_back(obj);
}
@@ -1990,7 +1989,7 @@ class VerifyReferenceVisitor {
void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- this->operator()(obj, obj->GetFieldObject<mirror::Object>(offset, false), offset);
+ this->operator()(obj, obj->GetFieldObject<mirror::Object>(offset), offset);
}
// TODO: Fix the no thread safety analysis.
@@ -2182,7 +2181,7 @@ class VerifyReferenceCardVisitor {
// annotalysis on visitors.
void operator()(mirror::Object* obj, MemberOffset offset, bool is_static) const
NO_THREAD_SAFETY_ANALYSIS {
- mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
+ mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
// Filter out class references since changing an object's class does not mark the card as dirty.
// Also handles large objects, since the only reference they hold is a class reference.
if (ref != nullptr && !ref->IsClass()) {
@@ -2252,7 +2251,7 @@ class VerifyLiveStackReferences {
void operator()(mirror::Object* obj) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
VerifyReferenceCardVisitor visitor(heap_, const_cast<bool*>(&failed_));
- obj->VisitReferences<true>(visitor);
+ obj->VisitReferences<true>(visitor, VoidFunctor());
}
bool Failed() const {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 631397b4c0..c631372d15 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -21,6 +21,7 @@
#include <string>
#include <vector>
+#include "allocator_type.h"
#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
@@ -90,16 +91,6 @@ class AgeCardVisitor {
}
};
-// Different types of allocators.
-enum AllocatorType {
- kAllocatorTypeBumpPointer, // Use BumpPointer allocator, has entrypoints.
- kAllocatorTypeTLAB, // Use TLAB allocator, has entrypoints.
- kAllocatorTypeRosAlloc, // Use RosAlloc allocator, has entrypoints.
- kAllocatorTypeDlMalloc, // Use dlmalloc allocator, has entrypoints.
- kAllocatorTypeNonMoving, // Special allocator for non moving objects, doesn't have entrypoints.
- kAllocatorTypeLOS, // Large object space, also doesn't have entrypoints.
-};
-
// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace
static constexpr bool kUseRosAlloc = true;
diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h
index 3a715ab438..3ea68cf9ca 100644
--- a/runtime/gc/space/space-inl.h
+++ b/runtime/gc/space/space-inl.h
@@ -19,6 +19,7 @@
#include "space.h"
+#include "base/casts.h"
#include "dlmalloc_space.h"
#include "image_space.h"
#include "large_object_space.h"
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index e3f3cd0abe..3c6c225c65 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -15,8 +15,11 @@
*/
#include "interpreter_common.h"
+
#include <limits>
+#include "mirror/string-inl.h"
+
namespace art {
namespace interpreter {
@@ -85,7 +88,7 @@ static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
Object* obj = reinterpret_cast<Object*>(args[0]);
jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
Object* newValue = reinterpret_cast<Object*>(args[3]);
- obj->SetFieldObject<true>(MemberOffset(offset), newValue, false);
+ obj->SetFieldObject<true>(MemberOffset(offset), newValue);
} else if (name == "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)") {
mirror::Class* component = reinterpret_cast<Object*>(args[0])->AsClass();
Primitive::Type primitive_type = component->GetPrimitiveType();
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index cc1fa0c94f..ce3346e37d 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -228,17 +228,17 @@ static inline bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* ins
instrumentation->FieldReadEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
shadow_frame.GetDexPC(), f);
}
- const bool is_volatile = false; // iget-x-quick only on non volatile fields.
+ // Note: iget-x-quick instructions are only for non-volatile fields.
const uint32_t vregA = inst->VRegA_22c(inst_data);
switch (field_type) {
case Primitive::kPrimInt:
- shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset, is_volatile)));
+ shadow_frame.SetVReg(vregA, static_cast<int32_t>(obj->GetField32(field_offset)));
break;
case Primitive::kPrimLong:
- shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset, is_volatile)));
+ shadow_frame.SetVRegLong(vregA, static_cast<int64_t>(obj->GetField64(field_offset)));
break;
case Primitive::kPrimNot:
- shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object>(field_offset, is_volatile));
+ shadow_frame.SetVRegReference(vregA, obj->GetFieldObject<mirror::Object>(field_offset));
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
@@ -382,18 +382,16 @@ static inline bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instructio
instrumentation->FieldWriteEvent(Thread::Current(), obj, shadow_frame.GetMethod(),
shadow_frame.GetDexPC(), f, field_value);
}
- const bool is_volatile = false; // iput-x-quick only on non volatile fields.
+ // Note: iput-x-quick instructions are only for non-volatile fields.
switch (field_type) {
case Primitive::kPrimInt:
- obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA), is_volatile);
+ obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
break;
case Primitive::kPrimLong:
- obj->SetField64<transaction_active>(field_offset, shadow_frame.GetVRegLong(vregA),
- is_volatile);
+ obj->SetField64<transaction_active>(field_offset, shadow_frame.GetVRegLong(vregA));
break;
case Primitive::kPrimNot:
- obj->SetFieldObject<transaction_active>(field_offset, shadow_frame.GetVRegReference(vregA),
- is_volatile);
+ obj->SetFieldObject<transaction_active>(field_offset, shadow_frame.GetVRegReference(vregA));
break;
default:
LOG(FATAL) << "Unreachable: " << field_type;
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index abee1dbec6..77e2a82c34 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -835,7 +835,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -853,7 +853,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
ByteArray* array = a->AsByteArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -871,7 +871,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
CharArray* array = a->AsCharArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -889,7 +889,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
ShortArray* array = a->AsShortArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -907,7 +907,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
IntArray* array = a->AsIntArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -925,7 +925,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
LongArray* array = a->AsLongArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -943,7 +943,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
}
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index));
inst = inst->Next_2xx();
} else {
@@ -962,7 +962,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
BooleanArray* array = a->AsBooleanArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
@@ -981,7 +981,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
ByteArray* array = a->AsByteArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
@@ -1000,7 +1000,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
CharArray* array = a->AsCharArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
@@ -1019,7 +1019,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
ShortArray* array = a->AsShortArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
@@ -1038,7 +1038,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
IntArray* array = a->AsIntArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
@@ -1057,7 +1057,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
LongArray* array = a->AsLongArray();
- if (LIKELY(array->CheckIsValidIndex(index))) {
+ if (array->CheckIsValidIndex(index)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
@@ -1076,7 +1076,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data));
ObjectArray<Object>* array = a->AsObjectArray<Object>();
- if (LIKELY(array->CheckIsValidIndex(index) && array->CheckAssignable(val))) {
+ if (array->CheckIsValidIndex(index) && array->CheckAssignable(val)) {
array->SetWithoutChecks<transaction_active>(index, val);
inst = inst->Next_2xx();
} else {
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index fd9c40be13..e6a35d071f 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -37,6 +37,7 @@
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "object_utils.h"
#include "parsed_options.h"
diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h
index 7f974d0cf0..bc8d34815f 100644
--- a/runtime/mirror/array-inl.h
+++ b/runtime/mirror/array-inl.h
@@ -40,6 +40,16 @@ inline size_t Array::SizeOf() {
return header_size + data_size;
}
+template<VerifyObjectFlags kVerifyFlags>
+inline bool Array::CheckIsValidIndex(int32_t index) {
+ if (UNLIKELY(static_cast<uint32_t>(index) >=
+ static_cast<uint32_t>(GetLength<kVerifyFlags>()))) {
+ ThrowArrayIndexOutOfBoundsException(index);
+ return false;
+ }
+ return true;
+}
+
static inline size_t ComputeArraySize(Thread* self, Class* array_class, int32_t component_count,
size_t component_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -164,6 +174,46 @@ inline PrimitiveArray<T>* PrimitiveArray<T>::Alloc(Thread* self, size_t length)
return down_cast<PrimitiveArray<T>*>(raw_array);
}
+template<typename T>
+inline T PrimitiveArray<T>::Get(int32_t i) {
+ if (!CheckIsValidIndex(i)) {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ return T(0);
+ }
+ return GetWithoutChecks(i);
+}
+
+template<typename T>
+inline void PrimitiveArray<T>::Set(int32_t i, T value) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ Set<true>(i, value);
+ } else {
+ Set<false>(i, value);
+ }
+}
+
+template<typename T>
+template<bool kTransactionActive, bool kCheckTransaction>
+inline void PrimitiveArray<T>::Set(int32_t i, T value) {
+ if (CheckIsValidIndex(i)) {
+ SetWithoutChecks<kTransactionActive, kCheckTransaction>(i, value);
+ } else {
+ DCHECK(Thread::Current()->IsExceptionPending());
+ }
+}
+
+template<typename T>
+template<bool kTransactionActive, bool kCheckTransaction>
+inline void PrimitiveArray<T>::SetWithoutChecks(int32_t i, T value) {
+ if (kCheckTransaction) {
+ DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+ }
+ if (kTransactionActive) {
+ Runtime::Current()->RecordWriteArray(this, i, GetWithoutChecks(i));
+ }
+ DCHECK(CheckIsValidIndex(i));
+ GetData()[i] = value;
+}
// Backward copy where elements are of aligned appropriately for T. Count is in T sized units.
// Copies are guaranteed not to tear when the sizeof T is less-than 64bit.
template<typename T>
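The non-template Set above is the usual runtime-to-compile-time dispatch: a single check of the live transaction state picks a template instantiation, and inside SetWithoutChecks the kTransactionActive test folds to a constant. The pattern in isolation (self-contained sketch with illustrative names, not ART code):

    #include <cstdint>
    #include <cstdio>

    static bool g_transaction_active = false;  // stand-in for Runtime state

    template <bool kTransactionActive>
    void StoreElement(int32_t* data, int32_t i, int32_t value) {
      if (kTransactionActive) {  // constant per instantiation, folded away
        std::printf("recording old value %d for rollback\n", data[i]);
      }
      data[i] = value;
    }

    void SetElement(int32_t* data, int32_t i, int32_t value) {
      // The only runtime branch: select the instantiation once.
      if (g_transaction_active) {
        StoreElement<true>(data, i, value);
      } else {
        StoreElement<false>(data, i, value);
      }
    }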
diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h
index 6bfd5c890f..92f0e67b17 100644
--- a/runtime/mirror/array.h
+++ b/runtime/mirror/array.h
@@ -17,13 +17,14 @@
#ifndef ART_RUNTIME_MIRROR_ARRAY_H_
#define ART_RUNTIME_MIRROR_ARRAY_H_
+#include "gc/allocator_type.h"
#include "object.h"
#include "object_callbacks.h"
-#include "gc/heap.h"
-#include "runtime.h"
-#include "thread.h"
namespace art {
+
+template<class T> class SirtRef;
+
namespace mirror {
class MANAGED Array : public Object {
@@ -45,14 +46,14 @@ class MANAGED Array : public Object {
size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
int32_t GetLength() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Array, length_));
}
void SetLength(int32_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK_GE(length, 0);
// We use non transactional version since we can't undo this write. We also disable checking
// since it would fail during a transaction.
- SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length, false);
+ SetField32<false, false, kVerifyNone>(OFFSET_OF_OBJECT_MEMBER(Array, length_), length);
}
static MemberOffset LengthOffset() {
@@ -84,14 +85,7 @@ class MANAGED Array : public Object {
// Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and
// returns false.
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CheckIsValidIndex(int32_t index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(static_cast<uint32_t>(index) >=
- static_cast<uint32_t>(GetLength<kVerifyFlags>()))) {
- ThrowArrayIndexOutOfBoundsException(index);
- return false;
- }
- return true;
- }
+ bool CheckIsValidIndex(int32_t index) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
protected:
void ThrowArrayStoreException(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -108,7 +102,7 @@ class MANAGED Array : public Object {
DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
};
-template<class T>
+template<typename T>
class MANAGED PrimitiveArray : public Array {
public:
typedef T ElementType;
@@ -116,59 +110,32 @@ class MANAGED PrimitiveArray : public Array {
static PrimitiveArray<T>* Alloc(Thread* self, size_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const T* GetData() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const T* GetData() const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reinterpret_cast<const T*>(GetRawData(sizeof(T), 0));
}
- T* GetData() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T* GetData() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return reinterpret_cast<T*>(GetRawData(sizeof(T), 0));
}
- T Get(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (UNLIKELY(!CheckIsValidIndex(i))) {
- DCHECK(Thread::Current()->IsExceptionPending());
- return T(0);
- }
- return GetWithoutChecks(i);
- }
+ T Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- T GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ T GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(CheckIsValidIndex(i));
return GetData()[i];
}
- void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- Set<true>(i, value);
- } else {
- Set<false>(i, value);
- }
- }
+ void Set(int32_t i, T value) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true>
- void Set(int32_t i, T value) NO_THREAD_SAFETY_ANALYSIS {
- if (LIKELY(CheckIsValidIndex(i))) {
- SetWithoutChecks<kTransactionActive, kCheckTransaction>(i, value);
- } else {
- DCHECK(Thread::Current()->IsExceptionPending());
- }
- }
+ void Set(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true>
- void SetWithoutChecks(int32_t i, T value) NO_THREAD_SAFETY_ANALYSIS {
- if (kCheckTransaction) {
- DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
- }
- if (kTransactionActive) {
- Runtime::Current()->RecordWriteArray(this, i, GetWithoutChecks(i));
- }
- DCHECK(CheckIsValidIndex(i));
- GetData()[i] = value;
- }
+ void SetWithoutChecks(int32_t i, T value) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
/*
* Works like memmove(), except we guarantee not to allow tearing of array values (ie using
diff --git a/runtime/mirror/art_field-inl.h b/runtime/mirror/art_field-inl.h
index 6253edd88c..ad24d0a551 100644
--- a/runtime/mirror/art_field-inl.h
+++ b/runtime/mirror/art_field-inl.h
@@ -30,69 +30,89 @@ namespace art {
namespace mirror {
inline Class* ArtField::GetDeclaringClass() {
- Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_), false);
+ Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_));
DCHECK(result != NULL);
DCHECK(result->IsLoaded() || result->IsErroneous());
return result;
}
inline void ArtField::SetDeclaringClass(Class *new_declaring_class) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_),
- new_declaring_class, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, declaring_class_), new_declaring_class);
}
inline uint32_t ArtField::GetAccessFlags() {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_));
}
inline MemberOffset ArtField::GetOffset() {
DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
- return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), false));
+ return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_)));
}
inline MemberOffset ArtField::GetOffsetDuringLinking() {
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
- return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), false));
+ return MemberOffset(GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_)));
}
inline uint32_t ArtField::Get32(Object* object) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
- return object->GetField32(GetOffset(), IsVolatile());
+ if (UNLIKELY(IsVolatile())) {
+ return object->GetField32Volatile(GetOffset());
+ }
+ return object->GetField32(GetOffset());
}
template<bool kTransactionActive>
inline void ArtField::Set32(Object* object, uint32_t new_value) {
- DCHECK(object != NULL) << PrettyField(this);
+ DCHECK(object != nullptr) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
- object->SetField32<kTransactionActive>(GetOffset(), new_value, IsVolatile());
+ if (UNLIKELY(IsVolatile())) {
+ object->SetField32Volatile<kTransactionActive>(GetOffset(), new_value);
+ } else {
+ object->SetField32<kTransactionActive>(GetOffset(), new_value);
+ }
}
inline uint64_t ArtField::Get64(Object* object) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
- return object->GetField64(GetOffset(), IsVolatile());
+ if (UNLIKELY(IsVolatile())) {
+ return object->GetField64Volatile(GetOffset());
+ }
+ return object->GetField64(GetOffset());
}
template<bool kTransactionActive>
inline void ArtField::Set64(Object* object, uint64_t new_value) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
- object->SetField64<kTransactionActive>(GetOffset(), new_value, IsVolatile());
+ if (UNLIKELY(IsVolatile())) {
+ object->SetField64Volatile<kTransactionActive>(GetOffset(), new_value);
+ } else {
+ object->SetField64<kTransactionActive>(GetOffset(), new_value);
+ }
}
inline Object* ArtField::GetObj(Object* object) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
- return object->GetFieldObject<Object>(GetOffset(), IsVolatile());
+ if (UNLIKELY(IsVolatile())) {
+ return object->GetFieldObjectVolatile<Object>(GetOffset());
+ }
+ return object->GetFieldObject<Object>(GetOffset());
}
template<bool kTransactionActive>
inline void ArtField::SetObj(Object* object, Object* new_value) {
DCHECK(object != NULL) << PrettyField(this);
DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted());
- object->SetFieldObject<kTransactionActive>(GetOffset(), new_value, IsVolatile());
+ if (UNLIKELY(IsVolatile())) {
+ object->SetFieldObjectVolatile<kTransactionActive>(GetOffset(), new_value);
+ } else {
+ object->SetFieldObject<kTransactionActive>(GetOffset(), new_value);
+ }
}
inline bool ArtField::GetBoolean(Object* object) {
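Note the pattern adopted throughout these ArtField accessors: whether a field is volatile is a runtime property here (it depends on the field's access flags), so each accessor branches once on IsVolatile() and then calls an Object accessor whose volatility is a compile-time constant. Schematically (a sketch reusing the hypothetical Object from the earlier example):

    // Reflection-path dispatch: one runtime test, then fully inlined access.
    uint32_t Get32Sketch(const Object* object, size_t offset, bool is_volatile) {
      if (is_volatile) {  // rare case
        return object->GetField32<true>(offset);
      }
      return object->GetField32<false>(offset);  // common case, inlined
    }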
diff --git a/runtime/mirror/art_field.cc b/runtime/mirror/art_field.cc
index 7b0b94cd78..8eb30f9949 100644
--- a/runtime/mirror/art_field.cc
+++ b/runtime/mirror/art_field.cc
@@ -60,7 +60,7 @@ void ArtField::SetOffset(MemberOffset num_bytes) {
}
}
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), num_bytes.Uint32Value(), false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, offset_), num_bytes.Uint32Value());
}
void ArtField::VisitRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/mirror/art_field.h b/runtime/mirror/art_field.h
index ba70cc64e3..029bd5ae92 100644
--- a/runtime/mirror/art_field.h
+++ b/runtime/mirror/art_field.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_MIRROR_ART_FIELD_H_
#define ART_RUNTIME_MIRROR_ART_FIELD_H_
+#include <jni.h>
+
#include "class.h"
#include "modifiers.h"
#include "object.h"
@@ -43,7 +45,7 @@ class MANAGED ArtField : public Object {
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_), new_access_flags, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, access_flags_), new_access_flags);
}
bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -59,12 +61,12 @@ class MANAGED ArtField : public Object {
}
uint32_t GetDexFieldIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_));
}
void SetDexFieldIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_), new_idx, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtField, field_dex_idx_), new_idx);
}
// Offset to field within an Object.
diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h
index 6e1f0623bb..fb9a09a95f 100644
--- a/runtime/mirror/art_method-inl.h
+++ b/runtime/mirror/art_method-inl.h
@@ -29,8 +29,7 @@ namespace art {
namespace mirror {
inline Class* ArtMethod::GetDeclaringClass() {
- Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_),
- false);
+ Class* result = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_));
DCHECK(result != NULL) << this;
DCHECK(result->IsIdxLoaded() || result->IsErroneous()) << this;
return result;
@@ -38,17 +37,17 @@ inline Class* ArtMethod::GetDeclaringClass() {
inline void ArtMethod::SetDeclaringClass(Class *new_declaring_class) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, declaring_class_),
- new_declaring_class, false);
+ new_declaring_class);
}
inline uint32_t ArtMethod::GetAccessFlags() {
DCHECK(GetDeclaringClass()->IsIdxLoaded() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_));
}
inline uint16_t ArtMethod::GetMethodIndex() {
DCHECK(GetDeclaringClass()->IsResolved() || GetDeclaringClass()->IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_));
}
inline uint32_t ArtMethod::GetDexMethodIndex() {
@@ -58,22 +57,22 @@ inline uint32_t ArtMethod::GetDexMethodIndex() {
#else
DCHECK(GetDeclaringClass()->IsLoaded() || GetDeclaringClass()->IsErroneous());
#endif
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_));
}
inline ObjectArray<String>* ArtMethod::GetDexCacheStrings() {
return GetFieldObject<ObjectArray<String> >(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_), false);
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_));
}
inline ObjectArray<ArtMethod>* ArtMethod::GetDexCacheResolvedMethods() {
return GetFieldObject<ObjectArray<ArtMethod> >(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_), false);
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_));
}
inline ObjectArray<Class>* ArtMethod::GetDexCacheResolvedTypes() {
return GetFieldObject<ObjectArray<Class> >(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_), false);
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_));
}
inline uint32_t ArtMethod::GetCodeSize() {
@@ -199,7 +198,7 @@ inline bool ArtMethod::IsImtConflictMethod() {
template<VerifyObjectFlags kVerifyFlags>
inline void ArtMethod::SetNativeMethod(const void* native_method) {
SetFieldPtr<false, true, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method, false);
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method);
}
} // namespace mirror
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 90bcbabdc5..7453d4d3fe 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -90,17 +90,17 @@ void ArtMethod::ResetClass() {
void ArtMethod::SetDexCacheStrings(ObjectArray<String>* new_dex_cache_strings) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_strings_),
- new_dex_cache_strings, false);
+ new_dex_cache_strings);
}
void ArtMethod::SetDexCacheResolvedMethods(ObjectArray<ArtMethod>* new_dex_cache_methods) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_),
- new_dex_cache_methods, false);
+ new_dex_cache_methods);
}
void ArtMethod::SetDexCacheResolvedTypes(ObjectArray<Class>* new_dex_cache_classes) {
SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_),
- new_dex_cache_classes, false);
+ new_dex_cache_classes);
}
size_t ArtMethod::NumArgRegisters(const StringPiece& shorty) {
@@ -345,7 +345,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
bool ArtMethod::IsRegistered() {
void* native_method =
- GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), false);
+ GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_));
CHECK(native_method != nullptr);
void* jni_stub = GetJniDlsymLookupStub();
return native_method != jni_stub;
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index b3b9ca7dc1..f61a01d90e 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -21,7 +21,6 @@
#include "dex_file.h"
#include "invoke_type.h"
#include "modifiers.h"
-#include "oat.h"
#include "object.h"
#include "object_callbacks.h"
@@ -49,7 +48,7 @@ class MANAGED ArtMethod : public Object {
static ArtMethod* FromReflectedMethod(const ScopedObjectAccess& soa, jobject jlr_method)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Class* GetDeclaringClass() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetDeclaringClass(Class *new_declaring_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -61,7 +60,7 @@ class MANAGED ArtMethod : public Object {
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), new_access_flags, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, access_flags_), new_access_flags);
}
// Approximate what kind of method call would be used for this method.
@@ -162,7 +161,7 @@ class MANAGED ArtMethod : public Object {
void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), new_method_index, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_), new_method_index);
}
static MemberOffset MethodIndexOffset() {
@@ -170,12 +169,12 @@ class MANAGED ArtMethod : public Object {
}
uint32_t GetCodeItemOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_));
}
- void SetCodeItemOffset(uint32_t new_code_off) {
+ void SetCodeItemOffset(uint32_t new_code_off) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), new_code_off, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_code_item_offset_), new_code_off);
}
// Number of 32bit registers that would be required to hold all the arguments
@@ -183,9 +182,9 @@ class MANAGED ArtMethod : public Object {
uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetDexMethodIndex(uint32_t new_idx) {
+ void SetDexMethodIndex(uint32_t new_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), new_idx, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_), new_idx);
}
ObjectArray<String>* GetDexCacheStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -219,16 +218,18 @@ class MANAGED ArtMethod : public Object {
const char* shorty) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- EntryPointFromInterpreter* GetEntryPointFromInterpreter() {
+ EntryPointFromInterpreter* GetEntryPointFromInterpreter()
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldPtr<EntryPointFromInterpreter*, kVerifyFlags>(
- OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_), false);
+ OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) {
+ void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetFieldPtr<false, true, kVerifyFlags>(
OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_),
- entry_point_from_interpreter, false);
+ entry_point_from_interpreter);
}
static MemberOffset EntryPointFromPortableCompiledCodeOffset() {
@@ -236,15 +237,16 @@ class MANAGED ArtMethod : public Object {
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromPortableCompiledCode() {
+ const void* GetEntryPointFromPortableCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldPtr<const void*, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(), false);
+ EntryPointFromPortableCompiledCodeOffset());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code) {
+ void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code, false);
+ EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code);
}
static MemberOffset EntryPointFromQuickCompiledCodeOffset() {
@@ -252,14 +254,15 @@ class MANAGED ArtMethod : public Object {
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- const void* GetEntryPointFromQuickCompiledCode() {
- return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset(), false);
+ const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtr<const void*, kVerifyFlags>(EntryPointFromQuickCompiledCodeOffset());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
+ void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
SetFieldPtr<false, true, kVerifyFlags>(
- EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code, false);
+ EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code);
}
uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -281,10 +284,10 @@ class MANAGED ArtMethod : public Object {
void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- uint32_t GetQuickOatCodeOffset();
- uint32_t GetPortableOatCodeOffset();
- void SetQuickOatCodeOffset(uint32_t code_offset);
- void SetPortableOatCodeOffset(uint32_t code_offset);
+ uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetPortableOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetPortableOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static const void* EntryPointToCodePointer(const void* entry_point) ALWAYS_INLINE {
uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
@@ -301,36 +304,35 @@ class MANAGED ArtMethod : public Object {
// Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
const uint8_t* GetVmapTable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- const uint8_t* GetNativeGcMap() {
- return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), false);
+ const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtr<uint8_t*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetNativeGcMap(const uint8_t* data) {
- SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data,
- false);
+ void SetNativeGcMap(const uint8_t* data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ SetFieldPtr<false, true, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data);
}
// When building the oat need a convenient place to stuff the offset of the native GC map.
- void SetOatNativeGcMapOffset(uint32_t gc_map_offset);
- uint32_t GetOatNativeGcMapOffset();
+ void SetOatNativeGcMapOffset(uint32_t gc_map_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ uint32_t GetOatNativeGcMapOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template <bool kCheckFrameSize = true>
- uint32_t GetFrameSizeInBytes() {
- uint32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_),
- false);
+ uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ uint32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_));
if (kCheckFrameSize) {
DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
}
return result;
}
- void SetFrameSizeInBytes(size_t new_frame_size_in_bytes) {
+ void SetFrameSizeInBytes(size_t new_frame_size_in_bytes)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_frame_size_in_bytes_),
- new_frame_size_in_bytes, false);
+ new_frame_size_in_bytes);
}
- size_t GetReturnPcOffsetInBytes() {
+ size_t GetReturnPcOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFrameSizeInBytes() - kPointerSize;
}
@@ -338,7 +340,7 @@ class MANAGED ArtMethod : public Object {
return kPointerSize;
}
- bool IsRegistered();
+ bool IsRegistered() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void RegisterNative(Thread* self, const void* native_method, bool is_fast)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -349,35 +351,35 @@ class MANAGED ArtMethod : public Object {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_);
}
- const void* GetNativeMethod() {
- return GetFieldPtr<const void*>(NativeMethodOffset(), false);
+ const void* GetNativeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtr<const void*>(NativeMethodOffset());
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetNativeMethod(const void*);
+ void SetNativeMethod(const void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static MemberOffset GetMethodIndexOffset() {
return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
}
- uint32_t GetCoreSpillMask() {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), false);
+ uint32_t GetCoreSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_));
}
- void SetCoreSpillMask(uint32_t core_spill_mask) {
+ void SetCoreSpillMask(uint32_t core_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Computed during compilation.
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), core_spill_mask, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_core_spill_mask_), core_spill_mask);
}
- uint32_t GetFpSpillMask() {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), false);
+ uint32_t GetFpSpillMask() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_));
}
- void SetFpSpillMask(uint32_t fp_spill_mask) {
+ void SetFpSpillMask(uint32_t fp_spill_mask) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Computed during compilation.
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), fp_spill_mask, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, quick_fp_spill_mask_), fp_spill_mask);
}
// Is this a CalleeSaveMethod or ResolutionMethod and therefore doesn't adhere to normal
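
[Editor's note] The recurring change in this file, dropping the trailing `false` argument, is the commit's core API shift: volatility moves from a runtime bool to a compile-time template parameter, so the common non-volatile path compiles with no branch. A minimal, self-contained sketch of the pattern; names and layout are illustrative, not ART's real classes, and MemberOffset is replaced by std::size_t for brevity:

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct Obj {
      alignas(8) unsigned char storage[64];

      // Old shape: int32_t GetField32(std::size_t offset, bool is_volatile);
      // New shape: the bool is a template parameter with a non-volatile default.
      template <bool kIsVolatile = false>
      int32_t GetField32(std::size_t offset) const {
        const auto* addr =
            reinterpret_cast<const std::atomic<int32_t>*>(storage + offset);
        if (kIsVolatile) {  // constant-folded; the dead branch is elided
          return addr->load(std::memory_order_acquire);
        }
        return addr->load(std::memory_order_relaxed);
      }

      // Thin named wrapper, mirroring GetField32Volatile in the patch.
      int32_t GetField32Volatile(std::size_t offset) const {
        return GetField32<true>(offset);
      }
    };
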
diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h
index 3c02aa0b49..8a1f383853 100644
--- a/runtime/mirror/class-inl.h
+++ b/runtime/mirror/class-inl.h
@@ -35,38 +35,36 @@ namespace mirror {
inline uint32_t Class::GetObjectSize() {
DCHECK(!IsVariableSize()) << " class=" << PrettyTypeOf(this);
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_));
}
inline Class* Class::GetSuperClass() {
// Can only get super class for loaded classes (hack for when runtime is
// initializing)
DCHECK(IsLoaded() || IsErroneous() || !Runtime::Current()->IsStarted()) << IsLoaded();
- return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), false);
+ return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
}
inline ClassLoader* Class::GetClassLoader() {
- return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), false);
+ return GetFieldObject<ClassLoader>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_));
}
template<VerifyObjectFlags kVerifyFlags>
inline DexCache* Class::GetDexCache() {
- return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), false);
+ return GetFieldObject<DexCache, kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_));
}
inline ObjectArray<ArtMethod>* Class::GetDirectMethods() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod> >(
- OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false);
+ return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_));
}
inline void Class::SetDirectMethods(ObjectArray<ArtMethod>* new_direct_methods)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(NULL == GetFieldObject<ObjectArray<ArtMethod> >(
- OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false));
+ OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_)));
DCHECK_NE(0, new_direct_methods->GetLength());
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_),
- new_direct_methods, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), new_direct_methods);
}
inline ArtMethod* Class::GetDirectMethod(int32_t i) {
@@ -76,8 +74,7 @@ inline ArtMethod* Class::GetDirectMethod(int32_t i) {
inline void Class::SetDirectMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* direct_methods =
- GetFieldObject<ObjectArray<ArtMethod> >(
- OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_), false);
+ GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, direct_methods_));
direct_methods->Set<false>(i, f);
}
@@ -89,16 +86,14 @@ inline uint32_t Class::NumDirectMethods() {
template<VerifyObjectFlags kVerifyFlags>
inline ObjectArray<ArtMethod>* Class::GetVirtualMethods() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod> >(
- OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false);
+ return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_));
}
inline void Class::SetVirtualMethods(ObjectArray<ArtMethod>* new_virtual_methods) {
// TODO: we reassign virtual methods to grow the table for miranda
// methods... they should really just be assigned once.
DCHECK_NE(0, new_virtual_methods->GetLength());
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_),
- new_virtual_methods, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), new_virtual_methods);
}
inline uint32_t Class::NumVirtualMethods() {
@@ -119,31 +114,30 @@ inline ArtMethod* Class::GetVirtualMethodDuringLinking(uint32_t i) {
inline void Class::SetVirtualMethod(uint32_t i, ArtMethod* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtMethod>* virtual_methods =
- GetFieldObject<ObjectArray<ArtMethod> >(
- OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_), false);
+ GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, virtual_methods_));
virtual_methods->Set<false>(i, f);
}
inline ObjectArray<ArtMethod>* Class::GetVTable() {
DCHECK(IsResolved() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false);
+ return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
}
inline ObjectArray<ArtMethod>* Class::GetVTableDuringLinking() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), false);
+ return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, vtable_));
}
inline void Class::SetVTable(ObjectArray<ArtMethod>* new_vtable) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, vtable_), new_vtable);
}
inline ObjectArray<ArtMethod>* Class::GetImTable() {
- return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), false);
+ return GetFieldObject<ObjectArray<ArtMethod> >(OFFSET_OF_OBJECT_MEMBER(Class, imtable_));
}
inline void Class::SetImTable(ObjectArray<ArtMethod>* new_imtable) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), new_imtable, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, imtable_), new_imtable);
}
inline bool Class::Implements(Class* klass) {
@@ -338,7 +332,7 @@ inline ArtMethod* Class::FindVirtualMethodForVirtualOrInterface(ArtMethod* metho
}
inline IfTable* Class::GetIfTable() {
- return GetFieldObject<IfTable>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), false);
+ return GetFieldObject<IfTable>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_));
}
inline int32_t Class::GetIfTableCount() {
@@ -350,46 +344,45 @@ inline int32_t Class::GetIfTableCount() {
}
inline void Class::SetIfTable(IfTable* new_iftable) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, iftable_), new_iftable);
}
inline ObjectArray<ArtField>* Class::GetIFields() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false);
+ return GetFieldObject<ObjectArray<ArtField>>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
}
inline void Class::SetIFields(ObjectArray<ArtField>* new_ifields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(NULL == GetFieldObject<ObjectArray<ArtField> >(
- OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false));
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields, false);
+ DCHECK(NULL == GetFieldObject<ObjectArray<ArtField> >(OFFSET_OF_OBJECT_MEMBER(Class, ifields_)));
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, ifields_), new_ifields);
}
inline ObjectArray<ArtField>* Class::GetSFields() {
DCHECK(IsLoaded() || IsErroneous());
- return GetFieldObject<ObjectArray<ArtField> >(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false);
+ return GetFieldObject<ObjectArray<ArtField> >(OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
}
inline void Class::SetSFields(ObjectArray<ArtField>* new_sfields)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(NULL == GetFieldObject<ObjectArray<ArtField> >(
- OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false));
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields, false);
+ DCHECK(NULL == GetFieldObject<ObjectArray<ArtField> >(OFFSET_OF_OBJECT_MEMBER(Class, sfields_)));
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, sfields_), new_sfields);
}
inline uint32_t Class::NumStaticFields() {
return (GetSFields() != NULL) ? GetSFields()->GetLength() : 0;
}
+
inline ArtField* Class::GetStaticField(uint32_t i) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetSFields()->Get(i);
+ return GetSFields()->GetWithoutChecks(i);
}
inline void Class::SetStaticField(uint32_t i, ArtField* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtField>* sfields= GetFieldObject<ObjectArray<ArtField> >(
- OFFSET_OF_OBJECT_MEMBER(Class, sfields_), false);
+ OFFSET_OF_OBJECT_MEMBER(Class, sfields_));
sfields->Set<false>(i, f);
}
@@ -399,22 +392,36 @@ inline uint32_t Class::NumInstanceFields() {
inline ArtField* Class::GetInstanceField(uint32_t i) { // TODO: uint16_t
DCHECK_NE(NumInstanceFields(), 0U);
- return GetIFields()->Get(i);
+ return GetIFields()->GetWithoutChecks(i);
}
inline void Class::SetInstanceField(uint32_t i, ArtField* f) // TODO: uint16_t
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ObjectArray<ArtField>* ifields= GetFieldObject<ObjectArray<ArtField> >(
- OFFSET_OF_OBJECT_MEMBER(Class, ifields_), false);
+ OFFSET_OF_OBJECT_MEMBER(Class, ifields_));
ifields->Set<false>(i, f);
}
+template<VerifyObjectFlags kVerifyFlags>
+inline uint32_t Class::GetReferenceInstanceOffsets() {
+ DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_));
+}
+
+inline void Class::SetClinitThreadId(pid_t new_clinit_thread_id) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id);
+ } else {
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id);
+ }
+}
+
inline void Class::SetVerifyErrorClass(Class* klass) {
CHECK(klass != NULL) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false);
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass);
} else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), klass);
}
}
@@ -427,20 +434,27 @@ inline uint32_t Class::GetAccessFlags() {
this == String::GetJavaLangString() ||
this == ArtField::GetJavaLangReflectArtField() ||
this == ArtMethod::GetJavaLangReflectArtMethod());
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
}
inline String* Class::GetName() {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_), false);
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Class, name_));
}
inline void Class::SetName(String* name) {
if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false);
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name);
} else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, name_), name);
}
}
+template<VerifyObjectFlags kVerifyFlags>
+inline Primitive::Type Class::GetPrimitiveType() {
+ DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
+ return static_cast<Primitive::Type>(
+ GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_)));
+}
+
inline void Class::CheckObjectAlloc() {
DCHECK(!IsArrayClass())
<< PrettyClass(this)
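
[Editor's note] SetClinitThreadId and SetVerifyErrorClass above show the transaction-dispatch idiom this file uses throughout: one runtime check of IsActiveTransaction() selects a fully specialized setter, so the rollback bookkeeping compiles away entirely on the non-transactional path. A hedged sketch of the shape; IsActiveTransaction and RecordWrite32 here are stand-ins, not ART's API:

    #include <cstdint>

    bool IsActiveTransaction() { return false; }  // stand-in runtime query
    void RecordWrite32(void*, int32_t) {}         // stand-in rollback log

    template <bool kTransactionActive>
    void SetField32(void* obj, int32_t* field, int32_t new_value) {
      if (kTransactionActive) {      // only kept in the <true> instantiation
        RecordWrite32(obj, *field);  // log old value so it can be rolled back
      }
      *field = new_value;
    }

    void SetCounter(void* obj, int32_t* field, int32_t v) {
      if (IsActiveTransaction()) {   // branch once, then straight-line code
        SetField32<true>(obj, field, v);
      } else {
        SetField32<false>(obj, field, v);
      }
    }
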
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index ad86e1fc88..64a849bad1 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -114,9 +114,9 @@ void Class::SetStatus(Status new_status, Thread* self) {
}
CHECK(sizeof(Status) == sizeof(uint32_t)) << PrettyClass(this);
if (Runtime::Current()->IsActiveTransaction()) {
- SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status, false);
+ SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
} else {
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, status_), new_status);
}
// Classes that are being resolved or initialized need to notify waiters that the class status
// changed. See ClassLinker::EnsureResolved and ClassLinker::WaitForInitializeClass.
@@ -127,7 +127,7 @@ void Class::SetStatus(Status new_status, Thread* self) {
}
void Class::SetDexCache(DexCache* new_dex_cache) {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_), new_dex_cache);
}
void Class::SetClassSize(uint32_t new_class_size) {
@@ -136,7 +136,7 @@ void Class::SetClassSize(uint32_t new_class_size) {
CHECK_GE(new_class_size, GetClassSize()) << " class=" << PrettyTypeOf(this);
}
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), new_class_size);
}
// Return the class' name. The exact format is bizarre, but it's the specified behavior for
@@ -261,7 +261,7 @@ void Class::SetReferenceInstanceOffsets(uint32_t new_reference_offsets) {
}
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_),
- new_reference_offsets, false);
+ new_reference_offsets);
}
void Class::SetReferenceStaticOffsets(uint32_t new_reference_offsets) {
@@ -273,7 +273,7 @@ void Class::SetReferenceStaticOffsets(uint32_t new_reference_offsets) {
}
// Not called within a transaction.
SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_),
- new_reference_offsets, false);
+ new_reference_offsets);
}
bool Class::IsInSamePackage(const StringPiece& descriptor1, const StringPiece& descriptor2) {
@@ -330,9 +330,9 @@ bool Class::IsThrowableClass() {
void Class::SetClassLoader(ClassLoader* new_class_loader) {
if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false);
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
} else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, class_loader_), new_class_loader);
}
}
diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h
index 226dee0c9a..23211c2057 100644
--- a/runtime/mirror/class.h
+++ b/runtime/mirror/class.h
@@ -17,9 +17,11 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_H_
#define ART_RUNTIME_MIRROR_CLASS_H_
+#include "gc/allocator_type.h"
#include "invoke_type.h"
#include "modifiers.h"
#include "object.h"
+#include "object_callbacks.h"
#include "primitive.h"
/*
@@ -122,8 +124,7 @@ class MANAGED Class : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
Status GetStatus() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
COMPILE_ASSERT(sizeof(Status) == sizeof(uint32_t), size_of_status_not_uint32);
- return static_cast<Status>(GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_),
- true));
+ return static_cast<Status>(GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, status_)));
}
void SetStatus(Status new_status, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -185,7 +186,7 @@ class MANAGED Class : public Object {
void SetAccessFlags(uint32_t new_access_flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), new_access_flags);
}
// Returns true if the class is an interface.
@@ -208,7 +209,7 @@ class MANAGED Class : public Object {
}
void SetFinalizable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_), false);
+ uint32_t flags = GetField32(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
SetAccessFlags(flags | kAccClassIsFinalizable);
}
@@ -280,21 +281,16 @@ class MANAGED Class : public Object {
// Read access flags without using getter, as whether something is a proxy can be checked in
// any loaded state
// TODO: switch to a check if the super class is java.lang.reflect.Proxy?
- uint32_t access_flags = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_),
- false);
+ uint32_t access_flags = GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, access_flags_));
return (access_flags & kAccClassIsProxy) != 0;
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- Primitive::Type GetPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
- return static_cast<Primitive::Type>(
- GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), false));
- }
+ Primitive::Type GetPrimitiveType() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetPrimitiveType(Primitive::Type new_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_EQ(sizeof(Primitive::Type), sizeof(int32_t));
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), new_type, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, primitive_type_), new_type);
}
// Returns true if the class is a primitive type.
@@ -387,14 +383,14 @@ class MANAGED Class : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
Class* GetComponentType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Class, kVerifyFlags, kDoReadBarrier>(ComponentTypeOffset(), false);
+ return GetFieldObject<Class, kVerifyFlags, kDoReadBarrier>(ComponentTypeOffset());
}
void SetComponentType(Class* new_component_type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(GetComponentType() == NULL);
DCHECK(new_component_type != NULL);
// Component type is invariant: use non-transactional mode without check.
- SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type, false);
+ SetFieldObject<false, false>(ComponentTypeOffset(), new_component_type);
}
template<bool kDoReadBarrier = true>
@@ -433,12 +429,12 @@ class MANAGED Class : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
uint32_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetClassSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_), false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, class_size_));
}
void SetClassSize(uint32_t new_class_size)
@@ -449,7 +445,7 @@ class MANAGED Class : public Object {
void SetObjectSize(uint32_t new_object_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(!IsVariableSize());
// Not called within a transaction.
- return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size, false);
+ return SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size);
}
// Returns true if this class is in the same packages as that class.
@@ -538,11 +534,10 @@ class MANAGED Class : public Object {
void SetSuperClass(Class *new_super_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Super class is assigned once, except during class linker initialization.
- Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_),
- false);
+ Class* old_super_class = GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_));
DCHECK(old_super_class == nullptr || old_super_class == new_super_class);
DCHECK(new_super_class != nullptr);
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Class, super_class_), new_super_class);
}
bool HasSuperClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -553,7 +548,7 @@ class MANAGED Class : public Object {
return MemberOffset(OFFSETOF_MEMBER(Class, super_class_));
}
- ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ ClassLoader* GetClassLoader() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetClassLoader(ClassLoader* new_cl) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -709,26 +704,21 @@ class MANAGED Class : public Object {
// Returns the number of instance fields containing reference types.
uint32_t NumReferenceInstanceFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
uint32_t NumReferenceInstanceFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_));
}
- void SetNumReferenceInstanceFields(uint32_t new_num) {
+ void SetNumReferenceInstanceFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num,
- false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_instance_fields_), new_num);
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- uint32_t GetReferenceInstanceOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- DCHECK(IsResolved<kVerifyFlags>() || IsErroneous<kVerifyFlags>());
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_instance_offsets_),
- false);
- }
+ uint32_t GetReferenceInstanceOffsets() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SetReferenceInstanceOffsets(uint32_t new_reference_offsets)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -741,17 +731,17 @@ class MANAGED Class : public Object {
// Returns the number of static fields containing reference types.
uint32_t NumReferenceStaticFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsResolved() || IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
uint32_t NumReferenceStaticFieldsDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsLoaded() || IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_));
}
- void SetNumReferenceStaticFields(uint32_t new_num) {
+ void SetNumReferenceStaticFields(uint32_t new_num) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, num_reference_static_fields_), new_num);
}
// Gets the static fields of the class.
@@ -769,8 +759,7 @@ class MANAGED Class : public Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
uint32_t GetReferenceStaticOffsets() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_),
- false);
+ return GetField32<kVerifyFlags>(OFFSET_OF_OBJECT_MEMBER(Class, reference_static_offsets_));
}
void SetReferenceStaticOffsets(uint32_t new_reference_offsets)
@@ -812,40 +801,32 @@ class MANAGED Class : public Object {
pid_t GetClinitThreadId() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(IsIdxLoaded() || IsErroneous());
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_));
}
- void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- SetField32<true>(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id,
- false);
- } else {
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, clinit_thread_id_), new_clinit_thread_id,
- false);
- }
- }
+ void SetClinitThreadId(pid_t new_clinit_thread_id) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Class* GetVerifyErrorClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// DCHECK(IsErroneous());
- return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_), false);
+ return GetFieldObject<Class>(OFFSET_OF_OBJECT_MEMBER(Class, verify_error_class_));
}
uint16_t GetDexClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_));
}
void SetDexClassDefIndex(uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_class_def_idx_), class_def_idx);
}
uint16_t GetDexTypeIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_));
}
void SetDexTypeIndex(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Not called within a transaction.
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(Class, dex_type_idx_), type_idx);
}
static Class* GetJavaLangClass() {
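
[Editor's note] Several accessors here (GetPrimitiveType, GetReferenceInstanceOffsets, SetClinitThreadId) lose their in-header bodies and gain ALWAYS_INLINE: the declaration stays in class.h and the definition moves to class-inl.h, which keeps the widely-included header light while still guaranteeing inlining at call sites that include the -inl header. A minimal sketch of that split with hypothetical file and type names; ART defines ALWAYS_INLINE in base/macros.h, redefined here only to keep the sketch self-contained:

    #include <cstdint>

    // foo.h: declaration only; the attribute forces inlining once the
    // body is visible.
    #define ALWAYS_INLINE __attribute__((always_inline))
    struct Foo {
      int32_t value_;
      int32_t Get() const ALWAYS_INLINE;
    };

    // foo-inl.h: trivial body, included by every hot caller.
    inline int32_t Foo::Get() const { return value_; }
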
diff --git a/runtime/mirror/class_loader.h b/runtime/mirror/class_loader.h
index 69accf5834..74dae386f3 100644
--- a/runtime/mirror/class_loader.h
+++ b/runtime/mirror/class_loader.h
@@ -17,9 +17,6 @@
#ifndef ART_RUNTIME_MIRROR_CLASS_LOADER_H_
#define ART_RUNTIME_MIRROR_CLASS_LOADER_H_
-#include <vector>
-
-#include "dex_file.h"
#include "mirror/object.h"
namespace art {
diff --git a/runtime/mirror/dex_cache.cc b/runtime/mirror/dex_cache.cc
index 0a77db3187..d6c11e8b0c 100644
--- a/runtime/mirror/dex_cache.cc
+++ b/runtime/mirror/dex_cache.cc
@@ -44,12 +44,12 @@ void DexCache::Init(const DexFile* dex_file,
CHECK(resolved_methods != nullptr);
CHECK(resolved_fields != nullptr);
- SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false);
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location, false);
- SetFieldObject<false>(StringsOffset(), strings, false);
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types, false);
- SetFieldObject<false>(ResolvedMethodsOffset(), resolved_methods, false);
- SetFieldObject<false>(ResolvedFieldsOffset(), resolved_fields, false);
+ SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), location);
+ SetFieldObject<false>(StringsOffset(), strings);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), resolved_types);
+ SetFieldObject<false>(ResolvedMethodsOffset(), resolved_methods);
+ SetFieldObject<false>(ResolvedFieldsOffset(), resolved_fields);
Runtime* runtime = Runtime::Current();
if (runtime->HasResolutionMethod()) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 843f860185..11a40023e1 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -53,7 +53,7 @@ class MANAGED DexCache : public Object {
void Fixup(ArtMethod* trampoline) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
String* GetLocation() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_), false);
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
static MemberOffset StringsOffset() {
@@ -88,17 +88,18 @@ class MANAGED DexCache : public Object {
return GetStrings()->Get(string_idx);
}
- void SetResolvedString(uint32_t string_idx, String* resolved)
+ void SetResolvedString(uint32_t string_idx, String* resolved) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO default transaction support.
GetStrings()->Set(string_idx, resolved);
}
- Class* GetResolvedType(uint32_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ Class* GetResolvedType(uint32_t type_idx) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetResolvedTypes()->Get(type_idx);
}
- void SetResolvedType(uint32_t type_idx, Class* resolved)
+ void SetResolvedType(uint32_t type_idx, Class* resolved) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// TODO default transaction support.
GetResolvedTypes()->Set(type_idx, resolved);
@@ -106,43 +107,47 @@ class MANAGED DexCache : public Object {
ArtMethod* GetResolvedMethod(uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved)
+ void SetResolvedMethod(uint32_t method_idx, ArtMethod* resolved) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
GetResolvedMethods()->Set(method_idx, resolved);
}
- ArtField* GetResolvedField(uint32_t field_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ArtField* GetResolvedField(uint32_t field_idx) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetResolvedFields()->Get(field_idx);
}
- void SetResolvedField(uint32_t field_idx, ArtField* resolved)
+ void SetResolvedField(uint32_t field_idx, ArtField* resolved) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
GetResolvedFields()->Set(field_idx, resolved);
}
- ObjectArray<String>* GetStrings() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject< ObjectArray<String> >(StringsOffset(), false);
+ ObjectArray<String>* GetStrings() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject< ObjectArray<String> >(StringsOffset());
}
- ObjectArray<Class>* GetResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ObjectArray<Class>* GetResolvedTypes() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<Class> >(
- OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_), false);
+ OFFSET_OF_OBJECT_MEMBER(DexCache, resolved_types_));
}
- ObjectArray<ArtMethod>* GetResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject< ObjectArray<ArtMethod> >(ResolvedMethodsOffset(), false);
+ ObjectArray<ArtMethod>* GetResolvedMethods() ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject< ObjectArray<ArtMethod> >(ResolvedMethodsOffset());
}
- ObjectArray<ArtField>* GetResolvedFields() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<ObjectArray<ArtField> >(ResolvedFieldsOffset(), false);
+ ObjectArray<ArtField>* GetResolvedFields() ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldObject<ObjectArray<ArtField> >(ResolvedFieldsOffset());
}
- const DexFile* GetDexFile() {
- return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), false);
+ const DexFile* GetDexFile() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return GetFieldPtr<const DexFile*>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_));
}
- void SetDexFile(const DexFile* dex_file) {
- return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file, false);
+ void SetDexFile(const DexFile* dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ ALWAYS_INLINE {
+ return SetFieldPtr<false>(OFFSET_OF_OBJECT_MEMBER(DexCache, dex_file_), dex_file);
}
private:
diff --git a/runtime/mirror/iftable.h b/runtime/mirror/iftable.h
index bb4cd41717..ad312ed229 100644
--- a/runtime/mirror/iftable.h
+++ b/runtime/mirror/iftable.h
@@ -17,6 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_IFTABLE_H_
#define ART_RUNTIME_MIRROR_IFTABLE_H_
+#include "base/casts.h"
#include "object_array.h"
namespace art {
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 04517ec28b..c70a08dbad 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -37,7 +37,7 @@ namespace mirror {
template<VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
inline Class* Object::GetClass() {
return GetFieldObject<Class, kVerifyFlags, kDoReadBarrier>(
- OFFSET_OF_OBJECT_MEMBER(Object, klass_), false);
+ OFFSET_OF_OBJECT_MEMBER(Object, klass_));
}
template<VerifyObjectFlags kVerifyFlags>
@@ -49,17 +49,23 @@ inline void Object::SetClass(Class* new_klass) {
// we may run in transaction mode here.
SetFieldObjectWithoutWriteBarrier<false, false,
static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(
- OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass, false);
+ OFFSET_OF_OBJECT_MEMBER(Object, klass_), new_klass);
}
inline LockWord Object::GetLockWord(bool as_volatile) {
- return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), as_volatile));
+ if (as_volatile) {
+ return LockWord(GetField32Volatile(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ }
+ return LockWord(GetField32(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
}
inline void Object::SetLockWord(LockWord new_val, bool as_volatile) {
// Force use of non-transactional mode and do not check.
- SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue(),
- as_volatile);
+ if (as_volatile) {
+ SetField32Volatile<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
+ } else {
+ SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_), new_val.GetValue());
+ }
}
inline bool Object::CasLockWord(LockWord old_val, LockWord new_val) {
@@ -387,14 +393,14 @@ inline size_t Object::SizeOf() {
return result;
}
-template<VerifyObjectFlags kVerifyFlags>
-inline int32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) {
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int32_t Object::GetField32(MemberOffset field_offset) {
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
- if (UNLIKELY(is_volatile)) {
+ if (UNLIKELY(kIsVolatile)) {
int32_t result = *(reinterpret_cast<volatile int32_t*>(const_cast<int32_t*>(word_addr)));
QuasiAtomic::MembarLoadLoad(); // Ensure volatile loads don't re-order.
return result;
@@ -403,21 +409,28 @@ inline int32_t Object::GetField32(MemberOffset field_offset, bool is_volatile) {
}
}
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void Object::SetField32(MemberOffset field_offset, int32_t new_value, bool is_volatile) {
+template<VerifyObjectFlags kVerifyFlags>
+inline int32_t Object::GetField32Volatile(MemberOffset field_offset) {
+ return GetField32<kVerifyFlags, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+ bool kIsVolatile>
+inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
if (kTransactionActive) {
- Runtime::Current()->RecordWriteField32(this, field_offset, GetField32(field_offset, is_volatile),
- is_volatile);
+ Runtime::Current()->RecordWriteField32(this, field_offset,
+ GetField32<kVerifyFlags, kIsVolatile>(field_offset),
+ kIsVolatile);
}
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
- if (UNLIKELY(is_volatile)) {
+ if (kIsVolatile) {
QuasiAtomic::MembarStoreStore(); // Ensure this store occurs after others in the queue.
*word_addr = new_value;
QuasiAtomic::MembarStoreLoad(); // Ensure this store occurs before any volatile loads.
@@ -427,6 +440,11 @@ inline void Object::SetField32(MemberOffset field_offset, int32_t new_value, boo
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetField32Volatile(MemberOffset field_offset, int32_t new_value) {
+ SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasField32(MemberOffset field_offset, int32_t old_value, int32_t new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
@@ -442,14 +460,14 @@ inline bool Object::CasField32(MemberOffset field_offset, int32_t old_value, int
return __sync_bool_compare_and_swap(addr, old_value, new_value);
}
-template<VerifyObjectFlags kVerifyFlags>
-inline int64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) {
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int64_t Object::GetField64(MemberOffset field_offset) {
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
- if (UNLIKELY(is_volatile)) {
+ if (kIsVolatile) {
int64_t result = QuasiAtomic::Read64(addr);
QuasiAtomic::MembarLoadLoad(); // Ensure volatile loads don't re-order.
return result;
@@ -458,21 +476,28 @@ inline int64_t Object::GetField64(MemberOffset field_offset, bool is_volatile) {
}
}
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void Object::SetField64(MemberOffset field_offset, int64_t new_value, bool is_volatile) {
+template<VerifyObjectFlags kVerifyFlags>
+inline int64_t Object::GetField64Volatile(MemberOffset field_offset) {
+ return GetField64<kVerifyFlags, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+ bool kIsVolatile>
+inline void Object::SetField64(MemberOffset field_offset, int64_t new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
if (kTransactionActive) {
- Runtime::Current()->RecordWriteField64(this, field_offset, GetField64(field_offset, is_volatile),
- is_volatile);
+ Runtime::Current()->RecordWriteField64(this, field_offset,
+ GetField64<kVerifyFlags, kIsVolatile>(field_offset),
+ kIsVolatile);
}
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
- if (UNLIKELY(is_volatile)) {
+ if (kIsVolatile) {
QuasiAtomic::MembarStoreStore(); // Ensure this store occurs after others in the queue.
QuasiAtomic::Write64(addr, new_value);
if (!QuasiAtomic::LongAtomicsUseMutexes()) {
@@ -486,6 +511,12 @@ inline void Object::SetField64(MemberOffset field_offset, int64_t new_value, boo
}
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_value) {
+ return SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(field_offset,
+ new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasField64(MemberOffset field_offset, int64_t old_value, int64_t new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
@@ -501,15 +532,15 @@ inline bool Object::CasField64(MemberOffset field_offset, int64_t old_value, int
return QuasiAtomic::Cas64(old_value, new_value, addr);
}
-template<class T, VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
-inline T* Object::GetFieldObject(MemberOffset field_offset, bool is_volatile) {
+template<class T, VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier, bool kIsVolatile>
+inline T* Object::GetFieldObject(MemberOffset field_offset) {
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
HeapReference<T>* objref_addr = reinterpret_cast<HeapReference<T>*>(raw_addr);
T* result = ReadBarrier::Barrier<T, kDoReadBarrier>(this, field_offset, objref_addr);
- if (UNLIKELY(is_volatile)) {
+ if (kIsVolatile) {
QuasiAtomic::MembarLoadLoad(); // Ensure loads don't re-order.
}
if (kVerifyFlags & kVerifyReads) {
@@ -518,16 +549,26 @@ inline T* Object::GetFieldObject(MemberOffset field_offset, bool is_volatile) {
return result;
}
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value,
- bool is_volatile) {
+template<class T, VerifyObjectFlags kVerifyFlags, bool kDoReadBarrier>
+inline T* Object::GetFieldObjectVolatile(MemberOffset field_offset) {
+ return GetFieldObject<T, kVerifyFlags, kDoReadBarrier, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+ bool kIsVolatile>
+inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
+ Object* new_value) {
if (kCheckTransaction) {
DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
}
if (kTransactionActive) {
- Runtime::Current()->RecordWriteFieldReference(this, field_offset,
- GetFieldObject<Object>(field_offset, is_volatile),
- true);
+ mirror::Object* obj;
+ if (kIsVolatile) {
+ obj = GetFieldObjectVolatile<Object>(field_offset);
+ } else {
+ obj = GetFieldObject<Object>(field_offset);
+ }
+ Runtime::Current()->RecordWriteFieldReference(this, field_offset, obj, true);
}
if (kVerifyFlags & kVerifyThis) {
VerifyObject(this);
@@ -537,7 +578,7 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
}
byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
HeapReference<Object>* objref_addr = reinterpret_cast<HeapReference<Object>*>(raw_addr);
- if (UNLIKELY(is_volatile)) {
+ if (kIsVolatile) {
QuasiAtomic::MembarStoreStore(); // Ensure this store occurs after others in the queue.
objref_addr->Assign(new_value);
QuasiAtomic::MembarStoreLoad(); // Ensure this store occurs before any loads.
@@ -546,16 +587,23 @@ inline void Object::SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
}
}
-template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
-inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile) {
- SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, new_value, is_volatile);
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+ bool kIsVolatile>
+inline void Object::SetFieldObject(MemberOffset field_offset, Object* new_value) {
+ SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags,
+ kIsVolatile>(field_offset, new_value);
if (new_value != nullptr) {
CheckFieldAssignment(field_offset, new_value);
Runtime::Current()->GetHeap()->WriteBarrierField(this, field_offset, new_value);
}
}
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value) {
+ SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(field_offset,
+ new_value);
+}
+
template <VerifyObjectFlags kVerifyFlags>
inline HeapReference<Object>* Object::GetFieldObjectReferenceAddr(MemberOffset field_offset) {
if (kVerifyFlags & kVerifyThis) {
@@ -618,8 +666,7 @@ inline void Object::VisitFieldsReferences(uint32_t ref_offsets, const Visitor& v
size_t num_reference_fields =
kIsStatic ? klass->NumReferenceStaticFields() : klass->NumReferenceInstanceFields();
for (size_t i = 0; i < num_reference_fields; ++i) {
- mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i)
- : klass->GetInstanceField(i);
+ mirror::ArtField* field = kIsStatic ? klass->GetStaticField(i) : klass->GetInstanceField(i);
MemberOffset field_offset = field->GetOffset();
// TODO: Do a simpler check?
if (!kVisitClass && UNLIKELY(field_offset.Uint32Value() == ClassOffset().Uint32Value())) {
@@ -648,14 +695,16 @@ template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags, typename Visit
inline void Object::VisitReferences(const Visitor& visitor,
const JavaLangRefVisitor& ref_visitor) {
mirror::Class* klass = GetClass<kVerifyFlags>();
- if (UNLIKELY(klass == Class::GetJavaLangClass())) {
- DCHECK_EQ(klass->GetClass<kVerifyNone>(), Class::GetJavaLangClass());
- AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
- } else if (UNLIKELY(klass->IsArrayClass<kVerifyFlags>())) {
- if (klass->IsObjectArrayClass<kVerifyNone>()) {
- AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
- } else if (kVisitClass) {
- visitor(this, ClassOffset(), false);
+ if (klass->IsVariableSize()) {
+ if (klass->IsClassClass()) {
+ AsClass<kVerifyNone>()->VisitReferences<kVisitClass>(klass, visitor);
+ } else {
+ DCHECK(klass->IsArrayClass<kVerifyFlags>());
+ if (klass->IsObjectArrayClass<kVerifyNone>()) {
+ AsObjectArray<mirror::Object, kVerifyNone>()->VisitReferences<kVisitClass>(visitor);
+ } else if (kVisitClass) {
+ visitor(this, ClassOffset(), false);
+ }
}
} else {
VisitInstanceFieldsReferences<kVisitClass>(klass, visitor);
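
[Editor's note] The kIsVolatile branches above pair each volatile access with explicit QuasiAtomic membars. Roughly, MembarLoadLoad after a volatile load behaves like an acquire fence, MembarStoreStore before a volatile store like a release fence, and MembarStoreLoad after it like a full fence. A sketch of the same discipline in C++11 terms; this approximates the intent and is not ART's QuasiAtomic implementation:

    #include <atomic>
    #include <cstdint>

    inline int32_t VolatileLoad32(const int32_t* addr) {
      int32_t result = *reinterpret_cast<const volatile int32_t*>(addr);
      // MembarLoadLoad: later loads must not be reordered before this one.
      std::atomic_thread_fence(std::memory_order_acquire);
      return result;
    }

    inline void VolatileStore32(int32_t* addr, int32_t value) {
      // MembarStoreStore: earlier stores become visible before this one.
      std::atomic_thread_fence(std::memory_order_release);
      *reinterpret_cast<volatile int32_t*>(addr) = value;
      // MembarStoreLoad: this store is ordered before later volatile loads.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
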
diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc
index 766bbc9892..2cd71a0412 100644
--- a/runtime/mirror/object.cc
+++ b/runtime/mirror/object.cc
@@ -47,10 +47,10 @@ class CopyReferenceFieldsWithReadBarrierVisitor {
void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// GetFieldObject() contains a RB (read barrier).
- Object* ref = obj->GetFieldObject<Object>(offset, false);
+ Object* ref = obj->GetFieldObject<Object>(offset);
// No WB (write barrier) here as a large object space does not have card table
// coverage. Instead, cards will be marked separately.
- dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref, false);
+ dest_obj_->SetFieldObjectWithoutWriteBarrier<false, false>(offset, ref);
}
void operator()(mirror::Class* klass, mirror::Reference* ref) const
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 370b3b89cb..cf28b18a0a 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -17,23 +17,18 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_
-#include "base/casts.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "cutils/atomic-inline.h"
-#include "monitor.h"
#include "object_reference.h"
#include "offsets.h"
-#include "runtime.h"
#include "verify_object.h"
namespace art {
class ImageWriter;
class LockWord;
+class Monitor;
struct ObjectOffsets;
class Thread;
-template <typename T> class SirtRef;
+class VoidFunctor;
namespace mirror {
@@ -73,7 +68,7 @@ class MANAGED LOCKABLE Object {
}
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
- Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ Class* GetClass() ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -104,8 +99,8 @@ class MANAGED LOCKABLE Object {
// as_volatile can be false if the mutators are suspended. This is an optimization since it
// avoids the barriers.
- LockWord GetLockWord(bool as_volatile);
- void SetLockWord(LockWord new_val, bool as_volatile);
+ LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool CasLockWord(LockWord old_val, LockWord new_val) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetLockOwnerThreadId();
@@ -189,18 +184,31 @@ class MANAGED LOCKABLE Object {
bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Accessor for Java type fields.
- template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kDoReadBarrier = true>
- T* GetFieldObject(MemberOffset field_offset, bool is_volatile)
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ bool kDoReadBarrier = true, bool kIsVolatile = false>
+ T* GetFieldObject(MemberOffset field_offset) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
+ bool kDoReadBarrier = true>
+ T* GetFieldObjectVolatile(MemberOffset field_offset) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template<bool kTransactionActive, bool kCheckTransaction = true,
- VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value,
- bool is_volatile)
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset, Object* new_value)
+ ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ void SetFieldObject(MemberOffset field_offset, Object* new_value) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetFieldObject(MemberOffset field_offset, Object* new_value, bool is_volatile)
+ void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CasFieldObject(MemberOffset field_offset, Object* old_value, Object* new_value)
@@ -209,23 +217,46 @@ class MANAGED LOCKABLE Object {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ int32_t GetField32(MemberOffset field_offset) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int32_t GetField32(MemberOffset field_offset, bool is_volatile)
- NO_THREAD_SAFETY_ANALYSIS;
+ int32_t GetField32Volatile(MemberOffset field_offset) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ void SetField32(MemberOffset field_offset, int32_t new_value) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetField32(MemberOffset field_offset, int32_t new_value, bool is_volatile);
+ void SetField32Volatile(MemberOffset field_offset, int32_t new_value) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- bool CasField32(MemberOffset field_offset, int32_t old_value, int32_t new_value)
+ bool CasField32(MemberOffset field_offset, int32_t old_value, int32_t new_value) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ int64_t GetField64(MemberOffset field_offset) ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- int64_t GetField64(MemberOffset field_offset, bool is_volatile);
+ int64_t GetField64Volatile(MemberOffset field_offset) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template<bool kTransactionActive, bool kCheckTransaction = true,
+ VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ void SetField64(MemberOffset field_offset, int64_t new_value) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetField64(MemberOffset field_offset, int64_t new_value, bool is_volatile);
+ void SetField64Volatile(MemberOffset field_offset, int64_t new_value) ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -234,13 +265,14 @@ class MANAGED LOCKABLE Object {
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
- void SetFieldPtr(MemberOffset field_offset, T new_value, bool is_volatile) {
+ void SetFieldPtr(MemberOffset field_offset, T new_value)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifndef __LP64__
SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int32_t>(new_value), is_volatile);
+ field_offset, reinterpret_cast<int32_t>(new_value));
#else
SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- field_offset, reinterpret_cast<int64_t>(new_value), is_volatile);
+ field_offset, reinterpret_cast<int64_t>(new_value));
#endif
}
@@ -248,30 +280,30 @@ class MANAGED LOCKABLE Object {
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template <const bool kVisitClass, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
typename Visitor, typename JavaLangRefVisitor = VoidFunctor>
- void VisitReferences(const Visitor& visitor,
- const JavaLangRefVisitor& ref_visitor = VoidFunctor())
+ void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
NO_THREAD_SAFETY_ANALYSIS;
protected:
// Accessors for non-Java type fields
- template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- T GetFieldPtr(MemberOffset field_offset, bool is_volatile) NO_THREAD_SAFETY_ANALYSIS {
+ template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+ T GetFieldPtr(MemberOffset field_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
#ifndef __LP64__
- return reinterpret_cast<T>(GetField32<kVerifyFlags>(field_offset, is_volatile));
+ return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
#else
- return reinterpret_cast<T>(GetField64<kVerifyFlags>(field_offset, is_volatile));
+ return reinterpret_cast<T>(GetField64<kVerifyFlags, kIsVolatile>(field_offset));
#endif
}
// TODO: Fixme when annotalysis works with visitors.
template<bool kVisitClass, bool kIsStatic, typename Visitor>
- void VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor)
+ void VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) HOT_ATTR
NO_THREAD_SAFETY_ANALYSIS;
template<bool kVisitClass, typename Visitor>
- void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
template<bool kVisitClass, typename Visitor>
- void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
+ void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor) HOT_ATTR
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
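
Taken together, the object.h changes move volatility from a trailing runtime bool into the accessor name or a kIsVolatile template parameter, which is what lets these trivial accessors be force-inlined. A before/after sketch for a caller, with placeholder offset and object names:

// Before this change: volatility chosen by a runtime argument.
//   int64_t v = obj->GetField64(field_offset, true);       // volatile read
//   obj->SetField64<false>(field_offset, v, false);        // plain write
//
// After this change: volatility chosen statically.
//   int64_t v = obj->GetField64Volatile(field_offset);     // volatile read
//   obj->SetField64<false>(field_offset, v);               // plain write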
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index e0c14c3ea0..203a6b2510 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -51,11 +51,11 @@ inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_c
template<class T>
inline T* ObjectArray<T>::Get(int32_t i) {
- if (UNLIKELY(!CheckIsValidIndex(i))) {
+ if (!CheckIsValidIndex(i)) {
DCHECK(Thread::Current()->IsExceptionPending());
return NULL;
}
- return GetFieldObject<T>(OffsetOfElement(i), false);
+ return GetFieldObject<T>(OffsetOfElement(i));
}
template<class T> template<VerifyObjectFlags kVerifyFlags>
@@ -82,9 +82,8 @@ inline void ObjectArray<T>::Set(int32_t i, T* object) {
template<class T>
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline void ObjectArray<T>::Set(int32_t i, T* object) {
- if (LIKELY(CheckIsValidIndex(i) && CheckAssignable<kVerifyFlags>(object))) {
- SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object,
- false);
+ if (CheckIsValidIndex(i) && CheckAssignable<kVerifyFlags>(object)) {
+ SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
} else {
DCHECK(Thread::Current()->IsExceptionPending());
}
@@ -95,8 +94,7 @@ template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVer
inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) {
DCHECK(CheckIsValidIndex<kVerifyFlags>(i));
DCHECK(CheckAssignable<static_cast<VerifyObjectFlags>(kVerifyFlags & ~kVerifyThis)>(object));
- SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object,
- false);
+ SetFieldObject<kTransactionActive, kCheckTransaction, kVerifyFlags>(OffsetOfElement(i), object);
}
template<class T>
@@ -106,13 +104,13 @@ inline void ObjectArray<T>::SetWithoutChecksAndWriteBarrier(int32_t i, T* object
// TODO: enable this check. It fails when writing the image in ImageWriter::FixupObjectArray.
// DCHECK(CheckAssignable(object));
SetFieldObjectWithoutWriteBarrier<kTransactionActive, kCheckTransaction, kVerifyFlags>(
- OffsetOfElement(i), object, false);
+ OffsetOfElement(i), object);
}
template<class T>
inline T* ObjectArray<T>::GetWithoutChecks(int32_t i) {
DCHECK(CheckIsValidIndex(i));
- return GetFieldObject<T>(OffsetOfElement(i), false);
+ return GetFieldObject<T>(OffsetOfElement(i));
}
template<class T>
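
Get above uses the runtime's standard failure convention: the bounds check throws, leaving the exception pending on the current thread, and the accessor returns null. A hypothetical standalone shape of that convention, with a stub Thread standing in for the runtime's:

#include <cstdint>

struct Thread {
  bool exception_pending = false;
};

template <typename T>
T* Get(Thread* self, T** elements, int32_t length, int32_t i) {
  if (i < 0 || i >= length) {
    self->exception_pending = true;  // CheckIsValidIndex throws here in ART
    return nullptr;                  // callers must test for the pending exception
  }
  return elements[i];
}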
diff --git a/runtime/mirror/object_array.h b/runtime/mirror/object_array.h
index 26b1fd1093..54d12401fe 100644
--- a/runtime/mirror/object_array.h
+++ b/runtime/mirror/object_array.h
@@ -18,7 +18,6 @@
#define ART_RUNTIME_MIRROR_OBJECT_ARRAY_H_
#include "array.h"
-#include "gc/heap.h"
namespace art {
namespace mirror {
@@ -33,7 +32,7 @@ class MANAGED ObjectArray : public Array {
static ObjectArray<T>* Alloc(Thread* self, Class* object_array_class, int32_t length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- T* Get(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T* Get(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if the object can be stored into the array. If not, throws
// an ArrayStoreException and returns false.
@@ -41,11 +40,11 @@ class MANAGED ObjectArray : public Array {
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
bool CheckAssignable(T* object) NO_THREAD_SAFETY_ANALYSIS;
- void Set(int32_t i, T* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void Set(int32_t i, T* object) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// TODO fix thread safety analysis: should be SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void Set(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ void Set(int32_t i, T* object) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
  // Set an element without bounds or element-type checks; to be used in limited
  // circumstances, such as during boot image writing.
@@ -53,14 +52,15 @@ class MANAGED ObjectArray : public Array {
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetWithoutChecks(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ void SetWithoutChecks(int32_t i, T* object) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS;
// TODO fix thread safety analysis broken by the use of template. This should be
// SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
template<bool kTransactionActive, bool kCheckTransaction = true,
VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
- void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) NO_THREAD_SAFETY_ANALYSIS;
+ void SetWithoutChecksAndWriteBarrier(int32_t i, T* object) ALWAYS_INLINE
+ NO_THREAD_SAFETY_ANALYSIS;
- T* GetWithoutChecks(int32_t i) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ T* GetWithoutChecks(int32_t i) ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Copy src into this array (dealing with overlaps as memmove does) without assignability checks.
void AssignableMemmove(int32_t dst_pos, ObjectArray<T>* src, int32_t src_pos,
diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h
index 72f281df06..b63d13d602 100644
--- a/runtime/mirror/object_reference.h
+++ b/runtime/mirror/object_reference.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
#define ART_RUNTIME_MIRROR_OBJECT_REFERENCE_H_
-#include "base/mutex.h"
+#include "base/mutex.h" // For Locks::mutator_lock_.
#include "globals.h"
namespace art {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 32f30c3dc3..c494f133f2 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -35,6 +35,7 @@
#include "object-inl.h"
#include "object_array-inl.h"
#include "sirt_ref.h"
+#include "string-inl.h"
#include "UniquePtr.h"
namespace art {
diff --git a/runtime/mirror/proxy.h b/runtime/mirror/proxy.h
index ff019c6a27..5f9cceb164 100644
--- a/runtime/mirror/proxy.h
+++ b/runtime/mirror/proxy.h
@@ -17,7 +17,7 @@
#ifndef ART_RUNTIME_MIRROR_PROXY_H_
#define ART_RUNTIME_MIRROR_PROXY_H_
-#include "mirror/object.h"
+#include "object.h"
namespace art {
@@ -31,14 +31,12 @@ class MANAGED SynthesizedProxyClass : public Class {
public:
ObjectArray<Class>* GetInterfaces() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<Class> >(OFFSET_OF_OBJECT_MEMBER(SynthesizedProxyClass,
- interfaces_),
- false);
+ interfaces_));
}
ObjectArray<ObjectArray<Class> >* GetThrows() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return GetFieldObject<ObjectArray<ObjectArray<Class> > >(OFFSET_OF_OBJECT_MEMBER(SynthesizedProxyClass,
- throws_),
- false);
+ throws_));
}
private:
diff --git a/runtime/mirror/reference-inl.h b/runtime/mirror/reference-inl.h
index 0f76f77e0a..43767c8792 100644
--- a/runtime/mirror/reference-inl.h
+++ b/runtime/mirror/reference-inl.h
@@ -25,8 +25,8 @@ namespace mirror {
inline bool Reference::IsEnqueuable() {
// Not using volatile reads as an optimization since this is only called with all the mutators
// suspended.
- const Object* queue = GetFieldObject<mirror::Object>(QueueOffset(), false);
- const Object* queue_next = GetFieldObject<mirror::Object>(QueueNextOffset(), false);
+ const Object* queue = GetFieldObject<mirror::Object>(QueueOffset());
+ const Object* queue_next = GetFieldObject<mirror::Object>(QueueNextOffset());
return queue != nullptr && queue_next == nullptr;
}
diff --git a/runtime/mirror/reference.h b/runtime/mirror/reference.h
index c2a83ff855..cf65d2093b 100644
--- a/runtime/mirror/reference.h
+++ b/runtime/mirror/reference.h
@@ -43,26 +43,26 @@ class MANAGED Reference : public Object {
}
Object* GetReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Object>(ReferentOffset(), true);
+ return GetFieldObjectVolatile<Object>(ReferentOffset());
}
template<bool kTransactionActive>
void SetReferent(Object* referent) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(ReferentOffset(), referent, true);
+ SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), referent);
}
template<bool kTransactionActive>
void ClearReferent() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(ReferentOffset(), nullptr, true);
+ SetFieldObjectVolatile<kTransactionActive>(ReferentOffset(), nullptr);
}
// Volatile read/write is not necessary since the java pending next is only accessed from
// the java threads for cleared references. Once these cleared references have a null referent,
// we never end up reading their pending next from the GC again.
Reference* GetPendingNext() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Reference>(PendingNextOffset(), false);
+ return GetFieldObject<Reference>(PendingNextOffset());
}
template<bool kTransactionActive>
void SetPendingNext(Reference* pending_next) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next, false);
+ SetFieldObject<kTransactionActive>(PendingNextOffset(), pending_next);
}
bool IsEnqueued() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -93,10 +93,10 @@ class MANAGED FinalizerReference : public Reference {
template<bool kTransactionActive>
void SetZombie(Object* zombie) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return SetFieldObject<kTransactionActive>(ZombieOffset(), zombie, true);
+ return SetFieldObjectVolatile<kTransactionActive>(ZombieOffset(), zombie);
}
Object* GetZombie() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Object>(ZombieOffset(), true);
+ return GetFieldObjectVolatile<Object>(ZombieOffset());
}
private:
diff --git a/runtime/mirror/stack_trace_element.cc b/runtime/mirror/stack_trace_element.cc
index 5217e5eda3..f220039e38 100644
--- a/runtime/mirror/stack_trace_element.cc
+++ b/runtime/mirror/stack_trace_element.cc
@@ -60,13 +60,13 @@ template<bool kTransactionActive>
void StackTraceElement::Init(SirtRef<String>& declaring_class, SirtRef<String>& method_name,
SirtRef<String>& file_name, int32_t line_number) {
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- declaring_class.get(), false);
+ declaring_class.get());
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- method_name.get(), false);
+ method_name.get());
SetFieldObject<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- file_name.get(), false);
+ file_name.get());
SetField32<kTransactionActive>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_),
- line_number, false);
+ line_number);
}
void StackTraceElement::VisitRoots(RootCallback* callback, void* arg) {
diff --git a/runtime/mirror/stack_trace_element.h b/runtime/mirror/stack_trace_element.h
index 9e023c7dba..1acbbb064a 100644
--- a/runtime/mirror/stack_trace_element.h
+++ b/runtime/mirror/stack_trace_element.h
@@ -18,6 +18,7 @@
#define ART_RUNTIME_MIRROR_STACK_TRACE_ELEMENT_H_
#include "object.h"
+#include "object_callbacks.h"
namespace art {
@@ -30,22 +31,19 @@ namespace mirror {
class MANAGED StackTraceElement : public Object {
public:
String* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_),
- false);
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, declaring_class_));
}
String* GetMethodName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_),
- false);
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, method_name_));
}
String* GetFileName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_),
- false);
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, file_name_));
}
int32_t GetLineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_), false);
+ return GetField32(OFFSET_OF_OBJECT_MEMBER(StackTraceElement, line_number_));
}
static StackTraceElement* Alloc(Thread* self,
diff --git a/runtime/mirror/string-inl.h b/runtime/mirror/string-inl.h
new file mode 100644
index 0000000000..315f7b1951
--- /dev/null
+++ b/runtime/mirror/string-inl.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_MIRROR_STRING_INL_H_
+#define ART_RUNTIME_MIRROR_STRING_INL_H_
+
+#include "array.h"
+#include "intern_table.h"
+#include "runtime.h"
+#include "string.h"
+#include "thread.h"
+
+namespace art {
+namespace mirror {
+
+inline CharArray* String::GetCharArray() {
+ return GetFieldObject<CharArray>(ValueOffset());
+}
+
+inline int32_t String::GetLength() {
+ int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_));
+ DCHECK(result >= 0 && result <= GetCharArray()->GetLength());
+ return result;
+}
+
+inline void String::SetArray(CharArray* new_array) {
+ // Array is invariant so use non-transactional mode. Also disable check as we may run inside
+ // a transaction.
+ DCHECK(new_array != NULL);
+ SetFieldObject<false, false>(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array);
+}
+
+inline String* String::Intern() {
+ return Runtime::Current()->GetInternTable()->InternWeak(this);
+}
+
+inline uint16_t String::CharAt(int32_t index) {
+ // TODO: do we need this? Equals is the only caller, and could
+ // bounds check itself.
+ DCHECK_GE(count_, 0); // ensures the unsigned comparison is safe.
+ if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(count_))) {
+ Thread* self = Thread::Current();
+ ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+ self->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;",
+ "length=%i; index=%i", count_, index);
+ return 0;
+ }
+ return GetCharArray()->Get(index + GetOffset());
+}
+
+} // namespace mirror
+} // namespace art
+
+#endif // ART_RUNTIME_MIRROR_STRING_INL_H_
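
The single comparison in CharAt works because, once count_ is known to be non-negative (the DCHECK_GE), casting both operands to uint32_t wraps a negative index to a huge value, so one unsigned compare rejects both index < 0 and index >= count_. A self-contained illustration:

#include <cassert>
#include <cstdint>

// One unsigned compare covers both bounds, provided count is non-negative.
bool InBounds(int32_t index, int32_t count) {
  assert(count >= 0);  // mirrors the DCHECK_GE(count_, 0) in CharAt
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(count);
}

int main() {
  assert(!InBounds(-1, 10));  // -1 wraps to 0xFFFFFFFF
  assert(InBounds(0, 10));
  assert(!InBounds(10, 10));
  return 0;
}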
diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc
index 88a8e6f567..6a0c225022 100644
--- a/runtime/mirror/string.cc
+++ b/runtime/mirror/string.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "string.h"
+#include "string-inl.h"
#include "array.h"
#include "class-inl.h"
@@ -29,17 +29,8 @@
namespace art {
namespace mirror {
-CharArray* String::GetCharArray() {
- return GetFieldObject<CharArray>(ValueOffset(), false);
-}
-
-void String::ComputeHashCode() {
- SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()));
-}
-
-int32_t String::GetUtfLength() {
- return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength());
-}
+// TODO: get global references for these
+Class* String::java_lang_String_ = NULL;
int32_t String::FastIndexOf(int32_t ch, int32_t start) {
int32_t count = GetLength();
@@ -59,16 +50,6 @@ int32_t String::FastIndexOf(int32_t ch, int32_t start) {
return -1;
}
-void String::SetArray(CharArray* new_array) {
- // Array is invariant so use non-transactional mode. Also disable check as we may run inside
- // a transaction.
- DCHECK(new_array != NULL);
- SetFieldObject<false, false>(OFFSET_OF_OBJECT_MEMBER(String, array_), new_array, false);
-}
-
-// TODO: get global references for these
-Class* String::java_lang_String_ = NULL;
-
void String::SetClass(Class* java_lang_String) {
CHECK(java_lang_String_ == NULL);
CHECK(java_lang_String != NULL);
@@ -80,39 +61,23 @@ void String::ResetClass() {
java_lang_String_ = NULL;
}
-String* String::Intern() {
- return Runtime::Current()->GetInternTable()->InternWeak(this);
-}
-
int32_t String::GetHashCode() {
- int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false);
- if (result == 0) {
+ int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_));
+ if (UNLIKELY(result == 0)) {
ComputeHashCode();
}
- result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false);
+ result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_));
DCHECK(result != 0 || ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()) == 0)
<< ToModifiedUtf8() << " " << result;
return result;
}
-int32_t String::GetLength() {
- int32_t result = GetField32(OFFSET_OF_OBJECT_MEMBER(String, count_), false);
- DCHECK(result >= 0 && result <= GetCharArray()->GetLength());
- return result;
+void String::ComputeHashCode() {
+ SetHashCode(ComputeUtf16Hash(GetCharArray(), GetOffset(), GetLength()));
}
-uint16_t String::CharAt(int32_t index) {
- // TODO: do we need this? Equals is the only caller, and could
- // bounds check itself.
- DCHECK_GE(count_, 0); // ensures the unsigned comparison is safe.
- if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(count_))) {
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;",
- "length=%i; index=%i", count_, index);
- return 0;
- }
- return GetCharArray()->Get(index + GetOffset());
+int32_t String::GetUtfLength() {
+ return CountUtf8Bytes(GetCharArray()->GetData() + GetOffset(), GetLength());
}
String* String::AllocFromUtf16(Thread* self,
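
GetHashCode above is the usual lazy-hash pattern: zero doubles as the "not yet computed" sentinel, and the DCHECK tolerates the one legitimate zero-hash case, which is simply recomputed on every call. A minimal sketch of the pattern under those assumptions, with hypothetical names:

#include <cstdint>
#include <functional>
#include <string>

// Hypothetical standalone type, not the runtime's class.
struct LazilyHashed {
  std::string value;
  int32_t hash_code_ = 0;  // 0 doubles as "not yet computed"

  int32_t GetHashCode() {
    if (hash_code_ == 0) {  // rare after the first call
      // A value whose real hash is 0 is recomputed on every call, which is
      // exactly the case the DCHECK in String::GetHashCode allows for.
      hash_code_ = static_cast<int32_t>(std::hash<std::string>{}(value));
    }
    return hash_code_;
  }
};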
diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h
index de9e4c40aa..f97308edc0 100644
--- a/runtime/mirror/string.h
+++ b/runtime/mirror/string.h
@@ -17,12 +17,14 @@
#ifndef ART_RUNTIME_MIRROR_STRING_H_
#define ART_RUNTIME_MIRROR_STRING_H_
+#include <gtest/gtest.h>
+
#include "class.h"
-#include "gtest/gtest.h"
#include "object_callbacks.h"
namespace art {
+template<class T> class SirtRef;
struct StringClassOffsets;
struct StringOffsets;
class StringPiece;
@@ -47,7 +49,7 @@ class MANAGED String : public Object {
CharArray* GetCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
int32_t GetOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- int32_t result = GetField32(OffsetOffset(), false);
+ int32_t result = GetField32(OffsetOffset());
DCHECK_LE(0, result);
return result;
}
@@ -111,25 +113,25 @@ class MANAGED String : public Object {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
private:
- void SetHashCode(int32_t new_hash_code) {
+ void SetHashCode(int32_t new_hash_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Hash code is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
- DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), false));
- SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), new_hash_code, false);
+ DCHECK_EQ(0, GetField32(OFFSET_OF_OBJECT_MEMBER(String, hash_code_)));
+ SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, hash_code_), new_hash_code);
}
- void SetCount(int32_t new_count) {
+ void SetCount(int32_t new_count) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Count is invariant so use non-transactional mode. Also disable check as we may run inside
// a transaction.
DCHECK_LE(0, new_count);
- SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count, false);
+ SetField32<false, false>(OFFSET_OF_OBJECT_MEMBER(String, count_), new_count);
}
void SetOffset(int32_t new_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
// Offset is only used during testing so use non-transactional mode.
DCHECK_LE(0, new_offset);
DCHECK_GE(GetLength(), new_offset);
- SetField32<false>(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset, false);
+ SetField32<false>(OFFSET_OF_OBJECT_MEMBER(String, offset_), new_offset);
}
static String* Alloc(Thread* self, int32_t utf16_length)
diff --git a/runtime/mirror/throwable.cc b/runtime/mirror/throwable.cc
index 4c53993a6e..d393a13da6 100644
--- a/runtime/mirror/throwable.cc
+++ b/runtime/mirror/throwable.cc
@@ -32,16 +32,24 @@ namespace mirror {
Class* Throwable::java_lang_Throwable_ = NULL;
+void Throwable::SetDetailMessage(String* new_detail_message) {
+ if (Runtime::Current()->IsActiveTransaction()) {
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message);
+ } else {
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_),
+ new_detail_message);
+ }
+}
+
void Throwable::SetCause(Throwable* cause) {
CHECK(cause != nullptr);
CHECK(cause != this);
- Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_),
- false);
+ Throwable* current_cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
CHECK(current_cause == NULL || current_cause == this);
if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause, false);
+ SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause);
} else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause, false);
+ SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), cause);
}
}
@@ -78,7 +86,7 @@ std::string Throwable::Dump() {
source_file, line_number);
}
}
- Throwable* cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_), false);
+ Throwable* cause = GetFieldObject<Throwable>(OFFSET_OF_OBJECT_MEMBER(Throwable, cause_));
if (cause != NULL && cause != this) { // Constructor makes cause == this by default.
result += "Caused by: ";
result += cause->Dump();
diff --git a/runtime/mirror/throwable.h b/runtime/mirror/throwable.h
index c1438d7ed5..950b5e70e4 100644
--- a/runtime/mirror/throwable.h
+++ b/runtime/mirror/throwable.h
@@ -30,18 +30,12 @@ namespace mirror {
// C++ mirror of java.lang.Throwable
class MANAGED Throwable : public Object {
public:
- void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (Runtime::Current()->IsActiveTransaction()) {
- SetFieldObject<true>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message,
- false);
- } else {
- SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), new_detail_message,
- false);
- }
- }
+ void SetDetailMessage(String* new_detail_message) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
String* GetDetailMessage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_), false);
+ return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(Throwable, detail_message_));
}
+
std::string Dump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// This is a runtime version of initCause, you shouldn't use it if initCause may have been
@@ -62,7 +56,7 @@ class MANAGED Throwable : public Object {
private:
Object* GetStackState() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_), true);
+ return GetFieldObjectVolatile<Object>(OFFSET_OF_OBJECT_MEMBER(Throwable, stack_state_));
}
// Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
diff --git a/runtime/monitor.h b/runtime/monitor.h
index 0b80892ba9..15620d5829 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -31,18 +31,16 @@
namespace art {
+class LockWord;
template<class T> class SirtRef;
+class Thread;
+class StackVisitor;
+typedef uint32_t MonitorId;
namespace mirror {
class ArtMethod;
class Object;
} // namespace mirror
-class LockWord;
-template<class T> class SirtRef;
-class Thread;
-class StackVisitor;
-
-typedef uint32_t MonitorId;
class Monitor {
public:
diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h
index 82d0feef4d..32e1553891 100644
--- a/runtime/monitor_pool.h
+++ b/runtime/monitor_pool.h
@@ -17,11 +17,12 @@
#ifndef ART_RUNTIME_MONITOR_POOL_H_
#define ART_RUNTIME_MONITOR_POOL_H_
+#include "monitor.h"
+
#ifdef __LP64__
#include <bitset>
#include <stdint.h>
-#include "monitor.h"
#include "runtime.h"
#include "safe_map.h"
#endif
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 223107075c..953d3a6dbb 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -158,6 +158,7 @@ static void DexFile_closeDexFile(JNIEnv* env, jclass, jlong cookie) {
if (dex_file == nullptr) {
return;
}
+ ScopedObjectAccess soa(env);
if (Runtime::Current()->GetClassLinker()->IsDexFileRegistered(*dex_file)) {
return;
}
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index f95664b9c3..d6b47ebc87 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -16,7 +16,7 @@
#include "common_throws.h"
#include "jni_internal.h"
-#include "mirror/string.h"
+#include "mirror/string-inl.h"
#include "scoped_fast_native_object_access.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index ad0f317b2a..764db5e7b6 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -55,20 +55,20 @@ static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaOb
static jint Unsafe_getInt(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- return obj->GetField32(MemberOffset(offset), false);
+ return obj->GetField32(MemberOffset(offset));
}
static jint Unsafe_getIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- return obj->GetField32(MemberOffset(offset), true);
+ return obj->GetField32Volatile(MemberOffset(offset));
}
static void Unsafe_putInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
// JNI must use non transactional mode.
- obj->SetField32<false>(MemberOffset(offset), newValue, false);
+ obj->SetField32<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -76,7 +76,7 @@ static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong o
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
// JNI must use non transactional mode.
- obj->SetField32<false>(MemberOffset(offset), newValue, true);
+ obj->SetField32Volatile<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -85,26 +85,26 @@ static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong of
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
QuasiAtomic::MembarStoreStore();
// JNI must use non transactional mode.
- obj->SetField32<false>(MemberOffset(offset), newValue, false);
+ obj->SetField32<false>(MemberOffset(offset), newValue);
}
static jlong Unsafe_getLong(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- return obj->GetField64(MemberOffset(offset), false);
+ return obj->GetField64(MemberOffset(offset));
}
static jlong Unsafe_getLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- return obj->GetField64(MemberOffset(offset), true);
+ return obj->GetField64Volatile(MemberOffset(offset));
}
static void Unsafe_putLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
// JNI must use non transactional mode.
- obj->SetField64<false>(MemberOffset(offset), newValue, false);
+ obj->SetField64<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -112,7 +112,7 @@ static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
// JNI must use non transactional mode.
- obj->SetField64<false>(MemberOffset(offset), newValue, true);
+ obj->SetField64Volatile<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -121,20 +121,20 @@ static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong o
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
QuasiAtomic::MembarStoreStore();
// JNI must use non transactional mode.
- obj->SetField64<false>(MemberOffset(offset), newValue, false);
+ obj->SetField64<false>(MemberOffset(offset), newValue);
}
static jobject Unsafe_getObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- mirror::Object* value = obj->GetFieldObject<mirror::Object>(MemberOffset(offset), true);
+ mirror::Object* value = obj->GetFieldObjectVolatile<mirror::Object>(MemberOffset(offset));
return soa.AddLocalReference<jobject>(value);
}
static jobject Unsafe_getObject(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- mirror::Object* value = obj->GetFieldObject<mirror::Object>(MemberOffset(offset), false);
+ mirror::Object* value = obj->GetFieldObject<mirror::Object>(MemberOffset(offset));
return soa.AddLocalReference<jobject>(value);
}
@@ -144,7 +144,7 @@ static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
// JNI must use non transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue, false);
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -153,7 +153,7 @@ static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlon
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
// JNI must use non transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue, true);
+ obj->SetFieldObjectVolatile<false>(MemberOffset(offset), newValue);
}
static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset,
@@ -163,7 +163,7 @@ static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
QuasiAtomic::MembarStoreStore();
// JNI must use non transactional mode.
- obj->SetFieldObject<false>(MemberOffset(offset), newValue, false);
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
}
static jint Unsafe_getArrayBaseOffsetForComponentType(JNIEnv* env, jclass, jobject component_class) {
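
Each putOrdered* wrapper above has the same shape: a StoreStore barrier, then a plain non-transactional store. In C++11 atomics this is roughly a release-style publish; the following is a hedged analogue, not the runtime's own primitive:

#include <atomic>
#include <cstdint>

std::atomic<int32_t> ready{0};
int32_t payload = 0;

// Roughly the guarantee putOrderedInt provides: earlier stores are ordered
// before the final store, which is otherwise a plain write.
void OrderedPublish(int32_t v) {
  payload = v;
  std::atomic_thread_fence(std::memory_order_release);  // ~ QuasiAtomic::MembarStoreStore()
  ready.store(1, std::memory_order_relaxed);
}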
diff --git a/runtime/oat_file.h b/runtime/oat_file.h
index 5f6cb1ec31..b358a00e60 100644
--- a/runtime/oat_file.h
+++ b/runtime/oat_file.h
@@ -73,7 +73,7 @@ class OatFile {
class OatMethod {
public:
- void LinkMethod(mirror::ArtMethod* method) const;
+ void LinkMethod(mirror::ArtMethod* method) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
uint32_t GetCodeOffset() const {
return code_offset_;
diff --git a/runtime/reference_table.cc b/runtime/reference_table.cc
index a3119bbd12..11527fa2fe 100644
--- a/runtime/reference_table.cc
+++ b/runtime/reference_table.cc
@@ -23,7 +23,7 @@
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/string.h"
+#include "mirror/string-inl.h"
#include "thread.h"
#include "utils.h"
diff --git a/runtime/stack_indirect_reference_table.h b/runtime/stack_indirect_reference_table.h
index b113129db5..3b632e7806 100644
--- a/runtime/stack_indirect_reference_table.h
+++ b/runtime/stack_indirect_reference_table.h
@@ -20,6 +20,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "stack.h"
+#include "utils.h"
namespace art {
namespace mirror {
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 0fafbfa94d..7470670d0b 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1400,7 +1400,8 @@ class BuildInternalStackTraceVisitor : public StackVisitor {
return true; // Ignore runtime frames (in particular callee save).
}
method_trace_->Set<kTransactionActive>(count_, m);
- dex_pc_trace_->Set<kTransactionActive>(count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
+ dex_pc_trace_->Set<kTransactionActive>(count_,
+ m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
++count_;
return true;
}
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index e18cf04248..cc02a8de74 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -282,17 +282,29 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
constexpr bool kCheckTransaction = true;
switch (field_value.kind) {
case k32Bits:
- obj->SetField32<false, kCheckTransaction>(field_offset, static_cast<uint32_t>(field_value.value),
- field_value.is_volatile);
+ if (UNLIKELY(field_value.is_volatile)) {
+ obj->SetField32Volatile<false, kCheckTransaction>(field_offset,
+ static_cast<uint32_t>(field_value.value));
+ } else {
+ obj->SetField32<false, kCheckTransaction>(field_offset,
+ static_cast<uint32_t>(field_value.value));
+ }
break;
case k64Bits:
- obj->SetField64<false, kCheckTransaction>(field_offset, field_value.value,
- field_value.is_volatile);
+ if (UNLIKELY(field_value.is_volatile)) {
+ obj->SetField64Volatile<false, kCheckTransaction>(field_offset, field_value.value);
+ } else {
+ obj->SetField64<false, kCheckTransaction>(field_offset, field_value.value);
+ }
break;
case kReference:
- obj->SetFieldObject<false, kCheckTransaction>(field_offset,
- reinterpret_cast<mirror::Object*>(field_value.value),
- field_value.is_volatile);
+ if (UNLIKELY(field_value.is_volatile)) {
+ obj->SetFieldObjectVolatile<false, kCheckTransaction>(field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
+ } else {
+ obj->SetFieldObject<false, kCheckTransaction>(field_offset,
+ reinterpret_cast<mirror::Object*>(field_value.value));
+ }
break;
default:
LOG(FATAL) << "Unknown value kind " << field_value.kind;
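
Because volatility is now a compile-time property of the setters, rollback code keeps the recorded is_volatile flag at runtime and branches once per field to select the right instantiation, as the switch above does for 32-bit, 64-bit, and reference fields. The same bridge in miniature, with hypothetical names:

#include <cstdint>

template <bool kVolatile>
void Store32(int32_t* field, int32_t value) {
  if (kVolatile) {
    *const_cast<volatile int32_t*>(field) = value;  // stand-in for a volatile store
  } else {
    *field = value;
  }
}

// A runtime flag picks between the two static specializations.
void UndoWrite32(int32_t* field, int32_t old_value, bool is_volatile) {
  if (is_volatile) {
    Store32<true>(field, old_value);
  } else {
    Store32<false>(field, old_value);
  }
}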