Diffstat (limited to 'runtime/native')
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc    |   2
-rw-r--r--  runtime/native/dalvik_system_VMRuntime.cc  |   5
-rw-r--r--  runtime/native/dalvik_system_VMStack.cc    |   2
-rw-r--r--  runtime/native/dalvik_system_Zygote.cc     |   2
-rw-r--r--  runtime/native/java_lang_System.cc         | 342
-rw-r--r--  runtime/native/sun_misc_Unsafe.cc          |  58
6 files changed, 119 insertions, 292 deletions
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 2c75ecc01e..1a3ceb80d8 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -290,7 +290,7 @@ static jboolean DexFile_isDexOptNeeded(JNIEnv* env, jclass, jstring javaFilename
return JNI_TRUE;
}
if (oat_file->GetOatHeader().GetImageFileLocationOatDataBegin()
- != reinterpret_cast<uint32_t>(image_header.GetOatDataBegin())) {
+ != reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin())) {
if (kDebugLogging) {
ScopedObjectAccess soa(env);
LOG(INFO) << "DexFile_isDexOptNeeded cache file " << cache_location
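
The cast fix above is a 64-bit portability repair: reinterpret_cast from a
pointer to uint32_t is ill-formed on LP64 targets (clang and gcc reject casts
to a smaller integer type), and would truncate the address even if it compiled.
A minimal standalone sketch of the difference, assuming a 64-bit build; the
function and parameter names are illustrative, not ART's:

#include <cstdint>

void CompareOatDataBegin(const uint8_t* oat_data_begin) {
  // Old shape: rejected on LP64 ("cast from pointer to smaller type
  // 'uint32_t' loses information"), and would drop the high 32 bits.
  // uint32_t bad = reinterpret_cast<uint32_t>(oat_data_begin);

  // New shape: uintptr_t is guaranteed wide enough for any object pointer;
  // comparing it against the 32-bit header field widens the field instead.
  uintptr_t good = reinterpret_cast<uintptr_t>(oat_data_begin);
  (void)good;
}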
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index c9e255c99b..e1b5f97429 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -90,7 +90,7 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
ThrowRuntimeException("Trying to get address of movable array object");
return 0;
}
- return reinterpret_cast<uintptr_t>(array->GetRawData(array->GetClass()->GetComponentSize()));
+ return reinterpret_cast<uintptr_t>(array->GetRawData(array->GetClass()->GetComponentSize(), 0));
}
static void VMRuntime_clearGrowthLimit(JNIEnv*, jobject) {
@@ -181,7 +181,8 @@ static void VMRuntime_concurrentGC(JNIEnv* env, jobject) {
typedef std::map<std::string, mirror::String*> StringTable;
-static mirror::Object* PreloadDexCachesStringsVisitor(mirror::Object* root, void* arg) {
+static mirror::Object* PreloadDexCachesStringsVisitor(mirror::Object* root, void* arg)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StringTable& table = *reinterpret_cast<StringTable*>(arg);
mirror::String* string = const_cast<mirror::Object*>(root)->AsString();
// LOG(INFO) << "VMRuntime.preloadDexCaches interned=" << string->ToModifiedUtf8();
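
SHARED_LOCKS_REQUIRED here is ART's wrapper for Clang's thread-safety
attributes: annotating the root visitor lets -Wthread-safety verify at compile
time that every caller holds Locks::mutator_lock_, which the body needs before
it may call AsString() on a mirror object. A sketch of how such a macro is
typically defined (the real definition lives in ART's macros header, so treat
the exact spelling as an assumption):

#if defined(__clang__)
// Clang checks these attributes when -Wthread-safety is enabled.
#define SHARED_LOCKS_REQUIRED(...) __attribute__((shared_locks_required(__VA_ARGS__)))
#else
// No-op on compilers without the analysis.
#define SHARED_LOCKS_REQUIRED(...)
#endif

The VMStack change below applies the same annotation to a StackVisitor
callback for the same reason.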
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index f91536544a..7e02e29d3f 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -79,7 +79,7 @@ static jobject VMStack_getClosestUserClassLoader(JNIEnv* env, jclass, jobject ja
ClosestUserClassLoaderVisitor(Thread* thread, mirror::Object* bootstrap, mirror::Object* system)
: StackVisitor(thread, NULL), bootstrap(bootstrap), system(system), class_loader(NULL) {}
- bool VisitFrame() {
+ bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK(class_loader == NULL);
mirror::Class* c = GetMethod()->GetDeclaringClass();
mirror::Object* cl = c->GetClassLoader();
diff --git a/runtime/native/dalvik_system_Zygote.cc b/runtime/native/dalvik_system_Zygote.cc
index 7fa9457eb3..22c543055b 100644
--- a/runtime/native/dalvik_system_Zygote.cc
+++ b/runtime/native/dalvik_system_Zygote.cc
@@ -47,8 +47,10 @@
#if defined(__linux__)
#include <sys/personality.h>
#include <sys/utsname.h>
+#if defined(HAVE_ANDROID_OS)
#include <sys/capability.h>
#endif
+#endif
namespace art {
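
The added guard narrows <sys/capability.h> to Android builds: bionic ships the
header, but a stock glibc host may not (there it comes from the separate
libcap package), while <sys/personality.h> and <sys/utsname.h> are available
on any Linux. The resulting include structure is:

#if defined(__linux__)
#include <sys/personality.h>
#include <sys/utsname.h>
#if defined(HAVE_ANDROID_OS)
// Only device (bionic) builds are guaranteed to have this header.
#include <sys/capability.h>
#endif
#endif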
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index ea78e04702..6bbe642217 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -24,150 +24,14 @@
#include "mirror/object_array-inl.h"
#include "scoped_fast_native_object_access.h"
-/*
- * We make guarantees about the atomicity of accesses to primitive
- * variables. These guarantees also apply to elements of arrays.
- * In particular, 8-bit, 16-bit, and 32-bit accesses must be atomic and
- * must not cause "word tearing". Accesses to 64-bit array elements must
- * either be atomic or treated as two 32-bit operations. References are
- * always read and written atomically, regardless of the number of bits
- * used to represent them.
- *
- * We can't rely on standard libc functions like memcpy(3) and memmove(3)
- * in our implementation of System.arraycopy, because they may copy
- * byte-by-byte (either for the full run or for "unaligned" parts at the
- * start or end). We need to use functions that guarantee 16-bit or 32-bit
- * atomicity as appropriate.
- *
- * System.arraycopy() is heavily used, so having an efficient implementation
- * is important. The bionic libc provides a platform-optimized memory move
- * function that should be used when possible. If it's not available,
- * the trivial "reference implementation" versions below can be used until
- * a proper version can be written.
- *
- * For these functions, the caller must guarantee that dst/src are aligned
- * appropriately for the element type, and that n is a multiple of the
- * element size.
- */
+namespace art {
/*
- * Works like memmove(), except:
- * - if all arguments are at least 32-bit aligned, we guarantee that we
- * will use operations that preserve atomicity of 32-bit values
- * - if not, we guarantee atomicity of 16-bit values
- *
- * If all three arguments are not at least 16-bit aligned, the behavior
- * of this function is undefined. (We could remove this restriction by
- * testing for unaligned values and punting to memmove(), but that's
- * not currently useful.)
- *
- * TODO: add loop for 64-bit alignment
- * TODO: use __builtin_prefetch
- * TODO: write ARM/MIPS/x86 optimized versions
+ * We make guarantees about the atomicity of accesses to primitive variables. These guarantees
+ * also apply to elements of arrays. In particular, 8-bit, 16-bit, and 32-bit accesses must not
+ * cause "word tearing". Accesses to 64-bit array elements may be two 32-bit operations.
+ * References are never torn regardless of the number of bits used to represent them.
*/
-void MemmoveWords(void* dst, const void* src, size_t n) {
- DCHECK_EQ((((uintptr_t) dst | (uintptr_t) src | n) & 0x01), 0U);
-
- char* d = reinterpret_cast<char*>(dst);
- const char* s = reinterpret_cast<const char*>(src);
- size_t copyCount;
-
- // If the source and destination pointers are the same, this is
- // an expensive no-op. Testing for an empty move now allows us
- // to skip a check later.
- if (n == 0 || d == s) {
- return;
- }
-
- // Determine if the source and destination buffers will overlap if
- // we copy data forward (i.e. *dst++ = *src++).
- //
- // It's okay if the destination buffer starts before the source and
- // there is some overlap, because the reader is always ahead of the
- // writer.
- if (LIKELY((d < s) || ((size_t)(d - s) >= n))) {
- // Copy forward. We prefer 32-bit loads and stores even for 16-bit
- // data, so sort that out.
- if (((reinterpret_cast<uintptr_t>(d) | reinterpret_cast<uintptr_t>(s)) & 0x03) != 0) {
- // Not 32-bit aligned. Two possibilities:
- // (1) Congruent, we can align to 32-bit by copying one 16-bit val
- // (2) Non-congruent, we can do one of:
- // a. copy whole buffer as a series of 16-bit values
- // b. load/store 32 bits, using shifts to ensure alignment
- //           c. just copy the data as 32-bit values and assume the CPU
- // will do a reasonable job
- //
- // We're currently using (a), which is suboptimal.
- if (((reinterpret_cast<uintptr_t>(d) ^ reinterpret_cast<uintptr_t>(s)) & 0x03) != 0) {
- copyCount = n;
- } else {
- copyCount = 2;
- }
- n -= copyCount;
- copyCount /= sizeof(uint16_t);
-
- while (copyCount--) {
- *reinterpret_cast<uint16_t*>(d) = *reinterpret_cast<const uint16_t*>(s);
- d += sizeof(uint16_t);
- s += sizeof(uint16_t);
- }
- }
-
- // Copy 32-bit aligned words.
- copyCount = n / sizeof(uint32_t);
- while (copyCount--) {
- *reinterpret_cast<uint32_t*>(d) = *reinterpret_cast<const uint32_t*>(s);
- d += sizeof(uint32_t);
- s += sizeof(uint32_t);
- }
-
- // Check for leftovers. Either we finished exactly, or we have one remaining 16-bit chunk.
- if ((n & 0x02) != 0) {
- *reinterpret_cast<uint16_t*>(d) = *reinterpret_cast<const uint16_t*>(s);
- }
- } else {
- // Copy backward, starting at the end.
- d += n;
- s += n;
-
- if (((reinterpret_cast<uintptr_t>(d) | reinterpret_cast<uintptr_t>(s)) & 0x03) != 0) {
- // try for 32-bit alignment.
- if (((reinterpret_cast<uintptr_t>(d) ^ reinterpret_cast<uintptr_t>(s)) & 0x03) != 0) {
- copyCount = n;
- } else {
- copyCount = 2;
- }
- n -= copyCount;
- copyCount /= sizeof(uint16_t);
-
- while (copyCount--) {
- d -= sizeof(uint16_t);
- s -= sizeof(uint16_t);
- *reinterpret_cast<uint16_t*>(d) = *reinterpret_cast<const uint16_t*>(s);
- }
- }
-
- // Copy 32-bit aligned words.
- copyCount = n / sizeof(uint32_t);
- while (copyCount--) {
- d -= sizeof(uint32_t);
- s -= sizeof(uint32_t);
- *reinterpret_cast<uint32_t*>(d) = *reinterpret_cast<const uint32_t*>(s);
- }
-
- // Copy leftovers.
- if ((n & 0x02) != 0) {
- d -= sizeof(uint16_t);
- s -= sizeof(uint16_t);
- *reinterpret_cast<uint16_t*>(d) = *reinterpret_cast<const uint16_t*>(s);
- }
- }
-}
-
-#define move16 MemmoveWords
-#define move32 MemmoveWords
-
-namespace art {
static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::Object* array)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
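
The deleted MemmoveWords above was a hand-rolled, alignment-aware copy loop
whose whole purpose was the contract the surviving comment still states: 8-,
16-, and 32-bit array elements must not tear, which memcpy(3)/memmove(3) do
not promise because they may copy byte-by-byte. The copy itself moves into
typed mirror array Memmove helpers used by the next hunk. A freestanding
sketch of the guarantee those helpers have to provide, assuming (as the old
code did) that aligned single-element loads and stores do not tear on the
target CPU; the names are illustrative:

#include <cstddef>

// Copy element-by-element so every element is read and written in one
// aligned access of its own width. Handles overlap like memmove.
template <typename T>
void ElementAtomicMemmove(T* dst, const T* src, size_t count) {
  if (dst == src || count == 0) {
    return;
  }
  if (dst < src) {
    for (size_t i = 0; i < count; ++i) {
      dst[i] = src[i];  // One load and one store per element.
    }
  } else {
    for (size_t i = count; i > 0; --i) {
      dst[i - 1] = src[i - 1];  // dst overlaps ahead of src: copy backwards.
    }
  }
}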
@@ -178,168 +42,132 @@ static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::
"%s of type %s is not an array", identifier, actualType.c_str());
}
-static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) {
+static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst,
+ jint dstPos, jint length) {
+ // The API is defined in terms of length, but length is somewhat overloaded so we use count.
+ const jint count = length;
ScopedFastNativeObjectAccess soa(env);
// Null pointer checks.
- if (UNLIKELY(javaSrc == NULL)) {
- ThrowNullPointerException(NULL, "src == null");
+ if (UNLIKELY(javaSrc == nullptr)) {
+ ThrowNullPointerException(nullptr, "src == null");
return;
}
- if (UNLIKELY(javaDst == NULL)) {
- ThrowNullPointerException(NULL, "dst == null");
+ if (UNLIKELY(javaDst == nullptr)) {
+ ThrowNullPointerException(nullptr, "dst == null");
return;
}
// Make sure source and destination are both arrays.
mirror::Object* srcObject = soa.Decode<mirror::Object*>(javaSrc);
- mirror::Object* dstObject = soa.Decode<mirror::Object*>(javaDst);
if (UNLIKELY(!srcObject->IsArrayInstance())) {
ThrowArrayStoreException_NotAnArray("source", srcObject);
return;
}
+ mirror::Object* dstObject = soa.Decode<mirror::Object*>(javaDst);
if (UNLIKELY(!dstObject->IsArrayInstance())) {
ThrowArrayStoreException_NotAnArray("destination", dstObject);
return;
}
mirror::Array* srcArray = srcObject->AsArray();
mirror::Array* dstArray = dstObject->AsArray();
- mirror::Class* srcComponentType = srcArray->GetClass()->GetComponentType();
- mirror::Class* dstComponentType = dstArray->GetClass()->GetComponentType();
// Bounds checking.
- if (UNLIKELY(srcPos < 0 || dstPos < 0 || length < 0 || srcPos > srcArray->GetLength() - length || dstPos > dstArray->GetLength() - length)) {
+ if (UNLIKELY(srcPos < 0) || UNLIKELY(dstPos < 0) || UNLIKELY(count < 0) ||
+ UNLIKELY(srcPos > srcArray->GetLength() - count) ||
+ UNLIKELY(dstPos > dstArray->GetLength() - count)) {
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
"src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d",
- srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos, length);
+ srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos,
+ count);
return;
}
- // Handle primitive arrays.
- if (srcComponentType->IsPrimitive() || dstComponentType->IsPrimitive()) {
- // If one of the arrays holds a primitive type the other array must hold the exact same type.
- if (UNLIKELY(srcComponentType != dstComponentType)) {
- std::string srcType(PrettyTypeOf(srcArray));
- std::string dstType(PrettyTypeOf(dstArray));
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
- "Incompatible types: src=%s, dst=%s",
- srcType.c_str(), dstType.c_str());
- return;
- }
-
- size_t width = srcArray->GetClass()->GetComponentSize();
- uint8_t* dstBytes = reinterpret_cast<uint8_t*>(dstArray->GetRawData(width));
- const uint8_t* srcBytes = reinterpret_cast<const uint8_t*>(srcArray->GetRawData(width));
-
- switch (width) {
- case 1:
- memmove(dstBytes + dstPos, srcBytes + srcPos, length);
- break;
- case 2:
- move16(dstBytes + dstPos * 2, srcBytes + srcPos * 2, length * 2);
- break;
- case 4:
- move32(dstBytes + dstPos * 4, srcBytes + srcPos * 4, length * 4);
- break;
- case 8:
- // We don't need to guarantee atomicity of the entire 64-bit word.
- move32(dstBytes + dstPos * 8, srcBytes + srcPos * 8, length * 8);
- break;
- default:
- LOG(FATAL) << "Unknown primitive array type: " << PrettyTypeOf(srcArray);
- }
-
- return;
- }
-
- // Neither class is primitive. Are the types trivially compatible?
- const size_t width = sizeof(mirror::Object*);
- uint8_t* dstBytes = reinterpret_cast<uint8_t*>(dstArray->GetRawData(width));
- const uint8_t* srcBytes = reinterpret_cast<const uint8_t*>(srcArray->GetRawData(width));
- if (dstArray == srcArray || dstComponentType->IsAssignableFrom(srcComponentType)) {
- // Yes. Bulk copy.
- COMPILE_ASSERT(sizeof(width) == sizeof(uint32_t), move32_assumes_Object_references_are_32_bit);
- move32(dstBytes + dstPos * width, srcBytes + srcPos * width, length * width);
- Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length);
- return;
- }
-
- // The arrays are not trivially compatible. However, we may still be able to copy some or all of
- // the elements if the source objects are compatible (for example, copying an Object[] to
- // String[], the Objects being copied might actually be Strings).
- // We can't do a bulk move because that would introduce a check-use race condition, so we copy
- // elements one by one.
-
- // We already dealt with overlapping copies, so we don't need to cope with that case below.
- CHECK_NE(dstArray, srcArray);
-
- mirror::Object* const * srcObjects =
- reinterpret_cast<mirror::Object* const *>(srcBytes + srcPos * width);
- mirror::Object** dstObjects = reinterpret_cast<mirror::Object**>(dstBytes + dstPos * width);
- mirror::Class* dstClass = dstArray->GetClass()->GetComponentType();
-
- // We want to avoid redundant IsAssignableFrom checks where possible, so we cache a class that
- // we know is assignable to the destination array's component type.
- mirror::Class* lastAssignableElementClass = dstClass;
-
- mirror::Object* o = NULL;
- int i = 0;
- for (; i < length; ++i) {
- o = srcObjects[i];
- if (o != NULL) {
- mirror::Class* oClass = o->GetClass();
- if (lastAssignableElementClass == oClass) {
- dstObjects[i] = o;
- } else if (dstClass->IsAssignableFrom(oClass)) {
- lastAssignableElementClass = oClass;
- dstObjects[i] = o;
- } else {
- // Can't put this element into the array.
- break;
+ mirror::Class* dstComponentType = dstArray->GetClass()->GetComponentType();
+ mirror::Class* srcComponentType = srcArray->GetClass()->GetComponentType();
+ Primitive::Type dstComponentPrimitiveType = dstComponentType->GetPrimitiveType();
+
+ if (LIKELY(srcComponentType == dstComponentType)) {
+ // Trivial assignability.
+ switch (dstComponentPrimitiveType) {
+ case Primitive::kPrimVoid:
+ LOG(FATAL) << "Unreachable, cannot have arrays of type void";
+ return;
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimByte:
+ DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 1U);
+ dstArray->AsByteSizedArray()->Memmove(dstPos, srcArray->AsByteSizedArray(), srcPos, count);
+ return;
+ case Primitive::kPrimChar:
+ case Primitive::kPrimShort:
+ DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 2U);
+ dstArray->AsShortSizedArray()->Memmove(dstPos, srcArray->AsShortSizedArray(), srcPos, count);
+ return;
+ case Primitive::kPrimInt:
+ case Primitive::kPrimFloat:
+ DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 4U);
+ dstArray->AsIntArray()->Memmove(dstPos, srcArray->AsIntArray(), srcPos, count);
+ return;
+ case Primitive::kPrimLong:
+ case Primitive::kPrimDouble:
+ DCHECK_EQ(Primitive::ComponentSize(dstComponentPrimitiveType), 8U);
+ dstArray->AsLongArray()->Memmove(dstPos, srcArray->AsLongArray(), srcPos, count);
+ return;
+ case Primitive::kPrimNot: {
+ mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>();
+ dstObjArray->AssignableMemmove(dstPos, srcObjArray, srcPos, count);
+ return;
}
- } else {
- dstObjects[i] = NULL;
+ default:
+ LOG(FATAL) << "Unknown array type: " << PrettyTypeOf(srcArray);
+ return;
}
}
-
- Runtime::Current()->GetHeap()->WriteBarrierArray(dstArray, dstPos, length);
- if (UNLIKELY(i != length)) {
- std::string actualSrcType(PrettyTypeOf(o));
+ // If one of the arrays holds a primitive type the other array must hold the exact same type.
+ if (UNLIKELY((dstComponentPrimitiveType != Primitive::kPrimNot) ||
+ srcComponentType->IsPrimitive())) {
+ std::string srcType(PrettyTypeOf(srcArray));
std::string dstType(PrettyTypeOf(dstArray));
ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
- "source[%d] of type %s cannot be stored in destination array of type %s",
- srcPos + i, actualSrcType.c_str(), dstType.c_str());
+ "Incompatible types: src=%s, dst=%s",
+ srcType.c_str(), dstType.c_str());
+ return;
+ }
+ // Arrays hold distinct types and therefore can't alias - use memcpy instead of memmove.
+ mirror::ObjectArray<mirror::Object>* dstObjArray = dstArray->AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* srcObjArray = srcArray->AsObjectArray<mirror::Object>();
+ // If we're assigning into say Object[] then we don't need per element checks.
+ if (dstComponentType->IsAssignableFrom(srcComponentType)) {
+ dstObjArray->AssignableMemcpy(dstPos, srcObjArray, srcPos, count);
return;
}
+ dstObjArray->AssignableCheckingMemcpy(dstPos, srcObjArray, srcPos, count, true);
}
-static void System_arraycopyCharUnchecked(JNIEnv* env, jclass, jobject javaSrc, jint srcPos, jobject javaDst, jint dstPos, jint length) {
+static void System_arraycopyCharUnchecked(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
+ jobject javaDst, jint dstPos, jint count) {
ScopedFastNativeObjectAccess soa(env);
- DCHECK(javaSrc != NULL);
- DCHECK(javaDst != NULL);
mirror::Object* srcObject = soa.Decode<mirror::Object*>(javaSrc);
mirror::Object* dstObject = soa.Decode<mirror::Object*>(javaDst);
- DCHECK(srcObject->IsArrayInstance());
- DCHECK(dstObject->IsArrayInstance());
+ DCHECK(srcObject != nullptr);
+ DCHECK(dstObject != nullptr);
mirror::Array* srcArray = srcObject->AsArray();
mirror::Array* dstArray = dstObject->AsArray();
- DCHECK(srcPos >= 0 && dstPos >= 0 && length >= 0 &&
- srcPos + length <= srcArray->GetLength() && dstPos + length <= dstArray->GetLength());
- DCHECK_EQ(srcArray->GetClass()->GetComponentType(), dstArray->GetClass()->GetComponentType());
- DCHECK(srcArray->GetClass()->GetComponentType()->IsPrimitive());
- DCHECK(dstArray->GetClass()->GetComponentType()->IsPrimitive());
- DCHECK_EQ(srcArray->GetClass()->GetComponentSize(), static_cast<size_t>(2));
- DCHECK_EQ(dstArray->GetClass()->GetComponentSize(), static_cast<size_t>(2));
- uint8_t* dstBytes = reinterpret_cast<uint8_t*>(dstArray->GetRawData(2));
- const uint8_t* srcBytes = reinterpret_cast<const uint8_t*>(srcArray->GetRawData(2));
- move16(dstBytes + dstPos * 2, srcBytes + srcPos * 2, length * 2);
+ DCHECK_GE(srcPos, 0);
+ DCHECK_GE(dstPos, 0);
+ DCHECK_GE(count, 0);
+ DCHECK_LE(srcPos + count, srcArray->GetLength());
+ DCHECK_LE(dstPos + count, dstArray->GetLength());
+ DCHECK_EQ(srcArray->GetClass(), dstArray->GetClass());
+ DCHECK_EQ(srcArray->GetClass()->GetComponentType()->GetPrimitiveType(), Primitive::kPrimChar);
+ dstArray->AsCharArray()->Memmove(dstPos, srcArray->AsCharArray(), srcPos, count);
}
static jint System_identityHashCode(JNIEnv* env, jclass, jobject javaObject) {
- if (javaObject == nullptr) {
+ if (UNLIKELY(javaObject == nullptr)) {
return 0;
}
ScopedFastNativeObjectAccess soa(env);
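
AssignableCheckingMemcpy packages up the open-coded per-element loop deleted
above, keeping its two tricks: cache the last source class proven assignable
to the destination component type so IsAssignableFrom runs once per distinct
class rather than once per element, and on a failing element store what fits,
issue the write barrier, then throw ArrayStoreException naming the first bad
index. A standalone analogue using C++ RTTI in place of mirror::Class;
everything here is an illustrative stand-in, not ART's implementation:

#include <cstddef>
#include <typeinfo>

struct Object { virtual ~Object() = default; };

// Returns the number of elements copied; a result < count means the element
// at that index was not assignable and the caller should throw.
template <typename DstComponent>
size_t AssignableCheckingCopy(Object** dst, Object* const* src, size_t count) {
  const std::type_info* last_assignable = nullptr;  // Per-class check cache.
  for (size_t i = 0; i < count; ++i) {
    Object* o = src[i];
    if (o != nullptr) {
      const std::type_info& cls = typeid(*o);
      if (last_assignable == nullptr || *last_assignable != cls) {
        if (dynamic_cast<DstComponent*>(o) == nullptr) {
          return i;  // Partial copy: elements [0, i) were stored.
        }
        last_assignable = &cls;
      }
    }
    dst[i] = o;
  }
  return count;
}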
diff --git a/runtime/native/sun_misc_Unsafe.cc b/runtime/native/sun_misc_Unsafe.cc
index b5fc7e7be5..6c22003396 100644
--- a/runtime/native/sun_misc_Unsafe.cc
+++ b/runtime/native/sun_misc_Unsafe.cc
@@ -14,7 +14,6 @@
* limitations under the License.
*/
-#include "atomic.h"
#include "gc/accounting/card_table-inl.h"
#include "jni_internal.h"
#include "mirror/object.h"
@@ -23,40 +22,30 @@
namespace art {
-static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint expectedValue, jint newValue) {
+static jboolean Unsafe_compareAndSwapInt(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jint expectedValue, jint newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- byte* raw_addr = reinterpret_cast<byte*>(obj) + offset;
- volatile int32_t* address = reinterpret_cast<volatile int32_t*>(raw_addr);
- // Note: android_atomic_release_cas() returns 0 on success, not failure.
- int result = android_atomic_release_cas(expectedValue, newValue, address);
- return (result == 0) ? JNI_TRUE : JNI_FALSE;
+ bool success = obj->CasField32(MemberOffset(offset), expectedValue, newValue);
+ return success ? JNI_TRUE : JNI_FALSE;
}
-static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong expectedValue, jlong newValue) {
+static jboolean Unsafe_compareAndSwapLong(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jlong expectedValue, jlong newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- byte* raw_addr = reinterpret_cast<byte*>(obj) + offset;
- volatile int64_t* address = reinterpret_cast<volatile int64_t*>(raw_addr);
- // Note: android_atomic_cmpxchg() returns 0 on success, not failure.
- bool success = QuasiAtomic::Cas64(expectedValue, newValue, address);
+ bool success = obj->CasField64(MemberOffset(offset), expectedValue, newValue);
return success ? JNI_TRUE : JNI_FALSE;
}
-static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaExpectedValue, jobject javaNewValue) {
+static jboolean Unsafe_compareAndSwapObject(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jobject javaExpectedValue, jobject javaNewValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
mirror::Object* expectedValue = soa.Decode<mirror::Object*>(javaExpectedValue);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
- byte* raw_addr = reinterpret_cast<byte*>(obj) + offset;
- int32_t* address = reinterpret_cast<int32_t*>(raw_addr);
- // Note: android_atomic_cmpxchg() returns 0 on success, not failure.
- int result = android_atomic_release_cas(reinterpret_cast<int32_t>(expectedValue),
- reinterpret_cast<int32_t>(newValue), address);
- if (result == 0) {
- Runtime::Current()->GetHeap()->WriteBarrierField(obj, MemberOffset(offset), newValue);
- }
- return (result == 0) ? JNI_TRUE : JNI_FALSE;
+ bool success = obj->CasFieldObject(MemberOffset(offset), expectedValue, newValue);
+ return success ? JNI_TRUE : JNI_FALSE;
}
static jint Unsafe_getInt(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
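
All three compare-and-swap intrinsics now delegate to mirror::Object field
accessors (CasField32, CasField64, CasFieldObject), which centralize the
atomic operation and, for the object case, the write barrier the old
open-coded version issued by hand. A freestanding sketch of the 32-bit
primitive and a typical retry-loop caller, using the GCC/Clang builtin; how
CasField32 actually bottoms out is an assumption here:

#include <cstdint>

// Single-shot compare-and-swap: store desired iff *address == expected.
inline bool Cas32(volatile int32_t* address, int32_t expected, int32_t desired) {
  return __sync_bool_compare_and_swap(address, expected, desired);
}

// Typical caller: reread and retry until the CAS wins the race.
inline void AtomicAdd32(volatile int32_t* address, int32_t delta) {
  int32_t old_value;
  do {
    old_value = *address;
  } while (!Cas32(address, old_value, old_value + delta));
}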
@@ -77,13 +66,15 @@ static void Unsafe_putInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, j
obj->SetField32(MemberOffset(offset), newValue, false);
}
-static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) {
+static void Unsafe_putIntVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jint newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
obj->SetField32(MemberOffset(offset), newValue, true);
}
-static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong offset, jint newValue) {
+static void Unsafe_putOrderedInt(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jint newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
QuasiAtomic::MembarStoreStore();
@@ -108,13 +99,15 @@ static void Unsafe_putLong(JNIEnv* env, jobject, jobject javaObj, jlong offset,
obj->SetField64(MemberOffset(offset), newValue, false);
}
-static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) {
+static void Unsafe_putLongVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jlong newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
obj->SetField64(MemberOffset(offset), newValue, true);
}
-static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong offset, jlong newValue) {
+static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jlong newValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
QuasiAtomic::MembarStoreStore();
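
The putOrdered* entry points pair QuasiAtomic::MembarStoreStore() with a
plain, non-volatile store: earlier writes are guaranteed visible before the
field write, without the stronger two-way ordering a volatile store pays for.
In C++11 terms this is roughly a release store (release is slightly stronger,
since it also orders prior loads); an illustrative sketch, not ART code:

#include <atomic>
#include <cstdint>

// Prior stores cannot be reordered past this write; the write itself is
// otherwise plain. Matches the MembarStoreStore-then-store pattern above.
inline void PutOrderedInt(std::atomic<int32_t>* field, int32_t new_value) {
  field->store(new_value, std::memory_order_release);
}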
@@ -124,32 +117,35 @@ static void Unsafe_putOrderedLong(JNIEnv* env, jobject, jobject javaObj, jlong o
static jobject Unsafe_getObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- mirror::Object* value = obj->GetFieldObject<mirror::Object*>(MemberOffset(offset), true);
+ mirror::Object* value = obj->GetFieldObject<mirror::Object>(MemberOffset(offset), true);
return soa.AddLocalReference<jobject>(value);
}
static jobject Unsafe_getObject(JNIEnv* env, jobject, jobject javaObj, jlong offset) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
- mirror::Object* value = obj->GetFieldObject<mirror::Object*>(MemberOffset(offset), false);
+ mirror::Object* value = obj->GetFieldObject<mirror::Object>(MemberOffset(offset), false);
return soa.AddLocalReference<jobject>(value);
}
-static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) {
+static void Unsafe_putObject(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jobject javaNewValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
obj->SetFieldObject(MemberOffset(offset), newValue, false);
}
-static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) {
+static void Unsafe_putObjectVolatile(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jobject javaNewValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
obj->SetFieldObject(MemberOffset(offset), newValue, true);
}
-static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset, jobject javaNewValue) {
+static void Unsafe_putOrderedObject(JNIEnv* env, jobject, jobject javaObj, jlong offset,
+ jobject javaNewValue) {
ScopedFastNativeObjectAccess soa(env);
mirror::Object* obj = soa.Decode<mirror::Object*>(javaObj);
mirror::Object* newValue = soa.Decode<mirror::Object*>(javaNewValue);
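
The getObject/putObject hunks above also track a template signature change:
GetFieldObject is now parameterized on the pointee class rather than the
pointer type, so GetFieldObject<mirror::Object>(...) returns mirror::Object*.
A minimal standalone model of that shape (the real accessor also takes the
is_volatile flag seen in the hunks; this is only the assumed outline):

#include <cstdint>

struct MemberOffset {
  explicit MemberOffset(uintptr_t v) : value(v) {}
  uintptr_t value;
};

// T names the class; the accessor supplies the pointer, returning T*.
template <typename T>
T* GetFieldObject(char* obj, MemberOffset offset) {
  return *reinterpret_cast<T**>(obj + offset.value);
}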