Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.mk                                              |  14
-rw-r--r--  runtime/arch/arm64/entrypoints_init_arm64.cc                    |  14
-rw-r--r--  runtime/arch/arm64/quick_entrypoints_arm64.S                    |   4
-rw-r--r--  runtime/arch/x86/thread_x86.cc                                  |   7
-rw-r--r--  runtime/base/bit_vector.cc                                      |  39
-rw-r--r--  runtime/base/bit_vector.h                                       | 151
-rw-r--r--  runtime/base/bit_vector_test.cc                                 |  19
-rw-r--r--  runtime/base/mutex-inl.h                                        |  18
-rw-r--r--  runtime/base/mutex.cc                                           |  92
-rw-r--r--  runtime/base/mutex.h                                            |  26
-rw-r--r--  runtime/debugger.cc                                             |   4
-rw-r--r--  runtime/gc/heap.cc                                              |   8
-rw-r--r--  runtime/intern_table.cc                                         |   6
-rw-r--r--  runtime/monitor.cc                                              |  17
-rw-r--r--  runtime/monitor.h                                               |   8
-rw-r--r--  runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc |   8
-rw-r--r--  runtime/oat.cc                                                  |   2
-rw-r--r--  runtime/parsed_options.cc                                       |   2
-rw-r--r--  runtime/read_barrier-inl.h                                      |  15
-rw-r--r--  runtime/read_barrier.h                                          |   4
-rw-r--r--  runtime/thread.h                                                |  10
-rw-r--r--  runtime/thread_list.cc                                          |   7
-rw-r--r--  runtime/thread_list.h                                           |   5
23 files changed, 321 insertions(+), 159 deletions(-)
diff --git a/runtime/Android.mk b/runtime/Android.mk
index 1521caa05f..c2507b1457 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -308,6 +308,12 @@ ifeq ($(ART_USE_PORTABLE_COMPILER),true)
LIBART_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
endif
+ifeq ($(MALLOC_IMPL),jemalloc)
+ LIBART_CFLAGS += -DUSE_JEMALLOC
+else
+ LIBART_CFLAGS += -DUSE_DLMALLOC
+endif
+
# $(1): target or host
# $(2): ndebug or debug
# $(3): true or false for LOCAL_CLANG
@@ -397,12 +403,8 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
endif
LOCAL_C_INCLUDES += $(ART_C_INCLUDES)
LOCAL_SHARED_LIBRARIES += liblog libnativehelper
- ifeq ($$(art_target_or_host),target)
- include external/libcxx/libcxx.mk
- LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
- else
- LOCAL_SHARED_LIBRARIES += libbacktrace
- endif
+ include external/libcxx/libcxx.mk
+ LOCAL_SHARED_LIBRARIES += libbacktrace_libc++
ifeq ($$(art_target_or_host),target)
LOCAL_SHARED_LIBRARIES += libcutils libdl libselinux libutils
LOCAL_STATIC_LIBRARIES := libziparchive libz
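
Note: the new USE_JEMALLOC/USE_DLMALLOC defines are consumed via preprocessor
guards in runtime code (see the debugger.cc and heap.cc hunks below). A minimal
sketch of the selection pattern; TrimNativeHeap() is a hypothetical helper used
only for illustration, not part of this change:

    // Pick a native-heap trim strategy from the make-time define.
    #if defined(USE_DLMALLOC)
    void TrimNativeHeap() {
      dlmalloc_trim(0);  // Ask dlmalloc to release unused pages back to the OS.
    }
    #elif defined(USE_JEMALLOC)
    void TrimNativeHeap() {
      // jemalloc trims internally; no explicit call is needed.
    }
    #else
    #error "No malloc implementation selected"
    #endif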
diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc
index 2a5c7d1e8e..cb9f53b72a 100644
--- a/runtime/arch/arm64/entrypoints_init_arm64.cc
+++ b/runtime/arch/arm64/entrypoints_init_arm64.cc
@@ -84,12 +84,6 @@ extern "C" float fmodf(float a, float b); // REM_FLOAT[_2ADDR]
// Double-precision FP arithmetics.
extern "C" double fmod(double a, double b); // REM_DOUBLE[_2ADDR]
-// Long long arithmetics - REM_LONG[_2ADDR] and DIV_LONG[_2ADDR]
-extern "C" int64_t art_quick_mul_long(int64_t, int64_t);
-extern "C" uint64_t art_quick_shl_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_shr_long(uint64_t, uint32_t);
-extern "C" uint64_t art_quick_ushr_long(uint64_t, uint32_t);
-
// Intrinsic entrypoints.
extern "C" int32_t __memcmp16(void*, void*, int32_t);
extern "C" int32_t art_quick_indexof(void*, uint32_t, uint32_t, uint32_t);
@@ -199,10 +193,10 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
qpoints->pF2l = NULL;
qpoints->pLdiv = NULL;
qpoints->pLmod = NULL;
- qpoints->pLmul = art_quick_mul_long;
- qpoints->pShlLong = art_quick_shl_long;
- qpoints->pShrLong = art_quick_shr_long;
- qpoints->pUshrLong = art_quick_ushr_long;
+ qpoints->pLmul = NULL;
+ qpoints->pShlLong = NULL;
+ qpoints->pShrLong = NULL;
+ qpoints->pUshrLong = NULL;
// Intrinsics
qpoints->pIndexOf = art_quick_indexof;
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index ac922ddecd..7f31fb6881 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1611,10 +1611,6 @@ END art_quick_to_interpreter_bridge
UNIMPLEMENTED art_quick_instrumentation_entry
UNIMPLEMENTED art_quick_instrumentation_exit
UNIMPLEMENTED art_quick_deoptimize
-UNIMPLEMENTED art_quick_mul_long
-UNIMPLEMENTED art_quick_shl_long
-UNIMPLEMENTED art_quick_shr_long
-UNIMPLEMENTED art_quick_ushr_long
UNIMPLEMENTED art_quick_indexof
/*
diff --git a/runtime/arch/x86/thread_x86.cc b/runtime/arch/x86/thread_x86.cc
index 26cd8646d8..9f36927877 100644
--- a/runtime/arch/x86/thread_x86.cc
+++ b/runtime/arch/x86/thread_x86.cc
@@ -40,10 +40,9 @@ struct descriptor_table_entry_t {
namespace art {
-static Mutex modify_ldt_lock("modify_ldt lock");
-
void Thread::InitCpu() {
- MutexLock mu(Thread::Current(), modify_ldt_lock);
+ // Take the ldt lock; Thread::Current isn't yet established.
+ MutexLock mu(nullptr, *Locks::modify_ldt_lock_);
const uintptr_t base = reinterpret_cast<uintptr_t>(this);
const size_t limit = kPageSize;
@@ -138,7 +137,7 @@ void Thread::InitCpu() {
}
void Thread::CleanupCpu() {
- MutexLock mu(Thread::Current(), modify_ldt_lock);
+ MutexLock mu(this, *Locks::modify_ldt_lock_);
// Sanity check that reads from %fs point to this Thread*.
Thread* self_check;
diff --git a/runtime/base/bit_vector.cc b/runtime/base/bit_vector.cc
index 8fe6b27a64..0053389b47 100644
--- a/runtime/base/bit_vector.cc
+++ b/runtime/base/bit_vector.cc
@@ -45,10 +45,11 @@ BitVector::BitVector(uint32_t start_bits,
storage_size_(storage_size),
storage_(storage),
number_of_bits_(start_bits) {
- DCHECK_EQ(sizeof(*storage_), 4U); // Assuming 32-bit units.
+ COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
+ COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
if (storage_ == nullptr) {
storage_size_ = BitsToWords(start_bits);
- storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * sizeof(*storage_)));
+ storage_ = static_cast<uint32_t*>(allocator_->Alloc(storage_size_ * kWordBytes));
}
}
@@ -61,7 +62,7 @@ BitVector::~BitVector() {
*/
bool BitVector::IsBitSet(uint32_t num) const {
// If the index is over the size:
- if (num >= storage_size_ * sizeof(*storage_) * 8) {
+ if (num >= storage_size_ * kWordBits) {
// Whether it is expandable or not, this bit does not exist: thus it is not set.
return false;
}
@@ -71,7 +72,7 @@ bool BitVector::IsBitSet(uint32_t num) const {
// Mark all bits as "clear".
void BitVector::ClearAllBits() {
- memset(storage_, 0, storage_size_ * sizeof(*storage_));
+ memset(storage_, 0, storage_size_ * kWordBytes);
}
// Mark the specified bit as "set".
@@ -80,17 +81,17 @@ void BitVector::ClearAllBits() {
* not using it badly or change resize mechanism.
*/
void BitVector::SetBit(uint32_t num) {
- if (num >= storage_size_ * sizeof(*storage_) * 8) {
+ if (num >= storage_size_ * kWordBits) {
DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
/* Round up to word boundaries for "num+1" bits */
uint32_t new_size = BitsToWords(num + 1);
DCHECK_GT(new_size, storage_size_);
uint32_t *new_storage =
- static_cast<uint32_t*>(allocator_->Alloc(new_size * sizeof(*storage_)));
- memcpy(new_storage, storage_, storage_size_ * sizeof(*storage_));
+ static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
+ memcpy(new_storage, storage_, storage_size_ * kWordBytes);
// Zero out the new storage words.
- memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * sizeof(*storage_));
+ memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
// TODO: collect stats on space wasted because of resize.
storage_ = new_storage;
storage_size_ = new_size;
@@ -103,7 +104,7 @@ void BitVector::SetBit(uint32_t num) {
// Mark the specified bit as "unset".
void BitVector::ClearBit(uint32_t num) {
// If the index is over the size, we don't have to do anything, it is cleared.
- if (num < storage_size_ * sizeof(*storage_) * 8) {
+ if (num < storage_size_ * kWordBits) {
// Otherwise, go ahead and clear it.
storage_[num >> 5] &= ~check_masks[num & 0x1f];
}
@@ -132,7 +133,7 @@ bool BitVector::SameBitsSet(const BitVector *src) {
// - Therefore, min_size goes up to at least that, we are thus comparing at least what we need to, but not less.
// ie. we are comparing all storage cells that could have difference, if both vectors have cells above our_highest_index,
// they are automatically at 0.
- return (memcmp(storage_, src->GetRawStorage(), our_highest_index * sizeof(*storage_)) == 0);
+ return (memcmp(storage_, src->GetRawStorage(), our_highest_index * kWordBytes) == 0);
}
// Intersect with another bit vector.
@@ -180,7 +181,7 @@ bool BitVector::Union(const BitVector* src) {
SetBit(highest_bit);
// Paranoid: storage size should be big enough to hold this bit now.
- DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * sizeof(*(storage_)) * 8);
+ DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
}
for (uint32_t idx = 0; idx < src_size; idx++) {
@@ -215,7 +216,7 @@ bool BitVector::UnionIfNotIn(const BitVector* union_with, const BitVector* not_i
SetBit(highest_bit);
// Paranoid: storage size should be big enough to hold this bit now.
- DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * sizeof(*(storage_)) * 8);
+ DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
}
uint32_t not_in_size = not_in->GetStorageSize();
@@ -268,14 +269,10 @@ uint32_t BitVector::NumSetBits() const {
// Count the number of bits that are set in range [0, end).
uint32_t BitVector::NumSetBits(uint32_t end) const {
- DCHECK_LE(end, storage_size_ * sizeof(*storage_) * 8);
+ DCHECK_LE(end, storage_size_ * kWordBits);
return NumSetBits(storage_, end);
}
-BitVector::Iterator* BitVector::GetIterator() const {
- return new (allocator_) Iterator(this);
-}
-
/*
* Mark specified number of bits as "set". Cannot set all bits like ClearAll
* since there might be unused bits - setting those to one will confuse the
@@ -329,7 +326,7 @@ int BitVector::GetHighestBitSet() const {
}
// Return cnt + how many storage units still remain * the number of bits per unit.
- int res = cnt + (idx * (sizeof(*storage_) * 8));
+ int res = cnt + (idx * kWordBits);
return res;
}
}
@@ -369,14 +366,14 @@ void BitVector::Copy(const BitVector *src) {
SetBit(highest_bit);
// Now set until highest bit's storage.
- uint32_t size = 1 + (highest_bit / (sizeof(*storage_) * 8));
- memcpy(storage_, src->GetRawStorage(), sizeof(*storage_) * size);
+ uint32_t size = 1 + (highest_bit / kWordBits);
+ memcpy(storage_, src->GetRawStorage(), kWordBytes * size);
// Set upper bits to 0.
uint32_t left = storage_size_ - size;
if (left > 0) {
- memset(storage_ + size, 0, sizeof(*storage_) * left);
+ memset(storage_ + size, 0, kWordBytes * left);
}
}
diff --git a/runtime/base/bit_vector.h b/runtime/base/bit_vector.h
index 3e965e4e9a..8f9afff47d 100644
--- a/runtime/base/bit_vector.h
+++ b/runtime/base/bit_vector.h
@@ -32,59 +32,115 @@ namespace art {
*/
class BitVector {
public:
- class Iterator {
+ class IndexContainer;
+
+ /**
+ * @brief Convenient iterator across the indexes of the BitVector's set bits.
+ *
+ * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
+ * to the highest index of the BitVector's set bits. Instances can be retrieved
+ * only through BitVector::Indexes() which returns an IndexContainer wrapper
+ * object with begin() and end() suitable for range-based loops:
+ * for (uint32_t idx : bit_vector.Indexes()) {
+ * // Use idx.
+ * }
+ */
+ class IndexIterator
+ : std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
public:
- explicit Iterator(const BitVector* bit_vector)
- : p_bits_(bit_vector),
- bit_storage_(bit_vector->GetRawStorage()),
- bit_index_(0),
- bit_size_(p_bits_->storage_size_ * sizeof(uint32_t) * 8) {}
-
- // Return the position of the next set bit. -1 means end-of-element reached.
- int32_t Next() {
- // Did anything obviously change since we started?
- DCHECK_EQ(bit_size_, p_bits_->GetStorageSize() * sizeof(uint32_t) * 8);
- DCHECK_EQ(bit_storage_, p_bits_->GetRawStorage());
-
- if (UNLIKELY(bit_index_ >= bit_size_)) {
- return -1;
- }
+ bool operator==(const IndexIterator& other) const {
+ DCHECK(bit_storage_ == other.bit_storage_);
+ DCHECK_EQ(storage_size_, other.storage_size_);
+ return bit_index_ == other.bit_index_;
+ }
- uint32_t word_index = bit_index_ / 32;
- uint32_t word = bit_storage_[word_index];
- // Mask out any bits in the first word we've already considered.
- word >>= bit_index_ & 0x1f;
- if (word == 0) {
- bit_index_ &= ~0x1f;
- do {
- word_index++;
- if (UNLIKELY((word_index * 32) >= bit_size_)) {
- bit_index_ = bit_size_;
- return -1;
- }
- word = bit_storage_[word_index];
- bit_index_ += 32;
- } while (word == 0);
- }
- bit_index_ += CTZ(word) + 1;
- return bit_index_ - 1;
+ bool operator!=(const IndexIterator& other) const {
+ return !(*this == other);
}
- static void* operator new(size_t size, Allocator* allocator) {
- return allocator->Alloc(sizeof(BitVector::Iterator));
- };
- static void operator delete(void* p) {
- Iterator* it = reinterpret_cast<Iterator*>(p);
- it->p_bits_->allocator_->Free(p);
+ int operator*() const {
+ DCHECK_LT(bit_index_, BitSize());
+ return bit_index_;
+ }
+
+ IndexIterator& operator++() {
+ DCHECK_LT(bit_index_, BitSize());
+ bit_index_ = FindIndex(bit_index_ + 1u);
+ return *this;
+ }
+
+ IndexIterator operator++(int) {
+ IndexIterator result(*this);
+ ++*this;
+ return result;
+ }
+
+ // Helper function to check for end without comparing with bit_vector.Indexes().end().
+ bool Done() const {
+ return bit_index_ == BitSize();
}
private:
- const BitVector* const p_bits_;
+ struct begin_tag { };
+ struct end_tag { };
+
+ IndexIterator(const BitVector* bit_vector, begin_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(FindIndex(0u)) { }
+
+ IndexIterator(const BitVector* bit_vector, end_tag)
+ : bit_storage_(bit_vector->GetRawStorage()),
+ storage_size_(bit_vector->storage_size_),
+ bit_index_(BitSize()) { }
+
+ uint32_t BitSize() const {
+ return storage_size_ * kWordBits;
+ }
+
+ uint32_t FindIndex(uint32_t start_index) const {
+ DCHECK_LE(start_index, BitSize());
+ uint32_t word_index = start_index / kWordBits;
+ if (UNLIKELY(word_index == storage_size_)) {
+ return start_index;
+ }
+ uint32_t word = bit_storage_[word_index];
+ // Mask out any bits in the first word we've already considered.
+ word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
+ while (word == 0u) {
+ ++word_index;
+ if (UNLIKELY(word_index == storage_size_)) {
+ return BitSize();
+ }
+ word = bit_storage_[word_index];
+ }
+ return word_index * 32u + CTZ(word);
+ }
+
const uint32_t* const bit_storage_;
+ const uint32_t storage_size_; // Size of vector in words.
uint32_t bit_index_; // Current index (size in bits).
- const uint32_t bit_size_; // Size of vector in bits.
- friend class BitVector;
+ friend class BitVector::IndexContainer;
+ };
+
+ /**
+ * @brief BitVector wrapper class for iteration across indexes of set bits.
+ */
+ class IndexContainer {
+ public:
+ explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
+
+ IndexIterator begin() const {
+ return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+ }
+
+ IndexIterator end() const {
+ return IndexIterator(bit_vector_, IndexIterator::end_tag());
+ }
+
+ private:
+ const BitVector* const bit_vector_;
};
BitVector(uint32_t start_bits,
@@ -127,14 +183,16 @@ class BitVector {
// Number of bits set in range [0, end).
uint32_t NumSetBits(uint32_t end) const;
- Iterator* GetIterator() const;
+ IndexContainer Indexes() const {
+ return IndexContainer(this);
+ }
uint32_t GetStorageSize() const { return storage_size_; }
bool IsExpandable() const { return expandable_; }
uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
uint32_t* GetRawStorage() { return storage_; }
const uint32_t* GetRawStorage() const { return storage_; }
- size_t GetSizeOf() const { return storage_size_ * sizeof(uint32_t); }
+ size_t GetSizeOf() const { return storage_size_ * kWordBytes; }
/**
* @return the highest bit set, -1 if none are set
@@ -182,6 +240,9 @@ class BitVector {
void DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const;
private:
+ static constexpr uint32_t kWordBytes = sizeof(uint32_t);
+ static constexpr uint32_t kWordBits = kWordBytes * 8;
+
Allocator* const allocator_;
const bool expandable_; // expand bitmap if we run out?
uint32_t storage_size_; // current size, in 32-bit words.
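
Note: replacing the heap-allocated Iterator with IndexIterator/IndexContainer
enables allocation-free, range-based loops over the indexes of set bits. A
usage sketch (constructor arguments follow bit_vector_test.cc):

    BitVector bv(64u, false /* expandable */, Allocator::GetMallocAllocator());
    bv.SetBit(1);
    bv.SetBit(33);
    for (uint32_t idx : bv.Indexes()) {
      // Visits 1, then 33, from the lowest set index to the highest.
    }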
diff --git a/runtime/base/bit_vector_test.cc b/runtime/base/bit_vector_test.cc
index 0f866a4442..1403f50c04 100644
--- a/runtime/base/bit_vector_test.cc
+++ b/runtime/base/bit_vector_test.cc
@@ -38,11 +38,8 @@ TEST(BitVector, Test) {
EXPECT_EQ(0U, bv.GetRawStorageWord(0));
EXPECT_EQ(0U, *bv.GetRawStorage());
- BitVector::Iterator empty_iterator(&bv);
- EXPECT_EQ(-1, empty_iterator.Next());
-
- std::unique_ptr<BitVector::Iterator> empty_iterator_on_heap(bv.GetIterator());
- EXPECT_EQ(-1, empty_iterator_on_heap->Next());
+ EXPECT_TRUE(bv.Indexes().begin().Done());
+ EXPECT_TRUE(bv.Indexes().begin() == bv.Indexes().end());
bv.SetBit(0);
bv.SetBit(kBits - 1);
@@ -57,10 +54,14 @@ TEST(BitVector, Test) {
EXPECT_EQ(0x80000001U, bv.GetRawStorageWord(0));
EXPECT_EQ(0x80000001U, *bv.GetRawStorage());
- BitVector::Iterator iterator(&bv);
- EXPECT_EQ(0, iterator.Next());
- EXPECT_EQ(static_cast<int>(kBits - 1), iterator.Next());
- EXPECT_EQ(-1, iterator.Next());
+ BitVector::IndexIterator iterator = bv.Indexes().begin();
+ EXPECT_TRUE(iterator != bv.Indexes().end());
+ EXPECT_EQ(0, *iterator);
+ ++iterator;
+ EXPECT_TRUE(iterator != bv.Indexes().end());
+ EXPECT_EQ(static_cast<int>(kBits - 1), *iterator);
+ ++iterator;
+ EXPECT_TRUE(iterator == bv.Indexes().end());
}
TEST(BitVector, NoopAllocator) {
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index adf4c66aa4..6c415e77e3 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -132,9 +132,21 @@ static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALY
// TODO: tighten this check.
if (kDebugLocking) {
Runtime* runtime = Runtime::Current();
- CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
- level == kDefaultMutexLevel || level == kRuntimeShutdownLock ||
- level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
+ CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
+ // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
+ // yet established.
+ level == kRuntimeShutdownLock ||
+ // Thread Ids are allocated/released before threads are established.
+ level == kAllocatedThreadIdsLock ||
+ // Thread LDT's are initialized without Thread::Current established.
+ level == kModifyLdtLock ||
+ // Threads are unregistered while holding the thread list lock; during this process they
+ // no longer exist and so we expect an unlock with no self.
+ level == kThreadListLock ||
+ // Ignore logging which may or may not have set up thread data structures.
+ level == kLoggingLock ||
+ // Avoid recursive death.
+ level == kAbortLock);
}
}
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 6f7f2c1e99..705be40769 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -30,10 +30,12 @@
namespace art {
Mutex* Locks::abort_lock_ = nullptr;
+Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
Mutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
+Mutex* Locks::modify_ldt_lock_ = nullptr;
ReaderWriterMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
@@ -814,7 +816,13 @@ void ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) {
void Locks::Init() {
if (logging_lock_ != nullptr) {
// Already initialized.
+ if (kRuntimeISA == kX86) {
+ DCHECK(modify_ldt_lock_ != nullptr);
+ } else {
+ DCHECK(modify_ldt_lock_ == nullptr);
+ }
DCHECK(abort_lock_ != nullptr);
+ DCHECK(allocated_thread_ids_lock_ != nullptr);
DCHECK(breakpoint_lock_ != nullptr);
DCHECK(classlinker_classes_lock_ != nullptr);
DCHECK(heap_bitmap_lock_ != nullptr);
@@ -827,32 +835,76 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(intern_table_lock_ != nullptr);
} else {
- logging_lock_ = new Mutex("logging lock", kLoggingLock, true);
- abort_lock_ = new Mutex("abort lock", kAbortLock, true);
+ // Create global locks in level order from highest lock level to lowest.
+ LockLevel current_lock_level = kMutatorLock;
+ DCHECK(mutator_lock_ == nullptr);
+ mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);
- DCHECK(breakpoint_lock_ == nullptr);
- breakpoint_lock_ = new Mutex("breakpoint lock", kBreakpointLock);
- DCHECK(classlinker_classes_lock_ == nullptr);
- classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
- kClassLinkerClassesLock);
+ #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
+ DCHECK_LT(new_level, current_lock_level); \
+ current_lock_level = new_level;
+
+ UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
DCHECK(heap_bitmap_lock_ == nullptr);
- heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", kHeapBitmapLock);
- DCHECK(mutator_lock_ == nullptr);
- mutator_lock_ = new ReaderWriterMutex("mutator lock", kMutatorLock);
+ heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
DCHECK(runtime_shutdown_lock_ == nullptr);
- runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", kRuntimeShutdownLock);
+ runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
+ DCHECK(profiler_lock_ == nullptr);
+ profiler_lock_ = new Mutex("profiler lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
+ DCHECK(trace_lock_ == nullptr);
+ trace_lock_ = new Mutex("trace lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
DCHECK(thread_list_lock_ == nullptr);
- thread_list_lock_ = new Mutex("thread list lock", kThreadListLock);
+ thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
+ DCHECK(breakpoint_lock_ == nullptr);
+ breakpoint_lock_ = new Mutex("breakpoint lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
+ DCHECK(classlinker_classes_lock_ == nullptr);
+ classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
+ current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
+ DCHECK(allocated_thread_ids_lock_ == nullptr);
+ allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
+
+ if (kRuntimeISA == kX86) {
+ UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
+ DCHECK(modify_ldt_lock_ == nullptr);
+ modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
+ }
+
+ UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
+ DCHECK(intern_table_lock_ == nullptr);
+ intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
+
+
+ UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
+ DCHECK(abort_lock_ == nullptr);
+ abort_lock_ = new Mutex("abort lock", current_lock_level, true);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
DCHECK(thread_suspend_count_lock_ == nullptr);
- thread_suspend_count_lock_ = new Mutex("thread suspend count lock", kThreadSuspendCountLock);
- DCHECK(trace_lock_ == nullptr);
- trace_lock_ = new Mutex("trace lock", kTraceLock);
- DCHECK(profiler_lock_ == nullptr);
- profiler_lock_ = new Mutex("profiler lock", kProfilerLock);
+ thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
DCHECK(unexpected_signal_lock_ == nullptr);
- unexpected_signal_lock_ = new Mutex("unexpected signal lock", kUnexpectedSignalLock, true);
- DCHECK(intern_table_lock_ == nullptr);
- intern_table_lock_ = new Mutex("InternTable lock", kInternTableLock);
+ unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+
+ UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
+ DCHECK(logging_lock_ == nullptr);
+ logging_lock_ = new Mutex("logging lock", current_lock_level, true);
+
+ #undef UPDATE_CURRENT_LOCK_LEVEL
}
}
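
Note: the UPDATE_CURRENT_LOCK_LEVEL macro makes the creation order itself
assert the hierarchy: each global lock must sit at a strictly lower level than
the one created before it. Expanded by hand, the first two steps read:

    LockLevel current_lock_level = kMutatorLock;
    mutator_lock_ = new ReaderWriterMutex("mutator lock", current_lock_level);

    DCHECK_LT(kHeapBitmapLock, current_lock_level);  // Debug abort if levels are reordered.
    current_lock_level = kHeapBitmapLock;
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);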
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index e13c8d5d62..522692e6f3 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -74,6 +74,8 @@ enum LockLevel {
kPinTableLock,
kLoadLibraryLock,
kJdwpObjectRegistryLock,
+ kModifyLdtLock,
+ kAllocatedThreadIdsLock,
kClassLinkerClassesLock,
kBreakpointLock,
kMonitorLock,
@@ -532,28 +534,34 @@ class Locks {
// Guards shutdown of the runtime.
static Mutex* runtime_shutdown_lock_ ACQUIRED_AFTER(heap_bitmap_lock_);
+ // Guards background profiler global state.
+ static Mutex* profiler_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+
+ // Guards trace (ie traceview) requests.
+ static Mutex* trace_lock_ ACQUIRED_AFTER(profiler_lock_);
+
// The thread_list_lock_ guards ThreadList::list_. It is also commonly held to stop threads
// attaching and detaching.
- static Mutex* thread_list_lock_ ACQUIRED_AFTER(runtime_shutdown_lock_);
+ static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
// Guards breakpoints.
static Mutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
- // Guards trace requests.
- static Mutex* trace_lock_ ACQUIRED_AFTER(breakpoint_lock_);
-
- // Guards profile objects.
- static Mutex* profiler_lock_ ACQUIRED_AFTER(trace_lock_);
-
// Guards lists of classes within the class linker.
- static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(profiler_lock_);
+ static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
// When declaring any Mutex add DEFAULT_MUTEX_ACQUIRED_AFTER to use annotalysis to check the code
// doesn't try to hold a higher level Mutex.
#define DEFAULT_MUTEX_ACQUIRED_AFTER ACQUIRED_AFTER(Locks::classlinker_classes_lock_)
+ // Guard the allocation/deallocation of thread ids.
+ static Mutex* allocated_thread_ids_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+
+ // Guards modification of the LDT on x86.
+ static Mutex* modify_ldt_lock_ ACQUIRED_AFTER(allocated_thread_ids_lock_);
+
// Guards intern table.
- static Mutex* intern_table_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
+ static Mutex* intern_table_lock_ ACQUIRED_AFTER(modify_ldt_lock_);
// Have an exclusive aborting thread.
static Mutex* abort_lock_ ACQUIRED_AFTER(classlinker_classes_lock_);
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 7136c67b6f..8c8a3556da 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -4039,7 +4039,11 @@ void Dbg::DdmSendHeapSegments(bool native) {
// Send a series of heap segment chunks.
HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
if (native) {
+#ifdef USE_DLMALLOC
dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
+#else
+ UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
+#endif
} else {
gc::Heap* heap = Runtime::Current()->GetHeap();
const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ea1ccdd665..fdc43671ce 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -893,10 +893,16 @@ void Heap::Trim() {
uint64_t gc_heap_end_ns = NanoTime();
// We never move things in the native heap, so we can finish the GC at this point.
FinishGC(self, collector::kGcTypeNone);
+ size_t native_reclaimed = 0;
+#if defined(USE_DLMALLOC)
// Trim the native heap.
dlmalloc_trim(0);
- size_t native_reclaimed = 0;
dlmalloc_inspect_all(DlmallocMadviseCallback, &native_reclaimed);
+#elif defined(USE_JEMALLOC)
+ // Jemalloc does its own internal trimming.
+#else
+ UNIMPLEMENTED(WARNING) << "Add trimming support";
+#endif
uint64_t end_ns = NanoTime();
VLOG(heap) << "Heap trim of managed (duration=" << PrettyDuration(gc_heap_end_ns - start_ns)
<< ", advised=" << PrettySize(managed_reclaimed) << ") and native (duration="
diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc
index 817d1045cf..339eb36178 100644
--- a/runtime/intern_table.cc
+++ b/runtime/intern_table.cc
@@ -84,7 +84,8 @@ void InternTable::VisitRoots(RootCallback* callback, void* arg, VisitRootFlags f
mirror::String* InternTable::Lookup(Table& table, mirror::String* s, int32_t hash_code) {
Locks::intern_table_lock_->AssertHeld(Thread::Current());
- for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
+ for (auto it = table.lower_bound(hash_code), end = table.end();
+ it != end && it->first == hash_code; ++it) {
mirror::String* existing_string = it->second;
if (existing_string->Equals(s)) {
return existing_string;
@@ -123,7 +124,8 @@ void InternTable::RemoveWeak(mirror::String* s, int32_t hash_code) {
}
void InternTable::Remove(Table& table, mirror::String* s, int32_t hash_code) {
- for (auto it = table.find(hash_code), end = table.end(); it != end; ++it) {
+ for (auto it = table.lower_bound(hash_code), end = table.end();
+ it != end && it->first == hash_code; ++it) {
if (it->second == s) {
table.erase(it);
return;
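
Note: Table is a multimap keyed by hash code, so the old loops started at
find(hash_code) and then ran to end(), scanning entries with unrelated hash
codes as well. lower_bound() plus the explicit key check confines the scan to
the matching bucket. The pattern in isolation, with std::multimap standing in
for Table:

    #include <cstdint>
    #include <map>
    #include <string>

    std::multimap<int32_t, std::string> table;

    void Visit(int32_t hash_code) {
      // Touch only entries whose key equals hash_code; equal keys are contiguous.
      for (auto it = table.lower_bound(hash_code), end = table.end();
           it != end && it->first == hash_code; ++it) {
        // it->second is a candidate with the right hash; compare contents here.
      }
    }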
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index f783edbfc3..c53520de12 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -111,7 +111,7 @@ bool Monitor::Install(Thread* self) {
MutexLock mu(self, monitor_lock_); // Uncontended mutex acquisition as monitor isn't yet public.
CHECK(owner_ == nullptr || owner_ == self || owner_->IsSuspended());
// Propagate the lock state.
- LockWord lw(obj_->GetLockWord(false));
+ LockWord lw(GetObject()->GetLockWord(false));
switch (lw.GetState()) {
case LockWord::kThinLocked: {
CHECK_EQ(owner_->GetThreadId(), lw.ThinLockOwner());
@@ -137,7 +137,7 @@ bool Monitor::Install(Thread* self) {
}
LockWord fat(this);
// Publish the updated lock word, which may race with other threads.
- bool success = obj_->CasLockWord(lw, fat);
+ bool success = GetObject()->CasLockWord(lw, fat);
// Lock profiling.
if (success && owner_ != nullptr && lock_profiling_threshold_ != 0) {
locking_method_ = owner_->GetCurrentMethod(&locking_dex_pc_);
@@ -226,9 +226,9 @@ void Monitor::Lock(Thread* self) {
// Do this before releasing the lock so that we don't get deflated.
++num_waiters_;
monitor_lock_.Unlock(self); // Let go of locks in order.
+ self->SetMonitorEnterObject(GetObject());
{
ScopedThreadStateChange tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
- self->SetMonitorEnterObject(obj_);
MutexLock mu2(self, monitor_lock_); // Reacquire monitor_lock_ without mutator_lock_ for Wait.
if (owner_ != NULL) { // Did the owner_ give the lock up?
monitor_contenders_.Wait(self); // Still contended so wait.
@@ -249,8 +249,8 @@ void Monitor::Lock(Thread* self) {
}
}
}
- self->SetMonitorEnterObject(nullptr);
}
+ self->SetMonitorEnterObject(nullptr);
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
}
@@ -363,7 +363,7 @@ bool Monitor::Unlock(Thread* self) {
// We don't own this, so we're not allowed to unlock it.
// The JNI spec says that we should throw IllegalMonitorStateException
// in this case.
- FailedUnlock(obj_, self, owner, this);
+ FailedUnlock(GetObject(), self, owner, this);
return false;
}
return true;
@@ -895,7 +895,7 @@ void Monitor::DescribeWait(std::ostream& os, const Thread* thread) {
MutexLock mu(self, *thread->GetWaitMutex());
Monitor* monitor = thread->GetWaitMonitor();
if (monitor != nullptr) {
- pretty_object = monitor->obj_;
+ pretty_object = monitor->GetObject();
}
} else if (state == kBlocked) {
wait_message = " - waiting to lock ";
@@ -1101,12 +1101,13 @@ void MonitorList::SweepMonitorList(IsMarkedCallback* callback, void* arg) {
MutexLock mu(Thread::Current(), monitor_list_lock_);
for (auto it = list_.begin(); it != list_.end(); ) {
Monitor* m = *it;
- mirror::Object* obj = m->GetObject();
+ // Disable the read barrier in GetObject() as this is called by GC.
+ mirror::Object* obj = m->GetObject<kWithoutReadBarrier>();
// The object of a monitor can be null if we have deflated it.
mirror::Object* new_obj = obj != nullptr ? callback(obj, arg) : nullptr;
if (new_obj == nullptr) {
VLOG(monitor) << "freeing monitor " << m << " belonging to unmarked object "
- << m->GetObject();
+ << obj;
delete m;
it = list_.erase(it);
} else {
diff --git a/runtime/monitor.h b/runtime/monitor.h
index bc1b2ed4eb..7af2d4cf3a 100644
--- a/runtime/monitor.h
+++ b/runtime/monitor.h
@@ -27,6 +27,7 @@
#include "atomic.h"
#include "base/mutex.h"
#include "object_callbacks.h"
+#include "read_barrier.h"
#include "thread_state.h"
namespace art {
@@ -92,8 +93,9 @@ class Monitor {
static bool IsValidLockWord(LockWord lock_word);
+ template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
mirror::Object* GetObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return obj_;
+ return ReadBarrier::BarrierForWeakRoot<mirror::Object, kReadBarrierOption>(obj_);
}
void SetObject(mirror::Object* object);
@@ -190,7 +192,9 @@ class Monitor {
// Owner's recursive lock depth.
int lock_count_ GUARDED_BY(monitor_lock_);
- // What object are we part of.
+ // What object are we part of. This is a weak root. Do not access
+ // this directly, use GetObject() to read it so it will be guarded
+ // by a read barrier.
mirror::Object* obj_;
// Threads currently waiting on this monitor.
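
Note: GetObject() is now a template so GC-internal code can read the weak root
raw while all other callers get the read barrier by default. The two call
forms, as used in the monitor.cc hunk above:

    mirror::Object* o = monitor->GetObject();                        // Default: kWithReadBarrier.
    mirror::Object* raw = monitor->GetObject<kWithoutReadBarrier>(); // GC sweep: no barrier.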
diff --git a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
index 5d90f1a579..e17e60a7ce 100644
--- a/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
+++ b/runtime/native/org_apache_harmony_dalvik_ddmc_DdmVmInternal.cc
@@ -52,9 +52,15 @@ static jobjectArray DdmVmInternal_getStackTraceById(JNIEnv* env, jclass, jint th
jobject internal_trace = self->CreateInternalStackTrace<false>(soa);
trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal_trace);
} else {
- // Suspend thread to build stack trace.
ThreadList* thread_list = Runtime::Current()->GetThreadList();
bool timed_out;
+
+ // Check for valid thread
+ if (thin_lock_id == ThreadList::kInvalidThreadId) {
+ return nullptr;
+ }
+
+ // Suspend thread to build stack trace.
Thread* thread = thread_list->SuspendThreadByThreadId(thin_lock_id, false, &timed_out);
if (thread != nullptr) {
{
diff --git a/runtime/oat.cc b/runtime/oat.cc
index cb9334a120..10d335eec1 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -22,7 +22,7 @@
namespace art {
const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '2', '8', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '2', '9', '\0' };
OatHeader::OatHeader() {
memset(this, 0, sizeof(*this));
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 4330d275a9..3756435b1c 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -790,7 +790,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -Xprofile-period:integervalue\n");
UsageMessage(stream, " -Xprofile-duration:integervalue\n");
UsageMessage(stream, " -Xprofile-interval:integervalue\n");
- UsageMessage(stream, " -Xprofile-backoff:integervalue\n");
+ UsageMessage(stream, " -Xprofile-backoff:doublevalue\n");
UsageMessage(stream, " -Xcompiler-option dex2oat-option\n");
UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n");
UsageMessage(stream, "\n");
diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h
index 88e2f8fa61..4302c9ef85 100644
--- a/runtime/read_barrier-inl.h
+++ b/runtime/read_barrier-inl.h
@@ -43,6 +43,21 @@ inline MirrorType* ReadBarrier::Barrier(
}
}
+template <typename MirrorType, ReadBarrierOption kReadBarrierOption>
+inline MirrorType* ReadBarrier::BarrierForWeakRoot(MirrorType* ref) {
+ UNUSED(ref);
+ const bool with_read_barrier = kReadBarrierOption == kWithReadBarrier;
+ if (with_read_barrier && kUseBakerReadBarrier) {
+ // To be implemented.
+ return ref;
+ } else if (with_read_barrier && kUseBrooksReadBarrier) {
+ // To be implemented.
+ return ref;
+ } else {
+ return ref;
+ }
+}
+
} // namespace art
#endif // ART_RUNTIME_READ_BARRIER_INL_H_
diff --git a/runtime/read_barrier.h b/runtime/read_barrier.h
index 73c3d43e92..e40e8eaa37 100644
--- a/runtime/read_barrier.h
+++ b/runtime/read_barrier.h
@@ -37,6 +37,10 @@ class ReadBarrier {
ALWAYS_INLINE static MirrorType* Barrier(
mirror::Object* obj, MemberOffset offset, mirror::HeapReference<MirrorType>* ref_addr)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ template <typename MirrorType, ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
+ ALWAYS_INLINE static MirrorType* BarrierForWeakRoot(MirrorType* ref)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
};
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index 62fa323212..9a7cb486d8 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -396,11 +396,11 @@ class Thread {
// Convert a jobject into a Object*
mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* GetMonitorEnterObject() const {
+ mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return tlsPtr_.monitor_enter_object;
}
- void SetMonitorEnterObject(mirror::Object* obj) {
+ void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
tlsPtr_.monitor_enter_object = obj;
}
@@ -1045,9 +1045,6 @@ class Thread {
// A cached pthread_t for the pthread underlying this Thread*.
pthread_t pthread_self;
- // Support for Mutex lock hierarchy bug detection.
- BaseMutex* held_mutexes[kLockLevelCount];
-
// If no_thread_suspension_ is > 0, what is causing that assertion.
const char* last_no_thread_suspension_cause;
@@ -1074,6 +1071,9 @@ class Thread {
// Thread-local allocation stack data/routines.
mirror::Object** thread_local_alloc_stack_top;
mirror::Object** thread_local_alloc_stack_end;
+
+ // Support for Mutex lock hierarchy bug detection.
+ BaseMutex* held_mutexes[kLockLevelCount];
} tlsPtr_;
// Guards the 'interrupted_' and 'wait_monitor_' members.
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 8046500c59..388c9b4c76 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -40,8 +40,7 @@
namespace art {
ThreadList::ThreadList()
- : allocated_ids_lock_("allocated thread ids lock"),
- suspend_all_count_(0), debug_suspend_all_count_(0),
+ : suspend_all_count_(0), debug_suspend_all_count_(0),
thread_exit_cond_("thread exit condition variable", *Locks::thread_list_lock_) {
CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1)));
}
@@ -849,7 +848,7 @@ void ThreadList::VerifyRoots(VerifyRootCallback* callback, void* arg) const {
}
uint32_t ThreadList::AllocThreadId(Thread* self) {
- MutexLock mu(self, allocated_ids_lock_);
+ MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
for (size_t i = 0; i < allocated_ids_.size(); ++i) {
if (!allocated_ids_[i]) {
allocated_ids_.set(i);
@@ -861,7 +860,7 @@ uint32_t ThreadList::AllocThreadId(Thread* self) {
}
void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
- MutexLock mu(self, allocated_ids_lock_);
+ MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
--id; // Zero is reserved to mean "invalid".
DCHECK(allocated_ids_[id]) << id;
allocated_ids_.reset(id);
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index a574340368..d46987a8b8 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -132,7 +132,7 @@ class ThreadList {
private:
uint32_t AllocThreadId(Thread* self);
- void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(allocated_ids_lock_);
+ void ReleaseThreadId(Thread* self, uint32_t id) LOCKS_EXCLUDED(Locks::allocated_thread_ids_lock_);
bool Contains(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
bool Contains(pid_t tid) EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_);
@@ -151,8 +151,7 @@ class ThreadList {
LOCKS_EXCLUDED(Locks::thread_list_lock_,
Locks::thread_suspend_count_lock_);
- mutable Mutex allocated_ids_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(allocated_ids_lock_);
+ std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);
// The actual list of all threads.
std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);
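
Note: thread id management keeps the fixed-size bitset scan; only its guard
moved from a ThreadList member into Locks. The scheme in isolation (the
kMaxThreadId value is assumed, and locking is left to the caller):

    #include <bitset>
    #include <cstdint>

    constexpr size_t kMaxThreadId = 0xFFFF;   // Assumed bound; see thread_list.h.
    std::bitset<kMaxThreadId> allocated_ids;  // Guarded by allocated_thread_ids_lock_.

    uint32_t AllocThreadId() {
      for (size_t i = 0; i < allocated_ids.size(); ++i) {
        if (!allocated_ids[i]) {
          allocated_ids.set(i);
          return i + 1;  // Zero is reserved to mean "invalid".
        }
      }
      return 0;  // Id space exhausted.
    }

    void ReleaseThreadId(uint32_t id) {
      --id;  // Undo the +1 from allocation.
      allocated_ids.reset(id);
    }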