Diffstat (limited to 'runtime/gc')
-rw-r--r--  runtime/gc/accounting/atomic_stack.h          6
-rw-r--r--  runtime/gc/accounting/card_table.cc           2
-rw-r--r--  runtime/gc/accounting/card_table.h            5
-rw-r--r--  runtime/gc/accounting/gc_allocator.h          2
-rw-r--r--  runtime/gc/accounting/mod_union_table.cc      3
-rw-r--r--  runtime/gc/accounting/mod_union_table.h       4
-rw-r--r--  runtime/gc/accounting/remembered_set.cc       3
-rw-r--r--  runtime/gc/accounting/remembered_set.h        2
-rw-r--r--  runtime/gc/accounting/space_bitmap-inl.h      3
-rw-r--r--  runtime/gc/accounting/space_bitmap.cc         4
-rw-r--r--  runtime/gc/accounting/space_bitmap.h         14
-rw-r--r--  runtime/gc/accounting/space_bitmap_test.cc    8
-rw-r--r--  runtime/gc/allocator/rosalloc.cc             10
-rw-r--r--  runtime/gc/allocator/rosalloc.h              22
-rw-r--r--  runtime/gc/collector/mark_sweep.h             5
-rw-r--r--  runtime/gc/collector/semi_space.h             3
-rw-r--r--  runtime/gc/heap.cc                            2
-rw-r--r--  runtime/gc/heap.h                            16
-rw-r--r--  runtime/gc/heap_test.cc                       4
-rw-r--r--  runtime/gc/space/bump_pointer_space.cc        2
-rw-r--r--  runtime/gc/space/image_space.cc              14
-rw-r--r--  runtime/gc/space/image_space.h                4
-rw-r--r--  runtime/gc/space/large_object_space.cc        3
-rw-r--r--  runtime/gc/space/large_object_space.h         8
-rw-r--r--  runtime/gc/space/large_object_space_test.cc   2
-rw-r--r--  runtime/gc/space/malloc_space.cc              2
-rw-r--r--  runtime/gc/space/space.h                     14
-rw-r--r--  runtime/gc/space/space_test.h                18
28 files changed, 91 insertions, 94 deletions
diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h
index 7d8b584fc9..f3ed8d32c0 100644
--- a/runtime/gc/accounting/atomic_stack.h
+++ b/runtime/gc/accounting/atomic_stack.h
@@ -18,12 +18,12 @@
#define ART_RUNTIME_GC_ACCOUNTING_ATOMIC_STACK_H_
#include <algorithm>
+#include <memory>
#include <string>
#include "atomic.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "UniquePtrCompat.h"
#include "mem_map.h"
#include "utils.h"
@@ -36,7 +36,7 @@ class AtomicStack {
public:
// Capacity is how many elements we can store in the stack.
static AtomicStack* Create(const std::string& name, size_t capacity) {
- UniquePtr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
+ std::unique_ptr<AtomicStack> mark_stack(new AtomicStack(name, capacity));
mark_stack->Init();
return mark_stack.release();
}
@@ -215,7 +215,7 @@ class AtomicStack {
std::string name_;
// Memory mapping of the atomic stack.
- UniquePtr<MemMap> mem_map_;
+ std::unique_ptr<MemMap> mem_map_;
// Back index (index after the last element pushed).
AtomicInteger back_index_;
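
The Create factory above shows the ownership pattern this change preserves while swapping smart-pointer types: the new object is held in a std::unique_ptr so nothing leaks between allocation and Init(), and ownership is handed back to the caller with release(). A minimal sketch of the same pattern, with Widget as a hypothetical stand-in for AtomicStack:

    #include <memory>
    #include <string>

    // Minimal sketch of the Create()/release() ownership pattern above.
    // Widget and its Init() are hypothetical stand-ins for AtomicStack.
    class Widget {
     public:
      static Widget* Create(const std::string& name) {
        std::unique_ptr<Widget> w(new Widget(name));
        w->Init();           // if Init() threw, unique_ptr would free w here
        return w.release();  // hand ownership back to the caller
      }

     private:
      explicit Widget(const std::string& name) : name_(name) {}
      void Init() {}
      std::string name_;
    };

    int main() {
      // Callers are expected to re-wrap the raw pointer immediately.
      std::unique_ptr<Widget> w(Widget::Create("demo"));
    }
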
diff --git a/runtime/gc/accounting/card_table.cc b/runtime/gc/accounting/card_table.cc
index 714e6f7123..43a173e2be 100644
--- a/runtime/gc/accounting/card_table.cc
+++ b/runtime/gc/accounting/card_table.cc
@@ -55,7 +55,7 @@ CardTable* CardTable::Create(const byte* heap_begin, size_t heap_capacity) {
size_t capacity = heap_capacity / kCardSize;
/* Allocate an extra 256 bytes to allow fixed low-byte of base */
std::string error_msg;
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous("card table", NULL,
+ std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous("card table", NULL,
capacity + 256, PROT_READ | PROT_WRITE,
false, &error_msg));
CHECK(mem_map.get() != NULL) << "couldn't allocate card table: " << error_msg;
diff --git a/runtime/gc/accounting/card_table.h b/runtime/gc/accounting/card_table.h
index 17e62a6153..7934974081 100644
--- a/runtime/gc/accounting/card_table.h
+++ b/runtime/gc/accounting/card_table.h
@@ -17,10 +17,11 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_CARD_TABLE_H_
#define ART_RUNTIME_GC_ACCOUNTING_CARD_TABLE_H_
+#include <memory>
+
#include "base/mutex.h"
#include "globals.h"
#include "mem_map.h"
-#include "UniquePtrCompat.h"
namespace art {
@@ -141,7 +142,7 @@ class CardTable {
void VerifyCardTable();
// Mmapped pages for the card table
- UniquePtr<MemMap> mem_map_;
+ std::unique_ptr<MemMap> mem_map_;
// Value used to compute card table addresses from object addresses, see GetBiasedBegin
byte* const biased_begin_;
// Card table doesn't begin at the beginning of the mem_map_, instead it is displaced by offset
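
The displaced-table comment above refers to the biased-begin trick: the card for an object address is found with one shift and one add, the bias having been folded into the table pointer so the write barrier needs no subtraction of the heap base. A sketch of that arithmetic, assuming ART's 128-byte cards (kCardShift = 7); the biased_begin parameter stands in for the precomputed biased_begin_ field:

    #include <cstddef>
    #include <cstdint>

    // Sketch of biased-begin card addressing, see GetBiasedBegin above.
    // kCardShift = 7 assumes kCardSize is 128 bytes; biased_begin is assumed
    // to be precomputed so the result lands inside the mapped table for any
    // address in the covered heap.
    constexpr size_t kCardShift = 7;

    inline uint8_t* CardFromAddr(uint8_t* biased_begin, const void* addr) {
      // One shift and one add per write barrier; no "addr - heap_begin".
      return biased_begin + (reinterpret_cast<uintptr_t>(addr) >> kCardShift);
    }
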
diff --git a/runtime/gc/accounting/gc_allocator.h b/runtime/gc/accounting/gc_allocator.h
index 7dd7cca1fd..1d96112b0c 100644
--- a/runtime/gc/accounting/gc_allocator.h
+++ b/runtime/gc/accounting/gc_allocator.h
@@ -73,7 +73,7 @@ class GcAllocatorImpl : public std::allocator<T> {
// GCAllocatorImpl<T> if kMeasureGCMemoryOverhead is true, std::allocator<T> otherwise.
template <typename T>
class GcAllocator : public TypeStaticIf<kMeasureGcMemoryOverhead, GcAllocatorImpl<T>,
- std::allocator<T> >::type {
+ std::allocator<T>>::type {
};
} // namespace accounting
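
GcAllocator selects its base class at compile time: GcAllocatorImpl<T> when kMeasureGcMemoryOverhead is set, plain std::allocator<T> otherwise. TypeStaticIf is ART's equivalent of std::conditional; a minimal re-implementation of the pattern, with MeasuringAllocator as a hypothetical stand-in for GcAllocatorImpl:

    #include <memory>
    #include <type_traits>

    // Minimal re-implementation of the TypeStaticIf pattern used above;
    // equivalent to std::conditional from <type_traits>.
    template <bool kCondition, typename A, typename B>
    struct TypeStaticIf {
      typedef A type;  // primary template: condition is true
    };
    template <typename A, typename B>
    struct TypeStaticIf<false, A, B> {
      typedef B type;  // partial specialization: condition is false
    };

    template <typename T>
    struct MeasuringAllocator : std::allocator<T> {};

    constexpr bool kMeasure = false;
    using IntAlloc =
        TypeStaticIf<kMeasure, MeasuringAllocator<int>, std::allocator<int>>::type;
    static_assert(std::is_same<IntAlloc, std::allocator<int>>::value,
                  "kMeasure == false selects the plain allocator");
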
diff --git a/runtime/gc/accounting/mod_union_table.cc b/runtime/gc/accounting/mod_union_table.cc
index ef5653a3bf..228d1dc668 100644
--- a/runtime/gc/accounting/mod_union_table.cc
+++ b/runtime/gc/accounting/mod_union_table.cc
@@ -16,6 +16,8 @@
#include "mod_union_table.h"
+#include <memory>
+
#include "base/stl_util.h"
#include "card_table-inl.h"
#include "heap_bitmap.h"
@@ -30,7 +32,6 @@
#include "mirror/object_array-inl.h"
#include "space_bitmap-inl.h"
#include "thread.h"
-#include "UniquePtrCompat.h"
using ::art::mirror::Object;
diff --git a/runtime/gc/accounting/mod_union_table.h b/runtime/gc/accounting/mod_union_table.h
index 5ae7c77c19..449e171b64 100644
--- a/runtime/gc/accounting/mod_union_table.h
+++ b/runtime/gc/accounting/mod_union_table.h
@@ -50,7 +50,7 @@ class HeapBitmap;
// cleared between GC phases, reducing the number of dirty cards that need to be scanned.
class ModUnionTable {
public:
- typedef std::set<byte*, std::less<byte*>, GcAllocator<byte*> > CardSet;
+ typedef std::set<byte*, std::less<byte*>, GcAllocator<byte*>> CardSet;
explicit ModUnionTable(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name),
@@ -126,7 +126,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
// Maps from dirty cards to their corresponding alloc space references.
SafeMap<const byte*, std::vector<mirror::HeapReference<mirror::Object>*>, std::less<const byte*>,
- GcAllocator<std::pair<const byte*, std::vector<mirror::HeapReference<mirror::Object>*> > > >
+ GcAllocator<std::pair<const byte*, std::vector<mirror::HeapReference<mirror::Object>*>>> >
references_;
};
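
Several hunks in this change, like the CardSet typedef above, only delete a space between closing angle brackets. Before C++11, a `>>` ending a nested template-id lexed as the right-shift operator, so the inner space was mandatory; C++11 added a rule that splits the token when it closes a template argument list. An illustration (not from the patch):

    #include <set>
    #include <vector>

    // Both declarations name the same type; only the spelling differs.
    std::set<std::vector<int> > with_space;    // space required by C++98
    std::set<std::vector<int>> without_space;  // legal since C++11

    int main() {
      return static_cast<int>(with_space.size() + without_space.size());
    }
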
diff --git a/runtime/gc/accounting/remembered_set.cc b/runtime/gc/accounting/remembered_set.cc
index 1def334a48..3ff5874854 100644
--- a/runtime/gc/accounting/remembered_set.cc
+++ b/runtime/gc/accounting/remembered_set.cc
@@ -16,6 +16,8 @@
#include "remembered_set.h"
+#include <memory>
+
#include "base/stl_util.h"
#include "card_table-inl.h"
#include "heap_bitmap.h"
@@ -30,7 +32,6 @@
#include "mirror/object_array-inl.h"
#include "space_bitmap-inl.h"
#include "thread.h"
-#include "UniquePtrCompat.h"
namespace art {
namespace gc {
diff --git a/runtime/gc/accounting/remembered_set.h b/runtime/gc/accounting/remembered_set.h
index e3d853742f..706cf35dc0 100644
--- a/runtime/gc/accounting/remembered_set.h
+++ b/runtime/gc/accounting/remembered_set.h
@@ -43,7 +43,7 @@ namespace accounting {
// from the free list spaces to the bump pointer spaces.
class RememberedSet {
public:
- typedef std::set<byte*, std::less<byte*>, GcAllocator<byte*> > CardSet;
+ typedef std::set<byte*, std::less<byte*>, GcAllocator<byte*>> CardSet;
explicit RememberedSet(const std::string& name, Heap* heap, space::ContinuousSpace* space)
: name_(name), heap_(heap), space_(space) {}
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index a4394622e8..7f1da796b1 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -19,6 +19,8 @@
#include "space_bitmap.h"
+#include <memory>
+
#include "base/logging.h"
#include "dex_file-inl.h"
#include "heap_bitmap.h"
@@ -28,7 +30,6 @@
#include "mirror/object_array-inl.h"
#include "object_utils.h"
#include "space_bitmap-inl.h"
-#include "UniquePtrCompat.h"
#include "utils.h"
namespace art {
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index 66f9a3a9ce..8e817e5bc5 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -51,7 +51,7 @@ SpaceBitmap<kAlignment>* SpaceBitmap<kAlignment>::Create(
// Round up since heap_capacity is not necessarily a multiple of kAlignment * kBitsPerWord.
const size_t bitmap_size = ComputeBitmapSize(heap_capacity);
std::string error_msg;
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
+ std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), nullptr, bitmap_size,
PROT_READ | PROT_WRITE, false, &error_msg));
if (UNLIKELY(mem_map.get() == nullptr)) {
LOG(ERROR) << "Failed to allocate bitmap " << name << ": " << error_msg;
@@ -226,7 +226,7 @@ void SpaceBitmap<kAlignment>::WalkFieldsInOrder(SpaceBitmap<kAlignment>* visited
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::InOrderWalk(ObjectCallback* callback, void* arg) {
- UniquePtr<SpaceBitmap<kAlignment>> visited(
+ std::unique_ptr<SpaceBitmap<kAlignment>> visited(
Create("bitmap for in-order walk", reinterpret_cast<byte*>(heap_begin_),
IndexToOffset(bitmap_size_ / kWordSize)));
CHECK(bitmap_begin_ != nullptr);
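
Create sizes the bitmap so that one bit covers each kAlignment-byte unit of the heap, with the round-up the comment above describes. A hedged sketch of what ComputeBitmapSize plausibly computes, assuming 8-byte alignment and machine-word packing; the exact ART constants and helper may differ:

    #include <cstddef>
    #include <cstdint>

    // Hedged sketch of the sizing logic behind ComputeBitmapSize: one bit per
    // kAlignment bytes of heap, bits packed into machine words, rounded up so
    // a partial trailing word still gets a full word of storage. Constants
    // are illustrative.
    constexpr size_t kAlignment = 8;
    constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;

    constexpr size_t ComputeBitmapSize(size_t heap_capacity) {
      return ((heap_capacity + kAlignment * kBitsPerWord - 1) /
              (kAlignment * kBitsPerWord)) * sizeof(uintptr_t);
    }

    // 16 MiB of heap / 8 bytes-per-bit / 8 bits-per-byte = 256 KiB of bitmap.
    static_assert(ComputeBitmapSize(16 * 1024 * 1024) == 256 * 1024,
                  "256 KiB of bitmap for a 16 MiB heap");
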
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index 1ccebf53d9..50d15c613d 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -17,17 +17,17 @@
#ifndef ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_H_
#define ART_RUNTIME_GC_ACCOUNTING_SPACE_BITMAP_H_
+#include <limits.h>
+#include <stdint.h>
+#include <memory>
+#include <set>
+#include <vector>
+
#include "base/mutex.h"
#include "gc_allocator.h"
#include "globals.h"
#include "mem_map.h"
#include "object_callbacks.h"
-#include "UniquePtrCompat.h"
-
-#include <limits.h>
-#include <set>
-#include <stdint.h>
-#include <vector>
namespace art {
@@ -217,7 +217,7 @@ class SpaceBitmap {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Backing storage for bitmap.
- UniquePtr<MemMap> mem_map_;
+ std::unique_ptr<MemMap> mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
uword* const bitmap_begin_;
diff --git a/runtime/gc/accounting/space_bitmap_test.cc b/runtime/gc/accounting/space_bitmap_test.cc
index 71db44bad2..a30bb253e3 100644
--- a/runtime/gc/accounting/space_bitmap_test.cc
+++ b/runtime/gc/accounting/space_bitmap_test.cc
@@ -17,11 +17,11 @@
#include "space_bitmap.h"
#include <stdint.h>
+#include <memory>
#include "common_runtime_test.h"
#include "globals.h"
#include "space_bitmap-inl.h"
-#include "UniquePtrCompat.h"
namespace art {
namespace gc {
@@ -32,7 +32,7 @@ class SpaceBitmapTest : public CommonRuntimeTest {};
TEST_F(SpaceBitmapTest, Init) {
byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
size_t heap_capacity = 16 * MB;
- UniquePtr<ContinuousSpaceBitmap> space_bitmap(
+ std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.get() != NULL);
}
@@ -60,7 +60,7 @@ TEST_F(SpaceBitmapTest, ScanRange) {
byte* heap_begin = reinterpret_cast<byte*>(0x10000000);
size_t heap_capacity = 16 * MB;
- UniquePtr<ContinuousSpaceBitmap> space_bitmap(
+ std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
EXPECT_TRUE(space_bitmap.get() != NULL);
@@ -120,7 +120,7 @@ void RunTest() NO_THREAD_SAFETY_ANALYSIS {
for (int i = 0; i < 5 ; ++i) {
- UniquePtr<ContinuousSpaceBitmap> space_bitmap(
+ std::unique_ptr<ContinuousSpaceBitmap> space_bitmap(
ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
for (int j = 0; j < 10000; ++j) {
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index 27c4c17d52..10b88b3506 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -793,7 +793,7 @@ size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
// already in the non-full run set (i.e., it was full) insert it
// into the non-full run set.
if (run != current_runs_[idx]) {
- unordered_set<Run*, hash_run, eq_run>* full_runs =
+ std::unordered_set<Run*, hash_run, eq_run>* full_runs =
kIsDebugBuild ? &full_runs_[idx] : NULL;
std::set<Run*>::iterator pos = non_full_runs->find(run);
if (pos == non_full_runs->end()) {
@@ -1160,7 +1160,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
#ifdef HAVE_ANDROID_OS
std::vector<Run*> runs;
#else
- unordered_set<Run*, hash_run, eq_run> runs;
+ std::unordered_set<Run*, hash_run, eq_run> runs;
#endif
for (size_t i = 0; i < num_ptrs; i++) {
void* ptr = ptrs[i];
@@ -1267,7 +1267,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// Check if the run should be moved to non_full_runs_ or
// free_page_runs_.
std::set<Run*>* non_full_runs = &non_full_runs_[idx];
- unordered_set<Run*, hash_run, eq_run>* full_runs =
+ std::unordered_set<Run*, hash_run, eq_run>* full_runs =
kIsDebugBuild ? &full_runs_[idx] : NULL;
if (run->IsAllFree()) {
// It has just become completely free. Free the pages of the
@@ -1281,7 +1281,7 @@ size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) {
// If it was full, remove it from the full run set (debug
// only.)
if (kIsDebugBuild) {
- unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
+ std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run);
DCHECK(pos != full_runs->end());
full_runs->erase(pos);
if (kTraceRosAlloc) {
@@ -2054,7 +2054,7 @@ void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
} else {
// If it's full, it must be in the full run set (debug build only.)

if (kIsDebugBuild) {
- unordered_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
+ std::unordered_set<Run*, hash_run, eq_run>& full_runs = rosalloc->full_runs_[idx];
CHECK(full_runs.find(this) != full_runs.end())
<< " A full run isn't in the full run set " << Dump();
}
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 9ea4306028..9464331c70 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -17,31 +17,21 @@
#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_H_
-#include <set>
#include <stdint.h>
#include <stdlib.h>
-#include <string>
#include <sys/mman.h>
+#include <memory>
+#include <set>
+#include <string>
+#include <unordered_set>
#include <vector>
#include "base/mutex.h"
#include "base/logging.h"
#include "globals.h"
#include "mem_map.h"
-#include "UniquePtrCompat.h"
#include "utils.h"
-// Ensure we have an unordered_set until we have worked out C++ library issues.
-#ifdef ART_WITH_STLPORT
-#include <hash_set>
-template <class V, class H, class P>
-class unordered_set : public std::hash_set<V, H, P> {};
-#else // ART_WITH_STLPORT
-// TODO: avoid the use of using in a header file.
-#include <unordered_set>
-using std::unordered_set;
-#endif // ART_WITH_STLPORT
-
namespace art {
namespace gc {
namespace allocator {
@@ -451,7 +441,7 @@ class RosAlloc {
std::set<Run*> non_full_runs_[kNumOfSizeBrackets];
// The run sets that hold the runs whose slots are all full. This is
// debug only. full_runs_[i] is guarded by size_bracket_locks_[i].
- unordered_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
+ std::unordered_set<Run*, hash_run, eq_run> full_runs_[kNumOfSizeBrackets];
// The set of free pages.
std::set<FreePageRun*> free_page_runs_ GUARDED_BY(lock_);
// The dedicated full run, it is always full and shared by all threads when revoking happens.
@@ -479,7 +469,7 @@ class RosAlloc {
byte* page_map_; // No GUARDED_BY(lock_) for kReadPageMapEntryWithoutLockInBulkFree.
size_t page_map_size_;
size_t max_page_map_size_;
- UniquePtr<MemMap> page_map_mem_map_;
+ std::unique_ptr<MemMap> page_map_mem_map_;
// The table that indicates the size of free page runs. These sizes
// are stored here to avoid storing in the free page header and
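
With the STLport shim removed, full_runs_ names std::unordered_set directly, still parameterized by the hash_run and eq_run functors. A self-contained sketch of that functor pattern; Run and its field are hypothetical stand-ins for the real RosAlloc::Run:

    #include <cstddef>
    #include <functional>
    #include <unordered_set>

    // Sketch of the hash_run/eq_run functor pattern used by full_runs_ above.
    // Hashing and comparing the pointer itself suffices because each run has
    // a unique address.
    struct Run { int size_bracket_idx; };

    struct hash_run {
      size_t operator()(const Run* r) const {
        return std::hash<const Run*>()(r);
      }
    };

    struct eq_run {
      bool operator()(const Run* a, const Run* b) const { return a == b; }
    };

    int main() {
      std::unordered_set<Run*, hash_run, eq_run> full_runs;
      Run r{0};
      full_runs.insert(&r);
      return full_runs.count(&r) == 1 ? 0 : 1;
    }
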
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index fd79bf6a2d..e9a3c3a42b 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
#define ART_RUNTIME_GC_COLLECTOR_MARK_SWEEP_H_
+#include <memory>
+
#include "atomic.h"
#include "barrier.h"
#include "base/macros.h"
@@ -26,7 +28,6 @@
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "UniquePtrCompat.h"
namespace art {
@@ -307,7 +308,7 @@ class MarkSweep : public GarbageCollector {
// Verification.
size_t live_stack_freeze_size_;
- UniquePtr<Barrier> gc_barrier_;
+ std::unique_ptr<Barrier> gc_barrier_;
Mutex mark_stack_lock_ ACQUIRED_AFTER(Locks::classlinker_classes_lock_);
const bool is_concurrent_;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index dacb5ae76f..a95abe440a 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -17,6 +17,8 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
#define ART_RUNTIME_GC_COLLECTOR_SEMI_SPACE_H_
+#include <memory>
+
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
@@ -25,7 +27,6 @@
#include "immune_region.h"
#include "object_callbacks.h"
#include "offsets.h"
-#include "UniquePtrCompat.h"
namespace art {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index ef31be3fdc..706d1dee60 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -20,6 +20,7 @@
#include <cutils/trace.h>
#include <limits>
+#include <memory>
#include <vector>
#include "base/histogram-inl.h"
@@ -64,7 +65,6 @@
#include "scoped_thread_state_change.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
-#include "UniquePtrCompat.h"
#include "well_known_classes.h"
namespace art {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 9831861361..eea287972e 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -733,7 +733,7 @@ class Heap {
space::LargeObjectSpace* large_object_space_;
// The card table, dirtied by the write barrier.
- UniquePtr<accounting::CardTable> card_table_;
+ std::unique_ptr<accounting::CardTable> card_table_;
// A mod-union table remembers all of the references from its space to other spaces.
SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;
@@ -788,7 +788,7 @@ class Heap {
// Guards access to the state of GC, associated conditional variable is used to signal when a GC
// completes.
Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
- UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
+ std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
// Reference processor;
ReferenceProcessor reference_processor_;
@@ -880,7 +880,7 @@ class Heap {
};
// Parallel GC data structures.
- UniquePtr<ThreadPool> thread_pool_;
+ std::unique_ptr<ThreadPool> thread_pool_;
// The nanosecond time at which the last GC ended.
uint64_t last_gc_time_ns_;
@@ -893,19 +893,19 @@ class Heap {
uint64_t allocation_rate_;
// For a GC cycle, a bitmap that is set corresponding to the
- UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
- UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
+ std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
+ std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
// Mark stack that we reuse to avoid re-allocating the mark stack.
- UniquePtr<accounting::ObjectStack> mark_stack_;
+ std::unique_ptr<accounting::ObjectStack> mark_stack_;
// Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
// to use the live bitmap as the old mark bitmap.
const size_t max_allocation_stack_size_;
- UniquePtr<accounting::ObjectStack> allocation_stack_;
+ std::unique_ptr<accounting::ObjectStack> allocation_stack_;
// Second allocation stack so that we can process allocation with the heap unlocked.
- UniquePtr<accounting::ObjectStack> live_stack_;
+ std::unique_ptr<accounting::ObjectStack> live_stack_;
// Allocator type.
AllocatorType current_allocator_;
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 8850b92190..4176f4ad08 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -48,7 +48,7 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
for (size_t i = 0; i < 1024; ++i) {
StackHandleScope<1> hs(soa.Self());
- Handle<mirror::ObjectArray<mirror::Object> > array(hs.NewHandle(
+ Handle<mirror::ObjectArray<mirror::Object>> array(hs.NewHandle(
mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), c.Get(), 2048)));
for (size_t j = 0; j < 2048; ++j) {
mirror::String* string = mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!");
@@ -63,7 +63,7 @@ TEST_F(HeapTest, GarbageCollectClassLinkerInit) {
TEST_F(HeapTest, HeapBitmapCapacityTest) {
byte* heap_begin = reinterpret_cast<byte*>(0x1000);
const size_t heap_capacity = kObjectAlignment * (sizeof(intptr_t) * 8 + 1);
- UniquePtr<accounting::ContinuousSpaceBitmap> bitmap(
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
accounting::ContinuousSpaceBitmap::Create("test bitmap", heap_begin, heap_capacity));
mirror::Object* fake_end_of_heap_object =
reinterpret_cast<mirror::Object*>(&heap_begin[heap_capacity - kObjectAlignment]);
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index 90ffe59603..fcd772bba5 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -28,7 +28,7 @@ BumpPointerSpace* BumpPointerSpace::Create(const std::string& name, size_t capac
byte* requested_begin) {
capacity = RoundUp(capacity, kPageSize);
std::string error_msg;
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
+ std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
PROT_READ | PROT_WRITE, true, &error_msg));
if (mem_map.get() == nullptr) {
LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 2a717cbf17..5036095d0e 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -129,8 +129,8 @@ ImageHeader* ImageSpace::ReadImageHeaderOrDie(const char* image_location,
std::string image_filename;
bool is_system = false;
if (FindImageFilename(image_location, image_isa, &image_filename, &is_system)) {
- UniquePtr<File> image_file(OS::OpenFileForReading(image_filename.c_str()));
- UniquePtr<ImageHeader> image_header(new ImageHeader);
+ std::unique_ptr<File> image_file(OS::OpenFileForReading(image_filename.c_str()));
+ std::unique_ptr<ImageHeader> image_header(new ImageHeader);
const bool success = image_file->ReadFully(image_header.get(), sizeof(ImageHeader));
if (!success || !image_header->IsValid()) {
LOG(FATAL) << "Invalid Image header for: " << image_filename;
@@ -200,7 +200,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
LOG(INFO) << "ImageSpace::Init entering image_filename=" << image_filename;
}
- UniquePtr<File> file(OS::OpenFileForReading(image_filename));
+ std::unique_ptr<File> file(OS::OpenFileForReading(image_filename));
if (file.get() == NULL) {
*error_msg = StringPrintf("Failed to open '%s'", image_filename);
return nullptr;
@@ -213,7 +213,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
}
// Note: The image header is part of the image due to mmap page alignment required of offset.
- UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
+ std::unique_ptr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
image_header.GetImageSize(),
PROT_READ | PROT_WRITE,
MAP_PRIVATE,
@@ -229,7 +229,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
CHECK_EQ(image_header.GetImageBegin(), map->Begin());
DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));
- UniquePtr<MemMap> image_map(MemMap::MapFileAtAddress(nullptr, image_header.GetImageBitmapSize(),
+ std::unique_ptr<MemMap> image_map(MemMap::MapFileAtAddress(nullptr, image_header.GetImageBitmapSize(),
PROT_READ, MAP_PRIVATE,
file->Fd(), image_header.GetBitmapOffset(),
false,
@@ -242,7 +242,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
uint32_t bitmap_index = bitmap_index_.FetchAndAdd(1);
std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename,
bitmap_index));
- UniquePtr<accounting::ContinuousSpaceBitmap> bitmap(
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap(
accounting::ContinuousSpaceBitmap::CreateFromMemMap(bitmap_name, image_map.release(),
reinterpret_cast<byte*>(map->Begin()),
map->Size()));
@@ -251,7 +251,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat
return nullptr;
}
- UniquePtr<ImageSpace> space(new ImageSpace(image_filename, image_location,
+ std::unique_ptr<ImageSpace> space(new ImageSpace(image_filename, image_location,
map.release(), bitmap.release()));
if (kIsDebugBuild) {
space->VerifyImageAllocations();
diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h
index 1dc6c576ab..372db3a580 100644
--- a/runtime/gc/space/image_space.h
+++ b/runtime/gc/space/image_space.h
@@ -128,7 +128,7 @@ class ImageSpace : public MemMapSpace {
static Atomic<uint32_t> bitmap_index_;
- UniquePtr<accounting::ContinuousSpaceBitmap> live_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
ImageSpace(const std::string& name, const char* image_location,
MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap);
@@ -136,7 +136,7 @@ class ImageSpace : public MemMapSpace {
// The OatFile associated with the image during early startup to
// reserve space contiguous to the image. It is later released to
// the ClassLinker during its initialization.
- UniquePtr<OatFile> oat_file_;
+ std::unique_ptr<OatFile> oat_file_;
const std::string image_location_;
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 6c851af603..e63cc3980d 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -16,11 +16,12 @@
#include "large_object_space.h"
+#include <memory>
+
#include "gc/accounting/space_bitmap-inl.h"
#include "base/logging.h"
#include "base/mutex-inl.h"
#include "base/stl_util.h"
-#include "UniquePtrCompat.h"
#include "image.h"
#include "os.h"
#include "space-inl.h"
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 0daefba0d9..a84b43a8a1 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -133,9 +133,9 @@ class LargeObjectMapSpace : public LargeObjectSpace {
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
std::vector<mirror::Object*,
- accounting::GcAllocator<mirror::Object*> > large_objects_ GUARDED_BY(lock_);
+ accounting::GcAllocator<mirror::Object*>> large_objects_ GUARDED_BY(lock_);
typedef SafeMap<mirror::Object*, MemMap*, std::less<mirror::Object*>,
- accounting::GcAllocator<std::pair<mirror::Object*, MemMap*> > > MemMaps;
+ accounting::GcAllocator<std::pair<mirror::Object*, MemMap*>>> MemMaps;
MemMaps mem_maps_ GUARDED_BY(lock_);
};
@@ -256,11 +256,11 @@ class FreeListSpace FINAL : public LargeObjectSpace {
AllocationHeader* GetAllocationHeader(const mirror::Object* obj);
typedef std::set<AllocationHeader*, AllocationHeader::SortByPrevFree,
- accounting::GcAllocator<AllocationHeader*> > FreeBlocks;
+ accounting::GcAllocator<AllocationHeader*>> FreeBlocks;
// There is no footer for allocations at the end of the space, so we keep track of how much
// free space there is at the end manually.
- UniquePtr<MemMap> mem_map_;
+ std::unique_ptr<MemMap> mem_map_;
Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
size_t free_end_ GUARDED_BY(lock_);
FreeBlocks free_blocks_ GUARDED_BY(lock_);
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index 8a6636d433..23c67ffd86 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -39,7 +39,7 @@ void LargeObjectSpaceTest::LargeObjectTest() {
static const size_t num_allocations = 64;
static const size_t max_allocation_size = 0x100000;
- std::vector<std::pair<mirror::Object*, size_t> > requests;
+ std::vector<std::pair<mirror::Object*, size_t>> requests;
for (size_t phase = 0; phase < 2; ++phase) {
while (requests.size() < num_allocations) {
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc
index ba46dccfb2..e710409449 100644
--- a/runtime/gc/space/malloc_space.cc
+++ b/runtime/gc/space/malloc_space.cc
@@ -188,7 +188,7 @@ ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l
VLOG(heap) << "Capacity " << PrettySize(capacity);
// Remap the tail.
std::string error_msg;
- UniquePtr<MemMap> mem_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
+ std::unique_ptr<MemMap> mem_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
PROT_READ | PROT_WRITE, &error_msg));
CHECK(mem_map.get() != nullptr) << error_msg;
void* allocator = CreateAllocator(end_, starting_size_, initial_size_, capacity, low_memory_mode);
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 343bc29b54..8415fa18ad 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -17,9 +17,9 @@
#ifndef ART_RUNTIME_GC_SPACE_SPACE_H_
#define ART_RUNTIME_GC_SPACE_SPACE_H_
+#include <memory>
#include <string>
-#include "UniquePtrCompat.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/space_bitmap.h"
@@ -339,8 +339,8 @@ class DiscontinuousSpace : public Space {
protected:
DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
- UniquePtr<accounting::LargeObjectBitmap> live_bitmap_;
- UniquePtr<accounting::LargeObjectBitmap> mark_bitmap_;
+ std::unique_ptr<accounting::LargeObjectBitmap> live_bitmap_;
+ std::unique_ptr<accounting::LargeObjectBitmap> mark_bitmap_;
private:
DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
@@ -374,7 +374,7 @@ class MemMapSpace : public ContinuousSpace {
}
// Underlying storage of the space
- UniquePtr<MemMap> mem_map_;
+ std::unique_ptr<MemMap> mem_map_;
private:
DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
@@ -419,9 +419,9 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace {
virtual accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() = 0;
protected:
- UniquePtr<accounting::ContinuousSpaceBitmap> live_bitmap_;
- UniquePtr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
- UniquePtr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> live_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> mark_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> temp_bitmap_;
ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin,
byte* end, byte* limit, GcRetentionPolicy gc_retention_policy)
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 407d362be7..a2d4942c8d 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -20,10 +20,10 @@
#include "zygote_space.h"
#include <stdint.h>
+#include <memory>
#include "common_runtime_test.h"
#include "globals.h"
-#include "UniquePtrCompat.h"
#include "mirror/array-inl.h"
#include "mirror/object-inl.h"
@@ -129,37 +129,37 @@ static inline size_t test_rand(size_t* seed) {
void SpaceTest::InitTestBody(CreateSpaceFn create_space) {
{
// Init < max == growth
- UniquePtr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 16 * MB, 32 * MB, 32 * MB, nullptr));
EXPECT_TRUE(space.get() != nullptr);
}
{
// Init == max == growth
- UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 16 * MB, nullptr));
EXPECT_TRUE(space.get() != nullptr);
}
{
// Init > max == growth
- UniquePtr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 32 * MB, 16 * MB, 16 * MB, nullptr));
EXPECT_TRUE(space.get() == nullptr);
}
{
// Growth == init < max
- UniquePtr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 16 * MB, 16 * MB, 32 * MB, nullptr));
EXPECT_TRUE(space.get() != nullptr);
}
{
// Growth < init < max
- UniquePtr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 16 * MB, 8 * MB, 32 * MB, nullptr));
EXPECT_TRUE(space.get() == nullptr);
}
{
// Init < growth < max
- UniquePtr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 8 * MB, 16 * MB, 32 * MB, nullptr));
EXPECT_TRUE(space.get() != nullptr);
}
{
// Init < max < growth
- UniquePtr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
+ std::unique_ptr<Space> space(create_space("test", 8 * MB, 32 * MB, 16 * MB, nullptr));
EXPECT_TRUE(space.get() == nullptr);
}
}
@@ -398,7 +398,7 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
// Fill the space with lots of small objects up to the growth limit
size_t max_objects = (growth_limit / (object_size > 0 ? object_size : 8)) + 1;
- UniquePtr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
+ std::unique_ptr<mirror::Object*[]> lots_of_objects(new mirror::Object*[max_objects]);
size_t last_object = 0; // last object for which allocation succeeded
size_t amount_allocated = 0; // amount of space allocated
Thread* self = Thread::Current();
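
The lots_of_objects allocation above uses the array form std::unique_ptr<mirror::Object*[]>, which the migration has to pick deliberately: the array specialization's default deleter calls delete[] to match new[], and it provides operator[]. A minimal illustration:

    #include <memory>

    int main() {
      // Array form: the default deleter calls delete[] to match new[]; the
      // scalar form std::unique_ptr<int> would wrongly call delete.
      std::unique_ptr<int[]> xs(new int[1024]);
      xs[0] = 42;  // operator[] is provided for the T[] specialization
      return xs[0] == 42 ? 0 : 1;
    }
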