summaryrefslogtreecommitdiffstats
path: root/runtime
diff options
context:
space:
mode:
Diffstat (limited to 'runtime')
-rw-r--r--runtime/Android.mk3
-rw-r--r--runtime/arch/mips64/quick_entrypoints_mips64.S705
-rw-r--r--runtime/arch/stub_test.cc4
-rw-r--r--runtime/arch/x86_64/quick_entrypoints_x86_64.S4
-rw-r--r--runtime/asm_support.h2
-rw-r--r--runtime/base/arena_allocator.cc75
-rw-r--r--runtime/base/arena_allocator.h34
-rw-r--r--runtime/base/logging.h1
-rw-r--r--runtime/check_jni.cc9
-rw-r--r--runtime/check_reference_map_visitor.h27
-rw-r--r--runtime/class_linker.cc962
-rw-r--r--runtime/class_linker.h107
-rw-r--r--runtime/class_linker_test.cc2
-rw-r--r--runtime/common_runtime_test.cc24
-rw-r--r--runtime/common_runtime_test.h8
-rw-r--r--runtime/common_throws.cc191
-rw-r--r--runtime/common_throws.h20
-rw-r--r--runtime/debugger.cc257
-rw-r--r--runtime/debugger.h40
-rw-r--r--runtime/entrypoints/entrypoint_utils-inl.h19
-rw-r--r--runtime/entrypoints/entrypoint_utils.cc31
-rw-r--r--runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc2
-rw-r--r--runtime/entrypoints/quick/quick_field_entrypoints.cc36
-rw-r--r--runtime/entrypoints/quick/quick_lock_entrypoints.cc8
-rw-r--r--runtime/entrypoints/quick/quick_throw_entrypoints.cc9
-rw-r--r--runtime/entrypoints/quick/quick_trampoline_entrypoints.cc2
-rw-r--r--runtime/entrypoints_order_test.cc3
-rw-r--r--runtime/gc/accounting/mod_union_table_test.cc9
-rw-r--r--runtime/gc/allocator/rosalloc-inl.h121
-rw-r--r--runtime/gc/allocator/rosalloc.cc129
-rw-r--r--runtime/gc/allocator/rosalloc.h46
-rw-r--r--runtime/gc/collector/concurrent_copying.cc5
-rw-r--r--runtime/gc/collector/garbage_collector.cc1
-rw-r--r--runtime/gc/collector/garbage_collector.h7
-rw-r--r--runtime/gc/collector/mark_sweep.cc1
-rw-r--r--runtime/gc/collector/semi_space.cc13
-rw-r--r--runtime/gc/heap-inl.h86
-rw-r--r--runtime/gc/heap.cc108
-rw-r--r--runtime/gc/heap.h22
-rw-r--r--runtime/gc/space/bump_pointer_space-inl.h8
-rw-r--r--runtime/gc/space/bump_pointer_space.cc6
-rw-r--r--runtime/gc/space/bump_pointer_space.h10
-rw-r--r--runtime/gc/space/dlmalloc_space-inl.h15
-rw-r--r--runtime/gc/space/dlmalloc_space.cc6
-rw-r--r--runtime/gc/space/dlmalloc_space.h26
-rw-r--r--runtime/gc/space/large_object_space.cc14
-rw-r--r--runtime/gc/space/large_object_space.h10
-rw-r--r--runtime/gc/space/large_object_space_test.cc14
-rw-r--r--runtime/gc/space/malloc_space.h10
-rw-r--r--runtime/gc/space/region_space-inl.h41
-rw-r--r--runtime/gc/space/region_space.cc8
-rw-r--r--runtime/gc/space/region_space.h19
-rw-r--r--runtime/gc/space/rosalloc_space-inl.h46
-rw-r--r--runtime/gc/space/rosalloc_space.cc16
-rw-r--r--runtime/gc/space/rosalloc_space.h45
-rw-r--r--runtime/gc/space/space.h24
-rw-r--r--runtime/gc/space/space_test.h100
-rw-r--r--runtime/gc/space/valgrind_malloc_space-inl.h66
-rw-r--r--runtime/gc/space/valgrind_malloc_space.h11
-rw-r--r--runtime/gc/space/zygote_space.cc2
-rw-r--r--runtime/gc/space/zygote_space.h8
-rw-r--r--runtime/hprof/hprof.cc52
-rw-r--r--runtime/instrumentation.cc11
-rw-r--r--runtime/instrumentation.h9
-rw-r--r--runtime/interpreter/interpreter.cc109
-rw-r--r--runtime/interpreter/interpreter_common.cc381
-rw-r--r--runtime/interpreter/interpreter_common.h11
-rw-r--r--runtime/interpreter/interpreter_goto_table_impl.cc46
-rw-r--r--runtime/interpreter/interpreter_switch_impl.cc46
-rw-r--r--runtime/interpreter/unstarted_runtime.cc951
-rw-r--r--runtime/interpreter/unstarted_runtime.h53
-rw-r--r--runtime/java_vm_ext.cc3
-rw-r--r--runtime/jdwp/jdwp_event.cc11
-rw-r--r--runtime/jdwp/jdwp_handler.cc68
-rw-r--r--runtime/jit/jit.cc21
-rw-r--r--runtime/jit/jit.h16
-rw-r--r--runtime/jit/jit_code_cache.cc1
-rw-r--r--runtime/jit/jit_instrumentation.h4
-rw-r--r--runtime/jni_internal.cc59
-rw-r--r--runtime/memory_region.h51
-rw-r--r--runtime/memory_region_test.cc58
-rw-r--r--runtime/mirror/art_method.cc7
-rw-r--r--runtime/mirror/class.cc12
-rw-r--r--runtime/mirror/dex_cache.h4
-rw-r--r--runtime/mirror/object_array-inl.h3
-rw-r--r--runtime/mirror/object_test.cc18
-rw-r--r--runtime/monitor.cc11
-rw-r--r--runtime/native/dalvik_system_DexFile.cc360
-rw-r--r--runtime/native/dalvik_system_VMDebug.cc8
-rw-r--r--runtime/native/dalvik_system_VMRuntime.cc6
-rw-r--r--runtime/native/java_lang_Class.cc3
-rw-r--r--runtime/native/java_lang_String.cc2
-rw-r--r--runtime/native/java_lang_System.cc13
-rw-r--r--runtime/native/java_lang_Thread.cc2
-rw-r--r--runtime/native/java_lang_reflect_Constructor.cc3
-rw-r--r--runtime/native/java_lang_reflect_Field.cc20
-rw-r--r--runtime/oat.h2
-rw-r--r--runtime/oat_file_assistant.cc952
-rw-r--r--runtime/oat_file_assistant.h431
-rw-r--r--runtime/oat_file_assistant_test.cc878
-rw-r--r--runtime/parsed_options.cc3
-rw-r--r--runtime/primitive.h4
-rw-r--r--runtime/quick_exception_handler.cc10
-rw-r--r--runtime/quick_exception_handler.h4
-rw-r--r--runtime/reflection-inl.h8
-rw-r--r--runtime/reflection.cc45
-rw-r--r--runtime/reflection.h6
-rw-r--r--runtime/runtime.cc21
-rw-r--r--runtime/runtime.h10
-rw-r--r--runtime/runtime_options.def1
-rw-r--r--runtime/stack.cc50
-rw-r--r--runtime/stack.h4
-rw-r--r--runtime/stack_map.h440
-rw-r--r--runtime/thread.cc113
-rw-r--r--runtime/thread.h62
-rw-r--r--runtime/thread_list.cc13
-rw-r--r--runtime/throw_location.cc46
-rw-r--r--runtime/throw_location.h93
-rw-r--r--runtime/trace.cc39
-rw-r--r--runtime/trace.h4
-rw-r--r--runtime/transaction.cc3
-rw-r--r--runtime/utils.cc17
-rw-r--r--runtime/utils.h32
-rw-r--r--runtime/utils_test.cc7
-rw-r--r--runtime/verifier/method_verifier.cc50
-rw-r--r--runtime/verifier/method_verifier.h2
-rw-r--r--runtime/verifier/register_line.cc13
-rw-r--r--runtime/verifier/register_line.h4
128 files changed, 6226 insertions, 3212 deletions
diff --git a/runtime/Android.mk b/runtime/Android.mk
index c5cf89014e..8f203810e1 100644
--- a/runtime/Android.mk
+++ b/runtime/Android.mk
@@ -80,6 +80,7 @@ LIBART_COMMON_SRC_FILES := \
interpreter/interpreter.cc \
interpreter/interpreter_common.cc \
interpreter/interpreter_switch_impl.cc \
+ interpreter/unstarted_runtime.cc \
java_vm_ext.cc \
jdwp/jdwp_event.cc \
jdwp/jdwp_expand_buf.cc \
@@ -135,6 +136,7 @@ LIBART_COMMON_SRC_FILES := \
native/sun_misc_Unsafe.cc \
oat.cc \
oat_file.cc \
+ oat_file_assistant.cc \
object_lock.cc \
offsets.cc \
os_linux.cc \
@@ -151,7 +153,6 @@ LIBART_COMMON_SRC_FILES := \
thread.cc \
thread_list.cc \
thread_pool.cc \
- throw_location.cc \
trace.cc \
transaction.cc \
profiler.cc \
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 6f1b8261a3..8cb95f1ab6 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -422,20 +422,120 @@ ENTRY art_quick_do_long_jump
move $v1, $zero
END art_quick_do_long_jump
-UNIMPLEMENTED art_quick_deliver_exception
-UNIMPLEMENTED art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_throw_div_zero
-UNIMPLEMENTED art_quick_throw_array_bounds
-UNIMPLEMENTED art_quick_throw_stack_overflow
-UNIMPLEMENTED art_quick_throw_no_such_method
+ /*
+ * Called by managed code, saves most registers (forms basis of long jump
+ * context) and passes the bottom of the stack.
+ * artDeliverExceptionFromCode will place the callee save Method* at
+ * the bottom of the thread. On entry v0 holds Throwable*
+ */
+ENTRY art_quick_deliver_exception
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artDeliverExceptionFromCode
+ jalr $zero, $t9 # artDeliverExceptionFromCode(Throwable*, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_deliver_exception
+
+ /*
+ * Called by managed code to create and deliver a NullPointerException
+ */
+ .extern artThrowNullPointerExceptionFromCode
+ENTRY art_quick_throw_null_pointer_exception
+.Lart_quick_throw_null_pointer_exception_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNullPointerExceptionFromCode
+ jalr $zero, $t9 # artThrowNullPointerExceptionFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_null_pointer_exception
-UNIMPLEMENTED art_quick_invoke_interface_trampoline
-UNIMPLEMENTED art_quick_invoke_interface_trampoline_with_access_check
+ /*
+ * Called by managed code to create and deliver an ArithmeticException
+ */
+ .extern artThrowDivZeroFromCode
+ENTRY art_quick_throw_div_zero
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowDivZeroFromCode
+ jalr $zero, $t9 # artThrowDivZeroFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_div_zero
-UNIMPLEMENTED art_quick_invoke_static_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_direct_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_super_trampoline_with_access_check
-UNIMPLEMENTED art_quick_invoke_virtual_trampoline_with_access_check
+ /*
+ * Called by managed code to create and deliver an
+ * ArrayIndexOutOfBoundsException
+ */
+ .extern artThrowArrayBoundsFromCode
+ENTRY art_quick_throw_array_bounds
+.Lart_quick_throw_array_bounds_gp_set:
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowArrayBoundsFromCode
+ jalr $zero, $t9 # artThrowArrayBoundsFromCode(index, limit, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_throw_array_bounds
+
+ /*
+ * Called by managed code to create and deliver a StackOverflowError.
+ */
+ .extern artThrowStackOverflowFromCode
+ENTRY art_quick_throw_stack_overflow
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowStackOverflowFromCode
+ jalr $zero, $t9 # artThrowStackOverflowFromCode(Thread*)
+ move $a0, rSELF # pass Thread::Current
+END art_quick_throw_stack_overflow
+
+ /*
+ * Called by managed code to create and deliver a NoSuchMethodError.
+ */
+ .extern artThrowNoSuchMethodFromCode
+ENTRY art_quick_throw_no_such_method
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowNoSuchMethodFromCode
+ jalr $zero, $t9 # artThrowNoSuchMethodFromCode(method_idx, Thread*)
+ move $a1, rSELF # pass Thread::Current
+END art_quick_throw_no_such_method
+
+ /*
+ * All generated callsites for interface invokes and invocation slow paths will load arguments
+ * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
+ * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
+ * stack and call the appropriate C helper.
+ * NOTE: "this" is first visable argument of the target, and so can be found in arg1/$a1.
+ *
+ * The helper will attempt to locate the target and return a 128-bit result in $v0/$v1 consisting
+ * of the target Method* in $v0 and method->code_ in $v1.
+ *
+ * If unsuccessful, the helper will return NULL/NULL. There will be a pending exception in the
+ * thread and we branch to another stub to deliver it.
+ *
+ * On success this wrapper will restore arguments and *jump* to the target, leaving the ra
+ * pointing back to the original caller.
+ */
+.macro INVOKE_TRAMPOLINE c_name, cxx_name
+ .extern \cxx_name
+ENTRY \c_name
+ SETUP_REFS_AND_ARGS_CALLEE_SAVE_FRAME # save callee saves in case allocation triggers GC
+ lwu $a2, FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE($sp) # pass caller Method*
+ move $a3, rSELF # pass Thread::Current
+ jal \cxx_name # (method_idx, this, caller, Thread*, $sp)
+ move $a4, $sp # pass $sp
+ move $a0, $v0 # save target Method*
+ move $t9, $v1 # save $v0->code_
+ RESTORE_REFS_AND_ARGS_CALLEE_SAVE_FRAME
+ beq $v0, $zero, 1f
+ nop
+ jalr $zero, $t9
+ nop
+1:
+ DELIVER_PENDING_EXCEPTION
+END \c_name
+.endm
+
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
+INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck
+
+INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
+INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck
# On entry:
# t0 = shorty
@@ -454,7 +554,7 @@ UNIMPLEMENTED art_quick_invoke_virtual_trampoline_with_access_check
li $t9, 74 # put char 'J' into t9
beq $t9, $t3, 3f # branch if result type char == 'J'
nop
- lwu $\gpu, 0($t1)
+ lw $\gpu, 0($t1)
sw $\gpu, 0($v0)
daddiu $v0, 4
daddiu $t1, 4
@@ -699,63 +799,534 @@ call_sfn:
sw $v1, 4($a4) # store the other half of the result
END art_quick_invoke_static_stub
+ /*
+ * Entry from managed code that calls artHandleFillArrayDataFromCode and
+ * delivers exception on failure.
+ */
+ .extern artHandleFillArrayDataFromCode
+ENTRY art_quick_handle_fill_data
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artHandleFillArrayDataFromCode # (payload offset, Array*, method, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_handle_fill_data
+
+ /*
+ * Entry from managed code that calls artLockObjectFromCode, may block for GC.
+ */
+ .extern artLockObjectFromCode
+ENTRY art_quick_lock_object
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block
+ jal artLockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_lock_object
+
+ /*
+ * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
+ */
+ .extern artUnlockObjectFromCode
+ENTRY art_quick_unlock_object
+ beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC
+ jal artUnlockObjectFromCode # (Object* obj, Thread*)
+ move $a1, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_unlock_object
+
+ /*
+ * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure.
+ */
+ .extern artThrowClassCastException
+ENTRY art_quick_check_cast
+ daddiu $sp, $sp, -32
+ .cfi_adjust_cfa_offset 32
+ sd $ra, 24($sp)
+ .cfi_rel_offset 31, 24
+ sd $t9, 16($sp)
+ sd $a1, 8($sp)
+ sd $a0, 0($sp)
+ jal artIsAssignableFromCode
+ nop
+ beq $v0, $zero, .Lthrow_class_cast_exception
+ ld $ra, 24($sp)
+ jalr $zero, $ra
+ daddiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+.Lthrow_class_cast_exception:
+ ld $t9, 16($sp)
+ ld $a1, 8($sp)
+ ld $a0, 0($sp)
+ daddiu $sp, $sp, 32
+ .cfi_adjust_cfa_offset -32
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ dla $t9, artThrowClassCastException
+ jalr $zero, $t9 # artThrowClassCastException (Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_check_cast
+
+ /*
+ * Entry from managed code for array put operations of objects where the value being stored
+ * needs to be checked for compatibility.
+ * a0 = array, a1 = index, a2 = value
+ */
+ENTRY art_quick_aput_obj_with_null_and_bound_check
+ bne $a0, $zero, .Lart_quick_aput_obj_with_bound_check_gp_set
+ nop
+ b .Lart_quick_throw_null_pointer_exception_gp_set
+ nop
+END art_quick_aput_obj_with_null_and_bound_check
+ENTRY art_quick_aput_obj_with_bound_check
+ lwu $t0, MIRROR_ARRAY_LENGTH_OFFSET($a0)
+ sltu $t1, $a1, $t0
+ bne $t1, $zero, .Lart_quick_aput_obj_gp_set
+ nop
+ move $a0, $a1
+ b .Lart_quick_throw_array_bounds_gp_set
+ move $a1, $t0
+END art_quick_aput_obj_with_bound_check
-UNIMPLEMENTED art_quick_handle_fill_data
-UNIMPLEMENTED art_quick_lock_object
-UNIMPLEMENTED art_quick_unlock_object
-UNIMPLEMENTED art_quick_check_cast
-UNIMPLEMENTED art_quick_aput_obj_with_null_and_bound_check
-UNIMPLEMENTED art_quick_aput_obj_with_bound_check
-UNIMPLEMENTED art_quick_aput_obj
-UNIMPLEMENTED art_quick_initialize_static_storage
-UNIMPLEMENTED art_quick_initialize_type
-UNIMPLEMENTED art_quick_initialize_type_and_verify_access
-UNIMPLEMENTED art_quick_get_boolean_static
-UNIMPLEMENTED art_quick_get_byte_static
-UNIMPLEMENTED art_quick_get_char_static
-UNIMPLEMENTED art_quick_get_short_static
-UNIMPLEMENTED art_quick_get32_static
-UNIMPLEMENTED art_quick_get64_static
-UNIMPLEMENTED art_quick_get_obj_static
-UNIMPLEMENTED art_quick_get_boolean_instance
-UNIMPLEMENTED art_quick_get_byte_instance
-UNIMPLEMENTED art_quick_get_char_instance
-UNIMPLEMENTED art_quick_get_short_instance
-UNIMPLEMENTED art_quick_get32_instance
-UNIMPLEMENTED art_quick_get64_instance
-UNIMPLEMENTED art_quick_get_obj_instance
-UNIMPLEMENTED art_quick_set8_static
-UNIMPLEMENTED art_quick_set16_static
-UNIMPLEMENTED art_quick_set32_static
-UNIMPLEMENTED art_quick_set64_static
-UNIMPLEMENTED art_quick_set_obj_static
-UNIMPLEMENTED art_quick_set8_instance
-UNIMPLEMENTED art_quick_set16_instance
-UNIMPLEMENTED art_quick_set32_instance
-UNIMPLEMENTED art_quick_set64_instance
-UNIMPLEMENTED art_quick_set_obj_instance
-UNIMPLEMENTED art_quick_resolve_string
+ENTRY art_quick_aput_obj
+ beq $a2, $zero, .Ldo_aput_null
+ nop
+ lwu $t0, MIRROR_OBJECT_CLASS_OFFSET($a0)
+ lwu $t1, MIRROR_OBJECT_CLASS_OFFSET($a2)
+ lwu $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET($t0)
+ bne $t1, $t0, .Lcheck_assignability # value's type == array's component type - trivial assignability
+ nop
+.Ldo_aput:
+ dsll $a1, $a1, 2
+ daddu $t0, $a0, $a1
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+ ld $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
+ dsrl $t1, $a0, 7
+ daddu $t1, $t1, $t0
+ sb $t0, ($t1)
+ jalr $zero, $ra
+ nop
+.Ldo_aput_null:
+ dsll $a1, $a1, 2
+ daddu $t0, $a0, $a1
+ sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
+ jalr $zero, $ra
+ nop
+.Lcheck_assignability:
+ daddiu $sp, $sp, -64
+ .cfi_adjust_cfa_offset 64
+ sd $ra, 56($sp)
+ .cfi_rel_offset 31, 56
+ sd $t9, 24($sp)
+ sd $a2, 16($sp)
+ sd $a1, 8($sp)
+ sd $a0, 0($sp)
+ move $a1, $t1
+ move $a0, $t0
+ jal artIsAssignableFromCode # (Class*, Class*)
+ nop
+ ld $ra, 56($sp)
+ ld $t9, 24($sp)
+ ld $a2, 16($sp)
+ ld $a1, 8($sp)
+ ld $a0, 0($sp)
+ daddiu $sp, $sp, 64
+ .cfi_adjust_cfa_offset -64
+ bne $v0, $zero, .Ldo_aput
+ nop
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ move $a1, $a2
+ dla $t9, artThrowArrayStoreException
+ jalr $zero, $t9 # artThrowArrayStoreException(Class*, Class*, Thread*)
+ move $a2, rSELF # pass Thread::Current
+END art_quick_aput_obj
+
+ /*
+ * Entry from managed code when uninitialized static storage, this stub will run the class
+ * initializer and deliver the exception on error. On success the static storage base is
+ * returned.
+ */
+ .extern artInitializeStaticStorageFromCode
+ENTRY art_quick_initialize_static_storage
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeStaticStorageFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeStaticStorageFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_static_storage
+
+ /*
+ * Entry from managed code when dex cache misses for a type_idx.
+ */
+ .extern artInitializeTypeFromCode
+ENTRY art_quick_initialize_type
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeTypeFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type
+
+ /*
+ * Entry from managed code when type_idx needs to be checked for access and dex cache may also
+ * miss.
+ */
+ .extern artInitializeTypeAndVerifyAccessFromCode
+ENTRY art_quick_initialize_type_and_verify_access
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artInitializeTypeFromCode(uint32_t type_idx, Method* referrer, Thread*)
+ jal artInitializeTypeAndVerifyAccessFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_initialize_type_and_verify_access
+
+ /*
+ * Called by managed code to resolve a static field and load a boolean primitive value.
+ */
+ .extern artGetBooleanStaticFromCode
+ENTRY art_quick_get_boolean_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetBooleanStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_static
+
+ /*
+ * Called by managed code to resolve a static field and load a byte primitive value.
+ */
+ .extern artGetByteStaticFromCode
+ENTRY art_quick_get_byte_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetByteStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_static
+
+ /*
+ * Called by managed code to resolve a static field and load a char primitive value.
+ */
+ .extern artGetCharStaticFromCode
+ENTRY art_quick_get_char_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetCharStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_static
+
+ /*
+ * Called by managed code to resolve a static field and load a short primitive value.
+ */
+ .extern artGetShortStaticFromCode
+ENTRY art_quick_get_short_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetShortStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_static
+
+ /*
+ * Called by managed code to resolve a static field and load a 32-bit primitive value.
+ */
+ .extern artGet32StaticFromCode
+ENTRY art_quick_get32_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet32StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_static
+
+ /*
+ * Called by managed code to resolve a static field and load a 64-bit primitive value.
+ */
+ .extern artGet64StaticFromCode
+ENTRY art_quick_get64_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet64StaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_static
+
+ /*
+ * Called by managed code to resolve a static field and load an object reference.
+ */
+ .extern artGetObjStaticFromCode
+ENTRY art_quick_get_obj_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetObjStaticFromCode # (uint32_t field_idx, const Method* referrer, Thread*)
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_static
+
+ /*
+ * Called by managed code to resolve an instance field and load a boolean primitive value.
+ */
+ .extern artGetBooleanInstanceFromCode
+ENTRY art_quick_get_boolean_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a byte primitive value.
+ */
+ .extern artGetByteInstanceFromCode
+ENTRY art_quick_get_byte_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetByteInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a char primitive value.
+ */
+ .extern artGetCharInstanceFromCode
+ENTRY art_quick_get_char_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetCharInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a short primitive value.
+ */
+ .extern artGetShortInstanceFromCode
+ENTRY art_quick_get_short_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetShortInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a 32-bit primitive value.
+ */
+ .extern artGet32InstanceFromCode
+ENTRY art_quick_get32_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet32InstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get32_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load a 64-bit primitive value.
+ */
+ .extern artGet64InstanceFromCode
+ENTRY art_quick_get64_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGet64InstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get64_instance
+
+ /*
+ * Called by managed code to resolve an instance field and load an object reference.
+ */
+ .extern artGetObjInstanceFromCode
+ENTRY art_quick_get_obj_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artGetObjInstanceFromCode # (field_idx, Object*, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_NO_EXCEPTION
+END art_quick_get_obj_instance
+
+ /*
+ * Called by managed code to resolve a static field and store a 8-bit primitive value.
+ */
+ .extern artSet8StaticFromCode
+ENTRY art_quick_set8_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet8StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set8_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 16-bit primitive value.
+ */
+ .extern artSet16StaticFromCode
+ENTRY art_quick_set16_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet16StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set16_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 32-bit primitive value.
+ */
+ .extern artSet32StaticFromCode
+ENTRY art_quick_set32_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet32StaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set32_static
+
+ /*
+ * Called by managed code to resolve a static field and store a 64-bit primitive value.
+ */
+ .extern artSet64StaticFromCode
+ENTRY art_quick_set64_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ move $a2, $a1 # pass new_val
+ lwu $a1, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet64StaticFromCode # (field_idx, referrer, new_val, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set64_static
+
+ /*
+ * Called by managed code to resolve a static field and store an object reference.
+ */
+ .extern artSetObjStaticFromCode
+ENTRY art_quick_set_obj_static
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a2, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSetObjStaticFromCode # (field_idx, new_val, referrer, Thread*)
+ move $a3, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set_obj_static
+
+ /*
+ * Called by managed code to resolve an instance field and store a 8-bit primitive value.
+ */
+ .extern artSet8InstanceFromCode
+ENTRY art_quick_set8_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet8InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set8_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 16-bit primitive value.
+ */
+ .extern artSet16InstanceFromCode
+ENTRY art_quick_set16_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet16InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set16_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+ */
+ .extern artSet32InstanceFromCode
+ENTRY art_quick_set32_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet32InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set32_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store a 64-bit primitive value.
+ */
+ .extern artSet64InstanceFromCode
+ENTRY art_quick_set64_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSet64InstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set64_instance
+
+ /*
+ * Called by managed code to resolve an instance field and store an object reference.
+ */
+ .extern artSetObjInstanceFromCode
+ENTRY art_quick_set_obj_instance
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ lwu $a3, FRAME_SIZE_REFS_ONLY_CALLEE_SAVE($sp) # pass referrer's Method*
+ jal artSetObjInstanceFromCode # (field_idx, Object*, new_val, referrer, Thread*)
+ move $a4, rSELF # pass Thread::Current
+ RETURN_IF_ZERO
+END art_quick_set_obj_instance
+
+ /*
+ * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
+ * exception on error. On success the String is returned. R0 holds the referring method,
+ * R1 holds the string index. The fast path check for hit in strings cache has already been
+ * performed.
+ */
+ .extern artResolveStringFromCode
+ENTRY art_quick_resolve_string
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ # artResolveStringFromCode(Method* referrer, uint32_t string_idx, Thread*, $sp)
+ jal artResolveStringFromCode
+ move $a2, rSELF # pass Thread::Current
+ RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
+END art_quick_resolve_string
// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
ENTRY \name
- break
- break
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a2, rSELF # pass Thread::Current
+ \return
END \name
.endm
.macro THREE_ARG_DOWNCALL name, entrypoint, return
+ .extern \entrypoint
ENTRY \name
- break
- break
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case of GC
+ jal \entrypoint
+ move $a3, rSELF # pass Thread::Current
+ \return
END \name
.endm
// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
-UNIMPLEMENTED art_quick_test_suspend
+ /*
+ * Called by managed code when the value in rSUSPEND has been decremented to 0.
+ */
+ .extern artTestSuspendFromCode
+ENTRY art_quick_test_suspend
+ lh $a0, THREAD_FLAGS_OFFSET(rSELF)
+ bne $a0, $zero, 1f
+ daddiu rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
+ jalr $zero, $ra
+ nop
+1:
+ SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves for stack crawl
+ jal artTestSuspendFromCode # (Thread*)
+ move $a0, rSELF
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
+END art_quick_test_suspend
/*
* Called by managed code that is attempting to call a method on a proxy class. On entry
@@ -779,7 +1350,19 @@ ENTRY art_quick_proxy_invoke_handler
DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler
-UNIMPLEMENTED art_quick_imt_conflict_trampoline
+ /*
+ * Called to resolve an imt conflict. t0 is a hidden argument that holds the target method's
+ * dex method index.
+ */
+ENTRY art_quick_imt_conflict_trampoline
+ lwu $a0, 0($sp) # load caller Method*
+ lwu $a0, MIRROR_ART_METHOD_DEX_CACHE_METHODS_OFFSET($a0) # load dex_cache_resolved_methods
+ dsll $t0, 2 # convert target method offset to bytes
+ daddu $a0, $t0 # get address of target method
+ dla $t9, art_quick_invoke_interface_trampoline
+ jalr $zero, $t9
+ lwu $a0, MIRROR_OBJECT_ARRAY_DATA_OFFSET($a0) # load the target method
+END art_quick_imt_conflict_trampoline
.extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
@@ -930,6 +1513,18 @@ art_quick_instrumentation_exit:
.cfi_adjust_cfa_offset -(16+FRAME_SIZE_REFS_ONLY_CALLEE_SAVE)
END art_quick_instrumentation_exit
-UNIMPLEMENTED art_quick_deoptimize
+ /*
+ * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
+ * will long jump to the upcall with a special exception of -1.
+ */
+ .extern artDeoptimize
+ .extern artEnterInterpreterFromDeoptimize
+ENTRY art_quick_deoptimize
+ SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
+ jal artDeoptimize # artDeoptimize(Thread*)
+ # Returns caller method's frame size.
+ move $a0, rSELF # pass Thread::Current
+END art_quick_deoptimize
+
UNIMPLEMENTED art_quick_indexof
UNIMPLEMENTED art_quick_string_compareto
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index 0d41a8fd29..0769687716 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -1171,7 +1171,7 @@ TEST_F(StubTest, AllocObjectArray) {
reinterpret_cast<size_t>(nullptr),
StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
self);
- EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
+ EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
EXPECT_TRUE(obj->IsArrayInstance());
@@ -2060,7 +2060,7 @@ TEST_F(StubTest, IMT) {
env->CallBooleanMethod(jarray_list, add_jmethod, jobj);
- ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
+ ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
// Contains.
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index 65c65e2b72..0f874a49e8 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -1551,7 +1551,9 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
CFI_ADJUST_CFA_OFFSET(-8)
POP rax // Restore integer result.
- addq LITERAL(FRAME_SIZE_REFS_ONLY_CALLEE_SAVE), %rsp // Drop save frame and fake return pc.
+ RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME
+
+ addq LITERAL(8), %rsp // Drop fake return pc.
jmp *%rdi // Return.
END_FUNCTION art_quick_instrumentation_exit
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index ee70fe7c81..92f4ebead0 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -39,7 +39,7 @@
// impacts where samples will occur. Reducing the count as much as possible improves profiler
// accuracy in tools like traceview.
// TODO: get a compiler that can do a proper job of loop optimization and remove this.
-#define SUSPEND_CHECK_INTERVAL 1000
+#define SUSPEND_CHECK_INTERVAL 96
#endif
#if defined(__cplusplus)
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index e6380bfe5b..e37aca1031 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -26,9 +26,6 @@
namespace art {
-// Memmap is a bit slower than malloc according to my measurements.
-static constexpr bool kUseMemMap = false;
-static constexpr bool kUseMemSet = true && kUseMemMap;
static constexpr size_t kValgrindRedZoneBytes = 8;
constexpr size_t Arena::kDefaultSize;
@@ -123,45 +120,47 @@ void ArenaAllocatorStatsImpl<kCount>::Dump(std::ostream& os, const Arena* first,
// Explicitly instantiate the used implementation.
template class ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations>;
-Arena::Arena(size_t size)
- : bytes_allocated_(0),
- map_(nullptr),
- next_(nullptr) {
- if (kUseMemMap) {
- std::string error_msg;
- map_ = MemMap::MapAnonymous("dalvik-arena", nullptr, size, PROT_READ | PROT_WRITE, false, false,
- &error_msg);
- CHECK(map_ != nullptr) << error_msg;
- memory_ = map_->Begin();
- size_ = map_->Size();
- } else {
- memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
- size_ = size;
- }
+Arena::Arena() : bytes_allocated_(0), next_(nullptr) {
+}
+
+MallocArena::MallocArena(size_t size) {
+ memory_ = reinterpret_cast<uint8_t*>(calloc(1, size));
+ size_ = size;
+}
+
+MallocArena::~MallocArena() {
+ free(reinterpret_cast<void*>(memory_));
+}
+
+MemMapArena::MemMapArena(size_t size) {
+ std::string error_msg;
+ map_.reset(
+ MemMap::MapAnonymous("dalvik-LinearAlloc", nullptr, size, PROT_READ | PROT_WRITE, false,
+ false, &error_msg));
+ CHECK(map_.get() != nullptr) << error_msg;
+ memory_ = map_->Begin();
+ size_ = map_->Size();
}
-Arena::~Arena() {
- if (kUseMemMap) {
- delete map_;
- } else {
- free(reinterpret_cast<void*>(memory_));
+void MemMapArena::Release() {
+ if (bytes_allocated_ > 0) {
+ map_->MadviseDontNeedAndZero();
+ bytes_allocated_ = 0;
}
}
void Arena::Reset() {
- if (bytes_allocated_) {
- if (kUseMemSet || !kUseMemMap) {
- memset(Begin(), 0, bytes_allocated_);
- } else {
- map_->MadviseDontNeedAndZero();
- }
+ if (bytes_allocated_ > 0) {
+ memset(Begin(), 0, bytes_allocated_);
bytes_allocated_ = 0;
}
}
-ArenaPool::ArenaPool()
- : lock_("Arena pool lock"),
- free_arenas_(nullptr) {
+ArenaPool::ArenaPool(bool use_malloc)
+ : use_malloc_(use_malloc), lock_("Arena pool lock"), free_arenas_(nullptr) {
+ if (!use_malloc) {
+ MemMap::Init();
+ }
}
ArenaPool::~ArenaPool() {
@@ -183,12 +182,22 @@ Arena* ArenaPool::AllocArena(size_t size) {
}
}
if (ret == nullptr) {
- ret = new Arena(size);
+ ret = use_malloc_ ? static_cast<Arena*>(new MallocArena(size)) : new MemMapArena(size);
}
ret->Reset();
return ret;
}
+void ArenaPool::TrimMaps() {
+ if (!use_malloc_) {
+ // Doesn't work for malloc.
+ MutexLock lock(Thread::Current(), lock_);
+ for (auto* arena = free_arenas_; arena != nullptr; arena = arena->next_) {
+ arena->Release();
+ }
+ }
+}
+
size_t ArenaPool::GetBytesAllocated() const {
size_t total = 0;
MutexLock lock(Thread::Current(), lock_);
diff --git a/runtime/base/arena_allocator.h b/runtime/base/arena_allocator.h
index 92373919d4..cc7b856e84 100644
--- a/runtime/base/arena_allocator.h
+++ b/runtime/base/arena_allocator.h
@@ -116,9 +116,12 @@ typedef ArenaAllocatorStatsImpl<kArenaAllocatorCountAllocations> ArenaAllocatorS
class Arena {
public:
static constexpr size_t kDefaultSize = 128 * KB;
- explicit Arena(size_t size = kDefaultSize);
- ~Arena();
+ Arena();
+ virtual ~Arena() { }
+ // Reset is for pre-use and uses memset for performance.
void Reset();
+ // Release is used in between uses and uses madvise for memory usage.
+ virtual void Release() { }
uint8_t* Begin() {
return memory_;
}
@@ -139,29 +142,50 @@ class Arena {
return bytes_allocated_;
}
- private:
+ protected:
size_t bytes_allocated_;
uint8_t* memory_;
size_t size_;
- MemMap* map_;
Arena* next_;
friend class ArenaPool;
friend class ArenaAllocator;
friend class ArenaStack;
friend class ScopedArenaAllocator;
template <bool kCount> friend class ArenaAllocatorStatsImpl;
+
+ private:
DISALLOW_COPY_AND_ASSIGN(Arena);
};
+class MallocArena FINAL : public Arena {
+ public:
+ explicit MallocArena(size_t size = Arena::kDefaultSize);
+ virtual ~MallocArena();
+};
+
+class MemMapArena FINAL : public Arena {
+ public:
+ explicit MemMapArena(size_t size = Arena::kDefaultSize);
+ virtual ~MemMapArena() { }
+ void Release() OVERRIDE;
+
+ private:
+ std::unique_ptr<MemMap> map_;
+};
+
class ArenaPool {
public:
- ArenaPool();
+ explicit ArenaPool(bool use_malloc = true);
~ArenaPool();
Arena* AllocArena(size_t size) LOCKS_EXCLUDED(lock_);
void FreeArenaChain(Arena* first) LOCKS_EXCLUDED(lock_);
size_t GetBytesAllocated() const LOCKS_EXCLUDED(lock_);
+ // Trim the maps in arenas by madvising, used by JIT to reduce memory usage. This only works
+ // when use_malloc is false.
+ void TrimMaps() LOCKS_EXCLUDED(lock_);
private:
+ const bool use_malloc_;
mutable Mutex lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Arena* free_arenas_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ArenaPool);
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 3d007ba1e5..014f4ab5bf 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -45,6 +45,7 @@ struct LogVerbosity {
bool jit;
bool jni;
bool monitor;
+ bool oat;
bool profiler;
bool signals;
bool startup;
diff --git a/runtime/check_jni.cc b/runtime/check_jni.cc
index 7db1d72370..2b0167d41e 100644
--- a/runtime/check_jni.cc
+++ b/runtime/check_jni.cc
@@ -1065,11 +1065,10 @@ class ScopedCheck {
// Verify that, if an exception has been raised, the native code doesn't
// make any JNI calls other than the Exception* methods.
if ((flags_ & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- std::string type(PrettyTypeOf(exception));
- AbortF("JNI %s called with pending exception '%s' thrown in %s",
- function_name_, type.c_str(), throw_location.Dump().c_str());
+ mirror::Throwable* exception = self->GetException();
+ AbortF("JNI %s called with pending exception %s",
+ function_name_,
+ exception->Dump().c_str());
return false;
}
return true;
diff --git a/runtime/check_reference_map_visitor.h b/runtime/check_reference_map_visitor.h
index 93062a7c4b..893ab11bad 100644
--- a/runtime/check_reference_map_visitor.h
+++ b/runtime/check_reference_map_visitor.h
@@ -66,31 +66,36 @@ class CheckReferenceMapVisitor : public StackVisitor {
mirror::ArtMethod* m = GetMethod();
CodeInfo code_info = m->GetOptimizedCodeInfo();
StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, m->GetCodeItem()->registers_size_);
MemoryRegion stack_mask = stack_map.GetStackMask();
uint32_t register_mask = stack_map.GetRegisterMask();
for (int i = 0; i < number_of_references; ++i) {
int reg = registers[i];
CHECK(reg < m->GetCodeItem()->registers_size_);
- DexRegisterMap::LocationKind location = dex_register_map.GetLocationKind(reg);
- switch (location) {
- case DexRegisterMap::kNone:
+ DexRegisterLocation location = dex_register_map.GetLocationKindAndValue(reg);
+ switch (location.GetKind()) {
+ case DexRegisterLocation::Kind::kNone:
// Not set, should not be a reference.
CHECK(false);
break;
- case DexRegisterMap::kInStack:
- CHECK(stack_mask.LoadBit(dex_register_map.GetValue(reg) >> 2));
+ case DexRegisterLocation::Kind::kInStack:
+ DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
+ CHECK(stack_mask.LoadBit(location.GetValue() / kFrameSlotSize));
break;
- case DexRegisterMap::kInRegister:
- CHECK_NE(register_mask & (1 << dex_register_map.GetValue(reg)), 0u);
+ case DexRegisterLocation::Kind::kInRegister:
+ CHECK_NE(register_mask & (1 << location.GetValue()), 0u);
break;
- case DexRegisterMap::kInFpuRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister:
// In Fpu register, should not be a reference.
CHECK(false);
break;
- case DexRegisterMap::kConstant:
- CHECK_EQ(dex_register_map.GetValue(reg), 0);
+ case DexRegisterLocation::Kind::kConstant:
+ CHECK_EQ(location.GetValue(), 0);
break;
+ default:
+ LOG(FATAL) << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
}
}
}
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 2989b8c6ce..700e1adf91 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -48,6 +48,7 @@
#include "leb128.h"
#include "oat.h"
#include "oat_file.h"
+#include "oat_file_assistant.h"
#include "object_lock.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
@@ -82,8 +83,7 @@ static void ThrowNoClassDefFoundError(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionV(throw_location, "Ljava/lang/NoClassDefFoundError;", fmt, args);
+ self->ThrowNewExceptionV("Ljava/lang/NoClassDefFoundError;", fmt, args);
va_end(args);
}
@@ -103,16 +103,15 @@ static void ThrowEarlierClassFailure(mirror::Class* c)
if (runtime->IsAotCompiler()) {
// At compile time, accurate errors and NCDFE are disabled to speed compilation.
mirror::Throwable* pre_allocated = runtime->GetPreAllocatedNoClassDefFoundError();
- self->SetException(ThrowLocation(), pre_allocated);
+ self->SetException(pre_allocated);
} else {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (c->GetVerifyErrorClass() != NULL) {
// TODO: change the verifier to store an _instance_, with a useful detail message?
std::string temp;
- self->ThrowNewException(throw_location, c->GetVerifyErrorClass()->GetDescriptor(&temp),
+ self->ThrowNewException(c->GetVerifyErrorClass()->GetDescriptor(&temp),
PrettyDescriptor(c).c_str());
} else {
- self->ThrowNewException(throw_location, "Ljava/lang/NoClassDefFoundError;",
+ self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
PrettyDescriptor(c).c_str());
}
}
@@ -123,7 +122,7 @@ static void VlogClassInitializationFailure(Handle<mirror::Class> klass)
if (VLOG_IS_ON(class_linker)) {
std::string temp;
LOG(INFO) << "Failed to initialize class " << klass->GetDescriptor(&temp) << " from "
- << klass->GetLocation() << "\n" << Thread::Current()->GetException(nullptr)->Dump();
+ << klass->GetLocation() << "\n" << Thread::Current()->GetException()->Dump();
}
}
@@ -141,9 +140,7 @@ static void WrapExceptionInInitializer(Handle<mirror::Class> klass)
// We only wrap non-Error exceptions; an Error can just be used as-is.
if (!is_error) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewWrappedException(throw_location, "Ljava/lang/ExceptionInInitializerError;",
- nullptr);
+ self->ThrowNewWrappedException("Ljava/lang/ExceptionInInitializerError;", nullptr);
}
VlogClassInitializationFailure(klass);
}
@@ -662,77 +659,6 @@ void ClassLinker::RunRootClinits() {
}
}
-bool ClassLinker::GenerateOatFile(const char* dex_filename,
- int oat_fd,
- const char* oat_cache_filename,
- std::string* error_msg) {
- Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
- std::string dex2oat(Runtime::Current()->GetCompilerExecutable());
-
- gc::Heap* heap = Runtime::Current()->GetHeap();
- std::string boot_image_option("--boot-image=");
- if (heap->GetImageSpace() == nullptr) {
- // TODO If we get a dex2dex compiler working we could maybe use that, OTOH since we are likely
- // out of space anyway it might not matter.
- *error_msg = StringPrintf("Cannot create oat file for '%s' because we are running "
- "without an image.", dex_filename);
- return false;
- }
- boot_image_option += heap->GetImageSpace()->GetImageLocation();
-
- std::string dex_file_option("--dex-file=");
- dex_file_option += dex_filename;
-
- std::string oat_fd_option("--oat-fd=");
- StringAppendF(&oat_fd_option, "%d", oat_fd);
-
- std::string oat_location_option("--oat-location=");
- oat_location_option += oat_cache_filename;
-
- std::vector<std::string> argv;
- argv.push_back(dex2oat);
- argv.push_back("--runtime-arg");
- argv.push_back("-classpath");
- argv.push_back("--runtime-arg");
- argv.push_back(Runtime::Current()->GetClassPathString());
-
- Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
-
- if (!Runtime::Current()->IsVerificationEnabled()) {
- argv.push_back("--compiler-filter=verify-none");
- }
-
- if (Runtime::Current()->MustRelocateIfPossible()) {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xrelocate");
- } else {
- argv.push_back("--runtime-arg");
- argv.push_back("-Xnorelocate");
- }
-
- if (!kIsTargetBuild) {
- argv.push_back("--host");
- }
-
- argv.push_back(boot_image_option);
- argv.push_back(dex_file_option);
- argv.push_back(oat_fd_option);
- argv.push_back(oat_location_option);
- const std::vector<std::string>& compiler_options = Runtime::Current()->GetCompilerOptions();
- for (size_t i = 0; i < compiler_options.size(); ++i) {
- argv.push_back(compiler_options[i].c_str());
- }
-
- if (!Exec(argv, error_msg)) {
- // Manually delete the file. Ensures there is no garbage left over if the process unexpectedly
- // died. Ignore unlink failure, propagate the original error.
- TEMP_FAILURE_RETRY(unlink(oat_cache_filename));
- return false;
- }
-
- return true;
-}
-
const OatFile* ClassLinker::RegisterOatFile(const OatFile* oat_file) {
WriterMutexLock mu(Thread::Current(), dex_lock_);
if (kIsDebugBuild) {
@@ -782,504 +708,81 @@ const OatFile::OatDexFile* ClassLinker::FindOpenedOatDexFile(const char* oat_loc
return nullptr;
}
+std::vector<std::unique_ptr<const DexFile>> ClassLinker::OpenDexFilesFromOat(
+ const char* dex_location, const char* oat_location,
+ std::vector<std::string>* error_msgs) {
+ CHECK(error_msgs != nullptr);
-// Loads all multi dex files from the given oat file returning true on success.
-//
-// Parameters:
-// oat_file - the oat file to load from
-// dex_location - the dex location used to generate the oat file
-// dex_location_checksum - the checksum of the dex_location (may be null for pre-opted files)
-// generated - whether or not the oat_file existed before or was just (re)generated
-// error_msgs - any error messages will be appended here
-// dex_files - the loaded dex_files will be appended here (only if the loading succeeds)
-static bool LoadMultiDexFilesFromOatFile(const OatFile* oat_file,
- const char* dex_location,
- const uint32_t* dex_location_checksum,
- bool generated,
- std::vector<std::string>* error_msgs,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- if (oat_file == nullptr) {
- return false;
- }
-
- size_t old_size = dex_files->size(); // To rollback on error.
-
- bool success = true;
- for (size_t i = 0; success; ++i) {
- std::string next_name_str = DexFile::GetMultiDexClassesDexName(i, dex_location);
- const char* next_name = next_name_str.c_str();
-
- uint32_t next_location_checksum;
- uint32_t* next_location_checksum_pointer = &next_location_checksum;
- std::string error_msg;
- if ((i == 0) && (strcmp(next_name, dex_location) == 0)) {
- // When i=0 the multidex name should be the same as the location name. We already have the
- // checksum it so we don't need to recompute it.
- if (dex_location_checksum == nullptr) {
- next_location_checksum_pointer = nullptr;
- } else {
- next_location_checksum = *dex_location_checksum;
- }
- } else if (!DexFile::GetChecksum(next_name, next_location_checksum_pointer, &error_msg)) {
- DCHECK_EQ(false, i == 0 && generated);
- next_location_checksum_pointer = nullptr;
- }
+ // Verify we aren't holding the mutator lock, which could starve GC if we
+ // have to generate or relocate an oat file.
+ Locks::mutator_lock_->AssertNotHeld(Thread::Current());
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(next_name, nullptr, false);
+ OatFileAssistant oat_file_assistant(dex_location, oat_location, kRuntimeISA,
+ !Runtime::Current()->IsAotCompiler());
- if (oat_dex_file == nullptr) {
- if (i == 0 && generated) {
- error_msg = StringPrintf("\nFailed to find dex file '%s' (checksum 0x%x) in generated out "
- " file'%s'", dex_location, next_location_checksum,
- oat_file->GetLocation().c_str());
- error_msgs->push_back(error_msg);
- }
- break; // Not found, done.
- }
-
- // Checksum test. Test must succeed when generated.
- success = !generated;
- if (next_location_checksum_pointer != nullptr) {
- success = next_location_checksum == oat_dex_file->GetDexFileLocationChecksum();
- }
-
- if (success) {
- std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
- if (dex_file.get() == nullptr) {
- success = false;
- error_msgs->push_back(error_msg);
- } else {
- dex_files->push_back(std::move(dex_file));
- }
- }
-
- // When we generated the file, we expect success, or something is terribly wrong.
- CHECK_EQ(false, generated && !success)
- << "dex_location=" << next_name << " oat_location=" << oat_file->GetLocation().c_str()
- << std::hex << " dex_location_checksum=" << next_location_checksum
- << " OatDexFile::GetLocationChecksum()=" << oat_dex_file->GetDexFileLocationChecksum();
- }
-
- if (dex_files->size() == old_size) {
- success = false; // We did not even find classes.dex
- }
-
- if (success) {
- return true;
- } else {
- dex_files->erase(dex_files->begin() + old_size, dex_files->end());
- return false;
+ // Lock the target oat location to avoid races generating and loading the
+ // oat file.
+ std::string error_msg;
+ if (!oat_file_assistant.Lock(&error_msg)) {
+ // Don't worry too much if this fails. If it does fail, it's unlikely we
+ // can generate an oat file anyway.
+ VLOG(class_linker) << "OatFileAssistant::Lock: " << error_msg;
}
-}
-
-// Multidex files make it possible that some, but not all, dex files can be broken/outdated. This
-// complicates the loading process, as we should not use an iterative loading process, because that
-// would register the oat file and dex files that come before the broken one. Instead, check all
-// multidex ahead of time.
-bool ClassLinker::OpenDexFilesFromOat(const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs,
- std::vector<std::unique_ptr<const DexFile>>* dex_files) {
- // 1) Check whether we have an open oat file.
- // This requires a dex checksum, use the "primary" one.
- uint32_t dex_location_checksum;
- uint32_t* dex_location_checksum_pointer = &dex_location_checksum;
- bool have_checksum = true;
- std::string checksum_error_msg;
- if (!DexFile::GetChecksum(dex_location, dex_location_checksum_pointer, &checksum_error_msg)) {
- // This happens for pre-opted files since the corresponding dex files are no longer on disk.
- dex_location_checksum_pointer = nullptr;
- have_checksum = false;
- }
-
- bool needs_registering = false;
-
- const OatFile::OatDexFile* oat_dex_file = FindOpenedOatDexFile(oat_location, dex_location,
- dex_location_checksum_pointer);
- std::unique_ptr<const OatFile> open_oat_file(
- oat_dex_file != nullptr ? oat_dex_file->GetOatFile() : nullptr);
-
- // 2) If we do not have an open one, maybe there's one on disk already.
-
- // In case the oat file is not open, we play a locking game here so
- // that if two different processes race to load and register or generate
- // (or worse, one tries to open a partial generated file) we will be okay.
- // This is actually common with apps that use DexClassLoader to work
- // around the dex method reference limit and that have a background
- // service running in a separate process.
- ScopedFlock scoped_flock;
-
- if (open_oat_file.get() == nullptr) {
- if (oat_location != nullptr) {
- // Can only do this if we have a checksum, else error.
- if (!have_checksum) {
- error_msgs->push_back(checksum_error_msg);
- return false;
- }
- std::string error_msg;
-
- // We are loading or creating one in the future. Time to set up the file lock.
- if (!scoped_flock.Init(oat_location, &error_msg)) {
- error_msgs->push_back(error_msg);
- return false;
- }
-
- // TODO Caller specifically asks for this oat_location. We should honor it. Probably?
- open_oat_file.reset(FindOatFileInOatLocationForDexFile(dex_location, dex_location_checksum,
- oat_location, &error_msg));
-
- if (open_oat_file.get() == nullptr) {
- std::string compound_msg = StringPrintf("Failed to find dex file '%s' in oat location '%s': %s",
- dex_location, oat_location, error_msg.c_str());
- VLOG(class_linker) << compound_msg;
- error_msgs->push_back(compound_msg);
- }
- } else {
- // TODO: What to lock here?
- bool obsolete_file_cleanup_failed;
- open_oat_file.reset(FindOatFileContainingDexFileFromDexLocation(dex_location,
- dex_location_checksum_pointer,
- kRuntimeISA, error_msgs,
- &obsolete_file_cleanup_failed));
- // There's no point in going forward and eventually try to regenerate the
- // file if we couldn't remove the obsolete one. Mostly likely we will fail
- // with the same error when trying to write the new file.
- // TODO: should we maybe do this only when we get permission issues? (i.e. EACCESS).
- if (obsolete_file_cleanup_failed) {
- return false;
+ // Check if we already have an up-to-date oat file open.
+ const OatFile* source_oat_file = nullptr;
+ {
+ ReaderMutexLock mu(Thread::Current(), dex_lock_);
+ for (const OatFile* oat_file : oat_files_) {
+ CHECK(oat_file != nullptr);
+ if (oat_file_assistant.GivenOatFileIsUpToDate(*oat_file)) {
+ source_oat_file = oat_file;
+ break;
}
}
- needs_registering = true;
}
- // 3) If we have an oat file, check all contained multidex files for our dex_location.
- // Note: LoadMultiDexFilesFromOatFile will check for nullptr in the first argument.
- bool success = LoadMultiDexFilesFromOatFile(open_oat_file.get(), dex_location,
- dex_location_checksum_pointer,
- false, error_msgs, dex_files);
- if (success) {
- const OatFile* oat_file = open_oat_file.release(); // Avoid deleting it.
- if (needs_registering) {
- // We opened the oat file, so we must register it.
- RegisterOatFile(oat_file);
+ // If we didn't have an up-to-date oat file open, try to load one from disk.
+ if (source_oat_file == nullptr) {
+ // Update the oat file on disk if we can. This may fail, but that's okay.
+ // Best effort is all that matters here.
+ if (!oat_file_assistant.MakeUpToDate(&error_msg)) {
+ LOG(WARNING) << error_msg;
}
- // If the file isn't executable we failed patchoat but did manage to get the dex files.
- return oat_file->IsExecutable();
- } else {
- if (needs_registering) {
- // We opened it, delete it.
- open_oat_file.reset();
- } else {
- open_oat_file.release(); // Do not delete open oat files.
- }
- }
-
- // 4) If it's not the case (either no oat file or mismatches), regenerate and load.
- // Need a checksum, fail else.
- if (!have_checksum) {
- error_msgs->push_back(checksum_error_msg);
- return false;
+ // Get the oat file on disk.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ if (oat_file.get() != nullptr) {
+ source_oat_file = oat_file.release();
+ RegisterOatFile(source_oat_file);
+ }
}
- // Look in cache location if no oat_location is given.
- std::string cache_location;
- if (oat_location == nullptr) {
- // Use the dalvik cache.
- const std::string dalvik_cache(GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA)));
- cache_location = GetDalvikCacheFilenameOrDie(dex_location, dalvik_cache.c_str());
- oat_location = cache_location.c_str();
- }
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
- bool has_flock = true;
- // Definitely need to lock now.
- if (!scoped_flock.HasFile()) {
- std::string error_msg;
- if (!scoped_flock.Init(oat_location, &error_msg)) {
- error_msgs->push_back(error_msg);
- has_flock = false;
+ // Load the dex files from the oat file.
+ if (source_oat_file != nullptr) {
+ dex_files = oat_file_assistant.LoadDexFiles(*source_oat_file, dex_location);
+ if (dex_files.empty()) {
+ error_msgs->push_back("Failed to open dex files from "
+ + source_oat_file->GetLocation());
}
}
- if (Runtime::Current()->IsDex2OatEnabled() && has_flock && scoped_flock.HasFile()) {
- // Create the oat file.
- open_oat_file.reset(CreateOatFileForDexLocation(dex_location, scoped_flock.GetFile()->Fd(),
- oat_location, error_msgs));
- }
-
- // Failed, bail.
- if (open_oat_file.get() == nullptr) {
- // dex2oat was disabled or crashed. Add the dex file in the list of dex_files to make progress.
+ // Fall back to running out of the original dex file if we couldn't load any
+ // dex_files from the oat file.
+ if (dex_files.empty()) {
if (Runtime::Current()->IsDexFileFallbackEnabled()) {
- std::string error_msg;
- if (!DexFile::Open(dex_location, dex_location, &error_msg, dex_files)) {
- error_msgs->push_back(error_msg);
+ if (!DexFile::Open(dex_location, dex_location, &error_msg, &dex_files)) {
+ LOG(WARNING) << error_msg;
+ error_msgs->push_back("Failed to open dex files from "
+ + std::string(dex_location));
}
} else {
error_msgs->push_back("Fallback mode disabled, skipping dex files.");
}
- return false;
- }
-
- // Try to load again, but stronger checks.
- success = LoadMultiDexFilesFromOatFile(open_oat_file.get(), dex_location,
- dex_location_checksum_pointer,
- true, error_msgs, dex_files);
- if (success) {
- RegisterOatFile(open_oat_file.release());
- return true;
- } else {
- return false;
- }
-}
-
-const OatFile* ClassLinker::FindOatFileInOatLocationForDexFile(const char* dex_location,
- uint32_t dex_location_checksum,
- const char* oat_location,
- std::string* error_msg) {
- std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(), error_msg));
- if (oat_file.get() == nullptr) {
- *error_msg = StringPrintf("Failed to find existing oat file at %s: %s", oat_location,
- error_msg->c_str());
- return nullptr;
- }
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space != nullptr) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- uint32_t expected_image_oat_checksum = image_header.GetOatChecksum();
- uint32_t actual_image_oat_checksum = oat_file->GetOatHeader().GetImageFileLocationOatChecksum();
- if (expected_image_oat_checksum != actual_image_oat_checksum) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected image oat checksum of "
- "0x%x, found 0x%x", oat_location, expected_image_oat_checksum,
- actual_image_oat_checksum);
- return nullptr;
- }
-
- uintptr_t expected_image_oat_offset = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
- uint32_t actual_image_oat_offset = oat_file->GetOatHeader().GetImageFileLocationOatDataBegin();
- if (expected_image_oat_offset != actual_image_oat_offset) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected image oat offset %"
- PRIuPTR ", found %ud", oat_location, expected_image_oat_offset,
- actual_image_oat_offset);
- return nullptr;
- }
- int32_t expected_patch_delta = image_header.GetPatchDelta();
- int32_t actual_patch_delta = oat_file->GetOatHeader().GetImagePatchDelta();
- if (expected_patch_delta != actual_patch_delta) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected patch delta %d, "
- " found %d", oat_location, expected_patch_delta, actual_patch_delta);
- return nullptr;
- }
- }
-
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
- &dex_location_checksum);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' containing '%s'", oat_location,
- dex_location);
- return nullptr;
- }
- uint32_t expected_dex_checksum = dex_location_checksum;
- uint32_t actual_dex_checksum = oat_dex_file->GetDexFileLocationChecksum();
- if (expected_dex_checksum != actual_dex_checksum) {
- *error_msg = StringPrintf("Failed to find oat file at '%s' with expected dex checksum of 0x%x, "
- "found 0x%x", oat_location, expected_dex_checksum,
- actual_dex_checksum);
- return nullptr;
- }
- std::unique_ptr<const DexFile> dex_file(oat_dex_file->OpenDexFile(error_msg));
- if (dex_file.get() != nullptr) {
- return oat_file.release();
- } else {
- return nullptr;
- }
-}
-
-const OatFile* ClassLinker::CreateOatFileForDexLocation(const char* dex_location,
- int fd, const char* oat_location,
- std::vector<std::string>* error_msgs) {
- // Generate the output oat file for the dex file
- VLOG(class_linker) << "Generating oat file " << oat_location << " for " << dex_location;
- std::string error_msg;
- if (!GenerateOatFile(dex_location, fd, oat_location, &error_msg)) {
- CHECK(!error_msg.empty());
- error_msgs->push_back(error_msg);
- return nullptr;
- }
- std::unique_ptr<OatFile> oat_file(OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(),
- &error_msg));
- if (oat_file.get() == nullptr) {
- std::string compound_msg = StringPrintf("\nFailed to open generated oat file '%s': %s",
- oat_location, error_msg.c_str());
- error_msgs->push_back(compound_msg);
- return nullptr;
- }
-
- return oat_file.release();
-}
-
-bool ClassLinker::VerifyOatImageChecksum(const OatFile* oat_file,
- const InstructionSet instruction_set) {
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- return false;
- }
- uint32_t image_oat_checksum = 0;
- if (instruction_set == kRuntimeISA) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- image_oat_checksum = image_header.GetOatChecksum();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), instruction_set));
- image_oat_checksum = image_header->GetOatChecksum();
- }
- return oat_file->GetOatHeader().GetImageFileLocationOatChecksum() == image_oat_checksum;
-}
-
-bool ClassLinker::VerifyOatChecksums(const OatFile* oat_file,
- const InstructionSet instruction_set,
- std::string* error_msg) {
- Runtime* runtime = Runtime::Current();
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- *error_msg = "No image space for verification against";
- return false;
- }
-
- // If the requested instruction set is the same as the current runtime,
- // we can use the checksums directly. If it isn't, we'll have to read the
- // image header from the image for the right instruction set.
- uint32_t image_oat_checksum = 0;
- uintptr_t image_oat_data_begin = 0;
- int32_t image_patch_delta = 0;
- if (instruction_set == runtime->GetInstructionSet()) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- image_oat_checksum = image_header.GetOatChecksum();
- image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
- image_patch_delta = image_header.GetPatchDelta();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), instruction_set));
- image_oat_checksum = image_header->GetOatChecksum();
- image_oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
- image_patch_delta = image_header->GetPatchDelta();
- }
- const OatHeader& oat_header = oat_file->GetOatHeader();
- bool ret = (oat_header.GetImageFileLocationOatChecksum() == image_oat_checksum);
-
- // If the oat file is PIC, it doesn't care if/how image was relocated. Ignore these checks.
- if (!oat_file->IsPic()) {
- ret = ret && (oat_header.GetImagePatchDelta() == image_patch_delta)
- && (oat_header.GetImageFileLocationOatDataBegin() == image_oat_data_begin);
- }
- if (!ret) {
- *error_msg = StringPrintf("oat file '%s' mismatch (0x%x, %d, %d) with (0x%x, %" PRIdPTR ", %d)",
- oat_file->GetLocation().c_str(),
- oat_file->GetOatHeader().GetImageFileLocationOatChecksum(),
- oat_file->GetOatHeader().GetImageFileLocationOatDataBegin(),
- oat_file->GetOatHeader().GetImagePatchDelta(),
- image_oat_checksum, image_oat_data_begin, image_patch_delta);
- }
- return ret;
-}
-
-bool ClassLinker::VerifyOatAndDexFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- const InstructionSet instruction_set,
- std::string* error_msg) {
- if (!VerifyOatChecksums(oat_file, instruction_set, error_msg)) {
- return false;
- }
-
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location,
- &dex_location_checksum);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("oat file '%s' does not contain contents for '%s' with checksum 0x%x",
- oat_file->GetLocation().c_str(), dex_location, dex_location_checksum);
- for (const OatFile::OatDexFile* oat_dex_file_in : oat_file->GetOatDexFiles()) {
- *error_msg += StringPrintf("\noat file '%s' contains contents for '%s' with checksum 0x%x",
- oat_file->GetLocation().c_str(),
- oat_dex_file_in->GetDexFileLocation().c_str(),
- oat_dex_file_in->GetDexFileLocationChecksum());
- }
- return false;
- }
-
- DCHECK_EQ(dex_location_checksum, oat_dex_file->GetDexFileLocationChecksum());
- return true;
-}
-
-bool ClassLinker::VerifyOatWithDexFile(const OatFile* oat_file,
- const char* dex_location,
- const uint32_t* dex_location_checksum,
- std::string* error_msg) {
- CHECK(oat_file != nullptr);
- CHECK(dex_location != nullptr);
- std::unique_ptr<const DexFile> dex_file;
- if (dex_location_checksum == nullptr) {
- // If no classes.dex found in dex_location, it has been stripped or is corrupt, assume oat is
- // up-to-date. This is the common case in user builds for jar's and apk's in the /system
- // directory.
- const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_location, nullptr);
- if (oat_dex_file == nullptr) {
- *error_msg = StringPrintf("Dex checksum mismatch for location '%s' and failed to find oat "
- "dex file '%s': %s", oat_file->GetLocation().c_str(), dex_location,
- error_msg->c_str());
- return false;
- }
- dex_file = oat_dex_file->OpenDexFile(error_msg);
- } else {
- bool verified = VerifyOatAndDexFileChecksums(oat_file, dex_location, *dex_location_checksum,
- kRuntimeISA, error_msg);
- if (!verified) {
- return false;
- }
- dex_file = oat_file->GetOatDexFile(dex_location,
- dex_location_checksum)->OpenDexFile(error_msg);
- }
- return dex_file.get() != nullptr;
-}
-
-const OatFile* ClassLinker::FindOatFileContainingDexFileFromDexLocation(
- const char* dex_location,
- const uint32_t* dex_location_checksum,
- InstructionSet isa,
- std::vector<std::string>* error_msgs,
- bool* obsolete_file_cleanup_failed) {
- *obsolete_file_cleanup_failed = false;
- bool already_opened = false;
- std::string dex_location_str(dex_location);
- std::unique_ptr<const OatFile> oat_file(OpenOatFileFromDexLocation(dex_location_str, isa,
- &already_opened,
- obsolete_file_cleanup_failed,
- error_msgs));
- std::string error_msg;
- if (oat_file.get() == nullptr) {
- error_msgs->push_back(StringPrintf("Failed to open oat file from dex location '%s'",
- dex_location));
- return nullptr;
- } else if (oat_file->IsExecutable() &&
- !VerifyOatWithDexFile(oat_file.get(), dex_location,
- dex_location_checksum, &error_msg)) {
- error_msgs->push_back(StringPrintf("Failed to verify oat file '%s' found for dex location "
- "'%s': %s", oat_file->GetLocation().c_str(), dex_location,
- error_msg.c_str()));
- return nullptr;
- } else if (!oat_file->IsExecutable() &&
- Runtime::Current()->GetHeap()->HasImageSpace() &&
- !VerifyOatImageChecksum(oat_file.get(), isa)) {
- error_msgs->push_back(StringPrintf("Failed to verify non-executable oat file '%s' found for "
- "dex location '%s'. Image checksum incorrect.",
- oat_file->GetLocation().c_str(), dex_location));
- return nullptr;
- } else {
- return oat_file.release();
}
+ return dex_files;
}
const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string& oat_location) {
@@ -1294,335 +797,6 @@ const OatFile* ClassLinker::FindOpenedOatFileFromOatLocation(const std::string&
return nullptr;
}
-const OatFile* ClassLinker::OpenOatFileFromDexLocation(const std::string& dex_location,
- InstructionSet isa,
- bool *already_opened,
- bool *obsolete_file_cleanup_failed,
- std::vector<std::string>* error_msgs) {
- // Find out if we've already opened the file
- const OatFile* ret = nullptr;
- std::string odex_filename(DexFilenameToOdexFilename(dex_location, isa));
- ret = FindOpenedOatFileFromOatLocation(odex_filename);
- if (ret != nullptr) {
- *already_opened = true;
- return ret;
- }
-
- std::string dalvik_cache;
- bool have_android_data = false;
- bool have_dalvik_cache = false;
- bool is_global_cache = false;
- GetDalvikCache(GetInstructionSetString(kRuntimeISA), false, &dalvik_cache,
- &have_android_data, &have_dalvik_cache, &is_global_cache);
- std::string cache_filename;
- if (have_dalvik_cache) {
- cache_filename = GetDalvikCacheFilenameOrDie(dex_location.c_str(), dalvik_cache.c_str());
- ret = FindOpenedOatFileFromOatLocation(cache_filename);
- if (ret != nullptr) {
- *already_opened = true;
- return ret;
- }
- } else {
- // If we need to relocate we should just place odex back where it started.
- cache_filename = odex_filename;
- }
-
- ret = nullptr;
-
- // We know that neither the odex nor the cache'd version is already in use, if it even exists.
- //
- // Now we do the following:
- // 1) Try and open the odex version
- // 2) If present, checksum-verified & relocated correctly return it
- // 3) Close the odex version to free up its address space.
- // 4) Try and open the cache version
- // 5) If present, checksum-verified & relocated correctly return it
- // 6) Close the cache version to free up its address space.
- // 7) If we should relocate:
- // a) If we have opened and checksum-verified the odex version relocate it to
- // 'cache_filename' and return it
- // b) If we have opened and checksum-verified the cache version relocate it in place and return
- // it. This should not happen often (I think only the run-test's will hit this case).
- // 8) If the cache-version was present we should delete it since it must be obsolete if we get to
- // this point.
- // 9) Return nullptr
-
- *already_opened = false;
- const Runtime* runtime = Runtime::Current();
- CHECK(runtime != nullptr);
- bool executable = !runtime->IsAotCompiler();
-
- std::string odex_error_msg;
- bool should_patch_system = false;
- bool odex_checksum_verified = false;
- bool have_system_odex = false;
- {
- // There is a high probability that both these oat files map similar/the same address
- // spaces so we must scope them like this so they each gets its turn.
- std::unique_ptr<OatFile> odex_oat_file(OatFile::Open(odex_filename, odex_filename, nullptr,
- nullptr,
- executable, &odex_error_msg));
- if (odex_oat_file.get() != nullptr && CheckOatFile(runtime, odex_oat_file.get(), isa,
- &odex_checksum_verified,
- &odex_error_msg)) {
- return odex_oat_file.release();
- } else {
- if (odex_checksum_verified) {
- // We can just relocate
- should_patch_system = true;
- odex_error_msg = "Image Patches are incorrect";
- }
- if (odex_oat_file.get() != nullptr) {
- have_system_odex = true;
- }
- }
- }
-
- std::string cache_error_msg;
- bool should_patch_cache = false;
- bool cache_checksum_verified = false;
- if (have_dalvik_cache) {
- std::unique_ptr<OatFile> cache_oat_file(OatFile::Open(cache_filename, cache_filename, nullptr,
- nullptr,
- executable, &cache_error_msg));
- if (cache_oat_file.get() != nullptr && CheckOatFile(runtime, cache_oat_file.get(), isa,
- &cache_checksum_verified,
- &cache_error_msg)) {
- return cache_oat_file.release();
- } else if (cache_checksum_verified) {
- // We can just relocate
- should_patch_cache = true;
- cache_error_msg = "Image Patches are incorrect";
- }
- } else if (have_android_data) {
- // dalvik_cache does not exist but android data does. This means we should be able to create
- // it, so we should try.
- GetDalvikCacheOrDie(GetInstructionSetString(kRuntimeISA), true);
- }
-
- ret = nullptr;
- std::string error_msg;
- if (runtime->CanRelocate()) {
- // Run relocation
- gc::space::ImageSpace* space = Runtime::Current()->GetHeap()->GetImageSpace();
- if (space != nullptr) {
- const std::string& image_location = space->GetImageLocation();
- if (odex_checksum_verified && should_patch_system) {
- ret = PatchAndRetrieveOat(odex_filename, cache_filename, image_location, isa, &error_msg);
- } else if (cache_checksum_verified && should_patch_cache) {
- CHECK(have_dalvik_cache);
- ret = PatchAndRetrieveOat(cache_filename, cache_filename, image_location, isa, &error_msg);
- }
- } else if (have_system_odex) {
- ret = GetInterpretedOnlyOat(odex_filename, isa, &error_msg);
- }
- }
- if (ret == nullptr && have_dalvik_cache && OS::FileExists(cache_filename.c_str())) {
- // implicitly: were able to fine where the cached version is but we were unable to use it,
- // either as a destination for relocation or to open a file. We should delete it if it is
- // there.
- if (TEMP_FAILURE_RETRY(unlink(cache_filename.c_str())) != 0) {
- std::string rm_error_msg = StringPrintf("Failed to remove obsolete file from %s when "
- "searching for dex file %s: %s",
- cache_filename.c_str(), dex_location.c_str(),
- strerror(errno));
- error_msgs->push_back(rm_error_msg);
- VLOG(class_linker) << rm_error_msg;
- // Let the caller know that we couldn't remove the obsolete file.
- // This is a good indication that further writes may fail as well.
- *obsolete_file_cleanup_failed = true;
- }
- }
- if (ret == nullptr) {
- VLOG(class_linker) << error_msg;
- error_msgs->push_back(error_msg);
- std::string relocation_msg;
- if (runtime->CanRelocate()) {
- relocation_msg = StringPrintf(" and relocation failed");
- }
- if (have_dalvik_cache && cache_checksum_verified) {
- error_msg = StringPrintf("Failed to open oat file from %s (error %s) or %s "
- "(error %s)%s.", odex_filename.c_str(), odex_error_msg.c_str(),
- cache_filename.c_str(), cache_error_msg.c_str(),
- relocation_msg.c_str());
- } else {
- error_msg = StringPrintf("Failed to open oat file from %s (error %s) (no "
- "dalvik_cache availible)%s.", odex_filename.c_str(),
- odex_error_msg.c_str(), relocation_msg.c_str());
- }
- VLOG(class_linker) << error_msg;
- error_msgs->push_back(error_msg);
- }
- return ret;
-}
-
-const OatFile* ClassLinker::GetInterpretedOnlyOat(const std::string& oat_path,
- InstructionSet isa,
- std::string* error_msg) {
- // We open it non-executable
- std::unique_ptr<OatFile> output(OatFile::Open(oat_path, oat_path, nullptr, nullptr, false, error_msg));
- if (output.get() == nullptr) {
- return nullptr;
- }
- if (!Runtime::Current()->GetHeap()->HasImageSpace() ||
- VerifyOatImageChecksum(output.get(), isa)) {
- return output.release();
- } else {
- *error_msg = StringPrintf("Could not use oat file '%s', image checksum failed to verify.",
- oat_path.c_str());
- return nullptr;
- }
-}
-
-const OatFile* ClassLinker::PatchAndRetrieveOat(const std::string& input_oat,
- const std::string& output_oat,
- const std::string& image_location,
- InstructionSet isa,
- std::string* error_msg) {
- Runtime* runtime = Runtime::Current();
- DCHECK(runtime != nullptr);
- if (!runtime->GetHeap()->HasImageSpace()) {
- // We don't have an image space so there is no point in trying to patchoat.
- LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted because we are "
- << "running without an image. Attempting to use oat file for interpretation.";
- return GetInterpretedOnlyOat(input_oat, isa, error_msg);
- }
- if (!runtime->IsDex2OatEnabled()) {
- // We don't have dex2oat so we can assume we don't have patchoat either. We should just use the
- // input_oat but make sure we only do interpretation on it's dex files.
- LOG(WARNING) << "Patching of oat file '" << input_oat << "' not attempted due to dex2oat being "
- << "disabled. Attempting to use oat file for interpretation";
- return GetInterpretedOnlyOat(input_oat, isa, error_msg);
- }
- Locks::mutator_lock_->AssertNotHeld(Thread::Current()); // Avoid starving GC.
- std::string patchoat(runtime->GetPatchoatExecutable());
-
- std::string isa_arg("--instruction-set=");
- isa_arg += GetInstructionSetString(isa);
- std::string input_oat_filename_arg("--input-oat-file=");
- input_oat_filename_arg += input_oat;
- std::string output_oat_filename_arg("--output-oat-file=");
- output_oat_filename_arg += output_oat;
- std::string patched_image_arg("--patched-image-location=");
- patched_image_arg += image_location;
-
- std::vector<std::string> argv;
- argv.push_back(patchoat);
- argv.push_back(isa_arg);
- argv.push_back(input_oat_filename_arg);
- argv.push_back(output_oat_filename_arg);
- argv.push_back(patched_image_arg);
-
- std::string command_line(Join(argv, ' '));
- LOG(INFO) << "Relocate Oat File: " << command_line;
- bool success = Exec(argv, error_msg);
- if (success) {
- std::unique_ptr<OatFile> output(OatFile::Open(output_oat, output_oat, nullptr, nullptr,
- !runtime->IsAotCompiler(), error_msg));
- bool checksum_verified = false;
- if (output.get() != nullptr && CheckOatFile(runtime, output.get(), isa, &checksum_verified,
- error_msg)) {
- return output.release();
- } else if (output.get() != nullptr) {
- *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
- "but output file '%s' failed verifcation: %s",
- input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
- } else {
- *error_msg = StringPrintf("Patching of oat file '%s' succeeded "
- "but was unable to open output file '%s': %s",
- input_oat.c_str(), output_oat.c_str(), error_msg->c_str());
- }
- } else if (!runtime->IsAotCompiler()) {
- // patchoat failed which means we probably don't have enough room to place the output oat file,
- // instead of failing we should just run the interpreter from the dex files in the input oat.
- LOG(WARNING) << "Patching of oat file '" << input_oat << "' failed. Attempting to use oat file "
- << "for interpretation. patchoat failure was: " << *error_msg;
- return GetInterpretedOnlyOat(input_oat, isa, error_msg);
- } else {
- *error_msg = StringPrintf("Patching of oat file '%s to '%s' "
- "failed: %s", input_oat.c_str(), output_oat.c_str(),
- error_msg->c_str());
- }
- return nullptr;
-}
-
-bool ClassLinker::CheckOatFile(const Runtime* runtime, const OatFile* oat_file, InstructionSet isa,
- bool* checksum_verified,
- std::string* error_msg) {
- const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
- if (image_space == nullptr) {
- *error_msg = "No image space present";
- return false;
- }
- uint32_t real_image_checksum;
- void* real_image_oat_offset;
- int32_t real_patch_delta;
- if (isa == runtime->GetInstructionSet()) {
- const ImageHeader& image_header = image_space->GetImageHeader();
- real_image_checksum = image_header.GetOatChecksum();
- real_image_oat_offset = image_header.GetOatDataBegin();
- real_patch_delta = image_header.GetPatchDelta();
- } else {
- std::unique_ptr<ImageHeader> image_header(gc::space::ImageSpace::ReadImageHeaderOrDie(
- image_space->GetImageLocation().c_str(), isa));
- real_image_checksum = image_header->GetOatChecksum();
- real_image_oat_offset = image_header->GetOatDataBegin();
- real_patch_delta = image_header->GetPatchDelta();
- }
-
- const OatHeader& oat_header = oat_file->GetOatHeader();
- std::string compound_msg;
-
- uint32_t oat_image_checksum = oat_header.GetImageFileLocationOatChecksum();
- *checksum_verified = oat_image_checksum == real_image_checksum;
- if (!*checksum_verified) {
- StringAppendF(&compound_msg, " Oat Image Checksum Incorrect (expected 0x%x, received 0x%x)",
- real_image_checksum, oat_image_checksum);
- }
-
- bool offset_verified;
- bool patch_delta_verified;
-
- if (!oat_file->IsPic()) {
- // If an oat file is not PIC, we need to check that the image is at the expected location and
- // patched in the same way.
- void* oat_image_oat_offset =
- reinterpret_cast<void*>(oat_header.GetImageFileLocationOatDataBegin());
- offset_verified = oat_image_oat_offset == real_image_oat_offset;
- if (!offset_verified) {
- StringAppendF(&compound_msg, " Oat Image oat offset incorrect (expected 0x%p, received 0x%p)",
- real_image_oat_offset, oat_image_oat_offset);
- }
-
- int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
- patch_delta_verified = oat_patch_delta == real_patch_delta;
- if (!patch_delta_verified) {
- StringAppendF(&compound_msg, " Oat image patch delta incorrect (expected 0x%x, "
- "received 0x%x)", real_patch_delta, oat_patch_delta);
- }
- } else {
- // If an oat file is PIC, we ignore offset and patching delta.
- offset_verified = true;
- patch_delta_verified = true;
- }
-
- bool ret = (*checksum_verified && offset_verified && patch_delta_verified);
- if (!ret) {
- *error_msg = "Oat file failed to verify:" + compound_msg;
- }
- return ret;
-}
-
-const OatFile* ClassLinker::FindOatFileFromOatLocation(const std::string& oat_location,
- std::string* error_msg) {
- const OatFile* oat_file = FindOpenedOatFileFromOatLocation(oat_location);
- if (oat_file != nullptr) {
- return oat_file;
- }
- return OatFile::Open(oat_location, oat_location, nullptr, nullptr,
- !Runtime::Current()->IsAotCompiler(), error_msg);
-}
-
void ClassLinker::InitFromImageInterpretOnlyCallback(mirror::Object* obj, void* arg) {
ClassLinker* class_linker = reinterpret_cast<ClassLinker*>(arg);
DCHECK(obj != nullptr);
@@ -2195,7 +1369,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
// expected and will be wrapped in a ClassNotFoundException. Use the pre-allocated error to
// trigger the chaining with a proper stack trace.
mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
- self->SetException(ThrowLocation(), pre_allocated);
+ self->SetException(pre_allocated);
return nullptr;
}
} else if (Runtime::Current()->UseCompileTimeClassPath()) {
@@ -2227,7 +1401,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
} else {
// Use the pre-allocated NCDFE at compile time to avoid wasting time constructing exceptions.
mirror::Throwable* pre_allocated = Runtime::Current()->GetPreAllocatedNoClassDefFoundError();
- self->SetException(ThrowLocation(), pre_allocated);
+ self->SetException(pre_allocated);
return nullptr;
}
} else {
@@ -2259,8 +1433,8 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
return nullptr;
} else if (result.get() == nullptr) {
// broken loader - throw NPE to be compatible with Dalvik
- ThrowNullPointerException(nullptr, StringPrintf("ClassLoader.loadClass returned null for %s",
- class_name_string.c_str()).c_str());
+ ThrowNullPointerException(StringPrintf("ClassLoader.loadClass returned null for %s",
+ class_name_string.c_str()).c_str());
return nullptr;
} else {
// success, return mirror::Class*
@@ -3529,13 +2703,13 @@ void ClassLinker::VerifyClass(Thread* self, Handle<mirror::Class> klass) {
PrettyDescriptor(klass.Get()).c_str(),
PrettyDescriptor(super.Get()).c_str()));
LOG(WARNING) << error_msg << " in " << klass->GetDexCache()->GetLocation()->ToModifiedUtf8();
- Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException()));
if (cause.Get() != nullptr) {
self->ClearException();
}
ThrowVerifyError(klass.Get(), "%s", error_msg.c_str());
if (cause.Get() != nullptr) {
- self->GetException(nullptr)->SetCause(cause.Get());
+ self->GetException()->SetCause(cause.Get());
}
ClassReference ref(klass->GetDexCache()->GetDexFile(), klass->GetDexClassDefIndex());
if (Runtime::Current()->IsAotCompiler()) {
@@ -4168,7 +3342,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass,
<< PrettyDescriptor(handle_scope_super.Get())
<< " that has unexpected status " << handle_scope_super->GetStatus()
<< "\nPending exception:\n"
- << (self->GetException(nullptr) != nullptr ? self->GetException(nullptr)->Dump() : "");
+ << (self->GetException() != nullptr ? self->GetException()->Dump() : "");
ObjectLock<mirror::Class> lock(self, klass);
// Initialization failed because the super-class is erroneous.
klass->SetStatus(mirror::Class::kStatusError, self);
@@ -5671,12 +4845,12 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
<< "Expected pending exception for failed resolution of: " << descriptor;
// Convert a ClassNotFoundException to a NoClassDefFoundError.
StackHandleScope<1> hs(self);
- Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException(nullptr)));
+ Handle<mirror::Throwable> cause(hs.NewHandle(self->GetException()));
if (cause->InstanceOf(GetClassRoot(kJavaLangClassNotFoundException))) {
DCHECK(resolved == nullptr); // No Handle needed to preserve resolved.
self->ClearException();
ThrowNoClassDefFoundError("Failed resolution of: %s", descriptor);
- self->GetException(nullptr)->SetCause(cause.Get());
+ self->GetException()->SetCause(cause.Get());
}
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 6570c5f02a..75fbdf3f59 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -313,33 +313,25 @@ class ClassLinker {
LOCKS_EXCLUDED(dex_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Generate an oat file from a dex file
- bool GenerateOatFile(const char* dex_filename,
- int oat_fd,
- const char* oat_cache_filename,
- std::string* error_msg)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
-
- // Find or create the oat file holding dex_location. Then load all corresponding dex files
- // (if multidex) into the given vector.
- bool OpenDexFilesFromOat(const char* dex_location, const char* oat_location,
- std::vector<std::string>* error_msgs,
- std::vector<std::unique_ptr<const DexFile>>* dex_files)
+ // Finds or creates the oat file holding dex_location. Then loads and returns
+ // all corresponding dex files (there may be more than one dex file loaded
+ // in the case of multidex).
+ // This may return the original, unquickened dex files if the oat file could
+ // not be generated.
+ //
+ // Returns an empty vector if the dex files could not be loaded. In this
+ // case, there will be at least one error message returned describing why no
+ // dex files could not be loaded. The 'error_msgs' argument must not be
+ // null, regardless of whether there is an error or not.
+ //
+ // This method should not be called with the mutator_lock_ held, because it
+ // could end up starving GC if we need to generate or relocate any oat
+ // files.
+ std::vector<std::unique_ptr<const DexFile>> OpenDexFilesFromOat(
+ const char* dex_location, const char* oat_location,
+ std::vector<std::string>* error_msgs)
LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
- // Returns true if the given oat file has the same image checksum as the image it is paired with.
- static bool VerifyOatImageChecksum(const OatFile* oat_file, const InstructionSet instruction_set);
- // Returns true if the oat file checksums match with the image and the offsets are such that it
- // could be loaded with it.
- static bool VerifyOatChecksums(const OatFile* oat_file, const InstructionSet instruction_set,
- std::string* error_msg);
- // Returns true if oat file contains the dex file with the given location and checksum.
- static bool VerifyOatAndDexFileChecksums(const OatFile* oat_file,
- const char* dex_location,
- uint32_t dex_location_checksum,
- InstructionSet instruction_set,
- std::string* error_msg);
-
// Allocate an instance of a java.lang.Object.
mirror::Object* AllocObject(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -612,73 +604,9 @@ class ClassLinker {
const uint32_t* dex_location_checksum)
LOCKS_EXCLUDED(dex_lock_);
- // Will open the oat file directly without relocating, even if we could/should do relocation.
- const OatFile* FindOatFileFromOatLocation(const std::string& oat_location,
- std::string* error_msg)
- LOCKS_EXCLUDED(dex_lock_);
-
const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location)
LOCKS_EXCLUDED(dex_lock_);
- const OatFile* OpenOatFileFromDexLocation(const std::string& dex_location,
- InstructionSet isa,
- bool* already_opened,
- bool* obsolete_file_cleanup_failed,
- std::vector<std::string>* error_msg)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
-
- const OatFile* GetInterpretedOnlyOat(const std::string& oat_path,
- InstructionSet isa,
- std::string* error_msg);
-
- const OatFile* PatchAndRetrieveOat(const std::string& input, const std::string& output,
- const std::string& image_location, InstructionSet isa,
- std::string* error_msg)
- LOCKS_EXCLUDED(Locks::mutator_lock_);
-
- bool CheckOatFile(const Runtime* runtime, const OatFile* oat_file, InstructionSet isa,
- bool* checksum_verified, std::string* error_msg);
-
- // Note: will not register the oat file.
- const OatFile* FindOatFileInOatLocationForDexFile(const char* dex_location,
- uint32_t dex_location_checksum,
- const char* oat_location,
- std::string* error_msg)
- LOCKS_EXCLUDED(dex_lock_);
-
- // Creates the oat file from the dex_location to the oat_location. Needs a file descriptor for
- // the file to be written, which is assumed to be under a lock.
- const OatFile* CreateOatFileForDexLocation(const char* dex_location,
- int fd, const char* oat_location,
- std::vector<std::string>* error_msgs)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
-
- // Finds an OatFile that contains a DexFile for the given a DexFile location.
- //
- // Note 1: this will not check open oat files, which are assumed to be stale when this is run.
- // Note 2: Does not register the oat file. It is the caller's job to register if the file is to
- // be kept.
- const OatFile* FindOatFileContainingDexFileFromDexLocation(const char* dex_location,
- const uint32_t* dex_location_checksum,
- InstructionSet isa,
- std::vector<std::string>* error_msgs,
- bool* obsolete_file_cleanup_failed)
- LOCKS_EXCLUDED(dex_lock_, Locks::mutator_lock_);
-
- // Verifies:
- // - that the oat file contains the dex file (with a matching checksum, which may be null if the
- // file was pre-opted)
- // - the checksums of the oat file (against the image space)
- // - the checksum of the dex file against dex_location_checksum
- // - that the dex file can be opened
- // Returns true iff all verification succeed.
- //
- // The dex_location is the dex location as stored in the oat file header.
- // (see DexFile::GetDexCanonicalLocation for a description of location conventions)
- bool VerifyOatWithDexFile(const OatFile* oat_file, const char* dex_location,
- const uint32_t* dex_location_checksum,
- std::string* error_msg);
-
mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
mirror::Class* proxy_class)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -803,7 +731,6 @@ class ClassLinker {
friend class ImageWriter; // for GetClassRoots
friend class ImageDumper; // for FindOpenedOatFileFromOatLocation
- friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation
friend class JniCompilerTest; // for GetRuntimeQuickGenericJniStub
friend class NoDex2OatTest; // for FindOpenedOatFileForDexFile
friend class NoPatchoatTest; // for FindOpenedOatFileForDexFile
diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc
index 64e129c384..1789ab12d2 100644
--- a/runtime/class_linker_test.cc
+++ b/runtime/class_linker_test.cc
@@ -48,7 +48,7 @@ class ClassLinkerTest : public CommonRuntimeTest {
Thread* self = Thread::Current();
EXPECT_TRUE(class_linker_->FindSystemClass(self, descriptor.c_str()) == nullptr);
EXPECT_TRUE(self->IsExceptionPending());
- mirror::Object* exception = self->GetException(nullptr);
+ mirror::Object* exception = self->GetException();
self->ClearException();
mirror::Class* exception_class =
class_linker_->FindSystemClass(self, "Ljava/lang/NoClassDefFoundError;");
diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc
index b7ffd609b7..e0d62d7012 100644
--- a/runtime/common_runtime_test.cc
+++ b/runtime/common_runtime_test.cc
@@ -34,8 +34,10 @@
#include "gc_root-inl.h"
#include "gc/heap.h"
#include "gtest/gtest.h"
+#include "interpreter/unstarted_runtime.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
+#include "mem_map.h"
#include "noop_compiler_callbacks.h"
#include "os.h"
#include "runtime-inl.h"
@@ -107,6 +109,8 @@ void ScratchFile::Unlink() {
CHECK_EQ(0, unlink_result);
}
+static bool unstarted_initialized_ = false;
+
CommonRuntimeTest::CommonRuntimeTest() {}
CommonRuntimeTest::~CommonRuntimeTest() {
// Ensure the dex files are cleaned up before the runtime.
@@ -194,6 +198,7 @@ std::string CommonRuntimeTest::GetCoreOatLocation() {
std::unique_ptr<const DexFile> CommonRuntimeTest::LoadExpectSingleDexFile(const char* location) {
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::string error_msg;
+ MemMap::Init();
if (!DexFile::Open(location, location, &error_msg, &dex_files)) {
LOG(FATAL) << "Could not open .dex file '" << location << "': " << error_msg << "\n";
UNREACHABLE();
@@ -225,13 +230,23 @@ void CommonRuntimeTest::SetUp() {
options.push_back(std::make_pair("compilercallbacks", callbacks_.get()));
SetUpRuntimeOptions(&options);
+ PreRuntimeCreate();
if (!Runtime::Create(options, false)) {
LOG(FATAL) << "Failed to create runtime";
return;
}
+ PostRuntimeCreate();
runtime_.reset(Runtime::Current());
class_linker_ = runtime_->GetClassLinker();
class_linker_->FixupDexCaches(runtime_->GetResolutionMethod());
+
+ // Initialize maps for unstarted runtime. This needs to be here, as running clinits needs this
+ // set up.
+ if (!unstarted_initialized_) {
+ interpreter::UnstartedRuntimeInitialize();
+ unstarted_initialized_ = true;
+ }
+
class_linker_->RunRootClinits();
boot_class_path_ = class_linker_->GetBootClassPath();
java_lang_dex_file_ = boot_class_path_[0];
@@ -248,6 +263,8 @@ void CommonRuntimeTest::SetUp() {
// pool is created by the runtime.
runtime_->GetHeap()->CreateThreadPool();
runtime_->GetHeap()->VerifyHeap(); // Check for heap corruption before the test
+ // Reduce timinig-dependent flakiness in OOME behavior (eg StubTest.AllocObject).
+ runtime_->GetHeap()->SetMinIntervalHomogeneousSpaceCompactionByOom(0U);
// Get the boot class path from the runtime so it can be used in tests.
boot_class_path_ = class_linker_->GetBootClassPath();
@@ -335,7 +352,7 @@ std::string CommonRuntimeTest::GetTestAndroidRoot() {
#define ART_TARGET_NATIVETEST_DIR_STRING ""
#endif
-std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTest::OpenTestDexFiles(const char* name) {
+std::string CommonRuntimeTest::GetTestDexFileName(const char* name) {
CHECK(name != nullptr);
std::string filename;
if (IsHost()) {
@@ -347,6 +364,11 @@ std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTest::OpenTestDexFiles(
filename += "art-gtest-";
filename += name;
filename += ".jar";
+ return filename;
+}
+
+std::vector<std::unique_ptr<const DexFile>> CommonRuntimeTest::OpenTestDexFiles(const char* name) {
+ std::string filename = GetTestDexFileName(name);
std::string error_msg;
std::vector<std::unique_ptr<const DexFile>> dex_files;
bool success = DexFile::Open(filename.c_str(), filename.c_str(), &error_msg, &dex_files);
diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h
index 9efea84743..cce8485a42 100644
--- a/runtime/common_runtime_test.h
+++ b/runtime/common_runtime_test.h
@@ -101,11 +101,19 @@ class CommonRuntimeTest : public testing::Test {
virtual void TearDown();
+ // Called before the runtime is created.
+ virtual void PreRuntimeCreate() {}
+
+ // Called after the runtime is created.
+ virtual void PostRuntimeCreate() {}
+
// Gets the path of the specified dex file for host or target.
static std::string GetDexFileName(const std::string& jar_prefix);
std::string GetTestAndroidRoot();
+ std::string GetTestDexFileName(const char* name);
+
std::vector<std::unique_ptr<const DexFile>> OpenTestDexFiles(const char* name)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc
index f5b435400f..36de221549 100644
--- a/runtime/common_throws.cc
+++ b/runtime/common_throws.cc
@@ -43,7 +43,7 @@ static void AddReferrerLocation(std::ostream& os, mirror::Class* referrer)
}
}
-static void ThrowException(const ThrowLocation* throw_location, const char* exception_descriptor,
+static void ThrowException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = NULL)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
@@ -56,16 +56,10 @@ static void ThrowException(const ThrowLocation* throw_location, const char* exce
}
AddReferrerLocation(msg, referrer);
Thread* self = Thread::Current();
- if (throw_location == NULL) {
- ThrowLocation computed_throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(computed_throw_location, exception_descriptor, msg.str().c_str());
- } else {
- self->ThrowNewException(*throw_location, exception_descriptor, msg.str().c_str());
- }
+ self->ThrowNewException(exception_descriptor, msg.str().c_str());
}
-static void ThrowWrappedException(const ThrowLocation* throw_location,
- const char* exception_descriptor,
+static void ThrowWrappedException(const char* exception_descriptor,
mirror::Class* referrer, const char* fmt, va_list* args = NULL)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
@@ -78,18 +72,13 @@ static void ThrowWrappedException(const ThrowLocation* throw_location,
}
AddReferrerLocation(msg, referrer);
Thread* self = Thread::Current();
- if (throw_location == NULL) {
- ThrowLocation computed_throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewWrappedException(computed_throw_location, exception_descriptor, msg.str().c_str());
- } else {
- self->ThrowNewWrappedException(*throw_location, exception_descriptor, msg.str().c_str());
- }
+ self->ThrowNewWrappedException(exception_descriptor, msg.str().c_str());
}
// AbstractMethodError
void ThrowAbstractMethodError(mirror::ArtMethod* method) {
- ThrowException(NULL, "Ljava/lang/AbstractMethodError;", NULL,
+ ThrowException("Ljava/lang/AbstractMethodError;", NULL,
StringPrintf("abstract method \"%s\"",
PrettyMethod(method).c_str()).c_str());
}
@@ -97,20 +86,20 @@ void ThrowAbstractMethodError(mirror::ArtMethod* method) {
// ArithmeticException
void ThrowArithmeticExceptionDivideByZero() {
- ThrowException(NULL, "Ljava/lang/ArithmeticException;", NULL, "divide by zero");
+ ThrowException("Ljava/lang/ArithmeticException;", NULL, "divide by zero");
}
// ArrayIndexOutOfBoundsException
void ThrowArrayIndexOutOfBoundsException(int index, int length) {
- ThrowException(NULL, "Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
+ ThrowException("Ljava/lang/ArrayIndexOutOfBoundsException;", NULL,
StringPrintf("length=%d; index=%d", length, index).c_str());
}
// ArrayStoreException
void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array_class) {
- ThrowException(NULL, "Ljava/lang/ArrayStoreException;", NULL,
+ ThrowException("Ljava/lang/ArrayStoreException;", NULL,
StringPrintf("%s cannot be stored in an array of type %s",
PrettyDescriptor(element_class).c_str(),
PrettyDescriptor(array_class).c_str()).c_str());
@@ -119,14 +108,14 @@ void ThrowArrayStoreException(mirror::Class* element_class, mirror::Class* array
// ClassCastException
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type) {
- ThrowException(NULL, "Ljava/lang/ClassCastException;", NULL,
+ ThrowException("Ljava/lang/ClassCastException;", NULL,
StringPrintf("%s cannot be cast to %s",
PrettyDescriptor(src_type).c_str(),
PrettyDescriptor(dest_type).c_str()).c_str());
}
-void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/ClassCastException;", NULL, msg);
+void ThrowClassCastException(const char* msg) {
+ ThrowException("Ljava/lang/ClassCastException;", NULL, msg);
}
// ClassCircularityError
@@ -134,7 +123,7 @@ void ThrowClassCastException(const ThrowLocation* throw_location, const char* ms
void ThrowClassCircularityError(mirror::Class* c) {
std::ostringstream msg;
msg << PrettyDescriptor(c);
- ThrowException(NULL, "Ljava/lang/ClassCircularityError;", c, msg.str().c_str());
+ ThrowException("Ljava/lang/ClassCircularityError;", c, msg.str().c_str());
}
// ClassFormatError
@@ -142,7 +131,7 @@ void ThrowClassCircularityError(mirror::Class* c) {
void ThrowClassFormatError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/ClassFormatError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/ClassFormatError;", referrer, fmt, &args);
va_end(args);}
// IllegalAccessError
@@ -151,7 +140,7 @@ void ThrowIllegalAccessErrorClass(mirror::Class* referrer, mirror::Class* access
std::ostringstream msg;
msg << "Illegal class access: '" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirror::Class* accessed,
@@ -161,21 +150,21 @@ void ThrowIllegalAccessErrorClassForMethodDispatch(mirror::Class* referrer, mirr
msg << "Illegal class access ('" << PrettyDescriptor(referrer) << "' attempting to access '"
<< PrettyDescriptor(accessed) << "') in attempt to invoke " << type
<< " method " << PrettyMethod(called).c_str();
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorMethod(mirror::Class* referrer, mirror::ArtMethod* accessed) {
std::ostringstream msg;
msg << "Method '" << PrettyMethod(accessed) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorField(mirror::Class* referrer, mirror::ArtField* accessed) {
std::ostringstream msg;
msg << "Field '" << PrettyField(accessed, false) << "' is inaccessible to class '"
<< PrettyDescriptor(referrer) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, msg.str().c_str());
}
void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
@@ -183,7 +172,7 @@ void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
std::ostringstream msg;
msg << "Final field '" << PrettyField(accessed, false) << "' cannot be written to by method '"
<< PrettyMethod(referrer) << "'";
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;",
+ ThrowException("Ljava/lang/IllegalAccessError;",
referrer != NULL ? referrer->GetClass() : NULL,
msg.str().c_str());
}
@@ -191,20 +180,20 @@ void ThrowIllegalAccessErrorFinalField(mirror::ArtMethod* referrer,
void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/IllegalAccessError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/IllegalAccessError;", referrer, fmt, &args);
va_end(args);
}
// IllegalAccessException
-void ThrowIllegalAccessException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/IllegalAccessException;", NULL, msg);
+void ThrowIllegalAccessException(const char* msg) {
+ ThrowException("Ljava/lang/IllegalAccessException;", NULL, msg);
}
// IllegalArgumentException
-void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/IllegalArgumentException;", NULL, msg);
+void ThrowIllegalArgumentException(const char* msg) {
+ ThrowException("Ljava/lang/IllegalArgumentException;", NULL, msg);
}
@@ -216,7 +205,7 @@ void ThrowIncompatibleClassChangeError(InvokeType expected_type, InvokeType foun
std::ostringstream msg;
msg << "The method '" << PrettyMethod(method) << "' was expected to be of type "
<< expected_type << " but instead was found to be of type " << found_type;
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;",
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;",
referrer != NULL ? referrer->GetClass() : NULL,
msg.str().c_str());
}
@@ -232,7 +221,7 @@ void ThrowIncompatibleClassChangeErrorClassForInterfaceDispatch(mirror::ArtMetho
<< "' does not implement interface '"
<< PrettyDescriptor(interface_method->GetDeclaringClass())
<< "' in call to '" << PrettyMethod(interface_method) << "'";
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;",
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;",
referrer != NULL ? referrer->GetClass() : NULL,
msg.str().c_str());
}
@@ -243,14 +232,14 @@ void ThrowIncompatibleClassChangeErrorField(mirror::ArtField* resolved_field, bo
msg << "Expected '" << PrettyField(resolved_field) << "' to be a "
<< (is_static ? "static" : "instance") << " field" << " rather than a "
<< (is_static ? "instance" : "static") << " field";
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(),
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer->GetClass(),
msg.str().c_str());
}
void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/IncompatibleClassChangeError;", referrer, fmt, &args);
va_end(args);
}
@@ -259,14 +248,14 @@ void ThrowIncompatibleClassChangeError(mirror::Class* referrer, const char* fmt,
void ThrowIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowException("Ljava/io/IOException;", NULL, fmt, &args);
va_end(args);
}
void ThrowWrappedIOException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowWrappedException(NULL, "Ljava/io/IOException;", NULL, fmt, &args);
+ ThrowWrappedException("Ljava/io/IOException;", NULL, fmt, &args);
va_end(args);
}
@@ -275,19 +264,19 @@ void ThrowWrappedIOException(const char* fmt, ...) {
void ThrowLinkageError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/LinkageError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/LinkageError;", referrer, fmt, &args);
va_end(args);
}
// NegativeArraySizeException
void ThrowNegativeArraySizeException(int size) {
- ThrowException(NULL, "Ljava/lang/NegativeArraySizeException;", NULL,
+ ThrowException("Ljava/lang/NegativeArraySizeException;", NULL,
StringPrintf("%d", size).c_str());
}
void ThrowNegativeArraySizeException(const char* msg) {
- ThrowException(NULL, "Ljava/lang/NegativeArraySizeException;", NULL, msg);
+ ThrowException("Ljava/lang/NegativeArraySizeException;", NULL, msg);
}
// NoSuchFieldError
@@ -299,7 +288,7 @@ void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c,
std::string temp;
msg << "No " << scope << "field " << name << " of type " << type
<< " in class " << c->GetDescriptor(&temp) << " or its superclasses";
- ThrowException(NULL, "Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
+ ThrowException("Ljava/lang/NoSuchFieldError;", c, msg.str().c_str());
}
// NoSuchMethodError
@@ -310,97 +299,91 @@ void ThrowNoSuchMethodError(InvokeType type, mirror::Class* c, const StringPiece
std::string temp;
msg << "No " << type << " method " << name << signature
<< " in class " << c->GetDescriptor(&temp) << " or its super classes";
- ThrowException(NULL, "Ljava/lang/NoSuchMethodError;", c, msg.str().c_str());
+ ThrowException("Ljava/lang/NoSuchMethodError;", c, msg.str().c_str());
}
void ThrowNoSuchMethodError(uint32_t method_idx) {
- Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- mirror::DexCache* dex_cache = throw_location.GetMethod()->GetDeclaringClass()->GetDexCache();
+ mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(nullptr);
+ mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
std::ostringstream msg;
msg << "No method '" << PrettyMethod(method_idx, dex_file, true) << "'";
- ThrowException(&throw_location, "Ljava/lang/NoSuchMethodError;",
- throw_location.GetMethod()->GetDeclaringClass(), msg.str().c_str());
+ ThrowException("Ljava/lang/NoSuchMethodError;",
+ method->GetDeclaringClass(), msg.str().c_str());
}
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location,
- mirror::ArtField* field, bool is_read) {
+void ThrowNullPointerExceptionForFieldAccess(mirror::ArtField* field, bool is_read) {
std::ostringstream msg;
msg << "Attempt to " << (is_read ? "read from" : "write to")
<< " field '" << PrettyField(field, true) << "' on a null object reference";
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
}
-static void ThrowNullPointerExceptionForMethodAccessImpl(const ThrowLocation& throw_location,
- uint32_t method_idx,
+static void ThrowNullPointerExceptionForMethodAccessImpl(uint32_t method_idx,
const DexFile& dex_file,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::ostringstream msg;
msg << "Attempt to invoke " << type << " method '"
<< PrettyMethod(method_idx, dex_file, true) << "' on a null object reference";
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
+ ThrowException("Ljava/lang/NullPointerException;", NULL, msg.str().c_str());
}
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- uint32_t method_idx,
+void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type) {
- mirror::DexCache* dex_cache = throw_location.GetMethod()->GetDeclaringClass()->GetDexCache();
+ mirror::DexCache* dex_cache =
+ Thread::Current()->GetCurrentMethod(nullptr)->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
- ThrowNullPointerExceptionForMethodAccessImpl(throw_location, method_idx,
- dex_file, type);
+ ThrowNullPointerExceptionForMethodAccessImpl(method_idx, dex_file, type);
}
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- mirror::ArtMethod* method,
+void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
InvokeType type) {
mirror::DexCache* dex_cache = method->GetDeclaringClass()->GetDexCache();
const DexFile& dex_file = *dex_cache->GetDexFile();
- ThrowNullPointerExceptionForMethodAccessImpl(throw_location, method->GetDexMethodIndex(),
+ ThrowNullPointerExceptionForMethodAccessImpl(method->GetDexMethodIndex(),
dex_file, type);
}
-void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
- const DexFile::CodeItem* code = throw_location.GetMethod()->GetCodeItem();
- uint32_t throw_dex_pc = throw_location.GetDexPc();
+void ThrowNullPointerExceptionFromDexPC() {
+ uint32_t throw_dex_pc;
+ mirror::ArtMethod* method = Thread::Current()->GetCurrentMethod(&throw_dex_pc);
+ const DexFile::CodeItem* code = method->GetCodeItem();
CHECK_LT(throw_dex_pc, code->insns_size_in_code_units_);
const Instruction* instr = Instruction::At(&code->insns_[throw_dex_pc]);
switch (instr->Opcode()) {
case Instruction::INVOKE_DIRECT:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_35c(), kDirect);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kDirect);
break;
case Instruction::INVOKE_DIRECT_RANGE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_3rc(), kDirect);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kDirect);
break;
case Instruction::INVOKE_VIRTUAL:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_35c(), kVirtual);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kVirtual);
break;
case Instruction::INVOKE_VIRTUAL_RANGE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_3rc(), kVirtual);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kVirtual);
break;
case Instruction::INVOKE_INTERFACE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_35c(), kInterface);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_35c(), kInterface);
break;
case Instruction::INVOKE_INTERFACE_RANGE:
- ThrowNullPointerExceptionForMethodAccess(throw_location, instr->VRegB_3rc(), kInterface);
+ ThrowNullPointerExceptionForMethodAccess(instr->VRegB_3rc(), kInterface);
break;
case Instruction::INVOKE_VIRTUAL_QUICK:
case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: {
// Since we replaced the method index, we ask the verifier to tell us which
// method is invoked at this location.
- mirror::ArtMethod* method =
- verifier::MethodVerifier::FindInvokedMethodAtDexPc(throw_location.GetMethod(),
- throw_location.GetDexPc());
- if (method != NULL) {
+ mirror::ArtMethod* invoked_method =
+ verifier::MethodVerifier::FindInvokedMethodAtDexPc(method, throw_dex_pc);
+ if (invoked_method != NULL) {
// NPE with precise message.
- ThrowNullPointerExceptionForMethodAccess(throw_location, method, kVirtual);
+ ThrowNullPointerExceptionForMethodAccess(invoked_method, kVirtual);
} else {
// NPE with imprecise message.
- ThrowNullPointerException(&throw_location,
- "Attempt to invoke a virtual method on a null object reference");
+ ThrowNullPointerException("Attempt to invoke a virtual method on a null object reference");
}
break;
}
@@ -412,9 +395,8 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
case Instruction::IGET_CHAR:
case Instruction::IGET_SHORT: {
mirror::ArtField* field =
- Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(),
- throw_location.GetMethod(), false);
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true /* read */);
+ Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(), method, false);
+ ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
break;
}
case Instruction::IGET_QUICK:
@@ -427,15 +409,13 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
// Since we replaced the field index, we ask the verifier to tell us which
// field is accessed at this location.
mirror::ArtField* field =
- verifier::MethodVerifier::FindAccessedFieldAtDexPc(throw_location.GetMethod(),
- throw_location.GetDexPc());
+ verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
if (field != NULL) {
// NPE with precise message.
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true /* read */);
+ ThrowNullPointerExceptionForFieldAccess(field, true /* read */);
} else {
// NPE with imprecise message.
- ThrowNullPointerException(&throw_location,
- "Attempt to read from a field on a null object reference");
+ ThrowNullPointerException("Attempt to read from a field on a null object reference");
}
break;
}
@@ -447,9 +427,8 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
case Instruction::IPUT_CHAR:
case Instruction::IPUT_SHORT: {
mirror::ArtField* field =
- Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(),
- throw_location.GetMethod(), false);
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false /* write */);
+ Runtime::Current()->GetClassLinker()->ResolveField(instr->VRegC_22c(), method, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
break;
}
case Instruction::IPUT_QUICK:
@@ -462,15 +441,13 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
// Since we replaced the field index, we ask the verifier to tell us which
// field is accessed at this location.
mirror::ArtField* field =
- verifier::MethodVerifier::FindAccessedFieldAtDexPc(throw_location.GetMethod(),
- throw_location.GetDexPc());
+ verifier::MethodVerifier::FindAccessedFieldAtDexPc(method, throw_dex_pc);
if (field != NULL) {
// NPE with precise message.
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false /* write */);
+ ThrowNullPointerExceptionForFieldAccess(field, false /* write */);
} else {
// NPE with imprecise message.
- ThrowNullPointerException(&throw_location,
- "Attempt to write to a field on a null object reference");
+ ThrowNullPointerException("Attempt to write to a field on a null object reference");
}
break;
}
@@ -481,7 +458,7 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
case Instruction::AGET_BYTE:
case Instruction::AGET_CHAR:
case Instruction::AGET_SHORT:
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
"Attempt to read from null array");
break;
case Instruction::APUT:
@@ -491,28 +468,28 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
case Instruction::APUT_BYTE:
case Instruction::APUT_CHAR:
case Instruction::APUT_SHORT:
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
"Attempt to write to null array");
break;
case Instruction::ARRAY_LENGTH:
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
"Attempt to get length of null array");
break;
default: {
// TODO: We should have covered all the cases where we expect a NPE above, this
// message/logging is so we can improve any cases we've missed in the future.
- const DexFile& dex_file =
- *throw_location.GetMethod()->GetDeclaringClass()->GetDexCache()->GetDexFile();
- ThrowException(&throw_location, "Ljava/lang/NullPointerException;", NULL,
+ const DexFile* dex_file =
+ method->GetDeclaringClass()->GetDexCache()->GetDexFile();
+ ThrowException("Ljava/lang/NullPointerException;", NULL,
StringPrintf("Null pointer exception during instruction '%s'",
- instr->DumpString(&dex_file).c_str()).c_str());
+ instr->DumpString(dex_file).c_str()).c_str());
break;
}
}
}
-void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg) {
- ThrowException(throw_location, "Ljava/lang/NullPointerException;", NULL, msg);
+void ThrowNullPointerException(const char* msg) {
+ ThrowException("Ljava/lang/NullPointerException;", NULL, msg);
}
// RuntimeException
@@ -520,7 +497,7 @@ void ThrowNullPointerException(const ThrowLocation* throw_location, const char*
void ThrowRuntimeException(const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/RuntimeException;", NULL, fmt, &args);
+ ThrowException("Ljava/lang/RuntimeException;", NULL, fmt, &args);
va_end(args);
}
@@ -529,7 +506,7 @@ void ThrowRuntimeException(const char* fmt, ...) {
void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowException(NULL, "Ljava/lang/VerifyError;", referrer, fmt, &args);
+ ThrowException("Ljava/lang/VerifyError;", referrer, fmt, &args);
va_end(args);
}
diff --git a/runtime/common_throws.h b/runtime/common_throws.h
index ebedae00ed..9e749e3df6 100644
--- a/runtime/common_throws.h
+++ b/runtime/common_throws.h
@@ -29,7 +29,6 @@ namespace mirror {
} // namespace mirror
class Signature;
class StringPiece;
-class ThrowLocation;
// AbstractMethodError
@@ -60,7 +59,7 @@ void ThrowClassCircularityError(mirror::Class* c)
void ThrowClassCastException(mirror::Class* dest_type, mirror::Class* src_type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowClassCastException(const ThrowLocation* throw_location, const char* msg)
+void ThrowClassCastException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// ClassFormatError
@@ -94,12 +93,12 @@ void ThrowIllegalAccessError(mirror::Class* referrer, const char* fmt, ...)
// IllegalAccessException
-void ThrowIllegalAccessException(const ThrowLocation* throw_location, const char* msg)
+void ThrowIllegalAccessException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IllegalArgumentException
-void ThrowIllegalArgumentException(const ThrowLocation* throw_location, const char* msg)
+void ThrowIllegalArgumentException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// IncompatibleClassChangeError
@@ -161,25 +160,22 @@ void ThrowNoSuchMethodError(uint32_t method_idx)
// NullPointerException
-void ThrowNullPointerExceptionForFieldAccess(const ThrowLocation& throw_location,
- mirror::ArtField* field,
+void ThrowNullPointerExceptionForFieldAccess(mirror::ArtField* field,
bool is_read)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- uint32_t method_idx,
+void ThrowNullPointerExceptionForMethodAccess(uint32_t method_idx,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionForMethodAccess(const ThrowLocation& throw_location,
- mirror::ArtMethod* method,
+void ThrowNullPointerExceptionForMethodAccess(mirror::ArtMethod* method,
InvokeType type)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location)
+void ThrowNullPointerExceptionFromDexPC()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
-void ThrowNullPointerException(const ThrowLocation* throw_location, const char* msg)
+void ThrowNullPointerException(const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) COLD_ATTR;
// RuntimeException
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 246125bd58..9f2a09bf22 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -47,7 +47,6 @@
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
-#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"
@@ -280,11 +279,9 @@ class DebugInstrumentationListener FINAL : public instrumentation::Instrumentati
Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
}
- void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
+ Dbg::PostException(exception_object);
}
// We only care about how many backward branches were executed in the Jit.
@@ -349,26 +346,9 @@ uint32_t Dbg::instrumentation_events_ = 0;
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
- if (receiver != nullptr) {
- callback(&receiver, arg, root_info);
- }
- if (thread != nullptr) {
- callback(&thread, arg, root_info);
- }
- if (klass != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&klass), arg, root_info);
- }
- if (method != nullptr) {
- callback(reinterpret_cast<mirror::Object**>(&method), arg, root_info);
- }
-}
-
-void DebugInvokeReq::Clear() {
- invoke_needed = false;
- receiver = nullptr;
- thread = nullptr;
- klass = nullptr;
- method = nullptr;
+ receiver.VisitRootIfNonNull(callback, arg, root_info); // null for static method call.
+ klass.VisitRoot(callback, arg, root_info);
+ method.VisitRoot(callback, arg, root_info);
}
void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
@@ -2785,19 +2765,110 @@ void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
}
-void Dbg::PostException(const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method,
- uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
+/**
+ * Finds the location where this exception will be caught. We walk the stack until we find
+ * a catch handler; if we reach the top frame without finding one, the exception is
+ * considered uncaught.
+ */
+class CatchLocationFinder : public StackVisitor {
+ public:
+ CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ : StackVisitor(self, context),
+ self_(self),
+ exception_(exception),
+ handle_scope_(self),
+ this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
+ catch_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
+ throw_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
+ catch_dex_pc_(DexFile::kDexNoIndex),
+ throw_dex_pc_(DexFile::kDexNoIndex) {
+ }
+
+ bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = GetMethod();
+ DCHECK(method != nullptr);
+ if (method->IsRuntimeMethod()) {
+ // Ignore callee save method.
+ DCHECK(method->IsCalleeSaveMethod());
+ return true;
+ }
+
+ uint32_t dex_pc = GetDexPc();
+ if (throw_method_.Get() == nullptr) {
+ // First Java method found. It is either the method that threw the exception,
+ // or the Java native method that is reporting an exception thrown by
+ // native code.
+ this_at_throw_.Assign(GetThisObject());
+ throw_method_.Assign(method);
+ throw_dex_pc_ = dex_pc;
+ }
+
+ if (dex_pc != DexFile::kDexNoIndex) {
+ StackHandleScope<2> hs(self_);
+ uint32_t found_dex_pc;
+ Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
+ Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
+ bool unused_clear_exception;
+ found_dex_pc = mirror::ArtMethod::FindCatchBlock(
+ h_method, exception_class, dex_pc, &unused_clear_exception);
+ if (found_dex_pc != DexFile::kDexNoIndex) {
+ catch_method_.Assign(method);
+ catch_dex_pc_ = found_dex_pc;
+ return false; // End stack walk.
+ }
+ }
+ return true; // Continue stack walk.
+ }
+
+ mirror::ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return catch_method_.Get();
+ }
+
+ mirror::ArtMethod* GetThrowMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return throw_method_.Get();
+ }
+
+ mirror::Object* GetThisAtThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return this_at_throw_.Get();
+ }
+
+ uint32_t GetCatchDexPc() const {
+ return catch_dex_pc_;
+ }
+
+ uint32_t GetThrowDexPc() const {
+ return throw_dex_pc_;
+ }
+
+ private:
+ Thread* const self_;
+ const Handle<mirror::Throwable>& exception_;
+ StackHandleScope<3> handle_scope_;
+ MutableHandle<mirror::Object> this_at_throw_;
+ MutableHandle<mirror::ArtMethod> catch_method_;
+ MutableHandle<mirror::ArtMethod> throw_method_;
+ uint32_t catch_dex_pc_;
+ uint32_t throw_dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
+};
+
+void Dbg::PostException(mirror::Throwable* exception_object) {
if (!IsDebuggerActive()) {
return;
}
+ StackHandleScope<1> handle_scope(Thread::Current());
+ Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
+ std::unique_ptr<Context> context(Context::Create());
+ CatchLocationFinder clf(Thread::Current(), h_exception, context.get());
+ clf.WalkStack(/* include_transitions */ false);
JDWP::EventLocation exception_throw_location;
- SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
+ SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
JDWP::EventLocation exception_catch_location;
- SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
+ SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
- gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
- throw_location.GetThis());
+ gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
+ clf.GetThisAtThrow());
}
void Dbg::PostClassPrepare(mirror::Class* c) {
@@ -3428,10 +3499,14 @@ JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize
};
// Allocate single step.
- SingleStepControl* single_step_control = new SingleStepControl(step_size, step_depth,
- visitor.stack_depth,
- visitor.method);
- CHECK(single_step_control != nullptr) << "Failed to allocate SingleStepControl";
+ SingleStepControl* single_step_control =
+ new (std::nothrow) SingleStepControl(step_size, step_depth,
+ visitor.stack_depth, visitor.method);
+ if (single_step_control == nullptr) {
+ LOG(ERROR) << "Failed to allocate SingleStepControl";
+ return JDWP::ERR_OUT_OF_MEMORY;
+ }
+
mirror::ArtMethod* m = single_step_control->GetMethod();
const int32_t line_number = visitor.line_number;
if (!m->IsNative()) {
@@ -3508,7 +3583,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
ThreadList* thread_list = Runtime::Current()->GetThreadList();
Thread* targetThread = nullptr;
- DebugInvokeReq* req = nullptr;
+ std::unique_ptr<DebugInvokeReq> req;
Thread* self = Thread::Current();
{
ScopedObjectAccessUnchecked soa(self);
@@ -3519,8 +3594,13 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
return error;
}
- req = targetThread->GetInvokeReq();
- if (!req->ready) {
+ if (targetThread->GetInvokeReq() != nullptr) {
+ // Thread is already invoking a method on behalf of the debugger.
+ LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
+ return JDWP::ERR_ALREADY_INVOKING;
+ }
+ if (!targetThread->IsReadyForDebugInvoke()) {
+ // Thread is not suspended by an event so it cannot invoke a method.
LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
return JDWP::ERR_INVALID_THREAD;
}
@@ -3554,11 +3634,10 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
return JDWP::ERR_INVALID_OBJECT;
}
- mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id, &error);
+ gRegistry->Get<mirror::Object*>(thread_id, &error);
if (error != JDWP::ERR_NONE) {
return JDWP::ERR_INVALID_OBJECT;
}
- // TODO: check that 'thread' is actually a java.lang.Thread!
mirror::Class* c = DecodeClass(class_id, &error);
if (c == nullptr) {
@@ -3616,14 +3695,17 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
}
}
- req->receiver = receiver;
- req->thread = thread;
- req->klass = c;
- req->method = m;
- req->arg_count = arg_count;
- req->arg_values = arg_values;
- req->options = options;
- req->invoke_needed = true;
+ // Allocates a DebugInvokeReq.
+ req.reset(new (std::nothrow) DebugInvokeReq(receiver, c, m, options, arg_values, arg_count));
+ if (req.get() == nullptr) {
+ LOG(ERROR) << "Failed to allocate DebugInvokeReq";
+ return JDWP::ERR_OUT_OF_MEMORY;
+ }
+
+ // Attach the DebugInvokeReq to the target thread so it executes the method when
+ // it is resumed. Once the invocation completes, the thread will detach the request and
+ // before suspending itself.
+ targetThread->SetDebugInvokeReq(req.get());
}
// The fact that we've released the thread list lock is a bit risky --- if the thread goes
@@ -3657,7 +3739,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
gJdwpState->ReleaseJdwpTokenForCommand();
// Wait for the request to finish executing.
- while (req->invoke_needed) {
+ while (targetThread->GetInvokeReq() != nullptr) {
req->cond.Wait(self);
}
}
@@ -3690,11 +3772,7 @@ JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId objec
// Copy the result.
*pResultTag = req->result_tag;
- if (IsPrimitiveTag(req->result_tag)) {
- *pResultValue = req->result_value.GetJ();
- } else {
- *pResultValue = gRegistry->Add(req->result_value.GetL());
- }
+ *pResultValue = req->result_value;
*pExceptionId = req->exception;
return req->error;
}
@@ -3705,71 +3783,58 @@ void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
// We can be called while an exception is pending. We need
// to preserve that across the method invocation.
StackHandleScope<4> hs(soa.Self());
- auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
- auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
- auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
- uint32_t old_throw_dex_pc;
- {
- ThrowLocation old_throw_location;
- mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.Assign(old_throw_location.GetThis());
- old_throw_method.Assign(old_throw_location.GetMethod());
- old_exception.Assign(old_exception_obj);
- old_throw_dex_pc = old_throw_location.GetDexPc();
- soa.Self()->ClearException();
- }
+ auto old_exception = hs.NewHandle<mirror::Throwable>(soa.Self()->GetException());
+ soa.Self()->ClearException();
// Translate the method through the vtable, unless the debugger wants to suppress it.
- MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
- if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != nullptr) {
- mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
+ MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method.Read()));
+ if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
+ mirror::ArtMethod* actual_method = pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m.Get());
if (actual_method != m.Get()) {
- VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
+ VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get())
+ << " to " << PrettyMethod(actual_method);
m.Assign(actual_method);
}
}
VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
- << " receiver=" << pReq->receiver
+ << " receiver=" << pReq->receiver.Read()
<< " arg_count=" << pReq->arg_count;
CHECK(m.Get() != nullptr);
CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
- pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
- reinterpret_cast<jvalue*>(pReq->arg_values));
+ JValue result = InvokeWithJValues(soa, pReq->receiver.Read(), soa.EncodeMethod(m.Get()),
+ reinterpret_cast<jvalue*>(pReq->arg_values));
- mirror::Throwable* exception = soa.Self()->GetException(nullptr);
- soa.Self()->ClearException();
- pReq->exception = gRegistry->Add(exception);
pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
+ const bool is_object_result = (pReq->result_tag == JDWP::JT_OBJECT);
+ Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
+ Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
+ soa.Self()->ClearException();
+ pReq->exception = gRegistry->Add(exception.Get());
if (pReq->exception != 0) {
- VLOG(jdwp) << " JDWP invocation returning with exception=" << exception
- << " " << exception->Dump();
- pReq->result_value.SetJ(0);
- } else if (pReq->result_tag == JDWP::JT_OBJECT) {
+ VLOG(jdwp) << " JDWP invocation returning with exception=" << exception.Get()
+ << " " << exception->Dump();
+ pReq->result_value = 0;
+ } else if (is_object_result) {
/* if no exception thrown, examine object result more closely */
- JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
+ JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
if (new_tag != pReq->result_tag) {
VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
pReq->result_tag = new_tag;
}
- /*
- * Register the object. We don't actually need an ObjectId yet,
- * but we do need to be sure that the GC won't move or discard the
- * object when we switch out of RUNNING. The ObjectId conversion
- * will add the object to the "do not touch" list.
- *
- * We can't use the "tracked allocation" mechanism here because
- * the object is going to be handed off to a different thread.
- */
- gRegistry->Add(pReq->result_value.GetL());
+ // Register the object in the registry and reference its ObjectId. This ensures
+ // GC safety and prevents accessing a stale reference if the object is moved.
+ pReq->result_value = gRegistry->Add(object_result.Get());
+ } else {
+ // Primitive result.
+ DCHECK(IsPrimitiveTag(pReq->result_tag));
+ pReq->result_value = result.GetJ();
}
if (old_exception.Get() != nullptr) {
- ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
- old_throw_dex_pc);
- soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
+ soa.Self()->SetException(old_exception.Get());
}
}
diff --git a/runtime/debugger.h b/runtime/debugger.h
index 0c22148c99..01c9d5dbab 100644
--- a/runtime/debugger.h
+++ b/runtime/debugger.h
@@ -48,40 +48,33 @@ class ObjectRegistry;
class ScopedObjectAccessUnchecked;
class StackVisitor;
class Thread;
-class ThrowLocation;
/*
* Invoke-during-breakpoint support.
*/
struct DebugInvokeReq {
- DebugInvokeReq()
- : ready(false), invoke_needed(false),
- receiver(NULL), thread(NULL), klass(NULL), method(NULL),
- arg_count(0), arg_values(NULL), options(0), error(JDWP::ERR_NONE),
- result_tag(JDWP::JT_VOID), exception(0),
+ DebugInvokeReq(mirror::Object* invoke_receiver, mirror::Class* invoke_class,
+ mirror::ArtMethod* invoke_method, uint32_t invoke_options,
+ uint64_t* args, uint32_t args_count)
+ : receiver(invoke_receiver), klass(invoke_class), method(invoke_method),
+ arg_count(args_count), arg_values(args), options(invoke_options),
+ error(JDWP::ERR_NONE), result_tag(JDWP::JT_VOID), result_value(0), exception(0),
lock("a DebugInvokeReq lock", kBreakpointInvokeLock),
cond("a DebugInvokeReq condition variable", lock) {
}
- /* boolean; only set when we're in the tail end of an event handler */
- bool ready;
-
- /* boolean; set if the JDWP thread wants this thread to do work */
- bool invoke_needed;
-
/* request */
- mirror::Object* receiver; /* not used for ClassType.InvokeMethod */
- mirror::Object* thread;
- mirror::Class* klass;
- mirror::ArtMethod* method;
- uint32_t arg_count;
- uint64_t* arg_values; /* will be NULL if arg_count_ == 0 */
- uint32_t options;
+ GcRoot<mirror::Object> receiver; // not used for ClassType.InvokeMethod
+ GcRoot<mirror::Class> klass;
+ GcRoot<mirror::ArtMethod> method;
+ const uint32_t arg_count;
+ uint64_t* const arg_values; // will be NULL if arg_count_ == 0
+ const uint32_t options;
/* result */
JDWP::JdwpError error;
JDWP::JdwpTag result_tag;
- JValue result_value;
+ uint64_t result_value; // either a primitive value or an ObjectId
JDWP::ObjectId exception;
/* condition variable to wait on while the method executes */
@@ -91,8 +84,6 @@ struct DebugInvokeReq {
void VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void Clear();
-
private:
DISALLOW_COPY_AND_ASSIGN(DebugInvokeReq);
};
@@ -529,8 +520,7 @@ class Dbg {
mirror::Object* this_object, mirror::ArtField* f,
const JValue* field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void PostException(const ThrowLocation& throw_location, mirror::ArtMethod* catch_method,
- uint32_t catch_dex_pc, mirror::Throwable* exception)
+ static void PostException(mirror::Throwable* exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static void PostThreadStart(Thread* t)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -582,6 +572,8 @@ class Dbg {
LOCKS_EXCLUDED(Locks::thread_list_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ // Invoke support for commands ClassType.InvokeMethod, ClassType.NewInstance and
+ // ObjectReference.InvokeMethod.
static JDWP::JdwpError InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
JDWP::RefTypeId class_id, JDWP::MethodId method_id,
uint32_t arg_count, uint64_t* arg_values,
diff --git a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h
index 9d84e4ac48..8a13d3498b 100644
--- a/runtime/entrypoints/entrypoint_utils-inl.h
+++ b/runtime/entrypoints/entrypoint_utils-inl.h
@@ -53,9 +53,7 @@ inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
}
if (kAccessCheck) {
if (UNLIKELY(!klass->IsInstantiable())) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/InstantiationError;",
- PrettyDescriptor(klass).c_str());
+ self->ThrowNewException("Ljava/lang/InstantiationError;", PrettyDescriptor(klass).c_str());
*slow_path = true;
return nullptr; // Failure
}
@@ -294,9 +292,7 @@ inline mirror::ArtField* FindFieldFromCode(uint32_t field_idx, mirror::ArtMethod
} else {
if (UNLIKELY(resolved_field->IsPrimitiveType() != is_primitive ||
resolved_field->FieldSize() != expected_size)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- DCHECK(throw_location.GetMethod() == referrer);
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+ self->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"Attempted read of %zd-bit %s on field '%s'",
expected_size * (32 / sizeof(int32_t)),
is_primitive ? "primitive" : "non-primitive",
@@ -367,9 +363,7 @@ inline mirror::ArtMethod* FindMethodFromCode(uint32_t method_idx,
} else if (UNLIKELY(*this_object == nullptr && type != kStatic)) {
// Maintain interpreter-like semantics where NullPointerException is thrown
// after potential NoSuchMethodError from class linker.
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- DCHECK_EQ(*referrer, throw_location.GetMethod());
- ThrowNullPointerExceptionForMethodAccess(throw_location, method_idx, type);
+ ThrowNullPointerExceptionForMethodAccess(method_idx, type);
return nullptr; // Failure.
} else if (access_check) {
// Incompatible class change should have been handled in resolve method.
@@ -613,9 +607,8 @@ inline mirror::String* ResolveStringFromCode(mirror::ArtMethod* referrer,
inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
// Save any pending exception over monitor exit call.
mirror::Throwable* saved_exception = NULL;
- ThrowLocation saved_throw_location;
if (UNLIKELY(self->IsExceptionPending())) {
- saved_exception = self->GetException(&saved_throw_location);
+ saved_exception = self->GetException();
self->ClearException();
}
// Decode locked object and unlock, before popping local references.
@@ -624,11 +617,11 @@ inline void UnlockJniSynchronizedMethod(jobject locked, Thread* self) {
LOG(FATAL) << "Synchronized JNI code returning with an exception:\n"
<< saved_exception->Dump()
<< "\nEncountered second exception during implicit MonitorExit:\n"
- << self->GetException(NULL)->Dump();
+ << self->GetException()->Dump();
}
// Restore pending exception.
if (saved_exception != NULL) {
- self->SetException(saved_throw_location, saved_exception);
+ self->SetException(saved_exception);
}
}
diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc
index 5ea9f708a2..70e2851acd 100644
--- a/runtime/entrypoints/entrypoint_utils.cc
+++ b/runtime/entrypoints/entrypoint_utils.cc
@@ -55,10 +55,8 @@ static inline mirror::Class* CheckFilledNewArrayAlloc(uint32_t type_idx,
ThrowRuntimeException("Bad filled array request for type %s",
PrettyDescriptor(klass).c_str());
} else {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- DCHECK(throw_location.GetMethod() == referrer);
self->ThrowNewExceptionF(
- throw_location, "Ljava/lang/InternalError;",
+ "Ljava/lang/InternalError;",
"Found type %s; filled-new-array not implemented for anything but 'int'",
PrettyDescriptor(klass).c_str());
}
@@ -187,8 +185,7 @@ void ThrowStackOverflowError(Thread* self) {
error_msg = "Could not create stack trace.";
}
// Throw the exception.
- self->SetException(self->GetCurrentLocationForThrow(),
- reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
+ self->SetException(reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get())));
} else {
// Could not allocate a string object.
error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed.";
@@ -282,18 +279,8 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
// This can cause thread suspension.
mirror::Class* result_type = h_interface_method->GetReturnType();
mirror::Object* result_ref = soa.Decode<mirror::Object*>(result);
- mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
- mirror::ArtMethod* proxy_method;
- if (h_interface_method->GetDeclaringClass()->IsInterface()) {
- proxy_method = rcvr->GetClass()->FindVirtualMethodForInterface(h_interface_method.Get());
- } else {
- // Proxy dispatch to a method defined in Object.
- DCHECK(h_interface_method->GetDeclaringClass()->IsObjectClass());
- proxy_method = h_interface_method.Get();
- }
- ThrowLocation throw_location(rcvr, proxy_method, -1);
JValue result_unboxed;
- if (!UnboxPrimitiveForResult(throw_location, result_ref, result_type, &result_unboxed)) {
+ if (!UnboxPrimitiveForResult(result_ref, result_type, &result_unboxed)) {
DCHECK(soa.Self()->IsExceptionPending());
return zero;
}
@@ -302,7 +289,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
} else {
// In the case of checked exceptions that aren't declared, the exception must be wrapped by
// a UndeclaredThrowableException.
- mirror::Throwable* exception = soa.Self()->GetException(NULL);
+ mirror::Throwable* exception = soa.Self()->GetException();
if (exception->IsCheckedException()) {
mirror::Object* rcvr = soa.Decode<mirror::Object*>(rcvr_jobj);
mirror::Class* proxy_class = rcvr->GetClass();
@@ -328,9 +315,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
declares_exception = declared_exception->IsAssignableFrom(exception_class);
}
if (!declares_exception) {
- ThrowLocation throw_location(rcvr, proxy_method, -1);
- soa.Self()->ThrowNewWrappedException(throw_location,
- "Ljava/lang/reflect/UndeclaredThrowableException;",
+ soa.Self()->ThrowNewWrappedException("Ljava/lang/reflect/UndeclaredThrowableException;",
NULL);
}
}
@@ -341,16 +326,14 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons
bool FillArrayData(mirror::Object* obj, const Instruction::ArrayDataPayload* payload) {
DCHECK_EQ(payload->ident, static_cast<uint16_t>(Instruction::kArrayDataSignature));
if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerException(nullptr, "null array in FILL_ARRAY_DATA");
+ ThrowNullPointerException("null array in FILL_ARRAY_DATA");
return false;
}
mirror::Array* array = obj->AsArray();
DCHECK(!array->IsObjectArray());
if (UNLIKELY(static_cast<int32_t>(payload->element_count) > array->GetLength())) {
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location,
- "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"failed FILL_ARRAY_DATA; length=%d, index=%d",
array->GetLength(), payload->element_count);
return false;
diff --git a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
index 14ab320b97..d88d262306 100644
--- a/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_deoptimization_entrypoints.cc
@@ -29,7 +29,7 @@ namespace art {
extern "C" void artDeoptimize(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
- self->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
+ self->SetException(Thread::GetDeoptimizationException());
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_field_entrypoints.cc b/runtime/entrypoints/quick/quick_field_entrypoints.cc
index 7326fcfc69..22bf939f6b 100644
--- a/runtime/entrypoints/quick/quick_field_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_field_entrypoints.cc
@@ -155,8 +155,7 @@ extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object*
sizeof(int8_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetByte(obj);
}
@@ -177,8 +176,7 @@ extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Obj
sizeof(int8_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetBoolean(obj);
}
@@ -198,8 +196,7 @@ extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Objec
sizeof(int16_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetShort(obj);
}
@@ -220,8 +217,7 @@ extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Objec
sizeof(int16_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetChar(obj);
}
@@ -242,8 +238,7 @@ extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object*
sizeof(int32_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->Get32(obj);
}
@@ -264,8 +259,7 @@ extern "C" uint64_t artGet64InstanceFromCode(uint32_t field_idx, mirror::Object*
sizeof(int64_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->Get64(obj);
}
@@ -287,8 +281,7 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror:
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+ ThrowNullPointerExceptionForFieldAccess(field, true);
} else {
return field->GetObj(obj);
}
@@ -448,8 +441,7 @@ extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -489,8 +481,7 @@ extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
Primitive::Type type = field->GetTypeAsPrimitiveType();
// Compiled code can't use transactional mode.
@@ -525,8 +516,7 @@ extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
}
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
// Compiled code can't use transactional mode.
field->Set32<false>(obj, new_value);
@@ -551,8 +541,7 @@ extern "C" int artSet64InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
sizeof(int64_t));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
// Compiled code can't use transactional mode.
field->Set64<false>(obj, new_value);
@@ -578,8 +567,7 @@ extern "C" int artSetObjInstanceFromCode(uint32_t field_idx, mirror::Object* obj
sizeof(mirror::HeapReference<mirror::Object>));
if (LIKELY(field != nullptr)) {
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+ ThrowNullPointerExceptionForFieldAccess(field, false);
} else {
// Compiled code can't use transactional mode.
field->SetObj<false>(obj, new_value);
diff --git a/runtime/entrypoints/quick/quick_lock_entrypoints.cc b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
index 8ceac971e1..4423c08288 100644
--- a/runtime/entrypoints/quick/quick_lock_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_lock_entrypoints.cc
@@ -25,9 +25,7 @@ extern "C" int artLockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS /* EXCLUSIVE_LOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location(self->GetCurrentLocationForThrow());
- ThrowNullPointerException(&throw_location,
- "Null reference used for synchronization (monitor-enter)");
+ ThrowNullPointerException("Null reference used for synchronization (monitor-enter)");
return -1; // Failure.
} else {
if (kIsDebugBuild) {
@@ -47,9 +45,7 @@ extern "C" int artUnlockObjectFromCode(mirror::Object* obj, Thread* self)
NO_THREAD_SAFETY_ANALYSIS /* UNLOCK_FUNCTION(Monitor::monitor_lock_) */ {
ScopedQuickEntrypointChecks sqec(self);
if (UNLIKELY(obj == nullptr)) {
- ThrowLocation throw_location(self->GetCurrentLocationForThrow());
- ThrowNullPointerException(&throw_location,
- "Null reference used for synchronization (monitor-exit)");
+ ThrowNullPointerException("Null reference used for synchronization (monitor-exit)");
return -1; // Failure.
} else {
// MonitorExit may throw exception.
diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
index 25df40b6c0..70317bb5ff 100644
--- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc
@@ -41,12 +41,10 @@ extern "C" void artDeliverExceptionFromCode(mirror::Throwable* exception, Thread
* exception_ in thread and delivering the exception.
*/
ScopedQuickEntrypointChecks sqec(self);
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (exception == nullptr) {
- self->ThrowNewException(throw_location, "Ljava/lang/NullPointerException;",
- "throw with null exception");
+ self->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
} else {
- self->SetException(throw_location, exception);
+ self->SetException(exception);
}
self->QuickDeliverException();
}
@@ -56,8 +54,7 @@ extern "C" void artThrowNullPointerExceptionFromCode(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ScopedQuickEntrypointChecks sqec(self);
self->NoteSignalBeingHandled();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- ThrowNullPointerExceptionFromDexPC(throw_location);
+ ThrowNullPointerExceptionFromDexPC();
self->NoteSignalHandlerDone();
self->QuickDeliverException();
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 00251ffacc..70ee04246a 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -154,8 +154,6 @@ class QuickArgumentVisitor {
// | F7 | f_arg7
// | F6 | f_arg6
// | F5 | f_arg5
- // | F6 | f_arg6
- // | F5 | f_arg5
// | F4 | f_arg4
// | F3 | f_arg3
// | F2 | f_arg2
diff --git a/runtime/entrypoints_order_test.cc b/runtime/entrypoints_order_test.cc
index 9173357134..0fdfcb3afe 100644
--- a/runtime/entrypoints_order_test.cc
+++ b/runtime/entrypoints_order_test.cc
@@ -93,8 +93,7 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, opeer, jpeer, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, jpeer, stack_begin, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_begin, stack_size, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, throw_location, sizeof(void*));
- EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, throw_location, stack_trace_sample, sizeof(ThrowLocation));
+ EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_size, stack_trace_sample, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, stack_trace_sample, wait_next, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, wait_next, monitor_enter_object, sizeof(void*));
EXPECT_OFFSET_DIFFP(Thread, tlsPtr_, monitor_enter_object, top_handle_scope, sizeof(void*));
diff --git a/runtime/gc/accounting/mod_union_table_test.cc b/runtime/gc/accounting/mod_union_table_test.cc
index 87ce166147..77809358e4 100644
--- a/runtime/gc/accounting/mod_union_table_test.cc
+++ b/runtime/gc/accounting/mod_union_table_test.cc
@@ -48,9 +48,9 @@ class ModUnionTableTest : public CommonRuntimeTest {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
auto* klass = GetObjectArrayClass(self, space);
const size_t size = ComputeArraySize(self, klass, component_count, 2);
- size_t bytes_allocated = 0;
+ size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
auto* obj = down_cast<mirror::ObjectArray<mirror::Object>*>(
- space->Alloc(self, size, &bytes_allocated, nullptr));
+ space->Alloc(self, size, &bytes_allocated, nullptr, &bytes_tl_bulk_allocated));
if (obj != nullptr) {
obj->SetClass(klass);
obj->SetLength(static_cast<int32_t>(component_count));
@@ -77,9 +77,10 @@ class ModUnionTableTest : public CommonRuntimeTest {
// copy of the class in the same space that we are allocating in.
DCHECK(java_lang_object_array_ != nullptr);
const size_t class_size = java_lang_object_array_->GetClassSize();
- size_t bytes_allocated = 0;
+ size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
auto* klass = down_cast<mirror::Class*>(space->Alloc(self, class_size, &bytes_allocated,
- nullptr));
+ nullptr,
+ &bytes_tl_bulk_allocated));
DCHECK(klass != nullptr);
memcpy(klass, java_lang_object_array_, class_size);
Runtime::Current()->GetHeap()->GetCardTable()->MarkCard(klass);
diff --git a/runtime/gc/allocator/rosalloc-inl.h b/runtime/gc/allocator/rosalloc-inl.h
index f6c9d3c144..bba92a1f40 100644
--- a/runtime/gc/allocator/rosalloc-inl.h
+++ b/runtime/gc/allocator/rosalloc-inl.h
@@ -28,15 +28,19 @@ inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() {
}
template<bool kThreadSafe>
-inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated) {
+inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
if (UNLIKELY(size > kLargeSizeThreshold)) {
- return AllocLargeObject(self, size, bytes_allocated);
+ return AllocLargeObject(self, size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
void* m;
if (kThreadSafe) {
- m = AllocFromRun(self, size, bytes_allocated);
+ m = AllocFromRun(self, size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
} else {
- m = AllocFromRunThreadUnsafe(self, size, bytes_allocated);
+ m = AllocFromRunThreadUnsafe(self, size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
// Check if the returned memory is really all zero.
if (ShouldCheckZeroMemory() && m != nullptr) {
@@ -48,6 +52,115 @@ inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* by
return m;
}
+inline bool RosAlloc::Run::IsFull() {
+ const size_t num_vec = NumberOfBitmapVectors();
+ for (size_t v = 0; v < num_vec; ++v) {
+ if (~alloc_bit_map_[v] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+inline bool RosAlloc::CanAllocFromThreadLocalRun(Thread* self, size_t size) {
+ if (UNLIKELY(!IsSizeForThreadLocal(size))) {
+ return false;
+ }
+ size_t bracket_size;
+ size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
+ DCHECK_EQ(idx, SizeToIndex(size));
+ DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
+ DCHECK_EQ(bracket_size, bracketSizes[idx]);
+ DCHECK_LE(size, bracket_size);
+ DCHECK(size > 512 || bracket_size - size < 16);
+ DCHECK_LT(idx, kNumThreadLocalSizeBrackets);
+ Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
+ if (kIsDebugBuild) {
+ // Need the lock to prevent race conditions.
+ MutexLock mu(self, *size_bracket_locks_[idx]);
+ CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+ CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
+ }
+ DCHECK(thread_local_run != nullptr);
+ DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
+ return !thread_local_run->IsFull();
+}
+
+inline void* RosAlloc::AllocFromThreadLocalRun(Thread* self, size_t size,
+ size_t* bytes_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ if (UNLIKELY(!IsSizeForThreadLocal(size))) {
+ return nullptr;
+ }
+ size_t bracket_size;
+ size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
+ Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
+ if (kIsDebugBuild) {
+ // Need the lock to prevent race conditions.
+ MutexLock mu(self, *size_bracket_locks_[idx]);
+ CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
+ CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
+ }
+ DCHECK(thread_local_run != nullptr);
+ DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
+ void* slot_addr = thread_local_run->AllocSlot();
+ if (LIKELY(slot_addr != nullptr)) {
+ *bytes_allocated = bracket_size;
+ }
+ return slot_addr;
+}
+
+inline size_t RosAlloc::MaxBytesBulkAllocatedFor(size_t size) {
+ if (UNLIKELY(!IsSizeForThreadLocal(size))) {
+ return size;
+ }
+ size_t bracket_size;
+ size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
+ return numOfSlots[idx] * bracket_size;
+}
+
+inline void* RosAlloc::Run::AllocSlot() {
+ const size_t idx = size_bracket_idx_;
+ while (true) {
+ if (kIsDebugBuild) {
+ // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
+ for (size_t i = 0; i < first_search_vec_idx_; ++i) {
+ CHECK_EQ(~alloc_bit_map_[i], 0U);
+ }
+ }
+ uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
+ uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
+ if (LIKELY(ffz1 != 0)) {
+ const uint32_t ffz = ffz1 - 1;
+ const uint32_t slot_idx = ffz +
+ first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
+ const uint32_t mask = 1U << ffz;
+ DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
+ // Found an empty slot. Set the bit.
+ DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
+ *alloc_bitmap_ptr |= mask;
+ DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
+ uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) +
+ headerSizes[idx] + slot_idx * bracketSizes[idx];
+ if (kTraceRosAlloc) {
+ LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex
+ << reinterpret_cast<intptr_t>(slot_addr)
+ << ", bracket_size=" << std::dec << bracketSizes[idx]
+ << ", slot_idx=" << slot_idx;
+ }
+ return slot_addr;
+ }
+ const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
+ if (first_search_vec_idx_ + 1 >= num_words) {
+ DCHECK(IsFull());
+ // Already at the last word, return null.
+ return nullptr;
+ }
+ // Increase the index to the next word and try again.
+ ++first_search_vec_idx_;
+ }
+}
+
} // namespace allocator
} // namespace gc
} // namespace art
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index f51093aa57..f64a4ff8df 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -454,7 +454,10 @@ size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
return byte_size;
}
-void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
+void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ DCHECK(usable_size != nullptr);
DCHECK_GT(size, kLargeSizeThreshold);
size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
void* r;
@@ -470,6 +473,8 @@ void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_alloca
}
const size_t total_bytes = num_pages * kPageSize;
*bytes_allocated = total_bytes;
+ *usable_size = total_bytes;
+ *bytes_tl_bulk_allocated = total_bytes;
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
<< "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
@@ -622,7 +627,12 @@ inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
return slot_addr;
}
-void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated) {
+void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ DCHECK(usable_size != nullptr);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
@@ -634,14 +644,19 @@ void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* byte
Locks::mutator_lock_->AssertExclusiveHeld(self);
void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
if (LIKELY(slot_addr != nullptr)) {
- DCHECK(bytes_allocated != nullptr);
*bytes_allocated = bracket_size;
- // Caller verifies that it is all 0.
+ *usable_size = bracket_size;
+ *bytes_tl_bulk_allocated = bracket_size;
}
+ // Caller verifies that it is all 0.
return slot_addr;
}
-void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
+void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ DCHECK(usable_size != nullptr);
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
@@ -712,31 +727,43 @@ void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
self->SetRosAllocRun(idx, thread_local_run);
DCHECK(!thread_local_run->IsFull());
}
-
DCHECK(thread_local_run != nullptr);
DCHECK(!thread_local_run->IsFull());
DCHECK(thread_local_run->IsThreadLocal());
+ // Account for all the free slots in the new or refreshed thread local run.
+ *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size;
slot_addr = thread_local_run->AllocSlot();
// Must succeed now with a new run.
DCHECK(slot_addr != nullptr);
+ } else {
+ // The slot is already counted. Leave it as is.
+ *bytes_tl_bulk_allocated = 0;
}
+ DCHECK(slot_addr != nullptr);
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
+ LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex
+ << reinterpret_cast<intptr_t>(slot_addr)
<< "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
<< "(" << std::dec << (bracket_size) << ")";
}
+ *bytes_allocated = bracket_size;
+ *usable_size = bracket_size;
} else {
// Use the (shared) current run.
MutexLock mu(self, *size_bracket_locks_[idx]);
slot_addr = AllocFromCurrentRunUnlocked(self, idx);
if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
+ LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex
+ << reinterpret_cast<intptr_t>(slot_addr)
<< "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size)
<< "(" << std::dec << (bracket_size) << ")";
}
+ if (LIKELY(slot_addr != nullptr)) {
+ *bytes_allocated = bracket_size;
+ *usable_size = bracket_size;
+ *bytes_tl_bulk_allocated = bracket_size;
+ }
}
- DCHECK(bytes_allocated != nullptr);
- *bytes_allocated = bracket_size;
// Caller verifies that it is all 0.
return slot_addr;
}
@@ -852,44 +879,6 @@ std::string RosAlloc::Run::Dump() {
return stream.str();
}
-inline void* RosAlloc::Run::AllocSlot() {
- const size_t idx = size_bracket_idx_;
- while (true) {
- if (kIsDebugBuild) {
- // Make sure that no slots leaked, the bitmap should be full for all previous vectors.
- for (size_t i = 0; i < first_search_vec_idx_; ++i) {
- CHECK_EQ(~alloc_bit_map_[i], 0U);
- }
- }
- uint32_t* const alloc_bitmap_ptr = &alloc_bit_map_[first_search_vec_idx_];
- uint32_t ffz1 = __builtin_ffs(~*alloc_bitmap_ptr);
- if (LIKELY(ffz1 != 0)) {
- const uint32_t ffz = ffz1 - 1;
- const uint32_t slot_idx = ffz + first_search_vec_idx_ * sizeof(*alloc_bitmap_ptr) * kBitsPerByte;
- const uint32_t mask = 1U << ffz;
- DCHECK_LT(slot_idx, numOfSlots[idx]) << "out of range";
- // Found an empty slot. Set the bit.
- DCHECK_EQ(*alloc_bitmap_ptr & mask, 0U);
- *alloc_bitmap_ptr |= mask;
- DCHECK_NE(*alloc_bitmap_ptr & mask, 0U);
- uint8_t* slot_addr = reinterpret_cast<uint8_t*>(this) + headerSizes[idx] + slot_idx * bracketSizes[idx];
- if (kTraceRosAlloc) {
- LOG(INFO) << "RosAlloc::Run::AllocSlot() : 0x" << std::hex << reinterpret_cast<intptr_t>(slot_addr)
- << ", bracket_size=" << std::dec << bracketSizes[idx] << ", slot_idx=" << slot_idx;
- }
- return slot_addr;
- }
- const size_t num_words = RoundUp(numOfSlots[idx], 32) / 32;
- if (first_search_vec_idx_ + 1 >= num_words) {
- DCHECK(IsFull());
- // Already at the last word, return null.
- return nullptr;
- }
- // Increase the index to the next word and try again.
- ++first_search_vec_idx_;
- }
-}
-
void RosAlloc::Run::FreeSlot(void* ptr) {
DCHECK(!IsThreadLocal());
const uint8_t idx = size_bracket_idx_;
@@ -920,6 +909,25 @@ void RosAlloc::Run::FreeSlot(void* ptr) {
}
}
+size_t RosAlloc::Run::NumberOfFreeSlots() {
+ size_t num_alloc_slots = 0;
+ const size_t idx = size_bracket_idx_;
+ const size_t num_slots = numOfSlots[idx];
+ const size_t num_vec = RoundUp(num_slots, 32) / 32;
+ DCHECK_NE(num_vec, 0U);
+ for (size_t v = 0; v < num_vec - 1; v++) {
+ num_alloc_slots += POPCOUNT(alloc_bit_map_[v]);
+ }
+ // Don't count the invalid bits in the last vector.
+ uint32_t last_vec_masked = alloc_bit_map_[num_vec - 1] &
+ ~GetBitmapLastVectorMask(num_slots, num_vec);
+ num_alloc_slots += POPCOUNT(last_vec_masked);
+ size_t num_free_slots = num_slots - num_alloc_slots;
+ DCHECK_LE(num_alloc_slots, num_slots);
+ DCHECK_LE(num_free_slots, num_slots);
+ return num_free_slots;
+}
+
inline bool RosAlloc::Run::MergeThreadLocalFreeBitMapToAllocBitMap(bool* is_all_free_after_out) {
DCHECK(IsThreadLocal());
// Free slots in the alloc bit map based on the thread local free bit map.
@@ -1055,16 +1063,6 @@ inline bool RosAlloc::Run::IsAllFree() {
return alloc_bit_map_[num_vec - 1] == GetBitmapLastVectorMask(num_slots, num_vec);
}
-inline bool RosAlloc::Run::IsFull() {
- const size_t num_vec = NumberOfBitmapVectors();
- for (size_t v = 0; v < num_vec; ++v) {
- if (~alloc_bit_map_[v] != 0) {
- return false;
- }
- }
- return true;
-}
-
inline bool RosAlloc::Run::IsBulkFreeBitmapClean() {
const size_t num_vec = NumberOfBitmapVectors();
for (size_t v = 0; v < num_vec; v++) {
@@ -1654,10 +1652,11 @@ void RosAlloc::SetFootprintLimit(size_t new_capacity) {
}
}
-void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
+size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
Thread* self = Thread::Current();
// Avoid race conditions on the bulk free bit maps with BulkFree() (GC).
ReaderMutexLock wmu(self, bulk_free_lock_);
+ size_t free_bytes = 0U;
for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) {
MutexLock mu(self, *size_bracket_locks_[idx]);
Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx));
@@ -1665,9 +1664,12 @@ void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
// Invalid means already revoked.
DCHECK(thread_local_run->IsThreadLocal());
if (thread_local_run != dedicated_full_run_) {
+ // Note the thread local run may not be full here.
thread->SetRosAllocRun(idx, dedicated_full_run_);
DCHECK_EQ(thread_local_run->magic_num_, kMagicNum);
- // Note the thread local run may not be full here.
+ // Count the number of free slots left.
+ size_t num_free_slots = thread_local_run->NumberOfFreeSlots();
+ free_bytes += num_free_slots * bracketSizes[idx];
bool dont_care;
thread_local_run->MergeThreadLocalFreeBitMapToAllocBitMap(&dont_care);
thread_local_run->SetIsThreadLocal(false);
@@ -1677,6 +1679,7 @@ void RosAlloc::RevokeThreadLocalRuns(Thread* thread) {
RevokeRun(self, idx, thread_local_run);
}
}
+ return free_bytes;
}
void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) {
@@ -1719,16 +1722,18 @@ void RosAlloc::RevokeThreadUnsafeCurrentRuns() {
}
}
-void RosAlloc::RevokeAllThreadLocalRuns() {
+size_t RosAlloc::RevokeAllThreadLocalRuns() {
// This is called when a mutator thread won't allocate such as at
// the Zygote creation time or during the GC pause.
MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_);
std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
+ size_t free_bytes = 0U;
for (Thread* thread : thread_list) {
- RevokeThreadLocalRuns(thread);
+ free_bytes += RevokeThreadLocalRuns(thread);
}
RevokeThreadUnsafeCurrentRuns();
+ return free_bytes;
}
void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) {
diff --git a/runtime/gc/allocator/rosalloc.h b/runtime/gc/allocator/rosalloc.h
index 3269e102bc..d1e7ad91a0 100644
--- a/runtime/gc/allocator/rosalloc.h
+++ b/runtime/gc/allocator/rosalloc.h
@@ -230,8 +230,10 @@ class RosAlloc {
static uint32_t GetBitmapLastVectorMask(size_t num_slots, size_t num_vec);
// Returns true if all the slots in the run are not in use.
bool IsAllFree();
+ // Returns the number of free slots.
+ size_t NumberOfFreeSlots();
// Returns true if all the slots in the run are in use.
- bool IsFull();
+ ALWAYS_INLINE bool IsFull();
// Returns true if the bulk free bit map is clean.
bool IsBulkFreeBitmapClean();
// Returns true if the thread local free bit map is clean.
@@ -309,6 +311,15 @@ class RosAlloc {
DCHECK(bracketSizes[idx] == size);
return idx;
}
+ // Returns true if the given allocation size is for a thread local allocation.
+ static bool IsSizeForThreadLocal(size_t size) {
+ DCHECK_GT(kNumThreadLocalSizeBrackets, 0U);
+ size_t max_thread_local_bracket_idx = kNumThreadLocalSizeBrackets - 1;
+ bool is_size_for_thread_local = size <= bracketSizes[max_thread_local_bracket_idx];
+ DCHECK(size > kLargeSizeThreshold ||
+ (is_size_for_thread_local == (SizeToIndex(size) < kNumThreadLocalSizeBrackets)));
+ return is_size_for_thread_local;
+ }
// Rounds up the size up the nearest bracket size.
static size_t RoundToBracketSize(size_t size) {
DCHECK(size <= kLargeSizeThreshold);
@@ -504,11 +515,13 @@ class RosAlloc {
size_t FreePages(Thread* self, void* ptr, bool already_zero) EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Allocate/free a run slot.
- void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated)
+ void* AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
LOCKS_EXCLUDED(lock_);
// Allocate/free a run slot without acquiring locks.
// TODO: EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated)
+ void* AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
LOCKS_EXCLUDED(lock_);
void* AllocFromCurrentRunUnlocked(Thread* self, size_t idx);
@@ -527,7 +540,9 @@ class RosAlloc {
size_t FreeInternal(Thread* self, void* ptr) LOCKS_EXCLUDED(lock_);
// Allocates large objects.
- void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) LOCKS_EXCLUDED(lock_);
+ void* AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ LOCKS_EXCLUDED(lock_);
// Revoke a run by adding it to non_full_runs_ or freeing the pages.
void RevokeRun(Thread* self, size_t idx, Run* run);
@@ -551,13 +566,26 @@ class RosAlloc {
// If kThreadUnsafe is true then the allocator may avoid acquiring some locks as an optimization.
// If used, this may cause race conditions if multiple threads are allocating at the same time.
template<bool kThreadSafe = true>
- void* Alloc(Thread* self, size_t size, size_t* bytes_allocated)
+ void* Alloc(Thread* self, size_t size, size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
LOCKS_EXCLUDED(lock_);
size_t Free(Thread* self, void* ptr)
LOCKS_EXCLUDED(bulk_free_lock_);
size_t BulkFree(Thread* self, void** ptrs, size_t num_ptrs)
LOCKS_EXCLUDED(bulk_free_lock_);
+ // Returns true if the given allocation request can be allocated in
+ // an existing thread local run without allocating a new run.
+ ALWAYS_INLINE bool CanAllocFromThreadLocalRun(Thread* self, size_t size);
+ // Allocate the given allocation request in an existing thread local
+ // run without allocating a new run.
+ ALWAYS_INLINE void* AllocFromThreadLocalRun(Thread* self, size_t size, size_t* bytes_allocated);
+
+  // Returns the maximum bytes that could be allocated for the given
+  // size in bulk, that is the maximum value for the
+  // bytes_tl_bulk_allocated out param returned by RosAlloc::Alloc().
+ ALWAYS_INLINE size_t MaxBytesBulkAllocatedFor(size_t size);
+
// Returns the size of the allocated slot for a given allocated memory chunk.
size_t UsableSize(const void* ptr);
// Returns the size of the allocated slot for a given size.
@@ -586,9 +614,13 @@ class RosAlloc {
void SetFootprintLimit(size_t bytes) LOCKS_EXCLUDED(lock_);
// Releases the thread-local runs assigned to the given thread back to the common set of runs.
- void RevokeThreadLocalRuns(Thread* thread);
+ // Returns the total bytes of free slots in the revoked thread local runs. This is to be
+ // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
+ size_t RevokeThreadLocalRuns(Thread* thread);
// Releases the thread-local runs assigned to all the threads back to the common set of runs.
- void RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
+ // Returns the total bytes of free slots in the revoked thread local runs. This is to be
+ // subtracted from Heap::num_bytes_allocated_ to cancel out the ahead-of-time counting.
+ size_t RevokeAllThreadLocalRuns() LOCKS_EXCLUDED(Locks::thread_list_lock_);
// Assert the thread local runs of a thread are revoked.
void AssertThreadLocalRunsAreRevoked(Thread* thread);
// Assert all the thread local runs are revoked.
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index dd45ecab7f..db7a4ef7e7 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1259,8 +1259,9 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
size_t region_space_bytes_allocated = 0U;
size_t non_moving_space_bytes_allocated = 0U;
size_t bytes_allocated = 0U;
+ size_t dummy;
mirror::Object* to_ref = region_space_->AllocNonvirtual<true>(
- region_space_alloc_size, &region_space_bytes_allocated, nullptr);
+ region_space_alloc_size, &region_space_bytes_allocated, nullptr, &dummy);
bytes_allocated = region_space_bytes_allocated;
if (to_ref != nullptr) {
DCHECK_EQ(region_space_alloc_size, region_space_bytes_allocated);
@@ -1286,7 +1287,7 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) {
}
fall_back_to_non_moving = true;
to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size,
- &non_moving_space_bytes_allocated, nullptr);
+ &non_moving_space_bytes_allocated, nullptr, &dummy);
CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed";
bytes_allocated = non_moving_space_bytes_allocated;
// Mark it in the mark bitmap.
diff --git a/runtime/gc/collector/garbage_collector.cc b/runtime/gc/collector/garbage_collector.cc
index 8be18be676..eafcc45a13 100644
--- a/runtime/gc/collector/garbage_collector.cc
+++ b/runtime/gc/collector/garbage_collector.cc
@@ -48,6 +48,7 @@ void Iteration::Reset(GcCause gc_cause, bool clear_soft_references) {
gc_cause_ = gc_cause;
freed_ = ObjectBytePair();
freed_los_ = ObjectBytePair();
+ freed_bytes_revoke_ = 0;
}
uint64_t Iteration::GetEstimatedThroughput() const {
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index b8094694b0..ed5207a356 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -75,6 +75,12 @@ class Iteration {
uint64_t GetFreedLargeObjects() const {
return freed_los_.objects;
}
+ uint64_t GetFreedRevokeBytes() const {
+ return freed_bytes_revoke_;
+ }
+ void SetFreedRevoke(uint64_t freed) {
+ freed_bytes_revoke_ = freed;
+ }
void Reset(GcCause gc_cause, bool clear_soft_references);
// Returns the estimated throughput of the iteration.
uint64_t GetEstimatedThroughput() const;
@@ -99,6 +105,7 @@ class Iteration {
TimingLogger timings_;
ObjectBytePair freed_;
ObjectBytePair freed_los_;
+ uint64_t freed_bytes_revoke_; // see Heap::num_bytes_freed_revoke_.
std::vector<uint64_t> pause_times_;
friend class GarbageCollector;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 8aac484f7f..ee4e752608 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -292,6 +292,7 @@ void MarkSweep::ReclaimPhase() {
Runtime::Current()->AllowNewSystemWeaks();
{
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+ GetHeap()->RecordFreeRevoke();
// Reclaim unmarked objects.
Sweep(false);
// Swap the live and mark bitmaps for each space which we modified space. This is an
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index c1ba5e3f72..b3d59f2a51 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -242,6 +242,7 @@ void SemiSpace::MarkingPhase() {
// Revoke buffers before measuring how many objects were moved since the TLABs need to be revoked
// before they are properly counted.
RevokeAllThreadLocalBuffers();
+ GetHeap()->RecordFreeRevoke(); // this is for the non-moving rosalloc space used by GSS.
// Record freed memory.
const int64_t from_bytes = from_space_->GetBytesAllocated();
const int64_t to_bytes = bytes_moved_;
@@ -489,17 +490,18 @@ static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
const size_t object_size = obj->SizeOf();
- size_t bytes_allocated;
+ size_t bytes_allocated, dummy;
mirror::Object* forward_address = nullptr;
if (generational_ && reinterpret_cast<uint8_t*>(obj) < last_gc_to_space_end_) {
// If it's allocated before the last GC (older), move
// (pseudo-promote) it to the main free list space (as sort
// of an old generation.)
forward_address = promo_dest_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ nullptr, &dummy);
if (UNLIKELY(forward_address == nullptr)) {
// If out of space, fall back to the to-space.
- forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
+ &dummy);
// No logic for marking the bitmap, so it must be null.
DCHECK(to_space_live_bitmap_ == nullptr);
} else {
@@ -544,7 +546,8 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
}
} else {
// If it's allocated after the last GC (younger), copy it to the to-space.
- forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr);
+ forward_address = to_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated, nullptr,
+ &dummy);
if (forward_address != nullptr && to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
}
@@ -552,7 +555,7 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
// If it's still null, attempt to use the fallback space.
if (UNLIKELY(forward_address == nullptr)) {
forward_address = fallback_space_->AllocThreadUnsafe(self_, object_size, &bytes_allocated,
- nullptr);
+ nullptr, &dummy);
CHECK(forward_address != nullptr) << "Out of memory in the to-space and fallback space.";
accounting::ContinuousSpaceBitmap* bitmap = fallback_space_->GetLiveBitmap();
if (bitmap != nullptr) {
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index b8c24521a2..b770096671 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -64,6 +64,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
// fragmentation.
}
AllocationTimer alloc_timer(this, &obj);
+ // bytes allocated for the (individual) object.
size_t bytes_allocated;
size_t usable_size;
size_t new_num_bytes_allocated = 0;
@@ -86,13 +87,29 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
usable_size = bytes_allocated;
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
+ } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
+ (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
+ LIKELY(obj != nullptr)) {
+ DCHECK(!running_on_valgrind_);
+ obj->SetClass(klass);
+ if (kUseBakerOrBrooksReadBarrier) {
+ if (kUseBrooksReadBarrier) {
+ obj->SetReadBarrierPointer(obj);
+ }
+ obj->AssertReadBarrierPointer();
+ }
+ usable_size = bytes_allocated;
+ pre_fence_visitor(obj, usable_size);
+ QuasiAtomic::ThreadFenceForConstructor();
} else {
+ // bytes allocated that takes bulk thread-local buffer allocations into account.
+ size_t bytes_tl_bulk_allocated = 0;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
- &usable_size);
+ &usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
bool is_current_allocator = allocator == GetCurrentAllocator();
obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
- &klass);
+ &bytes_tl_bulk_allocated, &klass);
if (obj == nullptr) {
bool after_is_current_allocator = allocator == GetCurrentAllocator();
// If there is a pending exception, fail the allocation right away since the next one
@@ -126,9 +143,9 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Clas
WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
}
pre_fence_visitor(obj, usable_size);
- new_num_bytes_allocated =
- static_cast<size_t>(num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated))
- + bytes_allocated;
+ new_num_bytes_allocated = static_cast<size_t>(
+ num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_tl_bulk_allocated))
+ + bytes_tl_bulk_allocated;
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
CHECK_LE(obj->SizeOf(), usable_size);
@@ -196,8 +213,10 @@ inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klas
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
size_t alloc_size, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
if (allocator_type != kAllocatorTypeTLAB && allocator_type != kAllocatorTypeRegionTLAB &&
+ allocator_type != kAllocatorTypeRosAlloc &&
UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
return nullptr;
}
@@ -210,35 +229,56 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
if (LIKELY(ret != nullptr)) {
*bytes_allocated = alloc_size;
*usable_size = alloc_size;
+ *bytes_tl_bulk_allocated = alloc_size;
}
break;
}
case kAllocatorTypeRosAlloc: {
if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
// If running on valgrind, we should be using the instrumented path.
- ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
+ max_bytes_tl_bulk_allocated))) {
+ return nullptr;
+ }
+ ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
DCHECK(!running_on_valgrind_);
- ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
+ size_t max_bytes_tl_bulk_allocated =
+ rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
+ if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
+ max_bytes_tl_bulk_allocated))) {
+ return nullptr;
+ }
+ if (!kInstrumented) {
+ DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
+ }
+ ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeDlMalloc: {
if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
// If running on valgrind, we should be using the instrumented path.
- ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
DCHECK(!running_on_valgrind_);
- ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
+ ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
break;
}
case kAllocatorTypeNonMoving: {
- ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeLOS: {
- ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
+ ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
// Note that the bump pointer spaces aren't necessarily next to
// the other continuous spaces like the non-moving alloc space or
// the zygote space.
@@ -257,20 +297,22 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
return nullptr;
}
- *bytes_allocated = new_tlab_size;
+ *bytes_tl_bulk_allocated = new_tlab_size;
} else {
- *bytes_allocated = 0;
+ *bytes_tl_bulk_allocated = 0;
}
// The allocation can't fail.
ret = self->AllocTlab(alloc_size);
DCHECK(ret != nullptr);
+ *bytes_allocated = alloc_size;
*usable_size = alloc_size;
break;
}
case kAllocatorTypeRegion: {
DCHECK(region_space_ != nullptr);
alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
break;
}
case kAllocatorTypeRegionTLAB: {
@@ -283,15 +325,17 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
// Try to allocate a tlab.
if (!region_space_->AllocNewTlab(self)) {
// Failed to allocate a tlab. Try non-tlab.
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
return ret;
}
- *bytes_allocated = space::RegionSpace::kRegionSize;
+ *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
// Fall-through.
} else {
// Check OOME for a non-tlab allocation.
if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
return ret;
} else {
// Neither tlab or non-tlab works. Give up.
@@ -301,18 +345,20 @@ inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator
} else {
// Large. Check OOME.
if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
- ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size);
+ ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
return ret;
} else {
return nullptr;
}
}
} else {
- *bytes_allocated = 0;
+ *bytes_tl_bulk_allocated = 0; // Allocated in an existing buffer.
}
// The allocation can't fail.
ret = self->AllocTlab(alloc_size);
DCHECK(ret != nullptr);
+ *bytes_allocated = alloc_size;
*usable_size = alloc_size;
break;
}
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 9343622fda..9421db5139 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -156,6 +156,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
native_bytes_allocated_(0),
+ num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
verify_pre_gc_heap_(verify_pre_gc_heap),
@@ -438,20 +439,31 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
// Create our garbage collectors.
for (size_t i = 0; i < 2; ++i) {
const bool concurrent = i != 0;
- garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
- garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
- garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ if ((MayUseCollector(kCollectorTypeCMS) && concurrent) ||
+ (MayUseCollector(kCollectorTypeMS) && !concurrent)) {
+ garbage_collectors_.push_back(new collector::MarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::PartialMarkSweep(this, concurrent));
+ garbage_collectors_.push_back(new collector::StickyMarkSweep(this, concurrent));
+ }
}
if (kMovingCollector) {
- // TODO: Clean this up.
- const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
- semi_space_collector_ = new collector::SemiSpace(this, generational,
- generational ? "generational" : "");
- garbage_collectors_.push_back(semi_space_collector_);
- concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
- garbage_collectors_.push_back(concurrent_copying_collector_);
- mark_compact_collector_ = new collector::MarkCompact(this);
- garbage_collectors_.push_back(mark_compact_collector_);
+ if (MayUseCollector(kCollectorTypeSS) || MayUseCollector(kCollectorTypeGSS) ||
+ MayUseCollector(kCollectorTypeHomogeneousSpaceCompact) ||
+ use_homogeneous_space_compaction_for_oom_) {
+ // TODO: Clean this up.
+ const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+ semi_space_collector_ = new collector::SemiSpace(this, generational,
+ generational ? "generational" : "");
+ garbage_collectors_.push_back(semi_space_collector_);
+ }
+ if (MayUseCollector(kCollectorTypeCC)) {
+ concurrent_copying_collector_ = new collector::ConcurrentCopying(this);
+ garbage_collectors_.push_back(concurrent_copying_collector_);
+ }
+ if (MayUseCollector(kCollectorTypeMC)) {
+ mark_compact_collector_ = new collector::MarkCompact(this);
+ garbage_collectors_.push_back(mark_compact_collector_);
+ }
}
if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
(is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
@@ -487,6 +499,10 @@ MemMap* Heap::MapAnonymousPreferredAddress(const char* name, uint8_t* request_be
return nullptr;
}
+bool Heap::MayUseCollector(CollectorType type) const {
+ return foreground_collector_type_ == type || background_collector_type_ == type;
+}
+
space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
size_t growth_limit, size_t capacity,
const char* name, bool can_move_objects) {
@@ -1329,6 +1345,19 @@ void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) {
}
}
+void Heap::RecordFreeRevoke() {
+  // Subtract num_bytes_freed_revoke_ from num_bytes_allocated_ to cancel out the
+  // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers.
+ // If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_
+ // all the way to zero exactly as the remainder will be subtracted at the next GC.
+ size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent();
+ CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed),
+ bytes_freed) << "num_bytes_freed_revoke_ underflow";
+ CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed),
+ bytes_freed) << "num_bytes_allocated_ underflow";
+ GetCurrentGcIteration()->SetFreedRevoke(bytes_freed);
+}
+
space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
for (const auto& space : continuous_spaces_) {
if (space->AsContinuousSpace()->IsRosAllocSpace()) {
@@ -1343,6 +1372,7 @@ space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc)
mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
size_t alloc_size, size_t* bytes_allocated,
size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated,
mirror::Class** klass) {
bool was_default_allocator = allocator == GetCurrentAllocator();
// Make sure there is no pending exception since we may need to throw an OOME.
@@ -1362,7 +1392,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
}
// A GC was in progress and we blocked, retry allocation now that memory has been freed.
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1376,7 +1406,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
}
if (gc_ran) {
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1396,7 +1426,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
if (plan_gc_ran) {
// Did we free sufficient memory for the allocation to succeed?
mirror::Object* ptr = TryToAllocate<true, false>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1405,7 +1435,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
// Allocations have failed after GCs; this is an exceptional state.
// Try harder, growing the heap if necessary.
mirror::Object* ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
return ptr;
}
@@ -1422,7 +1452,8 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
if (was_default_allocator && allocator != GetCurrentAllocator()) {
return nullptr;
}
- ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+ ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (ptr == nullptr) {
const uint64_t current_time = NanoTime();
switch (allocator) {
@@ -1438,7 +1469,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
case HomogeneousSpaceCompactResult::kSuccess:
// If the allocation succeeded, we delayed an oom.
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
if (ptr != nullptr) {
count_delayed_oom_++;
}
@@ -1483,7 +1514,7 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocat
} else {
LOG(WARNING) << "Disabled moving GC due to the non moving space being full";
ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
}
}
break;
@@ -1969,8 +2000,8 @@ class ZygoteCompactingCollector FINAL : public collector::SemiSpace {
if (it == bins_.end()) {
// No available space in the bins, place it in the target space instead (grows the zygote
// space).
- size_t bytes_allocated;
- forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
+ size_t bytes_allocated, dummy;
+ forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr, &dummy);
if (to_space_live_bitmap_ != nullptr) {
to_space_live_bitmap_->Set(forward_address);
} else {
@@ -2033,8 +2064,6 @@ void Heap::PreZygoteFork() {
non_moving_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
const bool same_space = non_moving_space_ == main_space_;
if (kCompactZygote) {
- // Can't compact if the non moving space is the same as the main space.
- DCHECK(semi_space_collector_ != nullptr);
// Temporarily disable rosalloc verification because the zygote
// compaction will mess up the rosalloc internal metadata.
ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
@@ -2053,6 +2082,8 @@ void Heap::PreZygoteFork() {
}
} else {
CHECK(main_space_ != nullptr);
+ CHECK_NE(main_space_, non_moving_space_)
+ << "Does not make sense to compact within the same space";
// Copy from the main space.
zygote_collector.SetFromSpace(main_space_);
reset_main_space = true;
@@ -3069,7 +3100,8 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
SetIdealFootprint(target_size);
if (IsGcConcurrent()) {
const uint64_t freed_bytes = current_gc_iteration_.GetFreedBytes() +
- current_gc_iteration_.GetFreedLargeObjectBytes();
+ current_gc_iteration_.GetFreedLargeObjectBytes() +
+ current_gc_iteration_.GetFreedRevokeBytes();
// Bytes allocated will shrink by freed_bytes after the GC runs, so if we want to figure out
// how many bytes were allocated during the GC we need to add freed_bytes back on.
CHECK_GE(bytes_allocated + freed_bytes, bytes_allocated_before_gc);
@@ -3275,31 +3307,43 @@ void Heap::RequestTrim(Thread* self) {
void Heap::RevokeThreadLocalBuffers(Thread* thread) {
if (rosalloc_space_ != nullptr) {
- rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ if (freed_bytes_revoke > 0U) {
+ num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
+ CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+ }
}
if (bump_pointer_space_ != nullptr) {
- bump_pointer_space_->RevokeThreadLocalBuffers(thread);
+ CHECK_EQ(bump_pointer_space_->RevokeThreadLocalBuffers(thread), 0U);
}
if (region_space_ != nullptr) {
- region_space_->RevokeThreadLocalBuffers(thread);
+ CHECK_EQ(region_space_->RevokeThreadLocalBuffers(thread), 0U);
}
}
void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) {
if (rosalloc_space_ != nullptr) {
- rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread);
+ if (freed_bytes_revoke > 0U) {
+ num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
+ CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+ }
}
}
void Heap::RevokeAllThreadLocalBuffers() {
if (rosalloc_space_ != nullptr) {
- rosalloc_space_->RevokeAllThreadLocalBuffers();
+ size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers();
+ if (freed_bytes_revoke > 0U) {
+ num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke);
+ CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed());
+ }
}
if (bump_pointer_space_ != nullptr) {
- bump_pointer_space_->RevokeAllThreadLocalBuffers();
+ CHECK_EQ(bump_pointer_space_->RevokeAllThreadLocalBuffers(), 0U);
}
if (region_space_ != nullptr) {
- region_space_->RevokeAllThreadLocalBuffers();
+ CHECK_EQ(region_space_->RevokeAllThreadLocalBuffers(), 0U);
}
}
@@ -3340,6 +3384,8 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
// Just finished a GC, attempt to run finalizers.
RunFinalization(env);
CHECK(!env->ExceptionCheck());
+ // Native bytes allocated may be updated by finalization, refresh it.
+ new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
}
// If we still are over the watermark, attempt a GC for alloc and run finalizers.
if (new_native_bytes_allocated > growth_limit_) {
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index b2478e6d70..959ff18516 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -390,6 +390,9 @@ class Heap {
// free-list backed space.
void RecordFree(uint64_t freed_objects, int64_t freed_bytes);
+ // Record the bytes freed by thread-local buffer revoke.
+ void RecordFreeRevoke();
+
// Must be called if a field of an Object in the heap changes, and before any GC safe-point.
// The call is not needed if NULL is stored in the field.
ALWAYS_INLINE void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
@@ -661,6 +664,14 @@ class Heap {
// Request asynchronous GC.
void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(pending_task_lock_);
+ // Whether or not we may use a garbage collector, used so that we only create collectors we need.
+ bool MayUseCollector(CollectorType type) const;
+
+  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
+ void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
+ min_interval_homogeneous_space_compaction_by_oom_ = interval;
+ }
+
private:
class ConcurrentGCTask;
class CollectorTransitionTask;
@@ -721,6 +732,7 @@ class Heap {
// an initial allocation attempt failed.
mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated,
mirror::Class** klass)
LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -739,7 +751,8 @@ class Heap {
template <const bool kInstrumented, const bool kGrow>
ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
size_t alloc_size, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
@@ -995,6 +1008,13 @@ class Heap {
// Bytes which are allocated and managed by native code but still need to be accounted for.
Atomic<size_t> native_bytes_allocated_;
+ // Number of bytes freed by thread local buffer revokes. This will
+ // cancel out the ahead-of-time bulk counting of bytes allocated in
+ // rosalloc thread-local buffers. It is temporarily accumulated
+ // here to be subtracted from num_bytes_allocated_ later at the next
+ // GC.
+ Atomic<size_t> num_bytes_freed_revoke_;
+
// Info related to the current or previous GC iteration.
collector::Iteration current_gc_iteration_;
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h
index 9f1f9533d0..14a93d1611 100644
--- a/runtime/gc/space/bump_pointer_space-inl.h
+++ b/runtime/gc/space/bump_pointer_space-inl.h
@@ -24,7 +24,8 @@ namespace gc {
namespace space {
inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
num_bytes = RoundUp(num_bytes, kAlignment);
mirror::Object* ret = AllocNonvirtual(num_bytes);
if (LIKELY(ret != nullptr)) {
@@ -32,13 +33,15 @@ inline mirror::Object* BumpPointerSpace::Alloc(Thread*, size_t num_bytes, size_t
if (usable_size != nullptr) {
*usable_size = num_bytes;
}
+ *bytes_tl_bulk_allocated = num_bytes;
}
return ret;
}
inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
num_bytes = RoundUp(num_bytes, kAlignment);
uint8_t* end = end_.LoadRelaxed();
@@ -54,6 +57,7 @@ inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t
if (UNLIKELY(usable_size != nullptr)) {
*usable_size = num_bytes;
}
+ *bytes_tl_bulk_allocated = num_bytes;
return obj;
}
diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc
index fbfc4495e0..1303d7729e 100644
--- a/runtime/gc/space/bump_pointer_space.cc
+++ b/runtime/gc/space/bump_pointer_space.cc
@@ -93,12 +93,13 @@ mirror::Object* BumpPointerSpace::GetNextObject(mirror::Object* obj) {
return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}
-void BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
+size_t BumpPointerSpace::RevokeThreadLocalBuffers(Thread* thread) {
MutexLock mu(Thread::Current(), block_lock_);
RevokeThreadLocalBuffersLocked(thread);
+ return 0U;
}
-void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
+size_t BumpPointerSpace::RevokeAllThreadLocalBuffers() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -107,6 +108,7 @@ void BumpPointerSpace::RevokeAllThreadLocalBuffers() {
for (Thread* thread : thread_list) {
RevokeThreadLocalBuffers(thread);
}
+ return 0U;
}
void BumpPointerSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h
index 089ede4453..c496a422e0 100644
--- a/runtime/gc/space/bump_pointer_space.h
+++ b/runtime/gc/space/bump_pointer_space.h
@@ -47,10 +47,10 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate num_bytes, returns nullptr if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(size_t num_bytes);
@@ -103,9 +103,9 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace {
void Dump(std::ostream& os) const;
- void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
- void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(block_lock_);
+ size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(block_lock_);
void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
Locks::thread_list_lock_);
diff --git a/runtime/gc/space/dlmalloc_space-inl.h b/runtime/gc/space/dlmalloc_space-inl.h
index 4c8a35e0f7..9eace897e6 100644
--- a/runtime/gc/space/dlmalloc_space-inl.h
+++ b/runtime/gc/space/dlmalloc_space-inl.h
@@ -27,11 +27,13 @@ namespace space {
inline mirror::Object* DlMallocSpace::AllocNonvirtual(Thread* self, size_t num_bytes,
size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* obj;
{
MutexLock mu(self, lock_);
- obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
+ obj = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
if (LIKELY(obj != NULL)) {
// Zero freshly allocated memory, done while not holding the space's lock.
@@ -49,9 +51,11 @@ inline size_t DlMallocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_
return size + kChunkOverhead;
}
-inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/, size_t num_bytes,
- size_t* bytes_allocated,
- size_t* usable_size) {
+inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(
+ Thread* /*self*/, size_t num_bytes,
+ size_t* bytes_allocated,
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* result = reinterpret_cast<mirror::Object*>(mspace_malloc(mspace_, num_bytes));
if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
@@ -61,6 +65,7 @@ inline mirror::Object* DlMallocSpace::AllocWithoutGrowthLocked(Thread* /*self*/,
size_t allocation_size = AllocationSizeNonvirtual(result, usable_size);
DCHECK(bytes_allocated != NULL);
*bytes_allocated = allocation_size;
+ *bytes_tl_bulk_allocated = allocation_size;
}
return result;
}
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index b8a9dd6639..225861db60 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -123,7 +123,8 @@ void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t ini
}
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* result;
{
MutexLock mu(self, lock_);
@@ -131,7 +132,8 @@ mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
size_t max_allowed = Capacity();
mspace_set_footprint_limit(mspace_, max_allowed);
// Try the allocation.
- result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
+ result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
// Shrink back down as small as possible.
size_t footprint = mspace_footprint(mspace_);
mspace_set_footprint_limit(mspace_, footprint);
diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h
index 6ce138c235..1f80f1fd6b 100644
--- a/runtime/gc/space/dlmalloc_space.h
+++ b/runtime/gc/space/dlmalloc_space.h
@@ -48,11 +48,15 @@ class DlMallocSpace : public MallocSpace {
// Virtual to allow ValgrindMallocSpace to intercept.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
+ OVERRIDE LOCKS_EXCLUDED(lock_);
// Virtual to allow ValgrindMallocSpace to intercept.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_) {
- return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE LOCKS_EXCLUDED(lock_) {
+ return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
// Virtual to allow ValgrindMallocSpace to intercept.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
@@ -67,15 +71,22 @@ class DlMallocSpace : public MallocSpace {
LOCKS_EXCLUDED(lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ return num_bytes;
+ }
+
// DlMallocSpaces don't have thread local state.
- void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ return 0U;
}
- void RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ return 0U;
}
// Faster non-virtual allocation path.
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) LOCKS_EXCLUDED(lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ LOCKS_EXCLUDED(lock_);
// Faster non-virtual allocation size path.
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size);
@@ -134,7 +145,8 @@ class DlMallocSpace : public MallocSpace {
private:
mirror::Object* AllocWithoutGrowthLocked(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
diff --git a/runtime/gc/space/large_object_space.cc b/runtime/gc/space/large_object_space.cc
index 7523de58bf..5c8e4b9299 100644
--- a/runtime/gc/space/large_object_space.cc
+++ b/runtime/gc/space/large_object_space.cc
@@ -38,10 +38,11 @@ class ValgrindLargeObjectMapSpace FINAL : public LargeObjectMapSpace {
}
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE {
mirror::Object* obj =
LargeObjectMapSpace::Alloc(self, num_bytes + kValgrindRedZoneBytes * 2, bytes_allocated,
- usable_size);
+ usable_size, bytes_tl_bulk_allocated);
mirror::Object* object_without_rdz = reinterpret_cast<mirror::Object*>(
reinterpret_cast<uintptr_t>(obj) + kValgrindRedZoneBytes);
VALGRIND_MAKE_MEM_NOACCESS(reinterpret_cast<void*>(obj), kValgrindRedZoneBytes);
@@ -108,7 +109,8 @@ LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
}
mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
std::string error_msg;
MemMap* mem_map = MemMap::MapAnonymous("large object space allocation", nullptr, num_bytes,
PROT_READ | PROT_WRITE, true, false, &error_msg);
@@ -131,6 +133,8 @@ mirror::Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes,
if (usable_size != nullptr) {
*usable_size = allocation_size;
}
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
+ *bytes_tl_bulk_allocated = allocation_size;
num_bytes_allocated_ += allocation_size;
total_bytes_allocated_ += allocation_size;
++num_objects_allocated_;
@@ -413,7 +417,7 @@ size_t FreeListSpace::AllocationSize(mirror::Object* obj, size_t* usable_size) {
}
mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
MutexLock mu(self, lock_);
const size_t allocation_size = RoundUp(num_bytes, kAlignment);
AllocationInfo temp_info;
@@ -451,6 +455,8 @@ mirror::Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes, size_t* byt
if (usable_size != nullptr) {
*usable_size = allocation_size;
}
+ DCHECK(bytes_tl_bulk_allocated != nullptr);
+ *bytes_tl_bulk_allocated = allocation_size;
// Need to do these inside of the lock.
++num_objects_allocated_;
++total_objects_allocated_;
diff --git a/runtime/gc/space/large_object_space.h b/runtime/gc/space/large_object_space.h
index 847f575815..d1f9386d09 100644
--- a/runtime/gc/space/large_object_space.h
+++ b/runtime/gc/space/large_object_space.h
@@ -62,9 +62,11 @@ class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
}
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
// LargeObjectSpaces don't have thread local state.
- void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ return 0U;
}
- void RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ return 0U;
}
bool IsAllocSpace() const OVERRIDE {
return true;
@@ -124,7 +126,7 @@ class LargeObjectMapSpace : public LargeObjectSpace {
// Return the storage space required by obj.
size_t AllocationSize(mirror::Object* obj, size_t* usable_size);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated);
size_t Free(Thread* self, mirror::Object* ptr);
void Walk(DlMallocSpace::WalkCallback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
// TODO: disabling thread safety analysis as this may be called when we already hold lock_.
@@ -153,7 +155,7 @@ class FreeListSpace FINAL : public LargeObjectSpace {
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
size_t Free(Thread* self, mirror::Object* obj) OVERRIDE;
void Walk(DlMallocSpace::WalkCallback callback, void* arg) OVERRIDE LOCKS_EXCLUDED(lock_);
void Dump(std::ostream& os) const;
diff --git a/runtime/gc/space/large_object_space_test.cc b/runtime/gc/space/large_object_space_test.cc
index e17bad8a14..a261663ec7 100644
--- a/runtime/gc/space/large_object_space_test.cc
+++ b/runtime/gc/space/large_object_space_test.cc
@@ -49,11 +49,13 @@ void LargeObjectSpaceTest::LargeObjectTest() {
while (requests.size() < num_allocations) {
size_t request_size = test_rand(&rand_seed) % max_allocation_size;
size_t allocation_size = 0;
+ size_t bytes_tl_bulk_allocated;
mirror::Object* obj = los->Alloc(Thread::Current(), request_size, &allocation_size,
- nullptr);
+ nullptr, &bytes_tl_bulk_allocated);
ASSERT_TRUE(obj != nullptr);
ASSERT_EQ(allocation_size, los->AllocationSize(obj, nullptr));
ASSERT_GE(allocation_size, request_size);
+ ASSERT_EQ(allocation_size, bytes_tl_bulk_allocated);
// Fill in our magic value.
uint8_t magic = (request_size & 0xFF) | 1;
memset(obj, magic, request_size);
@@ -83,9 +85,10 @@ void LargeObjectSpaceTest::LargeObjectTest() {
// Test that dump doesn't crash.
los->Dump(LOG(INFO));
- size_t bytes_allocated = 0;
+ size_t bytes_allocated = 0, bytes_tl_bulk_allocated;
// Checks that the coalescing works.
- mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr);
+ mirror::Object* obj = los->Alloc(Thread::Current(), 100 * MB, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated);
EXPECT_TRUE(obj != nullptr);
los->Free(Thread::Current(), obj);
@@ -102,8 +105,9 @@ class AllocRaceTask : public Task {
void Run(Thread* self) {
for (size_t i = 0; i < iterations_ ; ++i) {
- size_t alloc_size;
- mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr);
+ size_t alloc_size, bytes_tl_bulk_allocated;
+ mirror::Object* ptr = los_->Alloc(self, size_, &alloc_size, nullptr,
+ &bytes_tl_bulk_allocated);
NanoSleep((id_ + 3) * 1000); // (3+id) mu s
diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h
index 06239e5e73..bbf1bbbdbd 100644
--- a/runtime/gc/space/malloc_space.h
+++ b/runtime/gc/space/malloc_space.h
@@ -55,10 +55,11 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
// Allocate num_bytes allowing the underlying space to grow.
virtual mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) = 0;
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) = 0;
// Allocate num_bytes without allowing the underlying space to grow.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) = 0;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
// Return the storage space required by obj. If usable_size isn't nullptr then it is set to the
// amount of the storage space that may be used by obj.
virtual size_t AllocationSize(mirror::Object* obj, size_t* usable_size) = 0;
@@ -67,6 +68,11 @@ class MallocSpace : public ContinuousMemMapAllocSpace {
virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ // Returns the maximum bytes that could be allocated for the given
+ // size in bulk, that is the maximum value for the
+ // bytes_tl_bulk_allocated out param returned by MallocSpace::Alloc().
+ virtual size_t MaxBytesBulkAllocatedFor(size_t num_bytes) = 0;
+
#ifndef NDEBUG
virtual void CheckMoreCoreForPrecondition() {} // to be overridden in the debug build.
#else
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index a4ed7187c0..1cdf69dbe5 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -24,30 +24,36 @@ namespace gc {
namespace space {
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
num_bytes = RoundUp(num_bytes, kAlignment);
- return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size);
+ return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
- return Alloc(self, num_bytes, bytes_allocated, usable_size);
+ return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAligned<kAlignment>(num_bytes));
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
// Non-large object.
if (!kForEvac) {
- obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
DCHECK(evac_region_ != nullptr);
- obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
if (LIKELY(obj != nullptr)) {
return obj;
@@ -55,9 +61,11 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
MutexLock mu(Thread::Current(), region_lock_);
// Retry with current region since another thread may have updated it.
if (!kForEvac) {
- obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
} else {
- obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
if (LIKELY(obj != nullptr)) {
return obj;
@@ -73,7 +81,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
r->Unfree(time_);
r->SetNewlyAllocated();
++num_non_free_regions_;
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
current_region_ = r;
return obj;
@@ -85,7 +93,7 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
if (r->IsFree()) {
r->Unfree(time_);
++num_non_free_regions_;
- obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
+ obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
CHECK(obj != nullptr);
evac_region_ = r;
return obj;
@@ -94,7 +102,8 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
}
} else {
// Large object.
- obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size);
+ obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (LIKELY(obj != nullptr)) {
return obj;
}
@@ -103,7 +112,8 @@ inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* by
}
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAllocated() && IsInToSpace());
DCHECK(IsAligned<kAlignment>(num_bytes));
Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
@@ -124,6 +134,7 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* byte
if (usable_size != nullptr) {
*usable_size = num_bytes;
}
+ *bytes_tl_bulk_allocated = num_bytes;
return reinterpret_cast<mirror::Object*>(old_top);
}
@@ -253,7 +264,8 @@ inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAligned<kAlignment>(num_bytes));
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -300,6 +312,7 @@ mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocate
if (usable_size != nullptr) {
*usable_size = num_regs * kRegionSize;
}
+ *bytes_tl_bulk_allocated = num_bytes;
return reinterpret_cast<mirror::Object*>(first_reg->Begin());
} else {
// right points to the non-free region. Start with the one after it.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8bb73d614c..814ab6ce92 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -76,7 +76,7 @@ RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
current_region_ = &full_region_;
evac_region_ = nullptr;
size_t ignored;
- DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr) == nullptr);
+ DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}
size_t RegionSpace::FromSpaceSize() {
@@ -356,9 +356,10 @@ bool RegionSpace::AllocNewTlab(Thread* self) {
return false;
}
-void RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
+size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
MutexLock mu(Thread::Current(), region_lock_);
RevokeThreadLocalBuffersLocked(thread);
+ return 0U;
}
void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
@@ -377,7 +378,7 @@ void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
thread->SetTlab(nullptr, nullptr);
}
-void RegionSpace::RevokeAllThreadLocalBuffers() {
+size_t RegionSpace::RevokeAllThreadLocalBuffers() {
Thread* self = Thread::Current();
MutexLock mu(self, *Locks::runtime_shutdown_lock_);
MutexLock mu2(self, *Locks::thread_list_lock_);
@@ -385,6 +386,7 @@ void RegionSpace::RevokeAllThreadLocalBuffers() {
for (Thread* thread : thread_list) {
RevokeThreadLocalBuffers(thread);
}
+ return 0U;
}
void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index 416054716c..b88ce24114 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -42,18 +42,20 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
// Allocate num_bytes, returns nullptr if the space is full.
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// The main allocation routine.
template<bool kForEvac>
ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated);
// Allocate/free large objects (objects that are larger than the region size.)
template<bool kForEvac>
- mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size);
+ mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated);
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated);
// Return the storage space required by obj.
@@ -87,10 +89,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
void DumpRegions(std::ostream& os);
void DumpNonFreeRegions(std::ostream& os);
- void RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
+ size_t RevokeThreadLocalBuffers(Thread* thread) LOCKS_EXCLUDED(region_lock_);
void RevokeThreadLocalBuffersLocked(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(region_lock_);
- void RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
- Locks::thread_list_lock_);
+ size_t RevokeAllThreadLocalBuffers() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
+ Locks::thread_list_lock_);
void AssertThreadLocalBuffersAreRevoked(Thread* thread) LOCKS_EXCLUDED(region_lock_);
void AssertAllThreadLocalBuffersAreRevoked() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_,
Locks::thread_list_lock_);
@@ -269,7 +271,8 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace {
}
ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated);
bool IsFree() const {
bool is_free = state_ == RegionState::kRegionStateFree;
diff --git a/runtime/gc/space/rosalloc_space-inl.h b/runtime/gc/space/rosalloc_space-inl.h
index 5d6642d349..9d582a3f86 100644
--- a/runtime/gc/space/rosalloc_space-inl.h
+++ b/runtime/gc/space/rosalloc_space-inl.h
@@ -26,13 +26,19 @@ namespace art {
namespace gc {
namespace space {
+template<bool kMaybeRunningOnValgrind>
inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
// obj is a valid object. Use its class in the header to get the size.
// Don't use verification since the object may be dead if we are sweeping.
size_t size = obj->SizeOf<kVerifyNone>();
- bool running_on_valgrind = RUNNING_ON_VALGRIND != 0;
- if (running_on_valgrind) {
- size += 2 * kDefaultValgrindRedZoneBytes;
+ bool running_on_valgrind = false;
+ if (kMaybeRunningOnValgrind) {
+ running_on_valgrind = RUNNING_ON_VALGRIND != 0;
+ if (running_on_valgrind) {
+ size += 2 * kDefaultValgrindRedZoneBytes;
+ }
+ } else {
+ DCHECK_EQ(RUNNING_ON_VALGRIND, 0U);
}
size_t size_by_size = rosalloc_->UsableSize(size);
if (kIsDebugBuild) {
@@ -55,28 +61,50 @@ inline size_t RosAllocSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_
template<bool kThreadSafe>
inline mirror::Object* RosAllocSpace::AllocCommon(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
- size_t rosalloc_size = 0;
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
+ size_t rosalloc_bytes_allocated = 0;
+ size_t rosalloc_usable_size = 0;
+ size_t rosalloc_bytes_tl_bulk_allocated = 0;
if (!kThreadSafe) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
}
mirror::Object* result = reinterpret_cast<mirror::Object*>(
- rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_size));
+ rosalloc_->Alloc<kThreadSafe>(self, num_bytes, &rosalloc_bytes_allocated,
+ &rosalloc_usable_size,
+ &rosalloc_bytes_tl_bulk_allocated));
if (LIKELY(result != NULL)) {
if (kDebugSpaces) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
DCHECK(bytes_allocated != NULL);
- *bytes_allocated = rosalloc_size;
- DCHECK_EQ(rosalloc_size, rosalloc_->UsableSize(result));
+ *bytes_allocated = rosalloc_bytes_allocated;
+ DCHECK_EQ(rosalloc_usable_size, rosalloc_->UsableSize(result));
if (usable_size != nullptr) {
- *usable_size = rosalloc_size;
+ *usable_size = rosalloc_usable_size;
}
+ DCHECK(bytes_tl_bulk_allocated != NULL);
+ *bytes_tl_bulk_allocated = rosalloc_bytes_tl_bulk_allocated;
}
return result;
}
+inline bool RosAllocSpace::CanAllocThreadLocal(Thread* self, size_t num_bytes) {
+ return rosalloc_->CanAllocFromThreadLocalRun(self, num_bytes);
+}
+
+inline mirror::Object* RosAllocSpace::AllocThreadLocal(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated) {
+ DCHECK(bytes_allocated != nullptr);
+ return reinterpret_cast<mirror::Object*>(
+ rosalloc_->AllocFromThreadLocalRun(self, num_bytes, bytes_allocated));
+}
+
+inline size_t RosAllocSpace::MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes) {
+ return rosalloc_->MaxBytesBulkAllocatedFor(num_bytes);
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc
index ced25a40bb..f140021f76 100644
--- a/runtime/gc/space/rosalloc_space.cc
+++ b/runtime/gc/space/rosalloc_space.cc
@@ -154,7 +154,8 @@ allocator::RosAlloc* RosAllocSpace::CreateRosAlloc(void* begin, size_t morecore_
}
mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
mirror::Object* result;
{
MutexLock mu(self, lock_);
@@ -162,7 +163,8 @@ mirror::Object* RosAllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
size_t max_allowed = Capacity();
rosalloc_->SetFootprintLimit(max_allowed);
// Try the allocation.
- result = AllocCommon(self, num_bytes, bytes_allocated, usable_size);
+ result = AllocCommon(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
// Shrink back down as small as possible.
size_t footprint = rosalloc_->Footprint();
rosalloc_->SetFootprintLimit(footprint);
@@ -209,7 +211,7 @@ size_t RosAllocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** p
__builtin_prefetch(reinterpret_cast<char*>(ptrs[i + kPrefetchLookAhead]));
}
if (kVerifyFreedBytes) {
- verify_bytes += AllocationSizeNonvirtual(ptrs[i], nullptr);
+ verify_bytes += AllocationSizeNonvirtual<true>(ptrs[i], nullptr);
}
}
@@ -338,12 +340,12 @@ void RosAllocSpace::InspectAllRosAlloc(void (*callback)(void *start, void *end,
}
}
-void RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
- rosalloc_->RevokeThreadLocalRuns(thread);
+size_t RosAllocSpace::RevokeThreadLocalBuffers(Thread* thread) {
+ return rosalloc_->RevokeThreadLocalRuns(thread);
}
-void RosAllocSpace::RevokeAllThreadLocalBuffers() {
- rosalloc_->RevokeAllThreadLocalRuns();
+size_t RosAllocSpace::RevokeAllThreadLocalBuffers() {
+ return rosalloc_->RevokeAllThreadLocalRuns();
}
void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h
index c856e9560a..36268f76f8 100644
--- a/runtime/gc/space/rosalloc_space.h
+++ b/runtime/gc/space/rosalloc_space.h
@@ -47,18 +47,21 @@ class RosAllocSpace : public MallocSpace {
bool low_memory_mode, bool can_move_objects);
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE LOCKS_EXCLUDED(lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE LOCKS_EXCLUDED(lock_);
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE {
- return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE {
+ return AllocNonvirtual(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size);
+ return AllocNonvirtualThreadUnsafe(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE {
- return AllocationSizeNonvirtual(obj, usable_size);
+ return AllocationSizeNonvirtual<true>(obj, usable_size);
}
size_t Free(Thread* self, mirror::Object* ptr) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -66,17 +69,33 @@ class RosAllocSpace : public MallocSpace {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
mirror::Object* AllocNonvirtual(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) {
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
// RosAlloc zeroes memory internally.
- return AllocCommon(self, num_bytes, bytes_allocated, usable_size);
+ return AllocCommon(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
mirror::Object* AllocNonvirtualThreadUnsafe(Thread* self, size_t num_bytes,
- size_t* bytes_allocated, size_t* usable_size) {
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated) {
// RosAlloc zeroes memory internally. Pass in false for thread unsafe.
- return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size);
+ return AllocCommon<false>(self, num_bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
}
+ // Returns true if the given allocation request can be allocated in
+ // an existing thread local run without allocating a new run.
+ ALWAYS_INLINE bool CanAllocThreadLocal(Thread* self, size_t num_bytes);
+ // Allocate the given allocation request in an existing thread local
+ // run without allocating a new run.
+ ALWAYS_INLINE mirror::Object* AllocThreadLocal(Thread* self, size_t num_bytes,
+ size_t* bytes_allocated);
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE {
+ return MaxBytesBulkAllocatedForNonvirtual(num_bytes);
+ }
+ ALWAYS_INLINE size_t MaxBytesBulkAllocatedForNonvirtual(size_t num_bytes);
+
// TODO: NO_THREAD_SAFETY_ANALYSIS because SizeOf() requires that mutator_lock is held.
+ template<bool kMaybeRunningOnValgrind>
size_t AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
NO_THREAD_SAFETY_ANALYSIS;
@@ -99,8 +118,8 @@ class RosAllocSpace : public MallocSpace {
uint64_t GetBytesAllocated() OVERRIDE;
uint64_t GetObjectsAllocated() OVERRIDE;
- void RevokeThreadLocalBuffers(Thread* thread);
- void RevokeAllThreadLocalBuffers();
+ size_t RevokeThreadLocalBuffers(Thread* thread);
+ size_t RevokeAllThreadLocalBuffers();
void AssertThreadLocalBuffersAreRevoked(Thread* thread);
void AssertAllThreadLocalBuffersAreRevoked();
@@ -134,7 +153,7 @@ class RosAllocSpace : public MallocSpace {
private:
template<bool kThreadSafe = true>
mirror::Object* AllocCommon(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated);
void* CreateAllocator(void* base, size_t morecore_start, size_t initial_size,
size_t maximum_size, bool low_memory_mode) OVERRIDE {
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index d24650b60d..f2378d9ff0 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -203,14 +203,24 @@ class AllocSpace {
// succeeds, the output parameter bytes_allocated will be set to the
// actually allocated bytes which is >= num_bytes.
// Alloc can be called from multiple threads at the same time and must be thread-safe.
+ //
+ // bytes_tl_bulk_allocated - bytes allocated in bulk ahead of time for a thread local allocation,
+ // if applicable. It can be
+ // 1) equal to bytes_allocated if it's not a thread local allocation,
+ // 2) greater than bytes_allocated if it's a thread local
+ // allocation that required a new buffer, or
+ // 3) zero if it's a thread local allocation in an existing
+ // buffer.
+ // This is what is to be added to Heap::num_bytes_allocated_.
virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) = 0;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) = 0;
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
virtual mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size)
+ size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return Alloc(self, num_bytes, bytes_allocated, usable_size);
+ return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}
// Return the storage space required by obj.
@@ -224,11 +234,15 @@ class AllocSpace {
// Revoke any sort of thread-local buffers that are used to speed up allocations for the given
// thread, if the alloc space implementation uses any.
- virtual void RevokeThreadLocalBuffers(Thread* thread) = 0;
+ // Returns the total free bytes in the revoked thread local runs that are to be subtracted
+ // from Heap::num_bytes_allocated_ or zero if unnecessary.
+ virtual size_t RevokeThreadLocalBuffers(Thread* thread) = 0;
// Revoke any sort of thread-local buffers that are used to speed up allocations for all the
// threads, if the alloc space implementation uses any.
- virtual void RevokeAllThreadLocalBuffers() = 0;
+ // Returns the total free bytes in the revoked thread local runs that are to be subtracted
+ // from Heap::num_bytes_allocated_ or zero if unnecessary.
+ virtual size_t RevokeAllThreadLocalBuffers() = 0;
virtual void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) = 0;
diff --git a/runtime/gc/space/space_test.h b/runtime/gc/space/space_test.h
index 09d10dd94b..3e9e9f7a49 100644
--- a/runtime/gc/space/space_test.h
+++ b/runtime/gc/space/space_test.h
@@ -61,11 +61,13 @@ class SpaceTest : public CommonRuntimeTest {
}
mirror::Object* Alloc(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
- size_t* bytes_allocated, size_t* usable_size)
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
- mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size);
+ mirror::Object* obj = alloc_space->Alloc(self, bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (obj != nullptr) {
InstallClass(obj, byte_array_class.Get(), bytes);
}
@@ -73,11 +75,13 @@ class SpaceTest : public CommonRuntimeTest {
}
mirror::Object* AllocWithGrowth(space::MallocSpace* alloc_space, Thread* self, size_t bytes,
- size_t* bytes_allocated, size_t* usable_size)
+ size_t* bytes_allocated, size_t* usable_size,
+ size_t* bytes_tl_bulk_allocated)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
StackHandleScope<1> hs(self);
Handle<mirror::Class> byte_array_class(hs.NewHandle(GetByteArrayClass(self)));
- mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size);
+ mirror::Object* obj = alloc_space->AllocWithGrowth(self, bytes, bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated);
if (obj != nullptr) {
InstallClass(obj, byte_array_class.Get(), bytes);
}
@@ -182,34 +186,38 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
ScopedObjectAccess soa(self);
// Succeeds, fits without adjusting the footprint limit.
- size_t ptr1_bytes_allocated, ptr1_usable_size;
+ size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
StackHandleScope<3> hs(soa.Self());
MutableHandle<mirror::Object> ptr1(
- hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
+ &ptr1_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
+ EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- size_t ptr3_bytes_allocated, ptr3_usable_size;
+ size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
MutableHandle<mirror::Object> ptr3(
- hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
+ &ptr3_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
+ EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr4 = space->Alloc(self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr5 = space->AllocWithGrowth(self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
@@ -219,13 +227,15 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
- size_t ptr6_bytes_allocated, ptr6_usable_size;
+ size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
Handle<mirror::Object> ptr6(
- hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
+ &ptr6_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
+ EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);
// Final clean up.
size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
@@ -233,7 +243,7 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
- EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr) != nullptr);
+ EXPECT_TRUE(space->Alloc(self, 1U * MB, &dummy, nullptr, &dummy) != nullptr);
gc::Heap* heap = Runtime::Current()->GetHeap();
space::Space* old_space = space;
@@ -250,22 +260,26 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn create_space) {
AddSpace(space, false);
// Succeeds, fits without adjusting the footprint limit.
- ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size));
+ ptr1.Assign(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
+ &ptr1_bytes_tl_bulk_allocated));
EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
+ EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size));
+ ptr3.Assign(AllocWithGrowth(space, self, 2 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
+ &ptr3_bytes_tl_bulk_allocated));
EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(2U * MB, ptr3_bytes_allocated);
EXPECT_LE(2U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
+ EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
space->Free(self, ptr3.Assign(nullptr));
// Final clean up.
@@ -285,34 +299,38 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- size_t ptr1_bytes_allocated, ptr1_usable_size;
+ size_t ptr1_bytes_allocated, ptr1_usable_size, ptr1_bytes_tl_bulk_allocated;
StackHandleScope<3> hs(soa.Self());
MutableHandle<mirror::Object> ptr1(
- hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size)));
+ hs.NewHandle(Alloc(space, self, 1 * MB, &ptr1_bytes_allocated, &ptr1_usable_size,
+ &ptr1_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr1.Get() != nullptr);
EXPECT_LE(1U * MB, ptr1_bytes_allocated);
EXPECT_LE(1U * MB, ptr1_usable_size);
EXPECT_LE(ptr1_usable_size, ptr1_bytes_allocated);
+ EXPECT_EQ(ptr1_bytes_tl_bulk_allocated, ptr1_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr2 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr2 == nullptr);
// Succeeds, adjusts the footprint.
- size_t ptr3_bytes_allocated, ptr3_usable_size;
+ size_t ptr3_bytes_allocated, ptr3_usable_size, ptr3_bytes_tl_bulk_allocated;
MutableHandle<mirror::Object> ptr3(
- hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 8 * MB, &ptr3_bytes_allocated, &ptr3_usable_size,
+ &ptr3_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr3.Get() != nullptr);
EXPECT_LE(8U * MB, ptr3_bytes_allocated);
EXPECT_LE(8U * MB, ptr3_usable_size);
EXPECT_LE(ptr3_usable_size, ptr3_bytes_allocated);
+ EXPECT_EQ(ptr3_bytes_tl_bulk_allocated, ptr3_bytes_allocated);
// Fails, requires a higher footprint limit.
- mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr4 = Alloc(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr4 == nullptr);
// Also fails, requires a higher allowed footprint.
- mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr);
+ mirror::Object* ptr5 = AllocWithGrowth(space, self, 8 * MB, &dummy, nullptr, &dummy);
EXPECT_TRUE(ptr5 == nullptr);
// Release some memory.
@@ -322,13 +340,15 @@ void SpaceTest::AllocAndFreeTestBody(CreateSpaceFn create_space) {
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
- size_t ptr6_bytes_allocated, ptr6_usable_size;
+ size_t ptr6_bytes_allocated, ptr6_usable_size, ptr6_bytes_tl_bulk_allocated;
Handle<mirror::Object> ptr6(
- hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size)));
+ hs.NewHandle(AllocWithGrowth(space, self, 9 * MB, &ptr6_bytes_allocated, &ptr6_usable_size,
+ &ptr6_bytes_tl_bulk_allocated)));
EXPECT_TRUE(ptr6.Get() != nullptr);
EXPECT_LE(9U * MB, ptr6_bytes_allocated);
EXPECT_LE(9U * MB, ptr6_usable_size);
EXPECT_LE(ptr6_usable_size, ptr6_bytes_allocated);
+ EXPECT_EQ(ptr6_bytes_tl_bulk_allocated, ptr6_bytes_allocated);
// Final clean up.
size_t free1 = space->AllocationSize(ptr1.Get(), nullptr);
@@ -348,14 +368,16 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
// Succeeds, fits without adjusting the max allowed footprint.
mirror::Object* lots_of_objects[1024];
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- size_t allocation_size, usable_size;
+ size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
size_t size_of_zero_length_byte_array = SizeOfZeroLengthByteArray();
lots_of_objects[i] = Alloc(space, self, size_of_zero_length_byte_array, &allocation_size,
- &usable_size);
+ &usable_size, &bytes_tl_bulk_allocated);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
+ EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
+ bytes_tl_bulk_allocated >= allocation_size);
}
// Release memory.
@@ -363,12 +385,15 @@ void SpaceTest::AllocAndFreeListTestBody(CreateSpaceFn create_space) {
// Succeeds, fits by adjusting the max allowed footprint.
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- size_t allocation_size, usable_size;
- lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size);
+ size_t allocation_size, usable_size, bytes_tl_bulk_allocated;
+ lots_of_objects[i] = AllocWithGrowth(space, self, 1024, &allocation_size, &usable_size,
+ &bytes_tl_bulk_allocated);
EXPECT_TRUE(lots_of_objects[i] != nullptr);
size_t computed_usable_size;
EXPECT_EQ(allocation_size, space->AllocationSize(lots_of_objects[i], &computed_usable_size));
EXPECT_EQ(usable_size, computed_usable_size);
+ EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
+ bytes_tl_bulk_allocated >= allocation_size);
}
// Release memory.
@@ -425,10 +450,13 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
StackHandleScope<1> hs(soa.Self());
auto object(hs.NewHandle<mirror::Object>(nullptr));
size_t bytes_allocated = 0;
+ size_t bytes_tl_bulk_allocated;
if (round <= 1) {
- object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(Alloc(space, self, alloc_size, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated));
} else {
- object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr));
+ object.Assign(AllocWithGrowth(space, self, alloc_size, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated));
}
footprint = space->GetFootprint();
EXPECT_GE(space->Size(), footprint); // invariant
@@ -441,6 +469,8 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
} else {
EXPECT_GE(allocation_size, 8u);
}
+ EXPECT_TRUE(bytes_tl_bulk_allocated == 0 ||
+ bytes_tl_bulk_allocated >= allocation_size);
amount_allocated += allocation_size;
break;
}
@@ -518,11 +548,13 @@ void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(MallocSpace* space, intptr_t
auto large_object(hs.NewHandle<mirror::Object>(nullptr));
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
size_t bytes_allocated = 0;
+ size_t bytes_tl_bulk_allocated;
if (round <= 1) {
- large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr));
+ large_object.Assign(Alloc(space, self, three_quarters_space, &bytes_allocated, nullptr,
+ &bytes_tl_bulk_allocated));
} else {
large_object.Assign(AllocWithGrowth(space, self, three_quarters_space, &bytes_allocated,
- nullptr));
+ nullptr, &bytes_tl_bulk_allocated));
}
EXPECT_TRUE(large_object.Get() != nullptr);
diff --git a/runtime/gc/space/valgrind_malloc_space-inl.h b/runtime/gc/space/valgrind_malloc_space-inl.h
index ae8e892e29..bc329e129c 100644
--- a/runtime/gc/space/valgrind_malloc_space-inl.h
+++ b/runtime/gc/space/valgrind_malloc_space-inl.h
@@ -32,10 +32,15 @@ namespace valgrind_details {
template <size_t kValgrindRedZoneBytes, bool kUseObjSizeForUsable>
inline mirror::Object* AdjustForValgrind(void* obj_with_rdz, size_t num_bytes,
size_t bytes_allocated, size_t usable_size,
- size_t* bytes_allocated_out, size_t* usable_size_out) {
+ size_t bytes_tl_bulk_allocated,
+ size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
if (bytes_allocated_out != nullptr) {
*bytes_allocated_out = bytes_allocated;
}
+ if (bytes_tl_bulk_allocated_out != nullptr) {
+ *bytes_tl_bulk_allocated_out = bytes_tl_bulk_allocated;
+ }
// This cuts over-provision and is a trade-off between testing the over-provisioning code paths
// vs checking overflows in the regular paths.
@@ -82,20 +87,25 @@ ValgrindMallocSpace<S,
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocWithGrowth(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
+ size_t bytes_tl_bulk_allocated;
void* obj_with_rdz = S::AllocWithGrowth(self, num_bytes + 2 * kValgrindRedZoneBytes,
- &bytes_allocated, &usable_size);
+ &bytes_allocated, &usable_size,
+ &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
- kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
- bytes_allocated_out,
- usable_size_out);
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated,
+ bytes_allocated_out,
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -106,11 +116,13 @@ mirror::Object* ValgrindMallocSpace<S,
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::Alloc(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
+ size_t bytes_tl_bulk_allocated;
void* obj_with_rdz = S::Alloc(self, num_bytes + 2 * kValgrindRedZoneBytes,
- &bytes_allocated, &usable_size);
+ &bytes_allocated, &usable_size, &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
@@ -118,8 +130,10 @@ mirror::Object* ValgrindMallocSpace<S,
return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated,
bytes_allocated_out,
- usable_size_out);
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -130,20 +144,25 @@ mirror::Object* ValgrindMallocSpace<S,
kValgrindRedZoneBytes,
kAdjustForRedzoneInAllocSize,
kUseObjSizeForUsable>::AllocThreadUnsafe(
- Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out) {
+ Thread* self, size_t num_bytes, size_t* bytes_allocated_out, size_t* usable_size_out,
+ size_t* bytes_tl_bulk_allocated_out) {
size_t bytes_allocated;
size_t usable_size;
+ size_t bytes_tl_bulk_allocated;
void* obj_with_rdz = S::AllocThreadUnsafe(self, num_bytes + 2 * kValgrindRedZoneBytes,
- &bytes_allocated, &usable_size);
+ &bytes_allocated, &usable_size,
+ &bytes_tl_bulk_allocated);
if (obj_with_rdz == nullptr) {
return nullptr;
}
- return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes,
- kUseObjSizeForUsable>(obj_with_rdz, num_bytes,
- bytes_allocated, usable_size,
- bytes_allocated_out,
- usable_size_out);
+ return valgrind_details::AdjustForValgrind<kValgrindRedZoneBytes, kUseObjSizeForUsable>(
+ obj_with_rdz, num_bytes,
+ bytes_allocated, usable_size,
+ bytes_tl_bulk_allocated,
+ bytes_allocated_out,
+ usable_size_out,
+ bytes_tl_bulk_allocated_out);
}
template <typename S,
@@ -226,6 +245,17 @@ ValgrindMallocSpace<S,
mem_map->Size() - initial_size);
}
+template <typename S,
+ size_t kValgrindRedZoneBytes,
+ bool kAdjustForRedzoneInAllocSize,
+ bool kUseObjSizeForUsable>
+size_t ValgrindMallocSpace<S,
+ kValgrindRedZoneBytes,
+ kAdjustForRedzoneInAllocSize,
+ kUseObjSizeForUsable>::MaxBytesBulkAllocatedFor(size_t num_bytes) {
+ return S::MaxBytesBulkAllocatedFor(num_bytes + 2 * kValgrindRedZoneBytes);
+}
+
} // namespace space
} // namespace gc
} // namespace art
diff --git a/runtime/gc/space/valgrind_malloc_space.h b/runtime/gc/space/valgrind_malloc_space.h
index 707ea69a20..a6b010a2a1 100644
--- a/runtime/gc/space/valgrind_malloc_space.h
+++ b/runtime/gc/space/valgrind_malloc_space.h
@@ -34,12 +34,13 @@ template <typename BaseMallocSpaceType,
class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
public:
mirror::Object* AllocWithGrowth(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE;
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
@@ -53,6 +54,8 @@ class ValgrindMallocSpace FINAL : public BaseMallocSpaceType {
UNUSED(ptr);
}
+ size_t MaxBytesBulkAllocatedFor(size_t num_bytes) OVERRIDE;
+
template <typename... Params>
explicit ValgrindMallocSpace(MemMap* mem_map, size_t initial_size, Params... params);
virtual ~ValgrindMallocSpace() {}
diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc
index a868e6831d..9e882a898e 100644
--- a/runtime/gc/space/zygote_space.cc
+++ b/runtime/gc/space/zygote_space.cc
@@ -77,7 +77,7 @@ void ZygoteSpace::Dump(std::ostream& os) const {
<< ",name=\"" << GetName() << "\"]";
}
-mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*) {
+mirror::Object* ZygoteSpace::Alloc(Thread*, size_t, size_t*, size_t*, size_t*) {
UNIMPLEMENTED(FATAL);
UNREACHABLE();
}
diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h
index 0cf4bb139c..934a234345 100644
--- a/runtime/gc/space/zygote_space.h
+++ b/runtime/gc/space/zygote_space.h
@@ -46,7 +46,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
}
mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size) OVERRIDE;
+ size_t* usable_size, size_t* bytes_tl_bulk_allocated) OVERRIDE;
size_t AllocationSize(mirror::Object* obj, size_t* usable_size) OVERRIDE;
@@ -55,9 +55,11 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace {
size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) OVERRIDE;
// ZygoteSpaces don't have thread local state.
- void RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ size_t RevokeThreadLocalBuffers(art::Thread*) OVERRIDE {
+ return 0U;
}
- void RevokeAllThreadLocalBuffers() OVERRIDE {
+ size_t RevokeAllThreadLocalBuffers() OVERRIDE {
+ return 0U;
}
uint64_t GetBytesAllocated() {
diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc
index d2e93bc14c..5a7b7e1032 100644
--- a/runtime/hprof/hprof.cc
+++ b/runtime/hprof/hprof.cc
@@ -1008,17 +1008,26 @@ void Hprof::DumpHeapClass(mirror::Class* klass, EndianOutput* output) {
HprofBasicType t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
__ AddStringId(LookupStringId(f->GetName()));
__ AddU1(t);
- switch (size) {
- case 1:
- __ AddU1(static_cast<uint8_t>(f->Get32(klass)));
+ switch (t) {
+ case hprof_basic_byte:
+ __ AddU1(f->GetByte(klass));
break;
- case 2:
- __ AddU2(static_cast<uint16_t>(f->Get32(klass)));
+ case hprof_basic_boolean:
+ __ AddU1(f->GetBoolean(klass));
break;
- case 4:
+ case hprof_basic_char:
+ __ AddU2(f->GetChar(klass));
+ break;
+ case hprof_basic_short:
+ __ AddU2(f->GetShort(klass));
+ break;
+ case hprof_basic_float:
+ case hprof_basic_int:
+ case hprof_basic_object:
__ AddU4(f->Get32(klass));
break;
- case 8:
+ case hprof_basic_double:
+ case hprof_basic_long:
__ AddU8(f->Get64(klass));
break;
default:
@@ -1099,16 +1108,29 @@ void Hprof::DumpHeapInstanceObject(mirror::Object* obj, mirror::Class* klass,
for (int i = 0; i < ifieldCount; ++i) {
mirror::ArtField* f = klass->GetInstanceField(i);
size_t size;
- SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
- if (size == 1) {
- __ AddU1(f->Get32(obj));
- } else if (size == 2) {
- __ AddU2(f->Get32(obj));
- } else if (size == 4) {
+ auto t = SignatureToBasicTypeAndSize(f->GetTypeDescriptor(), &size);
+ switch (t) {
+ case hprof_basic_byte:
+ __ AddU1(f->GetByte(obj));
+ break;
+ case hprof_basic_boolean:
+ __ AddU1(f->GetBoolean(obj));
+ break;
+ case hprof_basic_char:
+ __ AddU2(f->GetChar(obj));
+ break;
+ case hprof_basic_short:
+ __ AddU2(f->GetShort(obj));
+ break;
+ case hprof_basic_float:
+ case hprof_basic_int:
+ case hprof_basic_object:
__ AddU4(f->Get32(obj));
- } else {
- CHECK_EQ(size, 8U);
+ break;
+ case hprof_basic_double:
+ case hprof_basic_long:
__ AddU8(f->Get64(obj));
+ break;
}
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index c94dab988e..085062c641 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -949,19 +949,16 @@ void Instrumentation::FieldWriteEventImpl(Thread* thread, mirror::Object* this_o
}
}
-void Instrumentation::ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method,
- uint32_t catch_dex_pc,
+void Instrumentation::ExceptionCaughtEvent(Thread* thread,
mirror::Throwable* exception_object) const {
if (HasExceptionCaughtListeners()) {
- DCHECK_EQ(thread->GetException(nullptr), exception_object);
+ DCHECK_EQ(thread->GetException(), exception_object);
thread->ClearException();
std::shared_ptr<std::list<InstrumentationListener*>> original(exception_caught_listeners_);
for (InstrumentationListener* listener : *original.get()) {
- listener->ExceptionCaught(thread, throw_location, catch_method, catch_dex_pc,
- exception_object);
+ listener->ExceptionCaught(thread, exception_object);
}
- thread->SetException(throw_location, exception_object);
+ thread->SetException(exception_object);
}
}
diff --git a/runtime/instrumentation.h b/runtime/instrumentation.h
index b667a40fc8..8972f3a502 100644
--- a/runtime/instrumentation.h
+++ b/runtime/instrumentation.h
@@ -38,7 +38,6 @@ namespace mirror {
} // namespace mirror
union JValue;
class Thread;
-class ThrowLocation;
namespace instrumentation {
@@ -90,9 +89,7 @@ struct InstrumentationListener {
uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value) = 0;
// Call-back when an exception is caught.
- virtual void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ virtual void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
// Call-back for when we get a backward branch.
@@ -322,9 +319,7 @@ class Instrumentation {
}
// Inform listeners that an exception was caught.
- void ExceptionCaughtEvent(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object) const
+ void ExceptionCaughtEvent(Thread* thread, mirror::Throwable* exception_object) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Called when an instrumented method is entered. The intended link register (lr) is saved so
diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc
index 9d988e978d..686b518c5f 100644
--- a/runtime/interpreter/interpreter.cc
+++ b/runtime/interpreter/interpreter.cc
@@ -19,116 +19,13 @@
#include <limits>
#include "mirror/string-inl.h"
+#include "scoped_thread_state_change.h"
+#include "ScopedLocalRef.h"
+#include "unstarted_runtime.h"
namespace art {
namespace interpreter {
-// Hand select a number of methods to be run in a not yet started runtime without using JNI.
-static void UnstartedRuntimeJni(Thread* self, ArtMethod* method,
- Object* receiver, uint32_t* args, JValue* result)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- std::string name(PrettyMethod(method));
- if (name == "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)") {
- int32_t length = args[1];
- DCHECK_GE(length, 0);
- mirror::Class* element_class = reinterpret_cast<Object*>(args[0])->AsClass();
- Runtime* runtime = Runtime::Current();
- mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
- DCHECK(array_class != nullptr);
- gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
- result->SetL(mirror::Array::Alloc<true, true>(self, array_class, length,
- array_class->GetComponentSizeShift(), allocator));
- } else if (name == "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()") {
- result->SetL(NULL);
- } else if (name == "java.lang.Class dalvik.system.VMStack.getStackClass2()") {
- NthCallerVisitor visitor(self, 3);
- visitor.WalkStack();
- result->SetL(visitor.caller->GetDeclaringClass());
- } else if (name == "double java.lang.Math.log(double)") {
- JValue value;
- value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
- result->SetD(log(value.GetD()));
- } else if (name == "java.lang.String java.lang.Class.getNameNative()") {
- StackHandleScope<1> hs(self);
- result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
- } else if (name == "int java.lang.Float.floatToRawIntBits(float)") {
- result->SetI(args[0]);
- } else if (name == "float java.lang.Float.intBitsToFloat(int)") {
- result->SetI(args[0]);
- } else if (name == "double java.lang.Math.exp(double)") {
- JValue value;
- value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
- result->SetD(exp(value.GetD()));
- } else if (name == "java.lang.Object java.lang.Object.internalClone()") {
- result->SetL(receiver->Clone(self));
- } else if (name == "void java.lang.Object.notifyAll()") {
- receiver->NotifyAll(self);
- } else if (name == "int java.lang.String.compareTo(java.lang.String)") {
- String* rhs = reinterpret_cast<Object*>(args[0])->AsString();
- CHECK(rhs != NULL);
- result->SetI(receiver->AsString()->CompareTo(rhs));
- } else if (name == "java.lang.String java.lang.String.intern()") {
- result->SetL(receiver->AsString()->Intern());
- } else if (name == "int java.lang.String.fastIndexOf(int, int)") {
- result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
- } else if (name == "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])") {
- StackHandleScope<2> hs(self);
- auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
- auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
- result->SetL(Array::CreateMultiArray(self, h_class, h_dimensions));
- } else if (name == "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()") {
- ScopedObjectAccessUnchecked soa(self);
- if (Runtime::Current()->IsActiveTransaction()) {
- result->SetL(soa.Decode<Object*>(self->CreateInternalStackTrace<true>(soa)));
- } else {
- result->SetL(soa.Decode<Object*>(self->CreateInternalStackTrace<false>(soa)));
- }
- } else if (name == "int java.lang.System.identityHashCode(java.lang.Object)") {
- mirror::Object* obj = reinterpret_cast<Object*>(args[0]);
- result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
- } else if (name == "boolean java.nio.ByteOrder.isLittleEndian()") {
- result->SetZ(JNI_TRUE);
- } else if (name == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
- Object* obj = reinterpret_cast<Object*>(args[0]);
- jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
- jint expectedValue = args[3];
- jint newValue = args[4];
- bool success;
- if (Runtime::Current()->IsActiveTransaction()) {
- success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset),
- expectedValue, newValue);
- } else {
- success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
- expectedValue, newValue);
- }
- result->SetZ(success ? JNI_TRUE : JNI_FALSE);
- } else if (name == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
- Object* obj = reinterpret_cast<Object*>(args[0]);
- jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
- Object* newValue = reinterpret_cast<Object*>(args[3]);
- if (Runtime::Current()->IsActiveTransaction()) {
- obj->SetFieldObject<true>(MemberOffset(offset), newValue);
- } else {
- obj->SetFieldObject<false>(MemberOffset(offset), newValue);
- }
- } else if (name == "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)") {
- mirror::Class* component = reinterpret_cast<Object*>(args[0])->AsClass();
- Primitive::Type primitive_type = component->GetPrimitiveType();
- result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
- } else if (name == "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)") {
- mirror::Class* component = reinterpret_cast<Object*>(args[0])->AsClass();
- Primitive::Type primitive_type = component->GetPrimitiveType();
- result->SetI(Primitive::ComponentSize(primitive_type));
- } else if (Runtime::Current()->IsActiveTransaction()) {
- AbortTransaction(self, "Attempt to invoke native method in non-started runtime: %s",
- name.c_str());
-
- } else {
- LOG(FATAL) << "Calling native method " << PrettyMethod(method) << " in an unstarted "
- "non-transactional runtime";
- }
-}
-
static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& shorty,
Object* receiver, uint32_t* args, JValue* result)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index 604e1337e6..26ab602dc1 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -19,12 +19,13 @@
#include <cmath>
#include "mirror/array-inl.h"
+#include "unstarted_runtime.h"
namespace art {
namespace interpreter {
-void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame) {
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+void ThrowNullPointerExceptionFromInterpreter() {
+ ThrowNullPointerExceptionFromDexPC();
}
template<FindFieldType find_type, Primitive::Type field_type, bool do_access_check>
@@ -44,7 +45,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
} else {
obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(), f, true);
+ ThrowNullPointerExceptionForFieldAccess(f, true);
return false;
}
}
@@ -126,7 +127,7 @@ bool DoIGetQuick(ShadowFrame& shadow_frame, const Instruction* inst, uint16_t in
if (UNLIKELY(obj == nullptr)) {
// We lost the reference to the field index so we cannot get a more
// precised exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromDexPC();
return false;
}
MemberOffset field_offset(inst->VRegC_22c());
@@ -238,8 +239,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
} else {
obj = shadow_frame.GetVRegReference(inst->VRegB_22c(inst_data));
if (UNLIKELY(obj == nullptr)) {
- ThrowNullPointerExceptionForFieldAccess(shadow_frame.GetCurrentLocationForThrow(),
- f, false);
+ ThrowNullPointerExceptionForFieldAccess(f, false);
return false;
}
}
@@ -289,8 +289,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
if (!reg->VerifierInstanceOf(field_class)) {
// This should never happen.
std::string temp1, temp2, temp3;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Put '%s' that is not instance of field '%s' in '%s'",
reg->GetClass()->GetDescriptor(&temp1),
field_class->GetDescriptor(&temp2),
@@ -346,7 +345,7 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
if (UNLIKELY(obj == nullptr)) {
// We lost the reference to the field index so we cannot get a more
// precised exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromDexPC();
return false;
}
MemberOffset field_offset(inst->VRegC_22c());
@@ -413,90 +412,16 @@ EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot) // iput-objec
#undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
#undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
-/**
- * Finds the location where this exception will be caught. We search until we reach either the top
- * frame or a native frame, in which cases this exception is considered uncaught.
- */
-class CatchLocationFinder : public StackVisitor {
- public:
- explicit CatchLocationFinder(Thread* self, Handle<mirror::Throwable>* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- : StackVisitor(self, nullptr), self_(self), handle_scope_(self), exception_(exception),
- catch_method_(handle_scope_.NewHandle<mirror::ArtMethod>(nullptr)),
- catch_dex_pc_(DexFile::kDexNoIndex), clear_exception_(false) {
- }
-
- bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- mirror::ArtMethod* method = GetMethod();
- if (method == nullptr) {
- return true;
- }
- if (method->IsRuntimeMethod()) {
- // Ignore callee save method.
- DCHECK(method->IsCalleeSaveMethod());
- return true;
- }
- if (method->IsNative()) {
- return false; // End stack walk.
- }
- DCHECK(!method->IsNative());
- uint32_t dex_pc = GetDexPc();
- if (dex_pc != DexFile::kDexNoIndex) {
- uint32_t found_dex_pc;
- {
- StackHandleScope<3> hs(self_);
- Handle<mirror::Class> exception_class(hs.NewHandle((*exception_)->GetClass()));
- Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
- found_dex_pc = mirror::ArtMethod::FindCatchBlock(h_method, exception_class, dex_pc,
- &clear_exception_);
- }
- if (found_dex_pc != DexFile::kDexNoIndex) {
- catch_method_.Assign(method);
- catch_dex_pc_ = found_dex_pc;
- return false; // End stack walk.
- }
- }
- return true; // Continue stack walk.
- }
-
- ArtMethod* GetCatchMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return catch_method_.Get();
- }
-
- uint32_t GetCatchDexPc() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return catch_dex_pc_;
- }
-
- bool NeedClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return clear_exception_;
- }
-
- private:
- Thread* const self_;
- StackHandleScope<1> handle_scope_;
- Handle<mirror::Throwable>* exception_;
- MutableHandle<mirror::ArtMethod> catch_method_;
- uint32_t catch_dex_pc_;
- bool clear_exception_;
-
-
- DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
-};
-
uint32_t FindNextInstructionFollowingException(Thread* self,
ShadowFrame& shadow_frame,
uint32_t dex_pc,
const instrumentation::Instrumentation* instrumentation) {
self->VerifyStack();
- ThrowLocation throw_location;
StackHandleScope<3> hs(self);
- Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException(&throw_location)));
+ Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
if (instrumentation->HasExceptionCaughtListeners()
&& self->IsExceptionThrownByCurrentMethod(exception.Get())) {
- CatchLocationFinder clf(self, &exception);
- clf.WalkStack(false);
- instrumentation->ExceptionCaughtEvent(self, throw_location, clf.GetCatchMethod(),
- clf.GetCatchDexPc(), exception.Get());
+ instrumentation->ExceptionCaughtEvent(self, exception.Get());
}
bool clear_exception = false;
uint32_t found_dex_pc;
@@ -526,10 +451,6 @@ void UnexpectedOpcode(const Instruction* inst, const ShadowFrame& shadow_frame)
UNREACHABLE();
}
-static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Assign register 'src_reg' from shadow_frame to register 'dest_reg' into new_shadow_frame.
static inline void AssignRegister(ShadowFrame* new_shadow_frame, const ShadowFrame& shadow_frame,
size_t dest_reg, size_t src_reg)
@@ -629,8 +550,7 @@ bool DoCall(ArtMethod* called_method, Thread* self, ShadowFrame& shadow_frame,
if (!o->VerifierInstanceOf(arg_type)) {
// This should never happen.
std::string temp1, temp2;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Invoking %s with bad arg %d, type '%s' not instance of '%s'",
new_shadow_frame->GetMethod()->GetName(), shorty_pos,
o->GetClass()->GetDescriptor(&temp1),
@@ -732,8 +652,7 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame,
ThrowRuntimeException("Bad filled array request for type %s",
PrettyDescriptor(componentClass).c_str());
} else {
- self->ThrowNewExceptionF(shadow_frame.GetCurrentLocationForThrow(),
- "Ljava/lang/InternalError;",
+ self->ThrowNewExceptionF("Ljava/lang/InternalError;",
"Found type %s; filled-new-array not implemented for anything but 'int'",
PrettyDescriptor(componentClass).c_str());
}
@@ -811,282 +730,6 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
}
}
-// Helper function to deal with class loading in an unstarted runtime.
-static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> className,
- Handle<mirror::ClassLoader> class_loader, JValue* result,
- const std::string& method_name, bool initialize_class,
- bool abort_if_not_found)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- CHECK(className.Get() != nullptr);
- std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
- ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-
- Class* found = class_linker->FindClass(self, descriptor.c_str(), class_loader);
- if (found == nullptr && abort_if_not_found) {
- if (!self->IsExceptionPending()) {
- AbortTransaction(self, "%s failed in un-started runtime for class: %s",
- method_name.c_str(), PrettyDescriptor(descriptor.c_str()).c_str());
- }
- return;
- }
- if (found != nullptr && initialize_class) {
- StackHandleScope<1> hs(self);
- Handle<mirror::Class> h_class(hs.NewHandle(found));
- if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
- CHECK(self->IsExceptionPending());
- return;
- }
- }
- result->SetL(found);
-}
-
-// Common helper for class-loading cutouts in an unstarted runtime. We call Runtime methods that
-// rely on Java code to wrap errors in the correct exception class (i.e., NoClassDefFoundError into
-// ClassNotFoundException), so need to do the same. The only exception is if the exception is
-// actually InternalError. This must not be wrapped, as it signals an initialization abort.
-static void CheckExceptionGenerateClassNotFound(Thread* self)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (self->IsExceptionPending()) {
- // If it is not an InternalError, wrap it.
- std::string type(PrettyTypeOf(self->GetException(nullptr)));
- if (type != "java.lang.InternalError") {
- self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
- "Ljava/lang/ClassNotFoundException;",
- "ClassNotFoundException");
- }
- }
-}
-
-static void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
- ShadowFrame* shadow_frame,
- JValue* result, size_t arg_offset) {
- // In a runtime that's not started we intercept certain methods to avoid complicated dependency
- // problems in core libraries.
- std::string name(PrettyMethod(shadow_frame->GetMethod()));
- if (name == "java.lang.Class java.lang.Class.forName(java.lang.String)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- StackHandleScope<1> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result, name,
- true, false);
- CheckExceptionGenerateClassNotFound(self);
- } else if (name == "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
- mirror::ClassLoader* class_loader =
- down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
- StackHandleScope<2> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
- false);
- CheckExceptionGenerateClassNotFound(self);
- } else if (name == "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
- bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
- mirror::ClassLoader* class_loader =
- down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
- StackHandleScope<2> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, initialize_class,
- false);
- CheckExceptionGenerateClassNotFound(self);
- } else if (name == "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)") {
- mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- mirror::ClassLoader* class_loader =
- down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset));
- StackHandleScope<2> hs(self);
- Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
- Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
- UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, name, false, false);
- // This might have an error pending. But semantics are to just return null.
- if (self->IsExceptionPending()) {
- // If it is an InternalError, keep it. See CheckExceptionGenerateClassNotFound.
- std::string type(PrettyTypeOf(self->GetException(nullptr)));
- if (type != "java.lang.InternalError") {
- self->ClearException();
- }
- }
- } else if (name == "java.lang.Class java.lang.Void.lookupType()") {
- result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
- } else if (name == "java.lang.Object java.lang.Class.newInstance()") {
- StackHandleScope<3> hs(self); // Class, constructor, object.
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- Handle<Class> h_klass(hs.NewHandle(klass));
- // There are two situations in which we'll abort this run.
- // 1) If the class isn't yet initialized and initialization fails.
- // 2) If we can't find the default constructor. We'll postpone the exception to runtime.
- // Note that 2) could likely be handled here, but for safety abort the transaction.
- bool ok = false;
- if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
- Handle<ArtMethod> h_cons(hs.NewHandle(h_klass->FindDeclaredDirectMethod("<init>", "()V")));
- if (h_cons.Get() != nullptr) {
- Handle<Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
- CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time.
- EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_obj.Get());
- ok = true;
- }
- } else {
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- "Could not find default constructor for '%s'",
- PrettyClass(h_klass.Get()).c_str());
- }
- }
- if (!ok) {
- std::string error_msg = StringPrintf("Failed in Class.newInstance for '%s' with %s",
- PrettyClass(h_klass.Get()).c_str(),
- PrettyTypeOf(self->GetException(nullptr)).c_str());
- self->ThrowNewWrappedException(self->GetCurrentLocationForThrow(),
- "Ljava/lang/InternalError;",
- error_msg.c_str());
- }
- } else if (name == "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)") {
- // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
- // going the reflective Dex way.
- Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
- String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
- ArtField* found = NULL;
- ObjectArray<ArtField>* fields = klass->GetIFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- if (name2->Equals(f->GetName())) {
- found = f;
- }
- }
- if (found == NULL) {
- fields = klass->GetSFields();
- for (int32_t i = 0; i < fields->GetLength() && found == NULL; ++i) {
- ArtField* f = fields->Get(i);
- if (name2->Equals(f->GetName())) {
- found = f;
- }
- }
- }
- CHECK(found != NULL)
- << "Failed to find field in Class.getDeclaredField in un-started runtime. name="
- << name2->ToModifiedUtf8() << " class=" << PrettyDescriptor(klass);
- // TODO: getDeclaredField calls GetType once the field is found to ensure a
- // NoClassDefFoundError is thrown if the field's type cannot be resolved.
- Class* jlr_Field = self->DecodeJObject(WellKnownClasses::java_lang_reflect_Field)->AsClass();
- StackHandleScope<1> hs(self);
- Handle<Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
- CHECK(field.Get() != NULL);
- ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>", "(Ljava/lang/reflect/ArtField;)V");
- uint32_t args[1];
- args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
- EnterInterpreterFromInvoke(self, c, field.Get(), args, NULL);
- result->SetL(field.Get());
- } else if (name == "int java.lang.Object.hashCode()") {
- Object* obj = shadow_frame->GetVRegReference(arg_offset);
- result->SetI(obj->IdentityHashCode());
- } else if (name == "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)") {
- mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
- result->SetL(method->GetNameAsString(self));
- } else if (name == "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)" ||
- name == "void java.lang.System.arraycopy(char[], int, char[], int, int)") {
- // Special case array copying without initializing System.
- Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
- jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
- jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
- jint length = shadow_frame->GetVReg(arg_offset + 4);
- if (!ctype->IsPrimitive()) {
- ObjectArray<Object>* src = shadow_frame->GetVRegReference(arg_offset)->AsObjectArray<Object>();
- ObjectArray<Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsObjectArray<Object>();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveChar()) {
- CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
- CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else if (ctype->IsPrimitiveInt()) {
- IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
- IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
- for (jint i = 0; i < length; ++i) {
- dst->Set(dstPos + i, src->Get(srcPos + i));
- }
- } else {
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- "Unimplemented System.arraycopy for type '%s'",
- PrettyDescriptor(ctype).c_str());
- }
- } else if (name == "long java.lang.Double.doubleToRawLongBits(double)") {
- double in = shadow_frame->GetVRegDouble(arg_offset);
- result->SetJ(bit_cast<int64_t>(in));
- } else if (name == "double java.lang.Math.ceil(double)") {
- double in = shadow_frame->GetVRegDouble(arg_offset);
- double out;
- // Special cases:
- // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
- // -1 < in < 0 -> out := -0.
- if (-1.0 < in && in < 0) {
- out = -0.0;
- } else {
- out = ceil(in);
- }
- result->SetD(out);
- } else if (name == "java.lang.Object java.lang.ThreadLocal.get()") {
- std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
- bool ok = false;
- if (caller == "java.lang.String java.lang.IntegralToString.convertInt(java.lang.AbstractStringBuilder, int)") {
- // Allocate non-threadlocal buffer.
- result->SetL(mirror::CharArray::Alloc(self, 11));
- ok = true;
- } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
- // Note: RealToString is implemented and used in a different fashion than IntegralToString.
- // Conversion is done over an actual object of RealToString (the conversion method is an
- // instance method). This means it is not as clear whether it is correct to return a new
- // object each time. The caller needs to be inspected by hand to see whether it (incorrectly)
- // stores the object for later use.
- // See also b/19548084 for a possible rewrite and bringing it in line with IntegralToString.
- if (shadow_frame->GetLink()->GetLink() != nullptr) {
- std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
- if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
- // Allocate new object.
- StackHandleScope<2> hs(self);
- Handle<Class> h_real_to_string_class(hs.NewHandle(
- shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
- Handle<Object> h_real_to_string_obj(hs.NewHandle(
- h_real_to_string_class->AllocObject(self)));
- if (h_real_to_string_obj.Get() != nullptr) {
- mirror::ArtMethod* init_method =
- h_real_to_string_class->FindDirectMethod("<init>", "()V");
- if (init_method == nullptr) {
- h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
- } else {
- JValue invoke_result;
- EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
- nullptr);
- if (!self->IsExceptionPending()) {
- result->SetL(h_real_to_string_obj.Get());
- ok = true;
- }
- }
- }
-
- if (!ok) {
- // We'll abort, so clear exception.
- self->ClearException();
- }
- }
- }
- }
-
- if (!ok) {
- self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- "Unimplemented ThreadLocal.get");
- }
- } else {
- // Not special, continue with regular interpreter execution.
- artInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
- }
-}
-
// Explicit DoCall template function declarations.
#define EXPLICIT_DO_CALL_TEMPLATE_DECL(_is_range, _do_assignability_check) \
template SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) \
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 06b809f028..15396d6e90 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -30,21 +30,14 @@
#include "common_throws.h"
#include "dex_file-inl.h"
#include "dex_instruction-inl.h"
-#include "dex_instruction.h"
#include "entrypoints/entrypoint_utils-inl.h"
-#include "gc/accounting/card_table-inl.h"
#include "handle_scope-inl.h"
-#include "nth_caller_visitor.h"
#include "mirror/art_field-inl.h"
-#include "mirror/art_method.h"
#include "mirror/art_method-inl.h"
-#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
-#include "ScopedLocalRef.h"
-#include "scoped_thread_state_change.h"
#include "thread.h"
#include "well_known_classes.h"
@@ -77,7 +70,7 @@ template<bool do_access_check, bool transaction_active>
extern JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item,
ShadowFrame& shadow_frame, JValue result_register);
-void ThrowNullPointerExceptionFromInterpreter(const ShadowFrame& shadow_frame)
+void ThrowNullPointerExceptionFromInterpreter()
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
static inline void DoMonitorEnter(Thread* self, Object* ref) NO_THREAD_SAFETY_ANALYSIS {
@@ -138,7 +131,7 @@ static inline bool DoInvokeVirtualQuick(Thread* self, ShadowFrame& shadow_frame,
if (UNLIKELY(receiver == nullptr)) {
// We lost the reference to the method index so we cannot get a more
// precised exception message.
- ThrowNullPointerExceptionFromDexPC(shadow_frame.GetCurrentLocationForThrow());
+ ThrowNullPointerExceptionFromDexPC();
return false;
}
const uint32_t vtable_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc
index 37324eac82..5f97f941fb 100644
--- a/runtime/interpreter/interpreter_goto_table_impl.cc
+++ b/runtime/interpreter/interpreter_goto_table_impl.cc
@@ -244,7 +244,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_END();
HANDLE_INSTRUCTION_START(MOVE_EXCEPTION) {
- Throwable* exception = self->GetException(nullptr);
+ Throwable* exception = self->GetException();
DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
self->ClearException();
@@ -341,8 +341,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
if (!obj_result->VerifierInstanceOf(return_type)) {
// This should never happen.
std::string temp1, temp2;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Returning '%s' that is not instance of return type '%s'",
obj_result->GetClass()->GetDescriptor(&temp1),
return_type->GetDescriptor(&temp2));
@@ -465,7 +464,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(MONITOR_ENTER) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
@@ -477,7 +476,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(MONITOR_EXIT) {
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
@@ -519,7 +518,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(ARRAY_LENGTH) {
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -596,16 +595,15 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(THROW) {
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(exception == NULL)) {
- ThrowNullPointerException(NULL, "throw with null exception");
+ ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
std::string temp;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Throwing '%s' that is not instance of Throwable",
exception->GetClass()->GetDescriptor(&temp));
} else {
- self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ self->SetException(exception->AsThrowable());
}
HANDLE_PENDING_EXCEPTION();
}
@@ -972,7 +970,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -990,7 +988,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1008,7 +1006,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1026,7 +1024,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1044,7 +1042,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1062,7 +1060,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1080,7 +1078,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(AGET_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
@@ -1098,7 +1096,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_BOOLEAN) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1117,7 +1115,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_BYTE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1136,7 +1134,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_CHAR) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1155,7 +1153,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_SHORT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1174,7 +1172,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data));
@@ -1193,7 +1191,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_WIDE) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data));
@@ -1212,7 +1210,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF
HANDLE_INSTRUCTION_START(APUT_OBJECT) {
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
int32_t index = shadow_frame.GetVReg(inst->VRegC_23x());
diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc
index 2f85587604..9313c750eb 100644
--- a/runtime/interpreter/interpreter_switch_impl.cc
+++ b/runtime/interpreter/interpreter_switch_impl.cc
@@ -163,7 +163,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
break;
case Instruction::MOVE_EXCEPTION: {
PREAMBLE();
- Throwable* exception = self->GetException(nullptr);
+ Throwable* exception = self->GetException();
DCHECK(exception != nullptr) << "No pending exception on MOVE_EXCEPTION instruction";
shadow_frame.SetVRegReference(inst->VRegA_11x(inst_data), exception);
self->ClearException();
@@ -248,8 +248,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
if (!obj_result->VerifierInstanceOf(return_type)) {
// This should never happen.
std::string temp1, temp2;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Returning '%s' that is not instance of return type '%s'",
obj_result->GetClass()->GetDescriptor(&temp1),
return_type->GetDescriptor(&temp2));
@@ -370,7 +369,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorEnter(self, obj);
@@ -382,7 +381,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* obj = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(obj == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
DoMonitorExit(self, obj);
@@ -424,7 +423,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* array = shadow_frame.GetVRegReference(inst->VRegB_12x(inst_data));
if (UNLIKELY(array == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
} else {
shadow_frame.SetVReg(inst->VRegA_12x(inst_data), array->AsArray()->GetLength());
@@ -506,16 +505,15 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* exception = shadow_frame.GetVRegReference(inst->VRegA_11x(inst_data));
if (UNLIKELY(exception == NULL)) {
- ThrowNullPointerException(NULL, "throw with null exception");
+ ThrowNullPointerException("throw with null exception");
} else if (do_assignability_check && !exception->GetClass()->IsThrowableClass()) {
// This should never happen.
std::string temp;
- self->ThrowNewExceptionF(self->GetCurrentLocationForThrow(),
- "Ljava/lang/VirtualMachineError;",
+ self->ThrowNewExceptionF("Ljava/lang/VirtualMachineError;",
"Throwing '%s' that is not instance of Throwable",
exception->GetClass()->GetDescriptor(&temp));
} else {
- self->SetException(shadow_frame.GetCurrentLocationForThrow(), exception->AsThrowable());
+ self->SetException(exception->AsThrowable());
}
HANDLE_PENDING_EXCEPTION();
break;
@@ -817,7 +815,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -835,7 +833,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -853,7 +851,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -871,7 +869,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -889,7 +887,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -907,7 +905,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -925,7 +923,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -943,7 +941,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -962,7 +960,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -981,7 +979,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1000,7 +998,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1019,7 +1017,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1038,7 +1036,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
@@ -1057,7 +1055,7 @@ JValue ExecuteSwitchImpl(Thread* self, const DexFile::CodeItem* code_item,
PREAMBLE();
Object* a = shadow_frame.GetVRegReference(inst->VRegB_23x());
if (UNLIKELY(a == NULL)) {
- ThrowNullPointerExceptionFromInterpreter(shadow_frame);
+ ThrowNullPointerExceptionFromInterpreter();
HANDLE_PENDING_EXCEPTION();
break;
}
diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc
new file mode 100644
index 0000000000..356a438085
--- /dev/null
+++ b/runtime/interpreter/unstarted_runtime.cc
@@ -0,0 +1,951 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "unstarted_runtime.h"
+
+#include <cmath>
+#include <unordered_map>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "class_linker.h"
+#include "common_throws.h"
+#include "entrypoints/entrypoint_utils-inl.h"
+#include "handle_scope-inl.h"
+#include "interpreter/interpreter_common.h"
+#include "mirror/array-inl.h"
+#include "mirror/art_method-inl.h"
+#include "mirror/class.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array-inl.h"
+#include "mirror/string-inl.h"
+#include "nth_caller_visitor.h"
+#include "thread.h"
+#include "well_known_classes.h"
+
+namespace art {
+namespace interpreter {
+
+static void AbortTransactionOrFail(Thread* self, const char* fmt, ...)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ va_list args;
+ va_start(args, fmt);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ AbortTransaction(self, fmt, args);
+ va_end(args);
+ } else {
+ LOG(FATAL) << "Trying to abort, but not in transaction mode: " << StringPrintf(fmt, args);
+ UNREACHABLE();
+ }
+}
+
+// Helper function to deal with class loading in an unstarted runtime.
+static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> className,
+ Handle<mirror::ClassLoader> class_loader, JValue* result,
+ const std::string& method_name, bool initialize_class,
+ bool abort_if_not_found)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ CHECK(className.Get() != nullptr);
+ std::string descriptor(DotToDescriptor(className->ToModifiedUtf8().c_str()));
+ ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+
+ mirror::Class* found = class_linker->FindClass(self, descriptor.c_str(), class_loader);
+ if (found == nullptr && abort_if_not_found) {
+ if (!self->IsExceptionPending()) {
+ AbortTransactionOrFail(self, "%s failed in un-started runtime for class: %s",
+ method_name.c_str(), PrettyDescriptor(descriptor.c_str()).c_str());
+ }
+ return;
+ }
+ if (found != nullptr && initialize_class) {
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Class> h_class(hs.NewHandle(found));
+ if (!class_linker->EnsureInitialized(self, h_class, true, true)) {
+ CHECK(self->IsExceptionPending());
+ return;
+ }
+ }
+ result->SetL(found);
+}
+
+// Common helper for class-loading cutouts in an unstarted runtime. We call Runtime methods that
+// rely on Java code to wrap errors in the correct exception class (i.e., NoClassDefFoundError into
+// ClassNotFoundException), so need to do the same. The only exception is if the exception is
+// actually InternalError. This must not be wrapped, as it signals an initialization abort.
+static void CheckExceptionGenerateClassNotFound(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ if (self->IsExceptionPending()) {
+ // If it is not an InternalError, wrap it.
+ std::string type(PrettyTypeOf(self->GetException()));
+ if (type != "java.lang.InternalError") {
+ self->ThrowNewWrappedException("Ljava/lang/ClassNotFoundException;",
+ "ClassNotFoundException");
+ }
+ }
+}
+
+static void UnstartedClassForName(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ UnstartedRuntimeFindClass(self, h_class_name, NullHandle<mirror::ClassLoader>(), result,
+ "Class.forName", true, false);
+ CheckExceptionGenerateClassNotFound(self);
+}
+
+static void UnstartedClassForNameLong(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, "Class.forName",
+ initialize_class, false);
+ CheckExceptionGenerateClassNotFound(self);
+}
+
+static void UnstartedClassClassForName(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset)->AsString();
+ bool initialize_class = shadow_frame->GetVReg(arg_offset + 1) != 0;
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset + 2));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result, "Class.classForName",
+ initialize_class, false);
+ CheckExceptionGenerateClassNotFound(self);
+}
+
+static void UnstartedClassNewInstance(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<3> hs(self); // Class, constructor, object.
+ mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ Handle<mirror::Class> h_klass(hs.NewHandle(klass));
+
+ // Check that it's not null.
+ if (h_klass.Get() == nullptr) {
+ AbortTransactionOrFail(self, "Class reference is null for newInstance");
+ return;
+ }
+
+ // If we're in a transaction, class must not be finalizable (it or a superclass has a finalizer).
+ if (Runtime::Current()->IsActiveTransaction()) {
+ if (h_klass.Get()->IsFinalizable()) {
+ AbortTransaction(self, "Class for newInstance is finalizable: '%s'",
+ PrettyClass(h_klass.Get()).c_str());
+ return;
+ }
+ }
+
+ // There are two situations in which we'll abort this run.
+ // 1) If the class isn't yet initialized and initialization fails.
+ // 2) If we can't find the default constructor. We'll postpone the exception to runtime.
+ // Note that 2) could likely be handled here, but for safety abort the transaction.
+ bool ok = false;
+ if (Runtime::Current()->GetClassLinker()->EnsureInitialized(self, h_klass, true, true)) {
+ Handle<mirror::ArtMethod> h_cons(hs.NewHandle(
+ h_klass->FindDeclaredDirectMethod("<init>", "()V")));
+ if (h_cons.Get() != nullptr) {
+ Handle<mirror::Object> h_obj(hs.NewHandle(klass->AllocObject(self)));
+ CHECK(h_obj.Get() != nullptr); // We don't expect OOM at compile-time.
+ EnterInterpreterFromInvoke(self, h_cons.Get(), h_obj.Get(), nullptr, nullptr);
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_obj.Get());
+ ok = true;
+ }
+ } else {
+ self->ThrowNewExceptionF("Ljava/lang/InternalError;",
+ "Could not find default constructor for '%s'",
+ PrettyClass(h_klass.Get()).c_str());
+ }
+ }
+ if (!ok) {
+ AbortTransactionOrFail(self, "Failed in Class.newInstance for '%s' with %s",
+ PrettyClass(h_klass.Get()).c_str(),
+ PrettyTypeOf(self->GetException()).c_str());
+ }
+}
+
+static void UnstartedClassGetDeclaredField(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Special managed code cut-out to allow field lookup in a un-started runtime that'd fail
+ // going the reflective Dex way.
+ mirror::Class* klass = shadow_frame->GetVRegReference(arg_offset)->AsClass();
+ mirror::String* name2 = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ mirror::ArtField* found = nullptr;
+ mirror::ObjectArray<mirror::ArtField>* fields = klass->GetIFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == nullptr; ++i) {
+ mirror::ArtField* f = fields->Get(i);
+ if (name2->Equals(f->GetName())) {
+ found = f;
+ }
+ }
+ if (found == nullptr) {
+ fields = klass->GetSFields();
+ for (int32_t i = 0; i < fields->GetLength() && found == nullptr; ++i) {
+ mirror::ArtField* f = fields->Get(i);
+ if (name2->Equals(f->GetName())) {
+ found = f;
+ }
+ }
+ }
+ if (found == nullptr) {
+ AbortTransactionOrFail(self, "Failed to find field in Class.getDeclaredField in un-started "
+ " runtime. name=%s class=%s", name2->ToModifiedUtf8().c_str(),
+ PrettyDescriptor(klass).c_str());
+ return;
+ }
+ // TODO: getDeclaredField calls GetType once the field is found to ensure a
+ // NoClassDefFoundError is thrown if the field's type cannot be resolved.
+ mirror::Class* jlr_Field = self->DecodeJObject(
+ WellKnownClasses::java_lang_reflect_Field)->AsClass();
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Object> field(hs.NewHandle(jlr_Field->AllocNonMovableObject(self)));
+ CHECK(field.Get() != nullptr);
+ mirror::ArtMethod* c = jlr_Field->FindDeclaredDirectMethod("<init>",
+ "(Ljava/lang/reflect/ArtField;)V");
+ uint32_t args[1];
+ args[0] = StackReference<mirror::Object>::FromMirrorPtr(found).AsVRegValue();
+ EnterInterpreterFromInvoke(self, c, field.Get(), args, nullptr);
+ result->SetL(field.Get());
+}
+
+static void UnstartedVmClassLoaderFindLoadedClass(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* class_name = shadow_frame->GetVRegReference(arg_offset + 1)->AsString();
+ mirror::ClassLoader* class_loader =
+ down_cast<mirror::ClassLoader*>(shadow_frame->GetVRegReference(arg_offset));
+ StackHandleScope<2> hs(self);
+ Handle<mirror::String> h_class_name(hs.NewHandle(class_name));
+ Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(class_loader));
+ UnstartedRuntimeFindClass(self, h_class_name, h_class_loader, result,
+ "VMClassLoader.findLoadedClass", false, false);
+ // This might have an error pending. But semantics are to just return null.
+ if (self->IsExceptionPending()) {
+ // If it is an InternalError, keep it. See CheckExceptionGenerateClassNotFound.
+ std::string type(PrettyTypeOf(self->GetException()));
+ if (type != "java.lang.InternalError") {
+ self->ClearException();
+ }
+ }
+}
+
+static void UnstartedVoidLookupType(Thread* self ATTRIBUTE_UNUSED,
+ ShadowFrame* shadow_frame ATTRIBUTE_UNUSED,
+ JValue* result,
+ size_t arg_offset ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetL(Runtime::Current()->GetClassLinker()->FindPrimitiveClass('V'));
+}
+
+static void UnstartedSystemArraycopy(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // Special case array copying without initializing System.
+ mirror::Class* ctype = shadow_frame->GetVRegReference(arg_offset)->GetClass()->GetComponentType();
+ jint srcPos = shadow_frame->GetVReg(arg_offset + 1);
+ jint dstPos = shadow_frame->GetVReg(arg_offset + 3);
+ jint length = shadow_frame->GetVReg(arg_offset + 4);
+ if (!ctype->IsPrimitive()) {
+ mirror::ObjectArray<mirror::Object>* src = shadow_frame->GetVRegReference(arg_offset)->
+ AsObjectArray<mirror::Object>();
+ mirror::ObjectArray<mirror::Object>* dst = shadow_frame->GetVRegReference(arg_offset + 2)->
+ AsObjectArray<mirror::Object>();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveChar()) {
+ mirror::CharArray* src = shadow_frame->GetVRegReference(arg_offset)->AsCharArray();
+ mirror::CharArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsCharArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else if (ctype->IsPrimitiveInt()) {
+ mirror::IntArray* src = shadow_frame->GetVRegReference(arg_offset)->AsIntArray();
+ mirror::IntArray* dst = shadow_frame->GetVRegReference(arg_offset + 2)->AsIntArray();
+ for (jint i = 0; i < length; ++i) {
+ dst->Set(dstPos + i, src->Get(srcPos + i));
+ }
+ } else {
+ AbortTransactionOrFail(self, "Unimplemented System.arraycopy for type '%s'",
+ PrettyDescriptor(ctype).c_str());
+ }
+}
+
+static void UnstartedThreadLocalGet(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string caller(PrettyMethod(shadow_frame->GetLink()->GetMethod()));
+ bool ok = false;
+ if (caller == "java.lang.String java.lang.IntegralToString.convertInt"
+ "(java.lang.AbstractStringBuilder, int)") {
+ // Allocate non-threadlocal buffer.
+ result->SetL(mirror::CharArray::Alloc(self, 11));
+ ok = true;
+ } else if (caller == "java.lang.RealToString java.lang.RealToString.getInstance()") {
+ // Note: RealToString is implemented and used in a different fashion than IntegralToString.
+ // Conversion is done over an actual object of RealToString (the conversion method is an
+ // instance method). This means it is not as clear whether it is correct to return a new
+ // object each time. The caller needs to be inspected by hand to see whether it (incorrectly)
+ // stores the object for later use.
+ // See also b/19548084 for a possible rewrite and bringing it in line with IntegralToString.
+ if (shadow_frame->GetLink()->GetLink() != nullptr) {
+ std::string caller2(PrettyMethod(shadow_frame->GetLink()->GetLink()->GetMethod()));
+ if (caller2 == "java.lang.String java.lang.Double.toString(double)") {
+ // Allocate new object.
+ StackHandleScope<2> hs(self);
+ Handle<mirror::Class> h_real_to_string_class(hs.NewHandle(
+ shadow_frame->GetLink()->GetMethod()->GetDeclaringClass()));
+ Handle<mirror::Object> h_real_to_string_obj(hs.NewHandle(
+ h_real_to_string_class->AllocObject(self)));
+ if (h_real_to_string_obj.Get() != nullptr) {
+ mirror::ArtMethod* init_method =
+ h_real_to_string_class->FindDirectMethod("<init>", "()V");
+ if (init_method == nullptr) {
+ h_real_to_string_class->DumpClass(LOG(FATAL), mirror::Class::kDumpClassFullDetail);
+ } else {
+ JValue invoke_result;
+ EnterInterpreterFromInvoke(self, init_method, h_real_to_string_obj.Get(), nullptr,
+ nullptr);
+ if (!self->IsExceptionPending()) {
+ result->SetL(h_real_to_string_obj.Get());
+ ok = true;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (!ok) {
+ AbortTransactionOrFail(self, "Could not create RealToString object");
+ }
+}
+
+static void UnstartedMathCeil(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ double in = shadow_frame->GetVRegDouble(arg_offset);
+ double out;
+ // Special cases:
+ // 1) NaN, infinity, +0, -0 -> out := in. All are guaranteed by cmath.
+ // -1 < in < 0 -> out := -0.
+ if (-1.0 < in && in < 0) {
+ out = -0.0;
+ } else {
+ out = ceil(in);
+ }
+ result->SetD(out);
+}
+
+static void UnstartedArtMethodGetMethodName(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = shadow_frame->GetVRegReference(arg_offset)->AsArtMethod();
+ result->SetL(method->GetNameAsString(self));
+}
+
+static void UnstartedObjectHashCode(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset);
+ result->SetI(obj->IdentityHashCode());
+}
+
+static void UnstartedDoubleDoubleToRawLongBits(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ double in = shadow_frame->GetVRegDouble(arg_offset);
+ result->SetJ(bit_cast<int64_t>(in));
+}
+
+static mirror::Object* GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const DexFile* dex_file = dex_cache->GetDexFile();
+ if (dex_file == nullptr) {
+ return nullptr;
+ }
+
+ // Create the direct byte buffer.
+ JNIEnv* env = self->GetJniEnv();
+ DCHECK(env != nullptr);
+ void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin()));
+ jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size());
+ if (byte_buffer == nullptr) {
+ DCHECK(self->IsExceptionPending());
+ return nullptr;
+ }
+
+ jvalue args[1];
+ args[0].l = byte_buffer;
+ return self->DecodeJObject(
+ env->CallStaticObjectMethodA(WellKnownClasses::com_android_dex_Dex,
+ WellKnownClasses::com_android_dex_Dex_create,
+ args));
+}
+
+static void UnstartedDexCacheGetDexNative(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ // We will create the Dex object, but the image writer will release it before creating the
+ // art file.
+ mirror::Object* src = shadow_frame->GetVRegReference(arg_offset);
+ bool have_dex = false;
+ if (src != nullptr) {
+ mirror::Object* dex = GetDexFromDexCache(self, reinterpret_cast<mirror::DexCache*>(src));
+ if (dex != nullptr) {
+ have_dex = true;
+ result->SetL(dex);
+ }
+ }
+ if (!have_dex) {
+ self->ClearException();
+ Runtime::Current()->AbortTransactionAndThrowInternalError(self, "Could not create Dex object");
+ }
+}
+
+static void UnstartedMemoryPeek(
+ Primitive::Type type, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ int64_t address = shadow_frame->GetVRegLong(arg_offset);
+ // TODO: Check that this is in the heap somewhere. Otherwise we will segfault instead of
+ // aborting the transaction.
+
+ switch (type) {
+ case Primitive::kPrimByte: {
+ result->SetB(*reinterpret_cast<int8_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimShort: {
+ result->SetS(*reinterpret_cast<int16_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimInt: {
+ result->SetI(*reinterpret_cast<int32_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimLong: {
+ result->SetJ(*reinterpret_cast<int64_t*>(static_cast<intptr_t>(address)));
+ return;
+ }
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ case Primitive::kPrimNot:
+ LOG(FATAL) << "Not in the Memory API: " << type;
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+}
+
+static void UnstartedMemoryPeekEntry(
+ Thread* self ATTRIBUTE_UNUSED, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ if (name == "byte libcore.io.Memory.peekByte(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimByte, shadow_frame, result, arg_offset);
+ } else if (name == "short libcore.io.Memory.peekShortNative(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimShort, shadow_frame, result, arg_offset);
+ } else if (name == "int libcore.io.Memory.peekIntNative(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimInt, shadow_frame, result, arg_offset);
+ } else if (name == "long libcore.io.Memory.peekLongNative(long)") {
+ UnstartedMemoryPeek(Primitive::kPrimLong, shadow_frame, result, arg_offset);
+ } else {
+ LOG(FATAL) << "Unsupported Memory.peek entry: " << name;
+ UNREACHABLE();
+ }
+}
+
+static void UnstartedMemoryPeekArray(
+ Primitive::Type type, Thread* self, ShadowFrame* shadow_frame, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int64_t address_long = shadow_frame->GetVRegLong(arg_offset);
+ mirror::Object* obj = shadow_frame->GetVRegReference(arg_offset + 2);
+ if (obj == nullptr) {
+ Runtime::Current()->AbortTransactionAndThrowInternalError(self, "Null pointer in peekArray");
+ return;
+ }
+ mirror::Array* array = obj->AsArray();
+
+ int offset = shadow_frame->GetVReg(arg_offset + 3);
+ int count = shadow_frame->GetVReg(arg_offset + 4);
+ if (offset < 0 || offset + count > array->GetLength()) {
+ std::string error_msg(StringPrintf("Array out of bounds in peekArray: %d/%d vs %d",
+ offset, count, array->GetLength()));
+ Runtime::Current()->AbortTransactionAndThrowInternalError(self, error_msg.c_str());
+ return;
+ }
+
+ switch (type) {
+ case Primitive::kPrimByte: {
+ int8_t* address = reinterpret_cast<int8_t*>(static_cast<intptr_t>(address_long));
+ mirror::ByteArray* byte_array = array->AsByteArray();
+ for (int32_t i = 0; i < count; ++i, ++address) {
+ byte_array->SetWithoutChecks<true>(i + offset, *address);
+ }
+ return;
+ }
+
+ case Primitive::kPrimShort:
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ LOG(FATAL) << "Type unimplemented for Memory Array API, should not reach here: " << type;
+ UNREACHABLE();
+
+ case Primitive::kPrimBoolean:
+ case Primitive::kPrimChar:
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ case Primitive::kPrimVoid:
+ case Primitive::kPrimNot:
+ LOG(FATAL) << "Not in the Memory API: " << type;
+ UNREACHABLE();
+ }
+ LOG(FATAL) << "Should not reach here";
+ UNREACHABLE();
+}
+
+static void UnstartedMemoryPeekArrayEntry(
+ Thread* self, ShadowFrame* shadow_frame, JValue* result ATTRIBUTE_UNUSED, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ if (name == "void libcore.io.Memory.peekByteArray(long, byte[], int, int)") {
+ UnstartedMemoryPeekArray(Primitive::kPrimByte, self, shadow_frame, arg_offset);
+ } else {
+ LOG(FATAL) << "Unsupported Memory.peekArray entry: " << name;
+ UNREACHABLE();
+ }
+}
+
+static void UnstartedJNIVMRuntimeNewUnpaddedArray(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ int32_t length = args[1];
+ DCHECK_GE(length, 0);
+ mirror::Class* element_class = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ Runtime* runtime = Runtime::Current();
+ mirror::Class* array_class = runtime->GetClassLinker()->FindArrayClass(self, &element_class);
+ DCHECK(array_class != nullptr);
+ gc::AllocatorType allocator = runtime->GetHeap()->GetCurrentAllocator();
+ result->SetL(mirror::Array::Alloc<true, true>(self, array_class, length,
+ array_class->GetComponentSizeShift(), allocator));
+}
+
+static void UnstartedJNIVMStackGetCallingClassLoader(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result) {
+ result->SetL(nullptr);
+}
+
+static void UnstartedJNIVMStackGetStackClass2(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ NthCallerVisitor visitor(self, 3);
+ visitor.WalkStack();
+ if (visitor.caller != nullptr) {
+ result->SetL(visitor.caller->GetDeclaringClass());
+ }
+}
+
+static void UnstartedJNIMathLog(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ JValue value;
+ value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
+ result->SetD(log(value.GetD()));
+}
+
+static void UnstartedJNIMathExp(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ JValue value;
+ value.SetJ((static_cast<uint64_t>(args[1]) << 32) | args[0]);
+ result->SetD(exp(value.GetD()));
+}
+
+static void UnstartedJNIClassGetNameNative(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<1> hs(self);
+ result->SetL(mirror::Class::ComputeName(hs.NewHandle(receiver->AsClass())));
+}
+
+static void UnstartedJNIFloatFloatToRawIntBits(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ result->SetI(args[0]);
+}
+
+static void UnstartedJNIFloatIntBitsToFloat(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result) {
+ result->SetI(args[0]);
+}
+
+static void UnstartedJNIObjectInternalClone(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetL(receiver->Clone(self));
+}
+
+static void UnstartedJNIObjectNotifyAll(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ receiver->NotifyAll(self);
+}
+
+static void UnstartedJNIStringCompareTo(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::String* rhs = reinterpret_cast<mirror::Object*>(args[0])->AsString();
+ if (rhs == nullptr) {
+ AbortTransactionOrFail(self, "String.compareTo with null object");
+ }
+ result->SetI(receiver->AsString()->CompareTo(rhs));
+}
+
+static void UnstartedJNIStringIntern(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetL(receiver->AsString()->Intern());
+}
+
+static void UnstartedJNIStringFastIndexOf(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ result->SetI(receiver->AsString()->FastIndexOf(args[0], args[1]));
+}
+
+static void UnstartedJNIArrayCreateMultiArray(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ StackHandleScope<2> hs(self);
+ auto h_class(hs.NewHandle(reinterpret_cast<mirror::Class*>(args[0])->AsClass()));
+ auto h_dimensions(hs.NewHandle(reinterpret_cast<mirror::IntArray*>(args[1])->AsIntArray()));
+ result->SetL(mirror::Array::CreateMultiArray(self, h_class, h_dimensions));
+}
+
+static void UnstartedJNIThrowableNativeFillInStackTrace(Thread* self,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ScopedObjectAccessUnchecked soa(self);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ result->SetL(soa.Decode<mirror::Object*>(self->CreateInternalStackTrace<true>(soa)));
+ } else {
+ result->SetL(soa.Decode<mirror::Object*>(self->CreateInternalStackTrace<false>(soa)));
+ }
+}
+
+static void UnstartedJNISystemIdentityHashCode(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ result->SetI((obj != nullptr) ? obj->IdentityHashCode() : 0);
+}
+
+static void UnstartedJNIByteOrderIsLittleEndian(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args ATTRIBUTE_UNUSED,
+ JValue* result) {
+ result->SetZ(JNI_TRUE);
+}
+
+static void UnstartedJNIUnsafeCompareAndSwapInt(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
+ jint expectedValue = args[3];
+ jint newValue = args[4];
+ bool success;
+ if (Runtime::Current()->IsActiveTransaction()) {
+ success = obj->CasFieldStrongSequentiallyConsistent32<true>(MemberOffset(offset),
+ expectedValue, newValue);
+ } else {
+ success = obj->CasFieldStrongSequentiallyConsistent32<false>(MemberOffset(offset),
+ expectedValue, newValue);
+ }
+ result->SetZ(success ? JNI_TRUE : JNI_FALSE);
+}
+
+static void UnstartedJNIUnsafePutObject(Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result ATTRIBUTE_UNUSED)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Object* obj = reinterpret_cast<mirror::Object*>(args[0]);
+ jlong offset = (static_cast<uint64_t>(args[2]) << 32) | args[1];
+ mirror::Object* newValue = reinterpret_cast<mirror::Object*>(args[3]);
+ if (Runtime::Current()->IsActiveTransaction()) {
+ obj->SetFieldObject<true>(MemberOffset(offset), newValue);
+ } else {
+ obj->SetFieldObject<false>(MemberOffset(offset), newValue);
+ }
+}
+
+static void UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType(
+ Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ Primitive::Type primitive_type = component->GetPrimitiveType();
+ result->SetI(mirror::Array::DataOffset(Primitive::ComponentSize(primitive_type)).Int32Value());
+}
+
+static void UnstartedJNIUnsafeGetArrayIndexScaleForComponentType(
+ Thread* self ATTRIBUTE_UNUSED,
+ mirror::ArtMethod* method ATTRIBUTE_UNUSED,
+ mirror::Object* receiver ATTRIBUTE_UNUSED,
+ uint32_t* args,
+ JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::Class* component = reinterpret_cast<mirror::Object*>(args[0])->AsClass();
+ Primitive::Type primitive_type = component->GetPrimitiveType();
+ result->SetI(Primitive::ComponentSize(primitive_type));
+}
+
+typedef void (*InvokeHandler)(Thread* self, ShadowFrame* shadow_frame, JValue* result,
+ size_t arg_size);
+
+typedef void (*JNIHandler)(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result);
+
+static bool tables_initialized_ = false;
+static std::unordered_map<std::string, InvokeHandler> invoke_handlers_;
+static std::unordered_map<std::string, JNIHandler> jni_handlers_;
+
+static void UnstartedRuntimeInitializeInvokeHandlers() {
+ struct InvokeHandlerDef {
+ std::string name;
+ InvokeHandler function;
+ };
+
+ InvokeHandlerDef defs[] {
+ { "java.lang.Class java.lang.Class.forName(java.lang.String)",
+ &UnstartedClassForName },
+ { "java.lang.Class java.lang.Class.forName(java.lang.String, boolean, java.lang.ClassLoader)",
+ &UnstartedClassForNameLong },
+ { "java.lang.Class java.lang.Class.classForName(java.lang.String, boolean, java.lang.ClassLoader)",
+ &UnstartedClassClassForName },
+ { "java.lang.Class java.lang.VMClassLoader.findLoadedClass(java.lang.ClassLoader, java.lang.String)",
+ &UnstartedVmClassLoaderFindLoadedClass },
+ { "java.lang.Class java.lang.Void.lookupType()",
+ &UnstartedVoidLookupType },
+ { "java.lang.Object java.lang.Class.newInstance()",
+ &UnstartedClassNewInstance },
+ { "java.lang.reflect.Field java.lang.Class.getDeclaredField(java.lang.String)",
+ &UnstartedClassGetDeclaredField },
+ { "int java.lang.Object.hashCode()",
+ &UnstartedObjectHashCode },
+ { "java.lang.String java.lang.reflect.ArtMethod.getMethodName(java.lang.reflect.ArtMethod)",
+ &UnstartedArtMethodGetMethodName },
+ { "void java.lang.System.arraycopy(java.lang.Object, int, java.lang.Object, int, int)",
+ &UnstartedSystemArraycopy},
+ { "void java.lang.System.arraycopy(char[], int, char[], int, int)",
+ &UnstartedSystemArraycopy },
+ { "void java.lang.System.arraycopy(int[], int, int[], int, int)",
+ &UnstartedSystemArraycopy },
+ { "long java.lang.Double.doubleToRawLongBits(double)",
+ &UnstartedDoubleDoubleToRawLongBits },
+ { "double java.lang.Math.ceil(double)",
+ &UnstartedMathCeil },
+ { "java.lang.Object java.lang.ThreadLocal.get()",
+ &UnstartedThreadLocalGet },
+ { "com.android.dex.Dex java.lang.DexCache.getDexNative()",
+ &UnstartedDexCacheGetDexNative },
+ { "byte libcore.io.Memory.peekByte(long)",
+ &UnstartedMemoryPeekEntry },
+ { "short libcore.io.Memory.peekShortNative(long)",
+ &UnstartedMemoryPeekEntry },
+ { "int libcore.io.Memory.peekIntNative(long)",
+ &UnstartedMemoryPeekEntry },
+ { "long libcore.io.Memory.peekLongNative(long)",
+ &UnstartedMemoryPeekEntry },
+ { "void libcore.io.Memory.peekByteArray(long, byte[], int, int)",
+ &UnstartedMemoryPeekArrayEntry },
+ };
+
+ for (auto& def : defs) {
+ invoke_handlers_.insert(std::make_pair(def.name, def.function));
+ }
+}
+
+static void UnstartedRuntimeInitializeJNIHandlers() {
+ struct JNIHandlerDef {
+ std::string name;
+ JNIHandler function;
+ };
+
+ JNIHandlerDef defs[] {
+ { "java.lang.Object dalvik.system.VMRuntime.newUnpaddedArray(java.lang.Class, int)",
+ &UnstartedJNIVMRuntimeNewUnpaddedArray },
+ { "java.lang.ClassLoader dalvik.system.VMStack.getCallingClassLoader()",
+ &UnstartedJNIVMStackGetCallingClassLoader },
+ { "java.lang.Class dalvik.system.VMStack.getStackClass2()",
+ &UnstartedJNIVMStackGetStackClass2 },
+ { "double java.lang.Math.log(double)",
+ &UnstartedJNIMathLog },
+ { "java.lang.String java.lang.Class.getNameNative()",
+ &UnstartedJNIClassGetNameNative },
+ { "int java.lang.Float.floatToRawIntBits(float)",
+ &UnstartedJNIFloatFloatToRawIntBits },
+ { "float java.lang.Float.intBitsToFloat(int)",
+ &UnstartedJNIFloatIntBitsToFloat },
+ { "double java.lang.Math.exp(double)",
+ &UnstartedJNIMathExp },
+ { "java.lang.Object java.lang.Object.internalClone()",
+ &UnstartedJNIObjectInternalClone },
+ { "void java.lang.Object.notifyAll()",
+ &UnstartedJNIObjectNotifyAll},
+ { "int java.lang.String.compareTo(java.lang.String)",
+ &UnstartedJNIStringCompareTo },
+ { "java.lang.String java.lang.String.intern()",
+ &UnstartedJNIStringIntern },
+ { "int java.lang.String.fastIndexOf(int, int)",
+ &UnstartedJNIStringFastIndexOf },
+ { "java.lang.Object java.lang.reflect.Array.createMultiArray(java.lang.Class, int[])",
+ &UnstartedJNIArrayCreateMultiArray },
+ { "java.lang.Object java.lang.Throwable.nativeFillInStackTrace()",
+ &UnstartedJNIThrowableNativeFillInStackTrace },
+ { "int java.lang.System.identityHashCode(java.lang.Object)",
+ &UnstartedJNISystemIdentityHashCode },
+ { "boolean java.nio.ByteOrder.isLittleEndian()",
+ &UnstartedJNIByteOrderIsLittleEndian },
+ { "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)",
+ &UnstartedJNIUnsafeCompareAndSwapInt },
+ { "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)",
+ &UnstartedJNIUnsafePutObject },
+ { "int sun.misc.Unsafe.getArrayBaseOffsetForComponentType(java.lang.Class)",
+ &UnstartedJNIUnsafeGetArrayBaseOffsetForComponentType },
+ { "int sun.misc.Unsafe.getArrayIndexScaleForComponentType(java.lang.Class)",
+ &UnstartedJNIUnsafeGetArrayIndexScaleForComponentType },
+ };
+
+ for (auto& def : defs) {
+ jni_handlers_.insert(std::make_pair(def.name, def.function));
+ }
+}
+
+void UnstartedRuntimeInitialize() {
+ CHECK(!tables_initialized_);
+
+ UnstartedRuntimeInitializeInvokeHandlers();
+ UnstartedRuntimeInitializeJNIHandlers();
+
+ tables_initialized_ = true;
+}
+
+void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) {
+ // In a runtime that's not started we intercept certain methods to avoid complicated dependency
+ // problems in core libraries.
+ CHECK(tables_initialized_);
+
+ std::string name(PrettyMethod(shadow_frame->GetMethod()));
+ const auto& iter = invoke_handlers_.find(name);
+ if (iter != invoke_handlers_.end()) {
+ (*iter->second)(self, shadow_frame, result, arg_offset);
+ } else {
+ // Not special, continue with regular interpreter execution.
+ artInterpreterToInterpreterBridge(self, code_item, shadow_frame, result);
+ }
+}
+
+// Hand select a number of methods to be run in a not yet started runtime without using JNI.
+void UnstartedRuntimeJni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result) {
+ std::string name(PrettyMethod(method));
+ const auto& iter = jni_handlers_.find(name);
+ if (iter != jni_handlers_.end()) {
+ (*iter->second)(self, method, receiver, args, result);
+ } else if (Runtime::Current()->IsActiveTransaction()) {
+ AbortTransaction(self, "Attempt to invoke native method in non-started runtime: %s",
+ name.c_str());
+ } else {
+ LOG(FATAL) << "Calling native method " << PrettyMethod(method) << " in an unstarted "
+ "non-transactional runtime";
+ }
+}
+
+} // namespace interpreter
+} // namespace art
diff --git a/runtime/interpreter/unstarted_runtime.h b/runtime/interpreter/unstarted_runtime.h
new file mode 100644
index 0000000000..2d7d38011c
--- /dev/null
+++ b/runtime/interpreter/unstarted_runtime.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_H_
+#define ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_H_
+
+#include "interpreter.h"
+
+#include "dex_file.h"
+#include "jvalue.h"
+
+namespace art {
+
+class Thread;
+class ShadowFrame;
+
+namespace mirror {
+
+class ArtMethod;
+class Object;
+
+} // namespace mirror
+
+namespace interpreter {
+
+void UnstartedRuntimeInitialize();
+
+void UnstartedRuntimeInvoke(Thread* self, const DexFile::CodeItem* code_item,
+ ShadowFrame* shadow_frame,
+ JValue* result, size_t arg_offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+void UnstartedRuntimeJni(Thread* self, mirror::ArtMethod* method, mirror::Object* receiver,
+ uint32_t* args, JValue* result)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+} // namespace interpreter
+} // namespace art
+
+#endif // ART_RUNTIME_INTERPRETER_UNSTARTED_RUNTIME_H_
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
index 08332d3499..e68616fc7d 100644
--- a/runtime/java_vm_ext.cc
+++ b/runtime/java_vm_ext.cc
@@ -741,8 +741,7 @@ void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
}
// Throwing can cause libraries_lock to be reacquired.
if (native_method == nullptr) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
+ self->ThrowNewException("Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
}
return native_method;
}
diff --git a/runtime/jdwp/jdwp_event.cc b/runtime/jdwp/jdwp_event.cc
index fc08d23274..4bf7142692 100644
--- a/runtime/jdwp/jdwp_event.cc
+++ b/runtime/jdwp/jdwp_event.cc
@@ -596,17 +596,15 @@ void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId
return;
}
- DebugInvokeReq* pReq = Dbg::GetInvokeReq();
while (true) {
- pReq->ready = true;
Dbg::SuspendSelf();
- pReq->ready = false;
/*
* The JDWP thread has told us (and possibly all other threads) to
* resume. See if it has left anything in our DebugInvokeReq mailbox.
*/
- if (!pReq->invoke_needed) {
+ DebugInvokeReq* const pReq = Dbg::GetInvokeReq();
+ if (pReq == nullptr) {
/*LOGD("SuspendByPolicy: no invoke needed");*/
break;
}
@@ -614,10 +612,7 @@ void JdwpState::SuspendByPolicy(JdwpSuspendPolicy suspend_policy, JDWP::ObjectId
/* grab this before posting/suspending again */
AcquireJdwpTokenForEvent(thread_self_id);
- /* leave pReq->invoke_needed_ raised so we can check reentrancy */
Dbg::ExecuteMethod(pReq);
-
- pReq->error = ERR_NONE;
}
}
@@ -650,7 +645,7 @@ void JdwpState::SendRequestAndPossiblySuspend(ExpandBuf* pReq, JdwpSuspendPolicy
*/
bool JdwpState::InvokeInProgress() {
DebugInvokeReq* pReq = Dbg::GetInvokeReq();
- return pReq->invoke_needed;
+ return pReq != nullptr;
}
void JdwpState::AcquireJdwpTokenForCommand() {
diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc
index 0ce4de7f61..c7083dcedc 100644
--- a/runtime/jdwp/jdwp_handler.cc
+++ b/runtime/jdwp/jdwp_handler.cc
@@ -91,9 +91,9 @@ static JdwpError WriteTaggedObjectList(ExpandBuf* reply, const std::vector<Objec
* If "is_constructor" is set, this returns "object_id" rather than the
* expected-to-be-void return value of the called function.
*/
-static JdwpError FinishInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
- ObjectId thread_id, ObjectId object_id,
- RefTypeId class_id, MethodId method_id, bool is_constructor)
+static JdwpError RequestInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
+ ObjectId thread_id, ObjectId object_id,
+ RefTypeId class_id, MethodId method_id, bool is_constructor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(!is_constructor || object_id != 0);
@@ -131,37 +131,35 @@ static JdwpError FinishInvoke(JdwpState*, Request* request, ExpandBuf* pReply,
return err;
}
- if (err == ERR_NONE) {
- if (is_constructor) {
- // If we invoked a constructor (which actually returns void), return the receiver,
- // unless we threw, in which case we return NULL.
- resultTag = JT_OBJECT;
- resultValue = (exceptObjId == 0) ? object_id : 0;
- }
+ if (is_constructor) {
+ // If we invoked a constructor (which actually returns void), return the receiver,
+ // unless we threw, in which case we return NULL.
+ resultTag = JT_OBJECT;
+ resultValue = (exceptObjId == 0) ? object_id : 0;
+ }
- size_t width = Dbg::GetTagWidth(resultTag);
- expandBufAdd1(pReply, resultTag);
- if (width != 0) {
- WriteValue(pReply, width, resultValue);
- }
- expandBufAdd1(pReply, JT_OBJECT);
- expandBufAddObjectId(pReply, exceptObjId);
-
- VLOG(jdwp) << " --> returned " << resultTag
- << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", resultValue, exceptObjId);
-
- /* show detailed debug output */
- if (resultTag == JT_STRING && exceptObjId == 0) {
- if (resultValue != 0) {
- if (VLOG_IS_ON(jdwp)) {
- std::string result_string;
- JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
- CHECK_EQ(error, JDWP::ERR_NONE);
- VLOG(jdwp) << " string '" << result_string << "'";
- }
- } else {
- VLOG(jdwp) << " string (null)";
+ size_t width = Dbg::GetTagWidth(resultTag);
+ expandBufAdd1(pReply, resultTag);
+ if (width != 0) {
+ WriteValue(pReply, width, resultValue);
+ }
+ expandBufAdd1(pReply, JT_OBJECT);
+ expandBufAddObjectId(pReply, exceptObjId);
+
+ VLOG(jdwp) << " --> returned " << resultTag
+ << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", resultValue, exceptObjId);
+
+ /* show detailed debug output */
+ if (resultTag == JT_STRING && exceptObjId == 0) {
+ if (resultValue != 0) {
+ if (VLOG_IS_ON(jdwp)) {
+ std::string result_string;
+ JDWP::JdwpError error = Dbg::StringToUtf8(resultValue, &result_string);
+ CHECK_EQ(error, JDWP::ERR_NONE);
+ VLOG(jdwp) << " string '" << result_string << "'";
}
+ } else {
+ VLOG(jdwp) << " string (null)";
}
}
@@ -693,7 +691,7 @@ static JdwpError CT_InvokeMethod(JdwpState* state, Request* request, ExpandBuf*
ObjectId thread_id = request->ReadThreadId();
MethodId method_id = request->ReadMethodId();
- return FinishInvoke(state, request, pReply, thread_id, 0, class_id, method_id, false);
+ return RequestInvoke(state, request, pReply, thread_id, 0, class_id, method_id, false);
}
/*
@@ -717,7 +715,7 @@ static JdwpError CT_NewInstance(JdwpState* state, Request* request, ExpandBuf* p
if (object_id == 0) {
return ERR_OUT_OF_MEMORY;
}
- return FinishInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, true);
+ return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, true);
}
/*
@@ -879,7 +877,7 @@ static JdwpError OR_InvokeMethod(JdwpState* state, Request* request, ExpandBuf*
RefTypeId class_id = request->ReadRefTypeId();
MethodId method_id = request->ReadMethodId();
- return FinishInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, false);
+ return RequestInvoke(state, request, pReply, thread_id, object_id, class_id, method_id, false);
}
static JdwpError OR_DisableCollection(JdwpState*, Request* request, ExpandBuf*)
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 539c181952..9b894593bf 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -40,16 +40,32 @@ JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& opt
options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheCapacity);
jit_options->compile_threshold_ =
options.GetOrDefault(RuntimeArgumentMap::JITCompileThreshold);
+ jit_options->dump_info_on_shutdown_ =
+ options.Exists(RuntimeArgumentMap::DumpJITInfoOnShutdown);
return jit_options;
}
+void Jit::DumpInfo(std::ostream& os) {
+ os << "Code cache size=" << PrettySize(code_cache_->CodeCacheSize())
+ << " data cache size=" << PrettySize(code_cache_->DataCacheSize())
+ << " num methods=" << code_cache_->NumMethods()
+ << "\n";
+ cumulative_timings_.Dump(os);
+}
+
+void Jit::AddTimingLogger(const TimingLogger& logger) {
+ cumulative_timings_.AddLogger(logger);
+}
+
Jit::Jit()
: jit_library_handle_(nullptr), jit_compiler_handle_(nullptr), jit_load_(nullptr),
- jit_compile_method_(nullptr) {
+ jit_compile_method_(nullptr), dump_info_on_shutdown_(false),
+ cumulative_timings_("JIT timings") {
}
Jit* Jit::Create(JitOptions* options, std::string* error_msg) {
std::unique_ptr<Jit> jit(new Jit);
+ jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown();
if (!jit->LoadCompiler(error_msg)) {
return nullptr;
}
@@ -133,6 +149,9 @@ void Jit::DeleteThreadPool() {
}
Jit::~Jit() {
+ if (dump_info_on_shutdown_) {
+ DumpInfo(LOG(INFO));
+ }
DeleteThreadPool();
if (jit_compiler_handle_ != nullptr) {
jit_unload_(jit_compiler_handle_);
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index b80015feca..6b206d15b8 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -24,6 +24,7 @@
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
+#include "base/timing_logger.h"
#include "gc_root.h"
#include "jni.h"
#include "object_callbacks.h"
@@ -61,6 +62,11 @@ class Jit {
return code_cache_.get();
}
void DeleteThreadPool();
+ // Dump interesting info: #methods compiled, code vs data size, compile / verify cumulative
+ // loggers.
+ void DumpInfo(std::ostream& os);
+ // Add a timing logger to cumulative_timings_.
+ void AddTimingLogger(const TimingLogger& logger);
private:
Jit();
@@ -73,6 +79,10 @@ class Jit {
void (*jit_unload_)(void*);
bool (*jit_compile_method_)(void*, mirror::ArtMethod*, Thread*);
+ // Performance monitoring.
+ bool dump_info_on_shutdown_;
+ CumulativeLogger cumulative_timings_;
+
std::unique_ptr<jit::JitInstrumentationCache> instrumentation_cache_;
std::unique_ptr<jit::JitCodeCache> code_cache_;
CompilerCallbacks* compiler_callbacks_; // Owned by the jit compiler.
@@ -87,12 +97,16 @@ class JitOptions {
size_t GetCodeCacheCapacity() const {
return code_cache_capacity_;
}
+ bool DumpJitInfoOnShutdown() const {
+ return dump_info_on_shutdown_;
+ }
private:
size_t code_cache_capacity_;
size_t compile_threshold_;
+ bool dump_info_on_shutdown_;
- JitOptions() : code_cache_capacity_(0), compile_threshold_(0) {
+ JitOptions() : code_cache_capacity_(0), compile_threshold_(0), dump_info_on_shutdown_(false) {
}
};
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 4ae4d570fc..4d367e01eb 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -77,6 +77,7 @@ uint8_t* JitCodeCache::ReserveCode(Thread* self, size_t size) {
if (size > CodeCacheRemain()) {
return nullptr;
}
+ ++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
code_cache_ptr_ += size;
return code_cache_ptr_ - size;
}
diff --git a/runtime/jit/jit_instrumentation.h b/runtime/jit/jit_instrumentation.h
index 9576f4b341..425d2d3d2d 100644
--- a/runtime/jit/jit_instrumentation.h
+++ b/runtime/jit/jit_instrumentation.h
@@ -39,7 +39,6 @@ namespace mirror {
} // namespace mirror
union JValue;
class Thread;
-class ThrowLocation;
namespace jit {
@@ -83,8 +82,7 @@ class JitInstrumentationListener : public instrumentation::InstrumentationListen
mirror::ArtMethod* /*method*/, uint32_t /*dex_pc*/,
mirror::ArtField* /*field*/, const JValue& /*field_value*/)
OVERRIDE { }
- virtual void ExceptionCaught(Thread* /*thread*/, const ThrowLocation& /*throw_location*/,
- mirror::ArtMethod* /*catch_method*/, uint32_t /*catch_dex_pc*/,
+ virtual void ExceptionCaught(Thread* /*thread*/,
mirror::Throwable* /*exception_object*/) OVERRIDE { }
virtual void DexPcMoved(Thread* /*self*/, mirror::Object* /*this_object*/,
diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc
index 561302e33a..6063e1e8e8 100644
--- a/runtime/jni_internal.cc
+++ b/runtime/jni_internal.cc
@@ -45,7 +45,6 @@
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
-#include "nativebridge/native_bridge.h"
#include "parsed_options.h"
#include "reflection.h"
#include "runtime.h"
@@ -89,9 +88,8 @@ static std::string NormalizeJniClassDescriptor(const char* name) {
static void ThrowNoSuchMethodError(ScopedObjectAccess& soa, mirror::Class* c,
const char* name, const char* sig, const char* kind)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
std::string temp;
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchMethodError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"no %s method \"%s.%s%s\"",
kind, c->GetDescriptor(&temp), name, sig);
}
@@ -102,8 +100,7 @@ static void ReportInvalidJNINativeMethod(const ScopedObjectAccess& soa, mirror::
LOG(return_errors ? ERROR : FATAL) << "Failed to register native method in "
<< PrettyDescriptor(c) << " in " << c->GetDexCache()->GetLocation()->ToModifiedUtf8()
<< ": " << kind << " is null at index " << idx;
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchMethodError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchMethodError;",
"%s is null at index %d", kind, idx);
}
@@ -196,16 +193,15 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
if (field_type == nullptr) {
// Failed to find type from the signature of the field.
DCHECK(soa.Self()->IsExceptionPending());
- ThrowLocation throw_location;
StackHandleScope<1> hs2(soa.Self());
- Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException(&throw_location)));
+ Handle<mirror::Throwable> cause(hs2.NewHandle(soa.Self()->GetException()));
soa.Self()->ClearException();
std::string temp;
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"no type \"%s\" found and so no field \"%s\" "
"could be found in class \"%s\" or its superclasses", sig, name,
c->GetDescriptor(&temp));
- soa.Self()->GetException(nullptr)->SetCause(cause.Get());
+ soa.Self()->GetException()->SetCause(cause.Get());
return nullptr;
}
std::string temp;
@@ -216,8 +212,7 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
field = c->FindInstanceField(name, field_type->GetDescriptor(&temp));
}
if (field == nullptr) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/NoSuchFieldError;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/NoSuchFieldError;",
"no \"%s\" field \"%s\" in class \"%s\" or its superclasses",
sig, name, c->GetDescriptor(&temp));
return nullptr;
@@ -229,8 +224,7 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize sta
jsize length, const char* identifier)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string type(PrettyTypeOf(array));
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"%s offset=%d length=%d %s.length=%d",
type.c_str(), start, length, identifier, array->GetLength());
}
@@ -238,8 +232,7 @@ static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize sta
static void ThrowSIOOBE(ScopedObjectAccess& soa, jsize start, jsize length,
jsize array_length)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/StringIndexOutOfBoundsException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/StringIndexOutOfBoundsException;",
"offset=%d length=%d string.length()=%d", start, length,
array_length);
}
@@ -282,8 +275,7 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj
return JNI_ERR;
}
ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->SetException(throw_location, soa.Decode<mirror::Throwable*>(exception.get()));
+ soa.Self()->SetException(soa.Decode<mirror::Throwable*>(exception.get()));
return JNI_OK;
}
@@ -433,8 +425,7 @@ class JNI {
if (exception == nullptr) {
return JNI_ERR;
}
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->SetException(throw_location, exception);
+ soa.Self()->SetException(exception);
return JNI_OK;
}
@@ -456,25 +447,14 @@ class JNI {
ScopedObjectAccess soa(env);
// If we have no exception to describe, pass through.
- if (!soa.Self()->GetException(nullptr)) {
+ if (!soa.Self()->GetException()) {
return;
}
- StackHandleScope<3> hs(soa.Self());
- // TODO: Use nullptr instead of null handles?
- auto old_throw_this_object(hs.NewHandle<mirror::Object>(nullptr));
- auto old_throw_method(hs.NewHandle<mirror::ArtMethod>(nullptr));
- auto old_exception(hs.NewHandle<mirror::Throwable>(nullptr));
- uint32_t old_throw_dex_pc;
- {
- ThrowLocation old_throw_location;
- mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
- old_throw_this_object.Assign(old_throw_location.GetThis());
- old_throw_method.Assign(old_throw_location.GetMethod());
- old_exception.Assign(old_exception_obj);
- old_throw_dex_pc = old_throw_location.GetDexPc();
- soa.Self()->ClearException();
- }
+ StackHandleScope<1> hs(soa.Self());
+ Handle<mirror::Throwable> old_exception(
+ hs.NewHandle<mirror::Throwable>(soa.Self()->GetException()));
+ soa.Self()->ClearException();
ScopedLocalRef<jthrowable> exception(env,
soa.AddLocalReference<jthrowable>(old_exception.Get()));
ScopedLocalRef<jclass> exception_class(env, env->GetObjectClass(exception.get()));
@@ -485,20 +465,17 @@ class JNI {
} else {
env->CallVoidMethod(exception.get(), mid);
if (soa.Self()->IsExceptionPending()) {
- LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(soa.Self()->GetException(nullptr))
+ LOG(WARNING) << "JNI WARNING: " << PrettyTypeOf(soa.Self()->GetException())
<< " thrown while calling printStackTrace";
soa.Self()->ClearException();
}
}
- ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
- old_throw_dex_pc);
-
- soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
+ soa.Self()->SetException(old_exception.Get());
}
static jthrowable ExceptionOccurred(JNIEnv* env) {
ScopedObjectAccess soa(env);
- mirror::Object* exception = soa.Self()->GetException(nullptr);
+ mirror::Object* exception = soa.Self()->GetException();
return soa.AddLocalReference<jthrowable>(exception);
}
diff --git a/runtime/memory_region.h b/runtime/memory_region.h
index b3820be26c..939a1a9212 100644
--- a/runtime/memory_region.h
+++ b/runtime/memory_region.h
@@ -23,6 +23,7 @@
#include "base/macros.h"
#include "base/value_object.h"
#include "globals.h"
+#include "utils.h"
namespace art {
@@ -45,14 +46,64 @@ class MemoryRegion FINAL : public ValueObject {
uint8_t* start() const { return reinterpret_cast<uint8_t*>(pointer_); }
uint8_t* end() const { return start() + size_; }
+ // Load value of type `T` at `offset`. The memory address corresponding
+ // to `offset` should be word-aligned.
template<typename T> T Load(uintptr_t offset) const {
+ // TODO: DCHECK that the address is word-aligned.
return *ComputeInternalPointer<T>(offset);
}
+ // Store `value` (of type `T`) at `offset`. The memory address
+ // corresponding to `offset` should be word-aligned.
template<typename T> void Store(uintptr_t offset, T value) const {
+ // TODO: DCHECK that the address is word-aligned.
*ComputeInternalPointer<T>(offset) = value;
}
+ // TODO: Local hack to prevent name clashes between two conflicting
+ // implementations of bit_cast:
+ // - art::bit_cast<Destination, Source> runtime/base/casts.h, and
+ // - art::bit_cast<Source, Destination> from runtime/utils.h.
+ // Remove this when these routines have been merged.
+ template<typename Source, typename Destination>
+ static Destination local_bit_cast(Source in) {
+ static_assert(sizeof(Source) <= sizeof(Destination),
+ "Size of Source not <= size of Destination");
+ union {
+ Source u;
+ Destination v;
+ } tmp;
+ tmp.u = in;
+ return tmp.v;
+ }
+
+ // Load value of type `T` at `offset`. The memory address corresponding
+ // to `offset` does not need to be word-aligned.
+ template<typename T> T LoadUnaligned(uintptr_t offset) const {
+ // Equivalent unsigned integer type corresponding to T.
+ typedef typename UnsignedIntegerType<sizeof(T)>::type U;
+ U equivalent_unsigned_integer_value = 0;
+ // Read the value byte by byte in a little-endian fashion.
+ for (size_t i = 0; i < sizeof(U); ++i) {
+ equivalent_unsigned_integer_value +=
+ *ComputeInternalPointer<uint8_t>(offset + i) << (i * kBitsPerByte);
+ }
+ return local_bit_cast<U, T>(equivalent_unsigned_integer_value);
+ }
+
+ // Store `value` (of type `T`) at `offset`. The memory address
+ // corresponding to `offset` does not need to be word-aligned.
+ template<typename T> void StoreUnaligned(uintptr_t offset, T value) const {
+ // Equivalent unsigned integer type corresponding to T.
+ typedef typename UnsignedIntegerType<sizeof(T)>::type U;
+ U equivalent_unsigned_integer_value = local_bit_cast<T, U>(value);
+ // Write the value byte by byte in a little-endian fashion.
+ for (size_t i = 0; i < sizeof(U); ++i) {
+ *ComputeInternalPointer<uint8_t>(offset + i) =
+ (equivalent_unsigned_integer_value >> (i * kBitsPerByte)) & 0xFF;
+ }
+ }
+
template<typename T> T* PointerTo(uintptr_t offset) const {
return ComputeInternalPointer<T>(offset);
}
diff --git a/runtime/memory_region_test.cc b/runtime/memory_region_test.cc
new file mode 100644
index 0000000000..72e03a485a
--- /dev/null
+++ b/runtime/memory_region_test.cc
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "memory_region.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+TEST(MemoryRegion, LoadUnaligned) {
+ const size_t n = 8;
+ uint8_t data[n] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ MemoryRegion region(&data, n);
+
+ ASSERT_EQ(0, region.LoadUnaligned<char>(0));
+ ASSERT_EQ(1u
+ + (2u << kBitsPerByte)
+ + (3u << 2 * kBitsPerByte)
+ + (4u << 3 * kBitsPerByte),
+ region.LoadUnaligned<uint32_t>(1));
+ ASSERT_EQ(5 + (6 << kBitsPerByte), region.LoadUnaligned<int16_t>(5));
+ ASSERT_EQ(7u, region.LoadUnaligned<unsigned char>(7));
+}
+
+TEST(MemoryRegion, StoreUnaligned) {
+ const size_t n = 8;
+ uint8_t data[n] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+ MemoryRegion region(&data, n);
+
+ region.StoreUnaligned<unsigned char>(0u, 7);
+ region.StoreUnaligned<int16_t>(1, 6 + (5 << kBitsPerByte));
+ region.StoreUnaligned<uint32_t>(3,
+ 4u
+ + (3u << kBitsPerByte)
+ + (2u << 2 * kBitsPerByte)
+ + (1u << 3 * kBitsPerByte));
+ region.StoreUnaligned<char>(7, 0);
+
+ uint8_t expected[n] = { 7, 6, 5, 4, 3, 2, 1, 0 };
+ for (size_t i = 0; i < n; ++i) {
+ ASSERT_EQ(expected[i], data[i]);
+ }
+}
+
+} // namespace art
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 85fc5f3c96..bc58709805 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -271,9 +271,8 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep
const DexFile::CodeItem* code_item = h_this->GetCodeItem();
// Set aside the exception while we resolve its type.
Thread* self = Thread::Current();
- ThrowLocation throw_location;
StackHandleScope<1> hs(self);
- Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException(&throw_location)));
+ Handle<mirror::Throwable> exception(hs.NewHandle(self->GetException()));
self->ClearException();
// Default to handler not found.
uint32_t found_dex_pc = DexFile::kDexNoIndex;
@@ -309,7 +308,7 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep
}
// Put the exception back.
if (exception.Get() != nullptr) {
- self->SetException(throw_location, exception.Get());
+ self->SetException(exception.Get());
}
return found_dex_pc;
}
@@ -434,7 +433,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
#else
(*art_quick_invoke_stub)(this, args, args_size, self, result, shorty);
#endif
- if (UNLIKELY(self->GetException(nullptr) == Thread::GetDeoptimizationException())) {
+ if (UNLIKELY(self->GetException() == Thread::GetDeoptimizationException())) {
// Unusual case where we were running generated code and an
// exception was thrown to force the activations to be removed from the
// stack. Continue execution in the interpreter.
diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc
index 96b15dd676..6f4ef60e85 100644
--- a/runtime/mirror/class.cc
+++ b/runtime/mirror/class.cc
@@ -77,13 +77,9 @@ void Class::SetStatus(Status new_status, Thread* self) {
<< "Attempt to set as erroneous an already erroneous class " << PrettyClass(this);
// Stash current exception.
- StackHandleScope<3> hs(self);
- ThrowLocation old_throw_location;
- Handle<mirror::Throwable> old_exception(hs.NewHandle(self->GetException(&old_throw_location)));
+ StackHandleScope<1> hs(self);
+ Handle<mirror::Throwable> old_exception(hs.NewHandle(self->GetException()));
CHECK(old_exception.Get() != nullptr);
- Handle<mirror::Object> old_throw_this_object(hs.NewHandle(old_throw_location.GetThis()));
- Handle<mirror::ArtMethod> old_throw_method(hs.NewHandle(old_throw_location.GetMethod()));
- uint32_t old_throw_dex_pc = old_throw_location.GetDexPc();
Class* eiie_class;
 // Don't attempt to use FindClass if we have an OOM error since this can try to do more
// allocations and may cause infinite loops.
@@ -109,9 +105,7 @@ void Class::SetStatus(Status new_status, Thread* self) {
}
// Restore exception.
- ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
- old_throw_dex_pc);
- self->SetException(gc_safe_throw_location, old_exception.Get());
+ self->SetException(old_exception.Get());
}
static_assert(sizeof(Status) == sizeof(uint32_t), "Size of status not equal to uint32");
if (Runtime::Current()->IsActiveTransaction()) {
diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h
index 3c947ab37b..c548c03e63 100644
--- a/runtime/mirror/dex_cache.h
+++ b/runtime/mirror/dex_cache.h
@@ -59,6 +59,10 @@ class MANAGED DexCache FINAL : public Object {
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_));
}
+ static MemberOffset DexOffset() {
+ return OFFSET_OF_OBJECT_MEMBER(DexCache, dex_);
+ }
+
static MemberOffset StringsOffset() {
return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_);
}
diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h
index 96d426b59e..80d51359a6 100644
--- a/runtime/mirror/object_array-inl.h
+++ b/runtime/mirror/object_array-inl.h
@@ -233,9 +233,8 @@ inline void ObjectArray<T>::AssignableCheckingMemcpy(int32_t dst_pos, ObjectArra
std::string actualSrcType(PrettyTypeOf(o));
std::string dstType(PrettyTypeOf(this));
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
if (throw_exception) {
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"source[%d] of type %s cannot be stored in destination array of type %s",
src_pos + i, actualSrcType.c_str(), dstType.c_str());
} else {
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index 9b345a65d2..21972a1ccf 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -124,12 +124,12 @@ TEST_F(ObjectTest, AllocObjectArray) {
EXPECT_TRUE(oa->Get(-1) == NULL);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_TRUE(oa->Get(2) == NULL);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
ASSERT_TRUE(oa->GetClass() != NULL);
@@ -213,12 +213,12 @@ void TestPrimitiveArray(ClassLinker* cl) {
EXPECT_EQ(0, a->Get(-1));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_EQ(0, a->Get(2));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
}
@@ -262,12 +262,12 @@ TEST_F(ObjectTest, PrimitiveArray_Double_Alloc) {
EXPECT_DOUBLE_EQ(0, a->Get(-1));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_DOUBLE_EQ(0, a->Get(2));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
}
@@ -292,12 +292,12 @@ TEST_F(ObjectTest, PrimitiveArray_Float_Alloc) {
EXPECT_FLOAT_EQ(0, a->Get(-1));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
EXPECT_FLOAT_EQ(0, a->Get(2));
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(aioobe, soa.Self()->GetException(NULL)->GetClass());
+ EXPECT_EQ(aioobe, soa.Self()->GetException()->GetClass());
soa.Self()->ClearException();
}
@@ -335,7 +335,7 @@ TEST_F(ObjectTest, CreateMultiArray) {
dims->Set<false>(0, -1);
multi = Array::CreateMultiArray(soa.Self(), c, dims);
EXPECT_TRUE(soa.Self()->IsExceptionPending());
- EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException(NULL)->GetClass()),
+ EXPECT_EQ(PrettyDescriptor(soa.Self()->GetException()->GetClass()),
"java.lang.NegativeArraySizeException");
soa.Self()->ClearException();
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 45a971d22d..d41d37e40a 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -300,13 +300,12 @@ static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...)
va_list args;
va_start(args, fmt);
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionV(throw_location, "Ljava/lang/IllegalMonitorStateException;", fmt, args);
+ self->ThrowNewExceptionV("Ljava/lang/IllegalMonitorStateException;", fmt, args);
if (!Runtime::Current()->IsStarted() || VLOG_IS_ON(monitor)) {
std::ostringstream ss;
self->Dump(ss);
LOG(Runtime::Current()->IsStarted() ? INFO : ERROR)
- << self->GetException(NULL)->Dump() << "\n" << ss.str();
+ << self->GetException()->Dump() << "\n" << ss.str();
}
va_end(args);
}
@@ -428,8 +427,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
// Enforce the timeout range.
if (ms < 0 || ns < 0 || ns > 999999) {
monitor_lock_.Unlock(self);
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;",
+ self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
"timeout arguments out of range: ms=%" PRId64 " ns=%d", ms, ns);
return;
}
@@ -540,8 +538,7 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
self->SetInterruptedLocked(false);
}
if (interruptShouldThrow) {
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewException(throw_location, "Ljava/lang/InterruptedException;", NULL);
+ self->ThrowNewException("Ljava/lang/InterruptedException;", NULL);
}
}
}
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index e1fe3eb918..c182a4d9ad 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -16,31 +16,17 @@
#include "dalvik_system_DexFile.h"
-#include <algorithm>
-#include <set>
-#include <fcntl.h>
-#ifdef __linux__
-#include <sys/sendfile.h>
-#else
-#include <sys/socket.h>
-#endif
-#include <sys/stat.h>
-#include <unistd.h>
-
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/stringprintf.h"
#include "class_linker.h"
#include "common_throws.h"
#include "dex_file-inl.h"
-#include "gc/space/image_space.h"
-#include "gc/space/space-inl.h"
-#include "image.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/string.h"
-#include "oat.h"
+#include "oat_file_assistant.h"
#include "os.h"
#include "profiler.h"
#include "runtime.h"
@@ -51,11 +37,6 @@
#include "well_known_classes.h"
#include "zip_archive.h"
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wshadow"
-#include "ScopedFd.h"
-#pragma GCC diagnostic pop
-
namespace art {
static std::unique_ptr<std::vector<const DexFile*>>
@@ -182,10 +163,9 @@ static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSource
std::vector<std::unique_ptr<const DexFile>> dex_files;
std::vector<std::string> error_msgs;
- bool success = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs,
- &dex_files);
+ dex_files = linker->OpenDexFilesFromOat(sourceName.c_str(), outputName.c_str(), &error_msgs);
- if (success || !dex_files.empty()) {
+ if (!dex_files.empty()) {
jlongArray array = ConvertNativeToJavaArray(env, dex_files);
if (array == nullptr) {
ScopedObjectAccess soa(env);
@@ -197,9 +177,6 @@ static jobject DexFile_openDexFileNative(JNIEnv* env, jclass, jstring javaSource
}
return array;
} else {
- // The vector should be empty after a failed loading attempt.
- DCHECK_EQ(0U, dex_files.size());
-
ScopedObjectAccess soa(env);
CHECK(!error_msgs.empty());
// The most important message is at the end. So set up nesting by going forward, which will
@@ -320,40 +297,6 @@ static jobjectArray DexFile_getClassNameList(JNIEnv* env, jclass, jobject cookie
return result;
}
-static void CopyProfileFile(const char* oldfile, const char* newfile) {
- ScopedFd src(open(oldfile, O_RDONLY));
- if (src.get() == -1) {
- PLOG(ERROR) << "Failed to open profile file " << oldfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- return;
- }
-
- struct stat stat_src;
- if (fstat(src.get(), &stat_src) == -1) {
- PLOG(ERROR) << "Failed to get stats for profile file " << oldfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- return;
- }
-
- // Create the copy with rw------- (only accessible by system)
- ScopedFd dst(open(newfile, O_WRONLY|O_CREAT|O_TRUNC, 0600));
- if (dst.get() == -1) {
- PLOG(ERROR) << "Failed to create/write prev profile file " << newfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- return;
- }
-
-#ifdef __linux__
- if (sendfile(dst.get(), src.get(), nullptr, stat_src.st_size) == -1) {
-#else
- off_t len;
- if (sendfile(dst.get(), src.get(), 0, &len, nullptr, 0) == -1) {
-#endif
- PLOG(ERROR) << "Failed to copy profile file " << oldfile << " to " << newfile
- << ". My uid:gid is " << getuid() << ":" << getgid();
- }
-}
-
// Java: dalvik.system.DexFile.UP_TO_DATE
static const jbyte kUpToDate = 0;
// Java: dalvik.system.DexFile.PATCHOAT_NEEDED
@@ -361,102 +304,8 @@ static const jbyte kPatchoatNeeded = 1;
// Java: dalvik.system.DexFile.DEXOPT_NEEDED
static const jbyte kDexoptNeeded = 2;
-template <const bool kVerboseLogging, const bool kReasonLogging>
-static jbyte IsDexOptNeededForFile(const std::string& oat_filename, const char* filename,
- InstructionSet target_instruction_set,
- bool* oat_is_pic) {
- std::string error_msg;
- std::unique_ptr<const OatFile> oat_file(OatFile::Open(oat_filename, oat_filename, nullptr,
- nullptr,
- false, &error_msg));
- if (oat_file.get() == nullptr) {
- // Note that even though this is kDexoptNeeded, we use
- // kVerboseLogging instead of the usual kReasonLogging since it is
- // the common case on first boot and very spammy.
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded failed to open oat file '" << oat_filename
- << "' for file location '" << filename << "': " << error_msg;
- }
- error_msg.clear();
- return kDexoptNeeded;
- }
-
- // Pass-up the information about if this is PIC.
- // TODO: Refactor this function to be less complicated.
- *oat_is_pic = oat_file->IsPic();
-
- bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
- uint32_t location_checksum = 0;
- const art::OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(filename, nullptr,
- kReasonLogging);
- if (oat_dex_file != nullptr) {
- // If its not possible to read the classes.dex assume up-to-date as we won't be able to
- // compile it anyway.
- if (!DexFile::GetChecksum(filename, &location_checksum, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded found precompiled stripped file: "
- << filename << " for " << oat_filename << ": " << error_msg;
- }
- if (ClassLinker::VerifyOatChecksums(oat_file.get(), target_instruction_set, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is up-to-date for " << filename;
- }
- return kUpToDate;
- } else if (should_relocate_if_possible &&
- ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " needs to be relocated for " << filename;
- }
- return kPatchoatNeeded;
- } else {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is out of date for " << filename;
- }
- return kDexoptNeeded;
- }
- // If we get here the file is out of date and we should use the system one to relocate.
- } else {
- if (ClassLinker::VerifyOatAndDexFileChecksums(oat_file.get(), filename, location_checksum,
- target_instruction_set, &error_msg)) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is up-to-date for " << filename;
- }
- return kUpToDate;
- } else if (location_checksum == oat_dex_file->GetDexFileLocationChecksum()
- && should_relocate_if_possible
- && ClassLinker::VerifyOatImageChecksum(oat_file.get(), target_instruction_set)) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " needs to be relocated for " << filename;
- }
- return kPatchoatNeeded;
- } else {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " is out of date for " << filename;
- }
- return kDexoptNeeded;
- }
- }
- } else {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded file " << oat_filename
- << " does not contain " << filename;
- }
- return kDexoptNeeded;
- }
-}
-
static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
const char* pkgname, const char* instruction_set, const jboolean defer) {
- // Spammy logging for kUpToDate
- const bool kVerboseLogging = false;
- // Logging of reason for returning kDexoptNeeded or kPatchoatNeeded.
- const bool kReasonLogging = true;
if ((filename == nullptr) || !OS::FileExists(filename)) {
LOG(ERROR) << "DexFile_isDexOptNeeded file '" << filename << "' does not exist";
@@ -466,117 +315,6 @@ static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
return kUpToDate;
}
- // Always treat elements of the bootclasspath as up-to-date. The
- // fact that code is running at all means that this should be true.
- Runtime* runtime = Runtime::Current();
- ClassLinker* class_linker = runtime->GetClassLinker();
- // TODO: We're assuming that the 64 and 32 bit runtimes have identical
- // class paths. isDexOptNeeded will not necessarily be called on a runtime
- // that has the same instruction set as the file being dexopted.
- const std::vector<const DexFile*>& boot_class_path = class_linker->GetBootClassPath();
- for (size_t i = 0; i < boot_class_path.size(); i++) {
- if (boot_class_path[i]->GetLocation() == filename) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeeded ignoring boot class path file: " << filename;
- }
- return kUpToDate;
- }
- }
-
- bool force_system_only = false;
- bool require_system_version = false;
-
- // Check the profile file. We need to rerun dex2oat if the profile has changed significantly
- // since the last time, or it's new.
- // If the 'defer' argument is true then this will be retried later. In this case we
- // need to make sure that the profile file copy is not made so that we will get the
- // same result second time.
- std::string profile_file;
- std::string prev_profile_file;
- bool should_copy_profile = false;
- if (Runtime::Current()->GetProfilerOptions().IsEnabled() && (pkgname != nullptr)) {
- profile_file = GetDalvikCacheOrDie("profiles", false /* create_if_absent */)
- + std::string("/") + pkgname;
- prev_profile_file = profile_file + std::string("@old");
-
- struct stat profstat, prevstat;
- int e1 = stat(profile_file.c_str(), &profstat);
- int e1_errno = errno;
- int e2 = stat(prev_profile_file.c_str(), &prevstat);
- int e2_errno = errno;
- if (e1 < 0) {
- if (e1_errno != EACCES) {
- // No profile file, need to run dex2oat, unless we find a file in system
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal profile file " << profile_file << " doesn't exist. "
- << "Will check odex to see if we can find a working version.";
- }
- // Force it to only accept system files/files with versions in system.
- require_system_version = true;
- } else {
- LOG(INFO) << "DexFile_isDexOptNeededInternal recieved EACCES trying to stat profile file "
- << profile_file;
- }
- } else if (e2 == 0) {
- // There is a previous profile file. Check if the profile has changed significantly.
- // A change in profile is considered significant if X% (change_thr property) of the top K%
- // (compile_thr property) samples has changed.
- double top_k_threshold = Runtime::Current()->GetProfilerOptions().GetTopKThreshold();
- double change_threshold = Runtime::Current()->GetProfilerOptions().GetTopKChangeThreshold();
- double change_percent = 0.0;
- ProfileFile new_profile, old_profile;
- bool new_ok = new_profile.LoadFile(profile_file);
- bool old_ok = old_profile.LoadFile(prev_profile_file);
- if (!new_ok || !old_ok) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal Ignoring invalid profiles: "
- << (new_ok ? "" : profile_file) << " " << (old_ok ? "" : prev_profile_file);
- }
- } else {
- std::set<std::string> new_top_k, old_top_k;
- new_profile.GetTopKSamples(new_top_k, top_k_threshold);
- old_profile.GetTopKSamples(old_top_k, top_k_threshold);
- if (new_top_k.empty()) {
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal empty profile: " << profile_file;
- }
- // If the new topK is empty we shouldn't optimize so we leave the change_percent at 0.0.
- } else {
- std::set<std::string> diff;
- std::set_difference(new_top_k.begin(), new_top_k.end(), old_top_k.begin(), old_top_k.end(),
- std::inserter(diff, diff.end()));
- // TODO: consider using the usedPercentage instead of the plain diff count.
- change_percent = 100.0 * static_cast<double>(diff.size()) / static_cast<double>(new_top_k.size());
- if (kVerboseLogging) {
- std::set<std::string>::iterator end = diff.end();
- for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal new in topK: " << *it;
- }
- }
- }
- }
-
- if (change_percent > change_threshold) {
- if (kReasonLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal size of new profile file " << profile_file <<
- " is significantly different from old profile file " << prev_profile_file << " (top "
- << top_k_threshold << "% samples changed in proportion of " << change_percent << "%)";
- }
- should_copy_profile = !defer;
- // Force us to only accept system files.
- force_system_only = true;
- }
- } else if (e2_errno == ENOENT) {
- // Previous profile does not exist. Make a copy of the current one.
- if (kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal previous profile doesn't exist: " << prev_profile_file;
- }
- should_copy_profile = !defer;
- } else {
- PLOG(INFO) << "Unable to stat previous profile file " << prev_profile_file;
- }
- }
-
const InstructionSet target_instruction_set = GetInstructionSetFromString(instruction_set);
if (target_instruction_set == kNone) {
ScopedLocalRef<jclass> iae(env, env->FindClass("java/lang/IllegalArgumentException"));
@@ -585,75 +323,43 @@ static jbyte IsDexOptNeededInternal(JNIEnv* env, const char* filename,
return 0;
}
- // Get the filename for odex file next to the dex file.
- std::string odex_filename(DexFilenameToOdexFilename(filename, target_instruction_set));
- // Get the filename for the dalvik-cache file
- std::string cache_dir;
- bool have_android_data = false;
- bool dalvik_cache_exists = false;
- bool is_global_cache = false;
- GetDalvikCache(instruction_set, false, &cache_dir, &have_android_data, &dalvik_cache_exists,
- &is_global_cache);
- std::string cache_filename; // was cache_location
- bool have_cache_filename = false;
- if (dalvik_cache_exists) {
- std::string error_msg;
- have_cache_filename = GetDalvikCacheFilename(filename, cache_dir.c_str(), &cache_filename,
- &error_msg);
- if (!have_cache_filename && kVerboseLogging) {
- LOG(INFO) << "DexFile_isDexOptNeededInternal failed to find cache file for dex file " << filename
- << ": " << error_msg;
- }
- }
-
- bool should_relocate_if_possible = Runtime::Current()->ShouldRelocate();
-
- jbyte dalvik_cache_decision = -1;
- // Lets try the cache first (since we want to load from there since thats where the relocated
- // versions will be).
- if (have_cache_filename && !force_system_only) {
- bool oat_is_pic;
- // We can use the dalvik-cache if we find a good file.
- dalvik_cache_decision =
- IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(cache_filename, filename,
- target_instruction_set, &oat_is_pic);
-
- // Apps that are compiled with --compile-pic never need to be patchoat-d
- if (oat_is_pic && dalvik_cache_decision == kPatchoatNeeded) {
- dalvik_cache_decision = kUpToDate;
- }
- // We will only return DexOptNeeded if both the cache and system return it.
- if (dalvik_cache_decision != kDexoptNeeded && !require_system_version) {
- CHECK(!(dalvik_cache_decision == kPatchoatNeeded && !should_relocate_if_possible))
- << "May not return PatchoatNeeded when patching is disabled.";
- return dalvik_cache_decision;
- }
- // We couldn't find one thats easy. We should now try the system.
- }
+ // TODO: Verify the dex location is well formed, and throw an IOException if
+ // not?
- bool oat_is_pic;
- jbyte system_decision =
- IsDexOptNeededForFile<kVerboseLogging, kReasonLogging>(odex_filename, filename,
- target_instruction_set, &oat_is_pic);
- CHECK(!(system_decision == kPatchoatNeeded && !should_relocate_if_possible))
- << "May not return PatchoatNeeded when patching is disabled.";
+ OatFileAssistant oat_file_assistant(filename, target_instruction_set, false, pkgname);
- // Apps that are compiled with --compile-pic never need to be patchoat-d
- if (oat_is_pic && system_decision == kPatchoatNeeded) {
- system_decision = kUpToDate;
+ // Always treat elements of the bootclasspath as up-to-date.
+ if (oat_file_assistant.IsInBootClassPath()) {
+ return kUpToDate;
}
- if (require_system_version && system_decision == kPatchoatNeeded
- && dalvik_cache_decision == kUpToDate) {
- // We have a version from system relocated to the cache. Return it.
- return dalvik_cache_decision;
+ // TODO: Checking the profile should probably be done in the GetStatus()
+ // function. We have it here because GetStatus() should not be copying
+ // profile files. But who should be copying profile files?
+ if (oat_file_assistant.OdexFileIsOutOfDate()) {
+ // Needs recompile if profile has changed significantly.
+ if (Runtime::Current()->GetProfilerOptions().IsEnabled()) {
+ if (oat_file_assistant.IsProfileChangeSignificant()) {
+ if (!defer) {
+ oat_file_assistant.CopyProfileFile();
+ }
+ return kDexoptNeeded;
+ } else if (oat_file_assistant.ProfileExists()
+ && !oat_file_assistant.OldProfileExists()) {
+ if (!defer) {
+ oat_file_assistant.CopyProfileFile();
+ }
+ }
+ }
}
- if (should_copy_profile && system_decision == kDexoptNeeded) {
- CopyProfileFile(profile_file.c_str(), prev_profile_file.c_str());
+ OatFileAssistant::Status status = oat_file_assistant.GetStatus();
+ switch (status) {
+ case OatFileAssistant::kUpToDate: return kUpToDate;
+ case OatFileAssistant::kNeedsRelocation: return kPatchoatNeeded;
+ case OatFileAssistant::kOutOfDate: return kDexoptNeeded;
}
-
- return system_decision;
+ UNREACHABLE();
}
static jbyte DexFile_isDexOptNeededInternal(JNIEnv* env, jclass, jstring javaFilename,
diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc
index 6c82eb22bd..57ca2b1303 100644
--- a/runtime/native/dalvik_system_VMDebug.cc
+++ b/runtime/native/dalvik_system_VMDebug.cc
@@ -93,8 +93,7 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF
int fd = dup(originalFd);
if (fd < 0) {
ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/RuntimeException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;",
"dup(%d) failed: %s", originalFd, strerror(errno));
return;
}
@@ -148,8 +147,7 @@ static jlong VMDebug_lastDebuggerActivity(JNIEnv*, jclass) {
static void ThrowUnsupportedOperationException(JNIEnv* env) {
ScopedObjectAccess soa(env);
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewException(throw_location, "Ljava/lang/UnsupportedOperationException;", NULL);
+ soa.Self()->ThrowNewException("Ljava/lang/UnsupportedOperationException;", NULL);
}
static void VMDebug_startInstructionCounting(JNIEnv* env, jclass) {
@@ -196,7 +194,7 @@ static void VMDebug_dumpHprofData(JNIEnv* env, jclass, jstring javaFilename, job
// Only one of these may be NULL.
if (javaFilename == NULL && javaFd == NULL) {
ScopedObjectAccess soa(env);
- ThrowNullPointerException(NULL, "fileName == null && fd == null");
+ ThrowNullPointerException("fileName == null && fd == null");
return;
}
diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc
index 599d97fb51..6e3f1bc913 100644
--- a/runtime/native/dalvik_system_VMRuntime.cc
+++ b/runtime/native/dalvik_system_VMRuntime.cc
@@ -72,7 +72,7 @@ static jobject VMRuntime_newNonMovableArray(JNIEnv* env, jobject, jclass javaEle
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
- ThrowNullPointerException(NULL, "element class == null");
+ ThrowNullPointerException("element class == null");
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -97,7 +97,7 @@ static jobject VMRuntime_newUnpaddedArray(JNIEnv* env, jobject, jclass javaEleme
}
mirror::Class* element_class = soa.Decode<mirror::Class*>(javaElementClass);
if (UNLIKELY(element_class == nullptr)) {
- ThrowNullPointerException(NULL, "element class == null");
+ ThrowNullPointerException("element class == null");
return nullptr;
}
Runtime* runtime = Runtime::Current();
@@ -120,7 +120,7 @@ static jlong VMRuntime_addressOf(JNIEnv* env, jobject, jobject javaArray) {
ScopedFastNativeObjectAccess soa(env);
mirror::Array* array = soa.Decode<mirror::Array*>(javaArray);
if (!array->IsArrayInstance()) {
- ThrowIllegalArgumentException(NULL, "not an array");
+ ThrowIllegalArgumentException("not an array");
return 0;
}
if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc
index 1ea75f386f..60d14e9c7a 100644
--- a/runtime/native/java_lang_Class.cc
+++ b/runtime/native/java_lang_Class.cc
@@ -55,8 +55,7 @@ static jclass Class_classForName(JNIEnv* env, jclass, jstring javaName, jboolean
// is especially handy for array types, since we want to avoid
// auto-generating bogus array classes.
if (!IsValidBinaryClassName(name.c_str())) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ClassNotFoundException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ClassNotFoundException;",
"Invalid name: %s", name.c_str());
return nullptr;
}
diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc
index 4ea2546e30..6afe83bbb0 100644
--- a/runtime/native/java_lang_String.cc
+++ b/runtime/native/java_lang_String.cc
@@ -29,7 +29,7 @@ namespace art {
static jint String_compareTo(JNIEnv* env, jobject javaThis, jobject javaRhs) {
ScopedFastNativeObjectAccess soa(env);
if (UNLIKELY(javaRhs == NULL)) {
- ThrowNullPointerException(NULL, "rhs == null");
+ ThrowNullPointerException("rhs == null");
return -1;
} else {
return soa.Decode<mirror::String*>(javaThis)->CompareTo(soa.Decode<mirror::String*>(javaRhs));
diff --git a/runtime/native/java_lang_System.cc b/runtime/native/java_lang_System.cc
index f79be56aeb..736b42b739 100644
--- a/runtime/native/java_lang_System.cc
+++ b/runtime/native/java_lang_System.cc
@@ -39,8 +39,7 @@ static void ThrowArrayStoreException_NotAnArray(const char* identifier, mirror::
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
std::string actualType(PrettyTypeOf(array));
Thread* self = Thread::Current();
- ThrowLocation throw_location = self->GetCurrentLocationForThrow();
- self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+ self->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"%s of type %s is not an array", identifier, actualType.c_str());
}
@@ -52,11 +51,11 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
// Null pointer checks.
if (UNLIKELY(javaSrc == nullptr)) {
- ThrowNullPointerException(nullptr, "src == null");
+ ThrowNullPointerException("src == null");
return;
}
if (UNLIKELY(javaDst == nullptr)) {
- ThrowNullPointerException(nullptr, "dst == null");
+ ThrowNullPointerException("dst == null");
return;
}
@@ -78,8 +77,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
if (UNLIKELY(srcPos < 0) || UNLIKELY(dstPos < 0) || UNLIKELY(count < 0) ||
UNLIKELY(srcPos > srcArray->GetLength() - count) ||
UNLIKELY(dstPos > dstArray->GetLength() - count)) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
"src.length=%d srcPos=%d dst.length=%d dstPos=%d length=%d",
srcArray->GetLength(), srcPos, dstArray->GetLength(), dstPos,
count);
@@ -132,8 +130,7 @@ static void System_arraycopy(JNIEnv* env, jclass, jobject javaSrc, jint srcPos,
srcComponentType->IsPrimitive())) {
std::string srcType(PrettyTypeOf(srcArray));
std::string dstType(PrettyTypeOf(dstArray));
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayStoreException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/ArrayStoreException;",
"Incompatible types: src=%s, dst=%s",
srcType.c_str(), dstType.c_str());
return;
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index e4b8db1371..d3b52ba47d 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -100,7 +100,7 @@ static jboolean Thread_nativeHoldsLock(JNIEnv* env, jobject java_thread, jobject
ScopedObjectAccess soa(env);
mirror::Object* object = soa.Decode<mirror::Object*>(java_object);
if (object == NULL) {
- ThrowNullPointerException(NULL, "object == null");
+ ThrowNullPointerException("object == null");
return JNI_FALSE;
}
MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
diff --git a/runtime/native/java_lang_reflect_Constructor.cc b/runtime/native/java_lang_reflect_Constructor.cc
index 3121a90d09..765f54893a 100644
--- a/runtime/native/java_lang_reflect_Constructor.cc
+++ b/runtime/native/java_lang_reflect_Constructor.cc
@@ -42,8 +42,7 @@ static jobject Constructor_newInstance(JNIEnv* env, jobject javaMethod, jobjectA
StackHandleScope<1> hs(soa.Self());
Handle<mirror::Class> c(hs.NewHandle(m->GetDeclaringClass()));
if (UNLIKELY(c->IsAbstract())) {
- ThrowLocation throw_location = soa.Self()->GetCurrentLocationForThrow();
- soa.Self()->ThrowNewExceptionF(throw_location, "Ljava/lang/InstantiationException;",
+ soa.Self()->ThrowNewExceptionF("Ljava/lang/InstantiationException;",
"Can't instantiate %s %s",
c->IsInterface() ? "interface" : "abstract class",
PrettyDescriptor(c.Get()).c_str());
diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc
index 2cebf02471..9c5bde9c1e 100644
--- a/runtime/native/java_lang_reflect_Field.cc
+++ b/runtime/native/java_lang_reflect_Field.cc
@@ -34,7 +34,7 @@ ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::ArtFiel
mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (kIsSet && field->IsFinal()) {
- ThrowIllegalAccessException(nullptr,
+ ThrowIllegalAccessException(
StringPrintf("Cannot set %s field %s of class %s",
PrettyJavaAccessFlags(field->GetAccessFlags()).c_str(),
PrettyField(field).c_str(),
@@ -45,7 +45,7 @@ ALWAYS_INLINE inline static bool VerifyFieldAccess(Thread* self, mirror::ArtFiel
mirror::Class* calling_class = nullptr;
if (!VerifyAccess(self, obj, field->GetDeclaringClass(), field->GetAccessFlags(),
&calling_class)) {
- ThrowIllegalAccessException(nullptr,
+ ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s field %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
PrettyJavaAccessFlags(field->GetAccessFlags()).c_str(),
@@ -98,8 +98,8 @@ ALWAYS_INLINE inline static bool GetFieldValue(mirror::Object* o, mirror::ArtFie
// Never okay.
break;
}
- ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return false;
}
@@ -190,7 +190,7 @@ ALWAYS_INLINE inline static JValue GetPrimitiveField(JNIEnv* env, jobject javaFi
}
// Widen it if necessary (and possible).
JValue wide_value;
- if (!ConvertPrimitiveValue(nullptr, false, field_type, kPrimitiveType, field_value,
+ if (!ConvertPrimitiveValue(false, field_type, kPrimitiveType, field_value,
&wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return JValue();
@@ -270,8 +270,8 @@ static void SetFieldValue(mirror::Object* o, mirror::ArtField* f, Primitive::Typ
FALLTHROUGH_INTENDED;
case Primitive::kPrimVoid:
// Never okay.
- ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return;
}
}
@@ -329,14 +329,14 @@ static void SetPrimitiveField(JNIEnv* env, jobject javaField, jobject javaObj,
}
Primitive::Type field_type = f->GetTypeAsPrimitiveType();
if (UNLIKELY(field_type == Primitive::kPrimNot)) {
- ThrowIllegalArgumentException(nullptr, StringPrintf("Not a primitive field: %s",
- PrettyField(f).c_str()).c_str());
+ ThrowIllegalArgumentException(StringPrintf("Not a primitive field: %s",
+ PrettyField(f).c_str()).c_str());
return;
}
// Widen the value if necessary (and possible).
JValue wide_value;
- if (!ConvertPrimitiveValue(nullptr, false, kPrimitiveType, field_type, new_value, &wide_value)) {
+ if (!ConvertPrimitiveValue(false, kPrimitiveType, field_type, new_value, &wide_value)) {
DCHECK(soa.Self()->IsExceptionPending());
return;
}
diff --git a/runtime/oat.h b/runtime/oat.h
index f973b28978..79cb024674 100644
--- a/runtime/oat.h
+++ b/runtime/oat.h
@@ -32,7 +32,7 @@ class InstructionSetFeatures;
class PACKED(4) OatHeader {
public:
static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' };
- static constexpr uint8_t kOatVersion[] = { '0', '5', '6', '\0' };
+ static constexpr uint8_t kOatVersion[] = { '0', '5', '8', '\0' };
static constexpr const char* kImageLocationKey = "image-location";
static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline";
diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc
new file mode 100644
index 0000000000..f87fa4f8f4
--- /dev/null
+++ b/runtime/oat_file_assistant.cc
@@ -0,0 +1,952 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file_assistant.h"
+
+#include <fcntl.h>
+#ifdef __linux__
+#include <sys/sendfile.h>
+#else
+#include <sys/socket.h>
+#endif
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include <set>
+
+#include "base/logging.h"
+#include "base/stringprintf.h"
+#include "class_linker.h"
+#include "gc/heap.h"
+#include "gc/space/image_space.h"
+#include "image.h"
+#include "oat.h"
+#include "os.h"
+#include "profiler.h"
+#include "runtime.h"
+#include "ScopedFd.h"
+#include "utils.h"
+
+namespace art {
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const InstructionSet isa,
+ bool load_executable)
+ : OatFileAssistant(dex_location, nullptr, isa, load_executable, nullptr) { }
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const char* oat_location,
+ const InstructionSet isa,
+ bool load_executable)
+ : OatFileAssistant(dex_location, oat_location, isa, load_executable, nullptr) { }
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const InstructionSet isa,
+ bool load_executable,
+ const char* package_name)
+ : OatFileAssistant(dex_location, nullptr, isa, load_executable, package_name) { }
+
+OatFileAssistant::OatFileAssistant(const char* dex_location,
+ const char* oat_location,
+ const InstructionSet isa,
+ bool load_executable,
+ const char* package_name)
+ : dex_location_(dex_location), isa_(isa),
+ package_name_(package_name), load_executable_(load_executable) {
+ if (load_executable_ && isa != kRuntimeISA) {
+ LOG(WARNING) << "OatFileAssistant: Load executable specified, "
+ << "but isa is not kRuntimeISA. Will not attempt to load executable.";
+ load_executable_ = false;
+ }
+
+ // If the user gave a target oat location, save that as the cached oat
+ // location now so we won't try to construct the default location later.
+ if (oat_location != nullptr) {
+ cached_oat_file_name_ = std::string(oat_location);
+ cached_oat_file_name_attempted_ = true;
+ cached_oat_file_name_found_ = true;
+ }
+
+ // If there is no package name given, we will not be able to find any
+ // profiles associated with this dex location. Preemptively mark that to
+ // be the case, rather than trying to find and load the profiles later.
+ // Similarly, if profiling is disabled.
+ if (package_name == nullptr
+ || !Runtime::Current()->GetProfilerOptions().IsEnabled()) {
+ profile_load_attempted_ = true;
+ profile_load_succeeded_ = false;
+ old_profile_load_attempted_ = true;
+ old_profile_load_succeeded_ = false;
+ }
+}
+
+OatFileAssistant::~OatFileAssistant() {
+ // Clean up the lock file.
+ if (lock_file_.get() != nullptr) {
+ lock_file_->Erase();
+ TEMP_FAILURE_RETRY(unlink(lock_file_->GetPath().c_str()));
+ }
+}
+
+bool OatFileAssistant::IsInBootClassPath() {
+ // Note: We check the current boot class path, regardless of the ISA
+ // specified by the user. This is okay, because the boot class path should
+ // be the same for all ISAs.
+ // TODO: Can we verify the boot class path is the same for all ISAs?
+ Runtime* runtime = Runtime::Current();
+ ClassLinker* class_linker = runtime->GetClassLinker();
+ const auto& boot_class_path = class_linker->GetBootClassPath();
+ for (size_t i = 0; i < boot_class_path.size(); i++) {
+ if (boot_class_path[i]->GetLocation() == std::string(dex_location_)) {
+ VLOG(oat) << "Dex location " << dex_location_ << " is in boot class path";
+ return true;
+ }
+ }
+ return false;
+}
+
+bool OatFileAssistant::Lock(std::string* error_msg) {
+  CHECK(error_msg != nullptr);
+  CHECK(lock_file_.get() == nullptr) << "OatFileAssistant::Lock already acquired";
+
+  if (OatFileName() == nullptr) {
+    *error_msg = "Failed to determine lock file";
+    return false;
+  }
+  std::string lock_file_name = *OatFileName() + ".flock";  // Lock lives next to the oat file.
+
+  lock_file_.reset(OS::CreateEmptyFile(lock_file_name.c_str()));
+  if (lock_file_.get() == nullptr) {
+    *error_msg = "Failed to create lock file " + lock_file_name;
+    return false;
+  }
+
+  if (!flock_.Init(lock_file_.get(), error_msg)) {
+    TEMP_FAILURE_RETRY(unlink(lock_file_name.c_str()));  // NOTE(review): lock_file_ stays non-null here, so a retry of Lock() trips the CHECK above — confirm intended.
+    return false;
+  }
+  return true;
+}
+
+OatFileAssistant::Status OatFileAssistant::GetStatus() {
+ // TODO: If the profiling code is ever restored, it's worth considering
+ // whether we should check to see if the profile is out of date here.
+
+ if (OdexFileIsOutOfDate()) {
+ // The DEX file is not pre-compiled.
+ // TODO: What if the oat file is not out of date? Could we relocate it
+ // from itself?
+ return OatFileIsUpToDate() ? kUpToDate : kOutOfDate;
+ } else {
+ // The DEX file is pre-compiled. If the oat file isn't up to date, we can
+ // patch the pre-compiled version rather than recompiling.
+ if (OatFileIsUpToDate() || OdexFileIsUpToDate()) {
+ return kUpToDate;
+ } else {
+ return kNeedsRelocation;
+ }
+ }
+}
+
+bool OatFileAssistant::MakeUpToDate(std::string* error_msg) {
+ switch (GetStatus()) {
+ case kUpToDate: return true;
+ case kNeedsRelocation: return RelocateOatFile(error_msg);
+ case kOutOfDate: return GenerateOatFile(error_msg);
+ }
+ UNREACHABLE();
+}
+
+std::unique_ptr<OatFile> OatFileAssistant::GetBestOatFile() {
+  if (OatFileIsUpToDate()) {
+    oat_file_released_ = true;  // Ownership of the cached file transfers to the caller.
+    return std::move(cached_oat_file_);
+  }
+
+  if (OdexFileIsUpToDate()) {
+    oat_file_released_ = true;  // Ownership of the cached file transfers to the caller.
+    return std::move(cached_odex_file_);
+  }
+
+  if (load_executable_) {
+    VLOG(oat) << "Oat File Assistant: No relocated oat file found,"
+      << " attempting to fall back to interpreting oat file instead.";
+
+    if (!OatFileIsOutOfDate()) {
+      load_executable_ = false;  // Retry the load non-executable so an unrelocated file is usable.
+      ClearOatFileCache();
+      if (!OatFileIsOutOfDate()) {
+        oat_file_released_ = true;
+        return std::move(cached_oat_file_);
+      }
+    }
+
+    if (!OdexFileIsOutOfDate()) {
+      load_executable_ = false;  // Same fallback for the odex file.
+      ClearOdexFileCache();
+      if (!OdexFileIsOutOfDate()) {
+        oat_file_released_ = true;
+        return std::move(cached_odex_file_);
+      }
+    }
+  }
+
+  return std::unique_ptr<OatFile>();  // No usable oat/odex file was found.
+}
+
+std::vector<std::unique_ptr<const DexFile>> OatFileAssistant::LoadDexFiles(
+ const OatFile& oat_file, const char* dex_location) {
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+
+ // Load the primary dex file.
+ std::string error_msg;
+ const OatFile::OatDexFile* oat_dex_file = oat_file.GetOatDexFile(
+ dex_location, nullptr, false);
+ if (oat_dex_file == nullptr) {
+ LOG(WARNING) << "Attempt to load out-of-date oat file "
+ << oat_file.GetLocation() << " for dex location " << dex_location;
+ return std::vector<std::unique_ptr<const DexFile>>();
+ }
+
+ std::unique_ptr<const DexFile> dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Failed to open dex file from oat dex file: " << error_msg;
+ return std::vector<std::unique_ptr<const DexFile>>();
+ }
+ dex_files.push_back(std::move(dex_file));
+
+ // Load secondary multidex files
+ for (int i = 1; ; i++) {
+ std::string secondary_dex_location = DexFile::GetMultiDexClassesDexName(i, dex_location);
+ oat_dex_file = oat_file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+ if (oat_dex_file == NULL) {
+ // There are no more secondary dex files to load.
+ break;
+ }
+
+ dex_file = oat_dex_file->OpenDexFile(&error_msg);
+ if (dex_file.get() == nullptr) {
+ LOG(WARNING) << "Failed to open dex file from oat dex file: " << error_msg;
+ return std::vector<std::unique_ptr<const DexFile>>();
+ }
+ dex_files.push_back(std::move(dex_file));
+ }
+ return dex_files;
+}
+
+const std::string* OatFileAssistant::OdexFileName() {
+ if (!cached_odex_file_name_attempted_) {
+ CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
+ cached_odex_file_name_attempted_ = true;
+
+ std::string error_msg;
+ cached_odex_file_name_found_ = DexFilenameToOdexFilename(
+ dex_location_, isa_, &cached_odex_file_name_, &error_msg);
+ if (!cached_odex_file_name_found_) {
+ // If we can't figure out the odex file, we treat it as if the odex
+ // file was inaccessible.
+ LOG(WARNING) << "Failed to determine odex file name: " << error_msg;
+ }
+ }
+ return cached_odex_file_name_found_ ? &cached_odex_file_name_ : nullptr;
+}
+
+bool OatFileAssistant::OdexFileExists() {
+ return GetOdexFile() != nullptr;
+}
+
+OatFileAssistant::Status OatFileAssistant::OdexFileStatus() {
+ if (OdexFileIsOutOfDate()) {
+ return kOutOfDate;
+ }
+ if (OdexFileIsUpToDate()) {
+ return kUpToDate;
+ }
+ return kNeedsRelocation;
+}
+
+bool OatFileAssistant::OdexFileIsOutOfDate() {
+ if (!odex_file_is_out_of_date_attempted_) {
+ odex_file_is_out_of_date_attempted_ = true;
+ const OatFile* odex_file = GetOdexFile();
+ if (odex_file == nullptr) {
+ cached_odex_file_is_out_of_date_ = true;
+ } else {
+ cached_odex_file_is_out_of_date_ = GivenOatFileIsOutOfDate(*odex_file);
+ }
+ }
+ return cached_odex_file_is_out_of_date_;
+}
+
+bool OatFileAssistant::OdexFileNeedsRelocation() {
+ return OdexFileStatus() == kNeedsRelocation;
+}
+
+bool OatFileAssistant::OdexFileIsUpToDate() {
+ if (!odex_file_is_up_to_date_attempted_) {
+ odex_file_is_up_to_date_attempted_ = true;
+ const OatFile* odex_file = GetOdexFile();
+ if (odex_file == nullptr) {
+ cached_odex_file_is_up_to_date_ = false;
+ } else {
+ cached_odex_file_is_up_to_date_ = GivenOatFileIsUpToDate(*odex_file);
+ }
+ }
+ return cached_odex_file_is_up_to_date_;
+}
+
+const std::string* OatFileAssistant::OatFileName() {
+ if (!cached_oat_file_name_attempted_) {
+ cached_oat_file_name_attempted_ = true;
+
+ // Compute the oat file name from the dex location.
+ CHECK(dex_location_ != nullptr) << "OatFileAssistant: null dex location";
+
+ // TODO: The oat file assistant should be the definitive place for
+ // determining the oat file name from the dex location, not
+ // GetDalvikCacheFilename.
+ std::string cache_dir = StringPrintf("%s%s",
+ DalvikCacheDirectory().c_str(), GetInstructionSetString(isa_));
+ std::string error_msg;
+ cached_oat_file_name_found_ = GetDalvikCacheFilename(dex_location_,
+ cache_dir.c_str(), &cached_oat_file_name_, &error_msg);
+ if (!cached_oat_file_name_found_) {
+ // If we can't determine the oat file name, we treat the oat file as
+ // inaccessible.
+ LOG(WARNING) << "Failed to determine oat file name for dex location "
+ << dex_location_ << ": " << error_msg;
+ }
+ }
+ return cached_oat_file_name_found_ ? &cached_oat_file_name_ : nullptr;
+}
+
+bool OatFileAssistant::OatFileExists() {
+ return GetOatFile() != nullptr;
+}
+
+OatFileAssistant::Status OatFileAssistant::OatFileStatus() {
+ if (OatFileIsOutOfDate()) {
+ return kOutOfDate;
+ }
+ if (OatFileIsUpToDate()) {
+ return kUpToDate;
+ }
+ return kNeedsRelocation;
+}
+
+bool OatFileAssistant::OatFileIsOutOfDate() {
+ if (!oat_file_is_out_of_date_attempted_) {
+ oat_file_is_out_of_date_attempted_ = true;
+ const OatFile* oat_file = GetOatFile();
+ if (oat_file == nullptr) {
+ cached_oat_file_is_out_of_date_ = true;
+ } else {
+ cached_oat_file_is_out_of_date_ = GivenOatFileIsOutOfDate(*oat_file);
+ }
+ }
+ return cached_oat_file_is_out_of_date_;
+}
+
+bool OatFileAssistant::OatFileNeedsRelocation() {
+ return OatFileStatus() == kNeedsRelocation;
+}
+
+bool OatFileAssistant::OatFileIsUpToDate() {
+ if (!oat_file_is_up_to_date_attempted_) {
+ oat_file_is_up_to_date_attempted_ = true;
+ const OatFile* oat_file = GetOatFile();
+ if (oat_file == nullptr) {
+ cached_oat_file_is_up_to_date_ = false;
+ } else {
+ cached_oat_file_is_up_to_date_ = GivenOatFileIsUpToDate(*oat_file);
+ }
+ }
+ return cached_oat_file_is_up_to_date_;
+}
+
+OatFileAssistant::Status OatFileAssistant::GivenOatFileStatus(const OatFile& file) {
+ // TODO: This could cause GivenOatFileIsOutOfDate to be called twice, which
+ // is more work than we need to do. If performance becomes a concern, and
+ // this method is actually called, this should be fixed.
+ if (GivenOatFileIsOutOfDate(file)) {
+ return kOutOfDate;
+ }
+ if (GivenOatFileIsUpToDate(file)) {
+ return kUpToDate;
+ }
+ return kNeedsRelocation;
+}
+
+bool OatFileAssistant::GivenOatFileIsOutOfDate(const OatFile& file) {
+  // Verify the dex checksum.
+  // Note: GetOatDexFile will return NULL if the dex checksum doesn't match
+  // what we provide, which verifies the primary dex checksum for us.
+  const uint32_t* dex_checksum_pointer = GetRequiredDexChecksum();
+  const OatFile::OatDexFile* oat_dex_file = file.GetOatDexFile(
+      dex_location_, dex_checksum_pointer, false);
+  if (oat_dex_file == NULL) {
+    return true;
+  }
+
+  // Verify the dex checksums for any secondary multidex files
+  for (int i = 1; ; i++) {
+    std::string secondary_dex_location
+      = DexFile::GetMultiDexClassesDexName(i, dex_location_);
+    const OatFile::OatDexFile* secondary_oat_dex_file
+      = file.GetOatDexFile(secondary_dex_location.c_str(), nullptr, false);
+    if (secondary_oat_dex_file == NULL) {
+      // There are no more secondary dex files to check.
+      break;
+    }
+
+    std::string error_msg;
+    uint32_t expected_secondary_checksum = 0;
+    if (DexFile::GetChecksum(secondary_dex_location.c_str(),
+          &expected_secondary_checksum, &error_msg)) {
+      uint32_t actual_secondary_checksum
+        = secondary_oat_dex_file->GetDexFileLocationChecksum();
+      if (expected_secondary_checksum != actual_secondary_checksum) {
+        VLOG(oat) << "Dex checksum does not match for secondary dex: "
+          << secondary_dex_location
+          << ". Expected: " << expected_secondary_checksum
+          << ", Actual: " << actual_secondary_checksum;
+        return true;  // BUG FIX: a checksum mismatch means the file IS out of date.
+      }
+    } else {
+      // If we can't get the checksum for the secondary location, we assume
+      // the dex checksum is up to date for this and all other secondary dex
+      // files.
+      break;
+    }
+  }
+
+  // Verify the image checksum
+  const ImageInfo* image_info = GetImageInfo();
+  if (image_info == nullptr) {
+    VLOG(oat) << "No image for oat image checksum to match against.";
+    return true;
+  }
+
+  if (file.GetOatHeader().GetImageFileLocationOatChecksum() != image_info->oat_checksum) {
+    VLOG(oat) << "Oat image checksum does not match image checksum.";
+    return true;
+  }
+
+  // The checksums are all good; the dex file is not out of date.
+  return false;
+}
+
+bool OatFileAssistant::GivenOatFileNeedsRelocation(const OatFile& file) {
+ return GivenOatFileStatus(file) == kNeedsRelocation;
+}
+
+bool OatFileAssistant::GivenOatFileIsUpToDate(const OatFile& file) {
+  if (GivenOatFileIsOutOfDate(file)) {
+    return false;
+  }
+
+  if (file.IsPic()) {
+    return true;  // Position-independent code needs no relocation check.
+  }
+
+  const ImageInfo* image_info = GetImageInfo();
+  if (image_info == nullptr) {
+    VLOG(oat) << "No image to check oat relocation against.";
+    return false;
+  }
+
+  // Verify the oat_data_begin recorded for the image in the oat file matches
+  // the actual oat_data_begin for boot.oat in the image.
+  const OatHeader& oat_header = file.GetOatHeader();
+  uintptr_t oat_data_begin = oat_header.GetImageFileLocationOatDataBegin();
+  if (oat_data_begin != image_info->oat_data_begin) {
+    VLOG(oat) << file.GetLocation() <<
+      ": Oat file image oat_data_begin (" << oat_data_begin << ")"
+      << " does not match actual image oat_data_begin ("
+      << image_info->oat_data_begin << ")";
+    return false;
+  }
+
+  // Verify the oat_patch_delta recorded for the image in the oat file matches
+  // the actual oat_patch_delta for the image.
+  int32_t oat_patch_delta = oat_header.GetImagePatchDelta();
+  if (oat_patch_delta != image_info->patch_delta) {
+    VLOG(oat) << file.GetLocation() <<
+      ": Oat file image patch delta (" << oat_patch_delta << ")"
+      << " does not match actual image patch delta ("
+      << image_info->patch_delta << ")";
+    return false;
+  }
+  return true;
+}
+
+bool OatFileAssistant::ProfileExists() {
+ return GetProfile() != nullptr;
+}
+
+bool OatFileAssistant::OldProfileExists() {
+ return GetOldProfile() != nullptr;
+}
+
+// TODO: The IsProfileChangeSignificant implementation was copied from likely
+// bit-rotted code.
+bool OatFileAssistant::IsProfileChangeSignificant() {
+  ProfileFile* profile = GetProfile();
+  if (profile == nullptr) {
+    return false;
+  }
+
+  ProfileFile* old_profile = GetOldProfile();
+  if (old_profile == nullptr) {
+    return false;
+  }
+
+  // TODO: The following code to compare two profile files should live with
+  // the rest of the profiler code, not the oat file assistant code.
+
+  // A change in profile is considered significant if X% (change_thr property)
+  // of the top K% (compile_thr property) samples has changed.
+  const ProfilerOptions& options = Runtime::Current()->GetProfilerOptions();
+  const double top_k_threshold = options.GetTopKThreshold();
+  const double change_threshold = options.GetTopKChangeThreshold();
+  std::set<std::string> top_k, old_top_k;
+  profile->GetTopKSamples(top_k, top_k_threshold);
+  old_profile->GetTopKSamples(old_top_k, top_k_threshold);
+  std::set<std::string> diff;
+  std::set_difference(top_k.begin(), top_k.end(), old_top_k.begin(),
+      old_top_k.end(), std::inserter(diff, diff.end()));
+
+  // TODO: consider using the usedPercentage instead of the plain diff count.
+  double change_percent = 100.0 * static_cast<double>(diff.size())
+      / static_cast<double>(top_k.size());
+  std::set<std::string>::iterator end = diff.end();
+  for (std::set<std::string>::iterator it = diff.begin(); it != end; it++) {
+    VLOG(oat) << "Profile new in topK: " << *it;
+  }
+
+  if (change_percent > change_threshold) {
+    VLOG(oat) << "Oat File Assistant: Profile for " << dex_location_
+      << " has changed significantly: (top "
+      << top_k_threshold << "% samples changed in proportion of "
+      << change_percent << "%)";
+    return true;
+  }
+  return false;
+}
+
+// TODO: The CopyProfileFile implementation was copied from likely bit-rotted
+// code.
+void OatFileAssistant::CopyProfileFile() {
+  if (!ProfileExists()) {
+    return;
+  }
+
+  std::string profile_name = ProfileFileName();
+  std::string old_profile_name = OldProfileFileName();
+
+  ScopedFd src(open(old_profile_name.c_str(), O_RDONLY));  // NOTE(review): source is the *old* profile, destination the current one — confirm this copy direction is intended.
+  if (src.get() == -1) {
+    PLOG(WARNING) << "Failed to open profile file " << old_profile_name
+      << ". My uid:gid is " << getuid() << ":" << getgid();
+    return;
+  }
+
+  struct stat stat_src;
+  if (fstat(src.get(), &stat_src) == -1) {
+    PLOG(WARNING) << "Failed to get stats for profile file " << old_profile_name
+      << ". My uid:gid is " << getuid() << ":" << getgid();
+    return;
+  }
+
+  // Create the copy with rw------- (only accessible by system)
+  ScopedFd dst(open(profile_name.c_str(), O_WRONLY|O_CREAT|O_TRUNC, 0600));
+  if (dst.get() == -1) {
+    PLOG(WARNING) << "Failed to create/write prev profile file " << profile_name  // NOTE(review): message says "prev" but profile_name is the current profile path — verify wording.
+      << ". My uid:gid is " << getuid() << ":" << getgid();
+    return;
+  }
+
+#ifdef __linux__
+  if (sendfile(dst.get(), src.get(), nullptr, stat_src.st_size) == -1) {
+#else
+  off_t len;
+  if (sendfile(dst.get(), src.get(), 0, &len, nullptr, 0) == -1) {  // NOTE(review): BSD sendfile expects a socket destination — presumably fails on a regular file; confirm fallback.
+#endif
+    PLOG(WARNING) << "Failed to copy profile file " << old_profile_name
+      << " to " << profile_name << ". My uid:gid is " << getuid()
+      << ":" << getgid();
+  }
+}
+
+bool OatFileAssistant::RelocateOatFile(std::string* error_msg) {
+ CHECK(error_msg != nullptr);
+
+ if (OdexFileName() == nullptr) {
+ *error_msg = "Patching of oat file for dex location "
+ + std::string(dex_location_)
+ + " not attempted because the odex file name could not be determined.";
+ return false;
+ }
+ const std::string& odex_file_name = *OdexFileName();
+
+ if (OatFileName() == nullptr) {
+ *error_msg = "Patching of oat file for dex location "
+ + std::string(dex_location_)
+ + " not attempted because the oat file name could not be determined.";
+ return false;
+ }
+ const std::string& oat_file_name = *OatFileName();
+
+ const ImageInfo* image_info = GetImageInfo();
+ Runtime* runtime = Runtime::Current();
+ if (image_info == nullptr) {
+ *error_msg = "Patching of oat file " + oat_file_name
+ + " not attempted because no image location was found.";
+ return false;
+ }
+
+ if (!runtime->IsDex2OatEnabled()) {
+ *error_msg = "Patching of oat file " + oat_file_name
+ + " not attempted because dex2oat is disabled";
+ return false;
+ }
+
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetPatchoatExecutable());
+ argv.push_back("--instruction-set=" + std::string(GetInstructionSetString(isa_)));
+ argv.push_back("--input-oat-file=" + odex_file_name);
+ argv.push_back("--output-oat-file=" + oat_file_name);
+ argv.push_back("--patched-image-location=" + image_info->location);
+
+ std::string command_line(Join(argv, ' '));
+ if (!Exec(argv, error_msg)) {
+ // Manually delete the file. This ensures there is no garbage left over if
+ // the process unexpectedly died.
+ TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+ return false;
+ }
+
+ // Mark that the oat file has changed and we should try to reload.
+ ClearOatFileCache();
+ return true;
+}
+
+bool OatFileAssistant::GenerateOatFile(std::string* error_msg) {
+  CHECK(error_msg != nullptr);
+
+  if (OatFileName() == nullptr) {
+    *error_msg = "Generation of oat file for dex location "
+      + std::string(dex_location_)
+      + " not attempted because the oat file name could not be determined.";
+    return false;
+  }
+  const std::string& oat_file_name = *OatFileName();
+
+  Runtime* runtime = Runtime::Current();
+  if (!runtime->IsDex2OatEnabled()) {
+    *error_msg = "Generation of oat file " + oat_file_name
+      + " not attempted because dex2oat is disabled";
+    return false;
+  }
+
+  std::vector<std::string> args;
+  args.push_back("--dex-file=" + std::string(dex_location_));
+  args.push_back("--oat-file=" + oat_file_name);
+
+  // dex2oat ignores missing dex files and doesn't report an error.
+  // Check explicitly here so we can detect the error properly.
+  // TODO: Why does dex2oat behave that way?
+  if (!OS::FileExists(dex_location_)) {
+    *error_msg = "Dex location " + std::string(dex_location_) + " does not exist.";
+    return false;
+  }
+
+  if (!Dex2Oat(args, error_msg)) {
+    // Manually delete the file. This ensures there is no garbage left over if
+    // the process unexpectedly died.
+    TEMP_FAILURE_RETRY(unlink(oat_file_name.c_str()));
+    return false;
+  }
+
+  // Mark that the oat file has changed and we should try to reload.
+  ClearOatFileCache();
+  return true;
+}
+
+bool OatFileAssistant::Dex2Oat(const std::vector<std::string>& args,
+ std::string* error_msg) {
+ Runtime* runtime = Runtime::Current();
+ std::string image_location = ImageLocation();
+ if (image_location.empty()) {
+ *error_msg = "No image location found for Dex2Oat.";
+ return false;
+ }
+
+ std::vector<std::string> argv;
+ argv.push_back(runtime->GetCompilerExecutable());
+ argv.push_back("--runtime-arg");
+ argv.push_back("-classpath");
+ argv.push_back("--runtime-arg");
+ argv.push_back(runtime->GetClassPathString());
+ runtime->AddCurrentRuntimeFeaturesAsDex2OatArguments(&argv);
+
+ if (!runtime->IsVerificationEnabled()) {
+ argv.push_back("--compiler-filter=verify-none");
+ }
+
+ if (runtime->MustRelocateIfPossible()) {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xrelocate");
+ } else {
+ argv.push_back("--runtime-arg");
+ argv.push_back("-Xnorelocate");
+ }
+
+ if (!kIsTargetBuild) {
+ argv.push_back("--host");
+ }
+
+ argv.push_back("--boot-image=" + image_location);
+
+ std::vector<std::string> compiler_options = runtime->GetCompilerOptions();
+ argv.insert(argv.end(), compiler_options.begin(), compiler_options.end());
+
+ argv.insert(argv.end(), args.begin(), args.end());
+
+ std::string command_line(Join(argv, ' '));
+ return Exec(argv, error_msg);
+}
+
+bool OatFileAssistant::DexFilenameToOdexFilename(const std::string& location,
+    InstructionSet isa, std::string* odex_filename, std::string* error_msg) {
+  CHECK(odex_filename != nullptr);
+  CHECK(error_msg != nullptr);
+
+  // The odex file name is formed by replacing the dex_location extension with
+  // .odex and inserting an isa directory. For example:
+  //   location = /foo/bar/baz.jar
+  //   odex_location = /foo/bar/<isa>/baz.odex
+
+  // Find the directory portion of the dex location and add the isa directory.
+  size_t pos = location.rfind('/');
+  if (pos == std::string::npos) {
+    *error_msg = "Dex location " + location + " has no directory.";
+    return false;
+  }
+  std::string dir = location.substr(0, pos+1);
+  dir += std::string(GetInstructionSetString(isa));
+
+  // Find the file portion of the dex location.
+  std::string file;
+  if (pos == std::string::npos) {  // NOTE(review): unreachable — the npos case already returned above; the else branch always runs.
+    file = location;
+  } else {
+    file = location.substr(pos+1);
+  }
+
+  // Get the base part of the file without the extension.
+  pos = file.rfind('.');
+  if (pos == std::string::npos) {
+    *error_msg = "Dex location " + location + " has no extension.";
+    return false;
+  }
+  std::string base = file.substr(0, pos);
+
+  *odex_filename = dir + "/" + base + ".odex";
+  return true;
+}
+
+std::string OatFileAssistant::DalvikCacheDirectory() {
+ // Note: We don't cache this, because it will only be called once by
+ // OatFileName, and we don't care about the performance of the profiling
+ // code, which isn't used in practice.
+
+ // TODO: The work done in GetDalvikCache is overkill for what we need.
+ // Ideally a new API for getting the DalvikCacheDirectory the way we want
+ // (without existence testing, creation, or death) is provided with the rest
+ // of the GetDalvikCache family of functions. Until such an API is in place,
+ // we use GetDalvikCache to avoid duplicating the logic for determining the
+ // dalvik cache directory.
+ std::string result;
+ bool have_android_data;
+ bool dalvik_cache_exists;
+ bool is_global_cache;
+ GetDalvikCache("", false, &result, &have_android_data, &dalvik_cache_exists, &is_global_cache);
+ return result;
+}
+
+std::string OatFileAssistant::ProfileFileName() {
+ if (package_name_ != nullptr) {
+ return DalvikCacheDirectory() + std::string("profiles/") + package_name_;
+ }
+ return "";
+}
+
+std::string OatFileAssistant::OldProfileFileName() {
+ std::string profile_name = ProfileFileName();
+ if (profile_name.empty()) {
+ return "";
+ }
+ return profile_name + "@old";
+}
+
+std::string OatFileAssistant::ImageLocation() {
+ Runtime* runtime = Runtime::Current();
+ const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
+ if (image_space == nullptr) {
+ return "";
+ }
+ return image_space->GetImageLocation();
+}
+
// Returns a pointer to the dex checksum required for an up-to-date oat file,
// or nullptr if no checksum could be determined. The checksum is computed at
// most once and cached in a member; the returned pointer refers to that
// member and must not be freed by the caller.
const uint32_t* OatFileAssistant::GetRequiredDexChecksum() {
  if (!required_dex_checksum_attempted) {
    required_dex_checksum_attempted = true;
    required_dex_checksum_found = false;
    std::string error_msg;
    CHECK(dex_location_ != nullptr) << "OatFileAssistant provided no dex location";
    if (DexFile::GetChecksum(dex_location_, &cached_required_dex_checksum, &error_msg)) {
      required_dex_checksum_found = true;
    } else {
      // This can happen if the original dex file has been stripped from the
      // apk.
      VLOG(oat) << "OatFileAssistant: " << error_msg;

      // Get the checksum from the odex if we can.
      const OatFile* odex_file = GetOdexFile();
      if (odex_file != nullptr) {
        const OatFile::OatDexFile* odex_dex_file = odex_file->GetOatDexFile(
            dex_location_, nullptr, false);
        if (odex_dex_file != nullptr) {
          cached_required_dex_checksum = odex_dex_file->GetDexFileLocationChecksum();
          required_dex_checksum_found = true;
        }
      }
    }
  }
  return required_dex_checksum_found ? &cached_required_dex_checksum : nullptr;
}
+
// Returns the loaded odex file, loading it on first call. Returns nullptr if
// the odex file name is unknown or the file failed to load. The returned
// pointer is owned by this object; callers must not free it.
const OatFile* OatFileAssistant::GetOdexFile() {
  CHECK(!oat_file_released_) << "OdexFile called after oat file released.";
  if (!odex_file_load_attempted_) {
    odex_file_load_attempted_ = true;
    if (OdexFileName() != nullptr) {
      const std::string& odex_file_name = *OdexFileName();
      std::string error_msg;
      // Open non-executable unless load_executable_ was requested; a failed
      // open is not an error here, just the absence of a usable odex file.
      cached_odex_file_.reset(OatFile::Open(odex_file_name.c_str(),
          odex_file_name.c_str(), nullptr, nullptr, load_executable_,
          &error_msg));
      if (cached_odex_file_.get() == nullptr) {
        VLOG(oat) << "OatFileAssistant test for existing pre-compiled oat file "
          << odex_file_name << ": " << error_msg;
      }
    }
  }
  return cached_odex_file_.get();
}
+
+void OatFileAssistant::ClearOdexFileCache() {
+ odex_file_load_attempted_ = false;
+ cached_odex_file_.reset();
+ odex_file_is_out_of_date_attempted_ = false;
+ odex_file_is_up_to_date_attempted_ = false;
+}
+
// Returns the loaded oat file, loading it on first call. Returns nullptr if
// the oat file name is unknown or the file failed to load. The returned
// pointer is owned by this object; callers must not free it.
const OatFile* OatFileAssistant::GetOatFile() {
  CHECK(!oat_file_released_) << "OatFile called after oat file released.";
  if (!oat_file_load_attempted_) {
    oat_file_load_attempted_ = true;
    if (OatFileName() != nullptr) {
      const std::string& oat_file_name = *OatFileName();
      std::string error_msg;
      // A failed open is not an error here, just the absence of a usable oat
      // file; the failure reason is logged for diagnosis.
      cached_oat_file_.reset(OatFile::Open(oat_file_name.c_str(),
          oat_file_name.c_str(), nullptr, nullptr, load_executable_, &error_msg));
      if (cached_oat_file_.get() == nullptr) {
        VLOG(oat) << "OatFileAssistant test for existing oat file "
          << oat_file_name << ": " << error_msg;
      }
    }
  }
  return cached_oat_file_.get();
}
+
+void OatFileAssistant::ClearOatFileCache() {
+ oat_file_load_attempted_ = false;
+ cached_oat_file_.reset();
+ oat_file_is_out_of_date_attempted_ = false;
+ oat_file_is_up_to_date_attempted_ = false;
+}
+
// Returns cached information about the boot image, loading it on first call.
// Returns nullptr if the runtime has no image space. The returned pointer is
// owned by this object; callers must not free it.
const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() {
  if (!image_info_load_attempted_) {
    image_info_load_attempted_ = true;

    Runtime* runtime = Runtime::Current();
    const gc::space::ImageSpace* image_space = runtime->GetHeap()->GetImageSpace();
    if (image_space != nullptr) {
      cached_image_info_.location = image_space->GetImageLocation();

      if (isa_ == kRuntimeISA) {
        // The image for this ISA is already loaded; read its header directly.
        const ImageHeader& image_header = image_space->GetImageHeader();
        cached_image_info_.oat_checksum = image_header.GetOatChecksum();
        cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin());
        cached_image_info_.patch_delta = image_header.GetPatchDelta();
      } else {
        // Cross-ISA query: read the image header for the target ISA from
        // disk. ReadImageHeaderOrDie aborts on failure.
        std::unique_ptr<ImageHeader> image_header(
            gc::space::ImageSpace::ReadImageHeaderOrDie(
                cached_image_info_.location.c_str(), isa_));
        cached_image_info_.oat_checksum = image_header->GetOatChecksum();
        cached_image_info_.oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin());
        cached_image_info_.patch_delta = image_header->GetPatchDelta();
      }
    }
    image_info_load_succeeded_ = (image_space != nullptr);
  }
  return image_info_load_succeeded_ ? &cached_image_info_ : nullptr;
}
+
+ProfileFile* OatFileAssistant::GetProfile() {
+ if (!profile_load_attempted_) {
+ CHECK(package_name_ != nullptr)
+ << "pakage_name_ is nullptr: "
+ << "profile_load_attempted_ should have been true";
+ profile_load_attempted_ = true;
+ std::string profile_name = ProfileFileName();
+ if (!profile_name.empty()) {
+ profile_load_succeeded_ = cached_profile_.LoadFile(profile_name);
+ }
+ }
+ return profile_load_succeeded_ ? &cached_profile_ : nullptr;
+}
+
+ProfileFile* OatFileAssistant::GetOldProfile() {
+ if (!old_profile_load_attempted_) {
+ CHECK(package_name_ != nullptr)
+ << "pakage_name_ is nullptr: "
+ << "old_profile_load_attempted_ should have been true";
+ old_profile_load_attempted_ = true;
+ std::string old_profile_name = OldProfileFileName();
+ if (!old_profile_name.empty()) {
+ old_profile_load_succeeded_ = cached_old_profile_.LoadFile(old_profile_name);
+ }
+ }
+ return old_profile_load_succeeded_ ? &cached_old_profile_ : nullptr;
+}
+
+} // namespace art
+
diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h
new file mode 100644
index 0000000000..958b44048d
--- /dev/null
+++ b/runtime/oat_file_assistant.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_OAT_FILE_ASSISTANT_H_
+#define ART_RUNTIME_OAT_FILE_ASSISTANT_H_
+
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

#include "arch/instruction_set.h"
#include "base/scoped_flock.h"
#include "base/unix_file/fd_file.h"
#include "oat_file.h"
#include "os.h"
#include "profiler.h"
+
+namespace art {
+
+// Class for assisting with oat file management.
+//
+// This class collects common utilities for determining the status of an oat
+// file on the device, updating the oat file, and loading the oat file.
+//
+// The oat file assistant is intended to be used with dex locations not on the
+// boot class path. See the IsInBootClassPath method for a way to check if the
+// dex location is in the boot class path.
+//
+// TODO: All the profiling related code is old and untested. It should either
+// be restored and tested, or removed.
+class OatFileAssistant {
+ public:
+ enum Status {
+ // kOutOfDate - An oat file is said to be out of date if the file does not
+ // exist, or is out of date with respect to the dex file or boot image.
+ kOutOfDate,
+
+ // kNeedsRelocation - An oat file is said to need relocation if the code
+ // is up to date, but not yet properly relocated for address space layout
+ // randomization (ASLR). In this case, the oat file is neither "out of
+ // date" nor "up to date".
+ kNeedsRelocation,
+
+ // kUpToDate - An oat file is said to be up to date if it is not out of
+ // date and has been properly relocated for the purposes of ASLR.
+ kUpToDate,
+ };
+
+ // Constructs an OatFileAssistant object to assist the oat file
+ // corresponding to the given dex location with the target instruction set.
+ //
+ // The dex_location must not be NULL and should remain available and
+ // unchanged for the duration of the lifetime of the OatFileAssistant object.
+ // Typically the dex_location is the absolute path to the original,
+ // un-optimized dex file.
+ //
+ //
+ // Note: Currently the dex_location must have an extension.
+ // TODO: Relax this restriction?
+ //
+ // The isa should be either the 32 bit or 64 bit variant for the current
+ // device. For example, on an arm device, use arm or arm64. An oat file can
+ // be loaded executable only if the ISA matches the current runtime.
+ OatFileAssistant(const char* dex_location, const InstructionSet isa,
+ bool load_executable);
+
+ // Constructs an OatFileAssistant, providing an explicit target oat_location
+ // to use instead of the standard oat location.
+ OatFileAssistant(const char* dex_location, const char* oat_location,
+ const InstructionSet isa, bool load_executable);
+
+ // Constructs an OatFileAssistant, providing an additional package_name used
+ // solely for the purpose of locating profile files.
+ //
+ // TODO: Why is the name of the profile file based on the package name and
+ // not the dex location? If there is no technical reason the dex_location
+ // can't be used, we should prefer that instead.
+ OatFileAssistant(const char* dex_location, const InstructionSet isa,
+ bool load_executable, const char* package_name);
+
+ // Constructs an OatFileAssistant with user specified oat location and a
+ // package name.
+ OatFileAssistant(const char* dex_location, const char* oat_location,
+ const InstructionSet isa, bool load_executable,
+ const char* package_name);
+
+ ~OatFileAssistant();
+
+ // Returns true if the dex location refers to an element of the boot class
+ // path.
+ bool IsInBootClassPath();
+
+ // Obtains a lock on the target oat file.
+ // Only one OatFileAssistant object can hold the lock for a target oat file
+ // at a time. The Lock is released automatically when the OatFileAssistant
+ // object goes out of scope. The Lock() method must not be called if the
+ // lock has already been acquired.
+ //
+ // Returns true on success.
+ // Returns false on error, in which case error_msg will contain more
+ // information on the error.
+ //
+ // The 'error_msg' argument must not be null.
+ //
+ // This is intended to be used to avoid race conditions when multiple
+ // processes generate oat files, such as when a foreground Activity and
+ // a background Service both use DexClassLoaders pointing to the same dex
+ // file.
+ bool Lock(std::string* error_msg);
+
+ // Returns the overall compilation status for the given dex location.
+ Status GetStatus();
+
+ // Attempts to generate or relocate the oat file as needed to make it up to
+ // date.
+ // Returns true on success.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ bool MakeUpToDate(std::string* error_msg);
+
+ // Returns an oat file that can be used for loading dex files.
+ // Returns nullptr if no suitable oat file was found.
+ //
+ // After this call, no other methods of the OatFileAssistant should be
+ // called, because access to the loaded oat file has been taken away from
+ // the OatFileAssistant object.
+ std::unique_ptr<OatFile> GetBestOatFile();
+
+ // Loads the dex files in the given oat file for the given dex location.
+ // The oat file should be up to date for the given dex location.
+ // This loads multiple dex files in the case of multidex.
+ // Returns an empty vector if no dex files for that location could be loaded
+ // from the oat file.
+ //
+ // The caller is responsible for freeing the dex_files returned, if any. The
+ // dex_files will only remain valid as long as the oat_file is valid.
+ static std::vector<std::unique_ptr<const DexFile>> LoadDexFiles(
+ const OatFile& oat_file, const char* dex_location);
+
+ // If the dex file has been pre-compiled on the host, the compiled oat file
+ // will have the extension .odex, and is referred to as the odex file.
+ // It is called odex for legacy reasons; the file is really an oat file. The
+ // odex file will typically have a patch delta of 0 and need to be relocated
+ // before use for the purposes of ASLR.
+ // These methods return the location and status of the odex file for the dex
+ // location.
+ // Notes:
+ // * OdexFileName may return null if the odex file name could not be
+ // determined.
+ const std::string* OdexFileName();
+ bool OdexFileExists();
+ Status OdexFileStatus();
+ bool OdexFileIsOutOfDate();
+ bool OdexFileNeedsRelocation();
+ bool OdexFileIsUpToDate();
+
+ // When the dex files is compiled on the target device, the oat file is the
+ // result. The oat file will have been relocated to some
+ // (possibly-out-of-date) offset for ASLR.
+ // These methods return the location and status of the target oat file for
+ // the dex location.
+ //
+ // Notes:
+ // * To get the overall status of the compiled code for this dex_location,
+ // use the GetStatus() method, not the OatFileStatus() method.
+ // * OatFileName may return null if the oat file name could not be
+ // determined.
+ const std::string* OatFileName();
+ bool OatFileExists();
+ Status OatFileStatus();
+ bool OatFileIsOutOfDate();
+ bool OatFileNeedsRelocation();
+ bool OatFileIsUpToDate();
+
+ // These methods return the status for a given opened oat file with respect
+ // to the dex location.
+ Status GivenOatFileStatus(const OatFile& file);
+ bool GivenOatFileIsOutOfDate(const OatFile& file);
+ bool GivenOatFileNeedsRelocation(const OatFile& file);
+ bool GivenOatFileIsUpToDate(const OatFile& file);
+
+ // Returns true if there is an accessible profile associated with the dex
+ // location.
+ // This returns false if profiling is disabled.
+ bool ProfileExists();
+
+ // The old profile is a file containing a previous snapshot of profiling
+ // information associated with the dex file code. This is used to track how
+ // the profiling information has changed over time.
+ //
+ // Returns true if there is an accessible old profile associated with the
+ // dex location.
+ // This returns false if profiling is disabled.
+ bool OldProfileExists();
+
+ // Returns true if there has been a significant change between the old
+ // profile and the current profile.
+ // This returns false if profiling is disabled.
+ bool IsProfileChangeSignificant();
+
+ // Copy the current profile to the old profile location.
+ void CopyProfileFile();
+
+ // Generates the oat file by relocation from the odex file.
+ // This does not check the current status before attempting to relocate the
+ // oat file.
+ // Returns true on success.
+ // This will fail if dex2oat is not enabled in the current runtime.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ bool RelocateOatFile(std::string* error_msg);
+
+ // Generate the oat file from the dex file.
+ // This does not check the current status before attempting to generate the
+ // oat file.
+ // Returns true on success.
+ // This will fail if dex2oat is not enabled in the current runtime.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ bool GenerateOatFile(std::string* error_msg);
+
+ // Executes dex2oat using the current runtime configuration overridden with
+ // the given arguments. This does not check to see if dex2oat is enabled in
+ // the runtime configuration.
+ // Returns true on success.
+ //
+ // If there is a failure, the value of error_msg will be set to a string
+ // describing why there was failure. error_msg must not be nullptr.
+ //
+ // TODO: The OatFileAssistant probably isn't the right place to have this
+ // function.
+ static bool Dex2Oat(const std::vector<std::string>& args, std::string* error_msg);
+
+ // Constructs the odex file name for the given dex location.
+ // Returns true on success, in which case odex_filename is set to the odex
+ // file name.
+ // Returns false on error, in which case error_msg describes the error.
+ // Neither odex_filename nor error_msg may be null.
+ static bool DexFilenameToOdexFilename(const std::string& location,
+ InstructionSet isa, std::string* odex_filename, std::string* error_msg);
+
+ private:
+ struct ImageInfo {
+ uint32_t oat_checksum = 0;
+ uintptr_t oat_data_begin = 0;
+ int32_t patch_delta = 0;
+ std::string location;
+ };
+
+ // Returns the path to the dalvik cache directory.
+ // Does not check existence of the cache or try to create it.
+ // Includes the trailing slash.
+ // Returns an empty string if we can't get the dalvik cache directory path.
+ std::string DalvikCacheDirectory();
+
+ // Constructs the filename for the profile file.
+ // Returns an empty string if we do not have the necessary information to
+ // construct the filename.
+ std::string ProfileFileName();
+
+ // Constructs the filename for the old profile file.
+ // Returns an empty string if we do not have the necessary information to
+ // construct the filename.
+ std::string OldProfileFileName();
+
+ // Returns the current image location.
+ // Returns an empty string if the image location could not be retrieved.
+ //
+ // TODO: This method should belong with an image file manager, not
+ // the oat file assistant.
+ static std::string ImageLocation();
+
+ // Gets the dex checksum required for an up-to-date oat file.
+ // Returns dex_checksum if a required checksum was located. Returns
+ // nullptr if the required checksum was not found.
+ // The caller shouldn't clean up or free the returned pointer.
+ const uint32_t* GetRequiredDexChecksum();
+
+ // Returns the loaded odex file.
+ // Loads the file if needed. Returns nullptr if the file failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const OatFile* GetOdexFile();
+
+ // Clear any cached information about the odex file that depends on the
+ // contents of the file.
+ void ClearOdexFileCache();
+
+ // Returns the loaded oat file.
+ // Loads the file if needed. Returns nullptr if the file failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const OatFile* GetOatFile();
+
+ // Clear any cached information about the oat file that depends on the
+ // contents of the file.
+ void ClearOatFileCache();
+
+ // Returns the loaded image info.
+ // Loads the image info if needed. Returns nullptr if the image info failed
+ // to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ const ImageInfo* GetImageInfo();
+
+ // Returns the loaded profile.
+ // Loads the profile if needed. Returns nullptr if the profile failed
+ // to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ ProfileFile* GetProfile();
+
+ // Returns the loaded old profile.
+ // Loads the old profile if needed. Returns nullptr if the old profile
+ // failed to load.
+ // The caller shouldn't clean up or free the returned pointer.
+ ProfileFile* GetOldProfile();
+
+ // To implement Lock(), we lock a dummy file where the oat file would go
+ // (adding ".flock" to the target file name) and retain the lock for the
+ // remaining lifetime of the OatFileAssistant object.
+ std::unique_ptr<File> lock_file_;
+ ScopedFlock flock_;
+
+ // In a properly constructed OatFileAssistant object, dex_location_ should
+ // never be nullptr.
+ const char* dex_location_ = nullptr;
+
+ // In a properly constructed OatFileAssistant object, isa_ should be either
+ // the 32 or 64 bit variant for the current device.
+ const InstructionSet isa_ = kNone;
+
+ // The package name, used solely to find the profile file.
+ // This may be nullptr in a properly constructed object. In this case,
+ // profile_load_attempted_ and old_profile_load_attempted_ will be true, and
+ // profile_load_succeeded_ and old_profile_load_succeeded_ will be false.
+ const char* package_name_ = nullptr;
+
+ // Whether we will attempt to load oat files executable.
+ bool load_executable_ = false;
+
+ // Cached value of the required dex checksum.
+ // This should be accessed only by the GetRequiredDexChecksum() method.
+ uint32_t cached_required_dex_checksum;
+ bool required_dex_checksum_attempted = false;
+ bool required_dex_checksum_found;
+
+ // Cached value of the odex file name.
+ // This should be accessed only by the OdexFileName() method.
+ bool cached_odex_file_name_attempted_ = false;
+ bool cached_odex_file_name_found_;
+ std::string cached_odex_file_name_;
+
+ // Cached value of the loaded odex file.
+ // Use the GetOdexFile method rather than accessing this directly, unless you
+ // know the odex file isn't out of date.
+ bool odex_file_load_attempted_ = false;
+ std::unique_ptr<OatFile> cached_odex_file_;
+
+ // Cached results for OdexFileIsOutOfDate
+ bool odex_file_is_out_of_date_attempted_ = false;
+ bool cached_odex_file_is_out_of_date_;
+
+ // Cached results for OdexFileIsUpToDate
+ bool odex_file_is_up_to_date_attempted_ = false;
+ bool cached_odex_file_is_up_to_date_;
+
+ // Cached value of the oat file name.
+ // This should be accessed only by the OatFileName() method.
+ bool cached_oat_file_name_attempted_ = false;
+ bool cached_oat_file_name_found_;
+ std::string cached_oat_file_name_;
+
+ // Cached value of the loaded odex file.
+ // Use the GetOatFile method rather than accessing this directly, unless you
+ // know the odex file isn't out of date.
+ bool oat_file_load_attempted_ = false;
+ std::unique_ptr<OatFile> cached_oat_file_;
+
+ // Cached results for OatFileIsOutOfDate
+ bool oat_file_is_out_of_date_attempted_ = false;
+ bool cached_oat_file_is_out_of_date_;
+
+ // Cached results for OatFileIsUpToDate
+ bool oat_file_is_up_to_date_attempted_ = false;
+ bool cached_oat_file_is_up_to_date_;
+
+ // Cached value of the image info.
+ // Use the GetImageInfo method rather than accessing these directly.
+ // TODO: The image info should probably be moved out of the oat file
+ // assistant to an image file manager.
+ bool image_info_load_attempted_ = false;
+ bool image_info_load_succeeded_ = false;
+ ImageInfo cached_image_info_;
+
+ // Cached value of the profile file.
+ // Use the GetProfile method rather than accessing these directly.
+ bool profile_load_attempted_ = false;
+ bool profile_load_succeeded_ = false;
+ ProfileFile cached_profile_;
+
+ // Cached value of the profile file.
+ // Use the GetOldProfile method rather than accessing these directly.
+ bool old_profile_load_attempted_ = false;
+ bool old_profile_load_succeeded_ = false;
+ ProfileFile cached_old_profile_;
+
+ // For debugging only.
+ // If this flag is set, the oat or odex file has been released to the user
+ // of the OatFileAssistant object and the OatFileAssistant object is in a
+ // bad state and should no longer be used.
+ bool oat_file_released_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(OatFileAssistant);
+};
+
+} // namespace art
+
+#endif // ART_RUNTIME_OAT_FILE_ASSISTANT_H_
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
new file mode 100644
index 0000000000..71679ae480
--- /dev/null
+++ b/runtime/oat_file_assistant_test.cc
@@ -0,0 +1,878 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "oat_file_assistant.h"
+
+#include <algorithm>
+#include <fstream>
+#include <string>
+#include <vector>
+#include <sys/param.h>
+
+#include <backtrace/BacktraceMap.h>
+#include <gtest/gtest.h>
+
+#include "class_linker.h"
+#include "common_runtime_test.h"
+#include "mem_map.h"
+#include "os.h"
+#include "thread-inl.h"
+#include "utils.h"
+
+namespace art {
+
+class OatFileAssistantTest : public CommonRuntimeTest {
+ public:
+ virtual void SetUp() {
+ ReserveImageSpace();
+ CommonRuntimeTest::SetUp();
+
+ // Create a scratch directory to work from.
+ scratch_dir_ = android_data_ + "/OatFileAssistantTest";
+ ASSERT_EQ(0, mkdir(scratch_dir_.c_str(), 0700));
+
+ // Create a subdirectory in scratch for the current isa.
+ // This is the location that will be used for odex files in the tests.
+ isa_dir_ = scratch_dir_ + "/" + GetInstructionSetString(kRuntimeISA);
+ ASSERT_EQ(0, mkdir(isa_dir_.c_str(), 0700));
+
+ // Verify the environment is as we expect
+ uint32_t checksum;
+ std::string error_msg;
+ ASSERT_TRUE(OS::FileExists(GetImageFile().c_str()))
+ << "Expected pre-compiled boot image to be at: " << GetImageFile();
+ ASSERT_TRUE(OS::FileExists(GetDexSrc1().c_str()))
+ << "Expected dex file to be at: " << GetDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetStrippedDexSrc1().c_str()))
+ << "Expected stripped dex file to be at: " << GetStrippedDexSrc1();
+ ASSERT_FALSE(DexFile::GetChecksum(GetStrippedDexSrc1().c_str(), &checksum, &error_msg))
+ << "Expected stripped dex file to be stripped: " << GetStrippedDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetMultiDexSrc1().c_str()))
+ << "Expected multidex file to be at: " << GetMultiDexSrc1();
+ ASSERT_TRUE(OS::FileExists(GetDexSrc2().c_str()))
+ << "Expected dex file to be at: " << GetDexSrc2();
+ }
+
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
+ // options->push_back(std::make_pair("-verbose:oat", nullptr));
+
+ // Set up the image location.
+ options->push_back(std::make_pair("-Ximage:" + GetImageLocation(),
+ nullptr));
+ // Make sure compilercallbacks are not set so that relocation will be
+ // enabled.
+ for (std::pair<std::string, const void*>& pair : *options) {
+ if (pair.first == "compilercallbacks") {
+ pair.second = nullptr;
+ }
+ }
+ }
+
+ virtual void PreRuntimeCreate() {
+ UnreserveImageSpace();
+ }
+
+ virtual void PostRuntimeCreate() {
+ ReserveImageSpace();
+ }
+
+ virtual void TearDown() {
+ ClearDirectory(isa_dir_.c_str());
+ ASSERT_EQ(0, rmdir(isa_dir_.c_str()));
+
+ ClearDirectory(scratch_dir_.c_str());
+ ASSERT_EQ(0, rmdir(scratch_dir_.c_str()));
+
+ CommonRuntimeTest::TearDown();
+ }
+
+ void Copy(std::string src, std::string dst) {
+ std::ifstream src_stream(src, std::ios::binary);
+ std::ofstream dst_stream(dst, std::ios::binary);
+
+ dst_stream << src_stream.rdbuf();
+ }
+
+ // Returns the directory where the pre-compiled core.art can be found.
+ // TODO: We should factor out this into common tests somewhere rather than
+ // re-hardcoding it here (This was copied originally from the elf writer
+ // test).
+ std::string GetImageDirectory() {
+ if (IsHost()) {
+ const char* host_dir = getenv("ANDROID_HOST_OUT");
+ CHECK(host_dir != NULL);
+ return std::string(host_dir) + "/framework";
+ } else {
+ return std::string("/data/art-test");
+ }
+ }
+
+ std::string GetImageLocation() {
+ return GetImageDirectory() + "/core.art";
+ }
+
+ std::string GetImageFile() {
+ return GetImageDirectory() + "/" + GetInstructionSetString(kRuntimeISA)
+ + "/core.art";
+ }
+
+ std::string GetDexSrc1() {
+ return GetTestDexFileName("Main");
+ }
+
+ // Returns the path to a dex file equivalent to GetDexSrc1, but with the dex
+ // file stripped.
+ std::string GetStrippedDexSrc1() {
+ return GetTestDexFileName("MainStripped");
+ }
+
+ std::string GetMultiDexSrc1() {
+ return GetTestDexFileName("MultiDex");
+ }
+
+ std::string GetDexSrc2() {
+ return GetTestDexFileName("Nested");
+ }
+
+ // Scratch directory, for dex and odex files (oat files will go in the
+ // dalvik cache).
+ std::string GetScratchDir() {
+ return scratch_dir_;
+ }
+
+ // ISA directory is the subdirectory in the scratch directory where odex
+ // files should be located.
+ std::string GetISADir() {
+ return isa_dir_;
+ }
+
+ // Generate an odex file for the purposes of test.
+ // If pic is true, generates a PIC odex.
+ void GenerateOdexForTest(const std::string& dex_location,
+ const std::string& odex_location,
+ bool pic = false) {
+ // For this operation, we temporarily redirect the dalvik cache so dex2oat
+ // doesn't find the relocated image file.
+ std::string android_data_tmp = GetScratchDir() + "AndroidDataTmp";
+ setenv("ANDROID_DATA", android_data_tmp.c_str(), 1);
+ std::vector<std::string> args;
+ args.push_back("--dex-file=" + dex_location);
+ args.push_back("--oat-file=" + odex_location);
+ if (pic) {
+ args.push_back("--compile-pic");
+ } else {
+ args.push_back("--include-patch-information");
+
+ // We need to use the quick compiler to generate non-PIC code, because
+ // the optimizing compiler always generates PIC.
+ args.push_back("--compiler-backend=Quick");
+ }
+ args.push_back("--runtime-arg");
+ args.push_back("-Xnorelocate");
+ std::string error_msg;
+ ASSERT_TRUE(OatFileAssistant::Dex2Oat(args, &error_msg)) << error_msg;
+ setenv("ANDROID_DATA", android_data_.c_str(), 1);
+ }
+
+ void GeneratePicOdexForTest(const std::string& dex_location,
+ const std::string& odex_location) {
+ GenerateOdexForTest(dex_location, odex_location, true);
+ }
+
+ private:
+ // Reserve memory around where the image will be loaded so other memory
+ // won't conflict when it comes time to load the image.
+ // This can be called with an already loaded image to reserve the space
+ // around it.
+ void ReserveImageSpace() {
+ MemMap::Init();
+
+ // Ensure a chunk of memory is reserved for the image space.
+ uintptr_t reservation_start = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MIN_DELTA;
+ uintptr_t reservation_end = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MAX_DELTA
+ + 100 * 1024 * 1024;
+
+ std::string error_msg;
+ std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
+ ASSERT_TRUE(map.get() != nullptr) << "Failed to build process map";
+ for (BacktraceMap::const_iterator it = map->begin();
+ reservation_start < reservation_end && it != map->end(); ++it) {
+ if (it->end <= reservation_start) {
+ continue;
+ }
+
+ if (it->start < reservation_start) {
+ reservation_start = std::min(reservation_end, it->end);
+ }
+
+ image_reservation_.push_back(std::unique_ptr<MemMap>(
+ MemMap::MapAnonymous("image reservation",
+ reinterpret_cast<uint8_t*>(reservation_start),
+ std::min(it->start, reservation_end) - reservation_start,
+ PROT_NONE, false, false, &error_msg)));
+ ASSERT_TRUE(image_reservation_.back().get() != nullptr) << error_msg;
+ LOG(INFO) << "Reserved space for image " <<
+ reinterpret_cast<void*>(image_reservation_.back()->Begin()) << "-" <<
+ reinterpret_cast<void*>(image_reservation_.back()->End());
+ reservation_start = it->end;
+ }
+ }
+
+
+ // Unreserve any memory reserved by ReserveImageSpace. This should be called
+ // before the image is loaded.
+ void UnreserveImageSpace() {
+ image_reservation_.clear();
+ }
+
+ std::string scratch_dir_;
+ std::string isa_dir_;
+ std::vector<std::unique_ptr<MemMap>> image_reservation_;
+};
+
+class OatFileAssistantNoDex2OatTest : public OatFileAssistantTest {
+ public:
+ virtual void SetUpRuntimeOptions(RuntimeOptions* options) {
+ OatFileAssistantTest::SetUpRuntimeOptions(options);
+ options->push_back(std::make_pair("-Xnodex2oat", nullptr));
+ }
+};
+
+// Generate an oat file for the purposes of test, as opposed to testing
+// generation of oat files.
+static void GenerateOatForTest(const char* dex_location) {
+ OatFileAssistant oat_file_assistant(dex_location, kRuntimeISA, false);
+
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.GenerateOatFile(&error_msg)) << error_msg;
+}
+
+// Case: We have a DEX file, but no OAT file for it.
+// Expect: The oat file status is kOutOfDate.
+TEST_F(OatFileAssistantTest, DexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexNoOat.jar";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.OdexFileStatus());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.OatFileStatus());
+}
+
+// Case: We have no DEX file and no OAT file.
+// Expect: Status is out of date. Loading should fail, but not crash.
+TEST_F(OatFileAssistantTest, NoDexNoOat) {
+ std::string dex_location = GetScratchDir() + "/NoDexNoOat.jar";
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ EXPECT_EQ(nullptr, oat_file.get());
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+// Expect: The oat file status is kUpToDate.
+TEST_F(OatFileAssistantTest, OatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/OatUpToDate.jar";
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsUpToDate());
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.OatFileStatus());
+}
+
+// Case: We have a MultiDEX file and up-to-date OAT file for it.
+// Expect: The oat file status is kUpToDate.
+TEST_F(OatFileAssistantTest, MultiDexOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/MultiDexOatUpToDate.jar";
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Verify we can load both dex files.
+ OatFileAssistant executable_oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+ std::unique_ptr<OatFile> oat_file = executable_oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = executable_oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(2u, dex_files.size());
+}
+
+// Case: We have a DEX file and out of date OAT file.
+// Expect: The oat file status is kOutOfDate.
+TEST_F(OatFileAssistantTest, OatOutOfDate) {
+ std::string dex_location = GetScratchDir() + "/OatOutOfDate.jar";
+
+ // We create a dex, generate an oat for it, then overwrite the dex with a
+ // different dex to make the oat out of date.
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+ Copy(GetDexSrc2(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: We have a DEX file and an ODEX file, but no OAT file.
+// Expect: The oat file status is kNeedsRelocation.
+TEST_F(OatFileAssistantTest, DexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/DexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: We have a stripped DEX file and an ODEX file, but no OAT file.
+// Expect: The oat file status is kNeedsRelocation.
+TEST_F(OatFileAssistantTest, StrippedDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/StrippedDexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/StrippedDexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Strip the dex file
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Make the oat file up to date.
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Verify we can load the dex files from it.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a stripped DEX file, an ODEX file, and an out of date OAT file.
+// Expect: The oat file status is kNeedsRelocation.
+TEST_F(OatFileAssistantTest, StrippedDexOdexOat) {
+ std::string dex_location = GetScratchDir() + "/StrippedDexOdexOat.jar";
+ std::string odex_location = GetISADir() + "/StrippedDexOdexOat.odex";
+
+ // Create the oat file from a different dex file so it looks out of date.
+ Copy(GetDexSrc2(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Create the odex file
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Strip the dex file.
+ Copy(GetStrippedDexSrc1(), dex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Make the oat file up to date.
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileNeedsRelocation());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileNeedsRelocation());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Verify we can load the dex files from it.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file, an ODEX file and an OAT file, where the ODEX and
+// OAT files both have patch delta of 0.
+// Expect: It shouldn't crash.
+TEST_F(OatFileAssistantTest, OdexOatOverlap) {
+ std::string dex_location = GetScratchDir() + "/OdexOatOverlap.jar";
+ std::string odex_location = GetISADir() + "/OdexOatOverlap.odex";
+ std::string oat_location = GetISADir() + "/OdexOatOverlap.oat";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Create the oat file by copying the odex so they are located in the same
+ // place in memory.
+ Copy(odex_location, oat_location);
+
+ // Verify things don't go bad.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(),
+ oat_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_EQ(OatFileAssistant::kNeedsRelocation, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ // Things aren't relocated, so it should fall back to interpreted.
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file and a PIC ODEX file, but no OAT file.
+// Expect: The oat file status is kUpToDate, because PIC needs no relocation.
+TEST_F(OatFileAssistantTest, DexPicOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/DexPicOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/DexPicOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GeneratePicOdexForTest(dex_location, odex_location);
+
+ // Verify the status.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kUpToDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_TRUE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+// Expect: We should load an executable dex file.
+TEST_F(OatFileAssistantTest, LoadOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/LoadOatUpToDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Load the oat using an oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file and up-to-date OAT file for it.
+// Expect: Loading non-executable should load the oat non-executable.
+TEST_F(OatFileAssistantTest, LoadNoExecOatUpToDate) {
+ std::string dex_location = GetScratchDir() + "/LoadNoExecOatUpToDate.jar";
+
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOatForTest(dex_location.c_str());
+
+ // Load the oat using an oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a DEX file.
+// Expect: We should load an executable dex file from an alternative oat
+// location.
+TEST_F(OatFileAssistantTest, LoadDexNoAlternateOat) {
+ std::string dex_location = GetScratchDir() + "/LoadDexNoAlternateOat.jar";
+ std::string oat_location = GetScratchDir() + "/LoadDexNoAlternateOat.oat";
+
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(
+ dex_location.c_str(), oat_location.c_str(), kRuntimeISA, true);
+ std::string error_msg;
+ ASSERT_TRUE(oat_file_assistant.MakeUpToDate(&error_msg)) << error_msg;
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_TRUE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+
+ EXPECT_TRUE(OS::FileExists(oat_location.c_str()));
+
+ // Verify it didn't create an oat in the default location.
+ OatFileAssistant ofm(dex_location.c_str(), kRuntimeISA, false);
+ EXPECT_FALSE(ofm.OatFileExists());
+}
+
+// Case: Non-existent Dex location.
+// Expect: The dex code is out of date, and trying to update it fails.
+TEST_F(OatFileAssistantTest, NonExsistentDexLocation) {
+ std::string dex_location = GetScratchDir() + "/BadDexLocation.jar";
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ std::string error_msg;
+ EXPECT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg));
+ EXPECT_FALSE(error_msg.empty());
+}
+
+// Turn an absolute path into a path relative to the current working
+// directory.
+static std::string MakePathRelative(std::string target) {
+ char buf[MAXPATHLEN];
+ std::string cwd = getcwd(buf, MAXPATHLEN);
+
+ // Split the target and cwd paths into components.
+ std::vector<std::string> target_path;
+ std::vector<std::string> cwd_path;
+ Split(target, '/', &target_path);
+ Split(cwd, '/', &cwd_path);
+
+ // Reverse the path components, so we can use pop_back().
+ std::reverse(target_path.begin(), target_path.end());
+ std::reverse(cwd_path.begin(), cwd_path.end());
+
+ // Drop the common prefix of the paths. Because we reversed the path
+ // components, this becomes the common suffix of target_path and cwd_path.
+ while (!target_path.empty() && !cwd_path.empty()
+ && target_path.back() == cwd_path.back()) {
+ target_path.pop_back();
+ cwd_path.pop_back();
+ }
+
+ // For each element of the remaining cwd_path, add '..' to the beginning
+ // of the target path. Because we reversed the path components, we add to
+ // the end of target_path.
+ for (unsigned int i = 0; i < cwd_path.size(); i++) {
+ target_path.push_back("..");
+ }
+
+ // Reverse again to get the right path order, and join to get the result.
+ std::reverse(target_path.begin(), target_path.end());
+ return Join(target_path, '/');
+}
+
+// Case: Non-absolute path to Dex location.
+// Expect: Not sure, but it shouldn't crash.
+TEST_F(OatFileAssistantTest, NonAbsoluteDexLocation) {
+ std::string abs_dex_location = GetScratchDir() + "/NonAbsoluteDexLocation.jar";
+ Copy(GetDexSrc1(), abs_dex_location);
+
+ std::string dex_location = MakePathRelative(abs_dex_location);
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// Case: Very short, non-existent Dex location.
+// Expect: Dex code is out of date, and trying to update it fails.
+TEST_F(OatFileAssistantTest, ShortDexLocation) {
+ std::string dex_location = "/xx";
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+
+ std::string error_msg;
+ EXPECT_FALSE(oat_file_assistant.MakeUpToDate(&error_msg));
+ EXPECT_FALSE(error_msg.empty());
+}
+
+// Case: Non-standard extension for dex file.
+// Expect: The oat file status is kOutOfDate.
+TEST_F(OatFileAssistantTest, LongDexExtension) {
+ std::string dex_location = GetScratchDir() + "/LongDexExtension.jarx";
+ Copy(GetDexSrc1(), dex_location);
+
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, false);
+
+ EXPECT_EQ(OatFileAssistant::kOutOfDate, oat_file_assistant.GetStatus());
+
+ EXPECT_FALSE(oat_file_assistant.IsInBootClassPath());
+ EXPECT_FALSE(oat_file_assistant.OdexFileExists());
+ EXPECT_TRUE(oat_file_assistant.OdexFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OdexFileIsUpToDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileExists());
+ EXPECT_TRUE(oat_file_assistant.OatFileIsOutOfDate());
+ EXPECT_FALSE(oat_file_assistant.OatFileIsUpToDate());
+}
+
+// A task to generate a dex location. Used by the RaceToGenerate test.
+class RaceGenerateTask : public Task {
+ public:
+ explicit RaceGenerateTask(const std::string& dex_location, const std::string& oat_location)
+ : dex_location_(dex_location), oat_location_(oat_location),
+ loaded_oat_file_(nullptr)
+ {}
+
+ void Run(Thread* self) {
+ UNUSED(self);
+
+ // Load the dex files, and save a pointer to the loaded oat file, so that
+ // we can verify only one oat file was loaded for the dex location.
+ ClassLinker* linker = Runtime::Current()->GetClassLinker();
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ std::vector<std::string> error_msgs;
+ dex_files = linker->OpenDexFilesFromOat(dex_location_.c_str(), oat_location_.c_str(), &error_msgs);
+ CHECK(!dex_files.empty()) << Join(error_msgs, '\n');
+ loaded_oat_file_ = dex_files[0]->GetOatFile();
+ }
+
+ const OatFile* GetLoadedOatFile() const {
+ return loaded_oat_file_;
+ }
+
+ private:
+ std::string dex_location_;
+ std::string oat_location_;
+ const OatFile* loaded_oat_file_;
+};
+
+// Test the case where multiple processes race to generate an oat file.
+// This simulates multiple processes using multiple threads.
+//
+// We want only one Oat file to be loaded when there is a race to load, to
+// avoid using up the virtual memory address space.
+TEST_F(OatFileAssistantTest, RaceToGenerate) {
+ std::string dex_location = GetScratchDir() + "/RaceToGenerate.jar";
+ std::string oat_location = GetISADir() + "/RaceToGenerate.oat";
+
+ // We use the lib core dex file, because it's large, and hopefully should
+ // take a while to generate.
+ Copy(GetLibCoreDexFileName(), dex_location);
+
+ const int kNumThreads = 32;
+ Thread* self = Thread::Current();
+ ThreadPool thread_pool("Oat file assistant test thread pool", kNumThreads);
+ std::vector<std::unique_ptr<RaceGenerateTask>> tasks;
+ for (int i = 0; i < kNumThreads; i++) {
+ std::unique_ptr<RaceGenerateTask> task(new RaceGenerateTask(dex_location, oat_location));
+ thread_pool.AddTask(self, task.get());
+ tasks.push_back(std::move(task));
+ }
+ thread_pool.StartWorkers(self);
+ thread_pool.Wait(self, true, false);
+
+ // Verify every task got the same pointer.
+ const OatFile* expected = tasks[0]->GetLoadedOatFile();
+ for (auto& task : tasks) {
+ EXPECT_EQ(expected, task->GetLoadedOatFile());
+ }
+}
+
+// Case: We have a DEX file and an ODEX file, no OAT file, and dex2oat is
+// disabled.
+// Expect: We should load the odex file non-executable.
+TEST_F(OatFileAssistantNoDex2OatTest, LoadDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/LoadDexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/LoadDexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Load the oat using an executable oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(1u, dex_files.size());
+}
+
+// Case: We have a MultiDEX file and an ODEX file, no OAT file, and dex2oat is
+// disabled.
+// Expect: We should load the odex file non-executable.
+TEST_F(OatFileAssistantNoDex2OatTest, LoadMultiDexOdexNoOat) {
+ std::string dex_location = GetScratchDir() + "/LoadMultiDexOdexNoOat.jar";
+ std::string odex_location = GetISADir() + "/LoadMultiDexOdexNoOat.odex";
+
+ // Create the dex and odex files
+ Copy(GetMultiDexSrc1(), dex_location);
+ GenerateOdexForTest(dex_location, odex_location);
+
+ // Load the oat using an executable oat file assistant.
+ OatFileAssistant oat_file_assistant(dex_location.c_str(), kRuntimeISA, true);
+
+ std::unique_ptr<OatFile> oat_file = oat_file_assistant.GetBestOatFile();
+ ASSERT_TRUE(oat_file.get() != nullptr);
+ EXPECT_FALSE(oat_file->IsExecutable());
+ std::vector<std::unique_ptr<const DexFile>> dex_files;
+ dex_files = oat_file_assistant.LoadDexFiles(*oat_file, dex_location.c_str());
+ EXPECT_EQ(2u, dex_files.size());
+}
+
+TEST(OatFileAssistantUtilsTest, DexFilenameToOdexFilename) {
+ std::string error_msg;
+ std::string odex_file;
+
+ EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
+ "/foo/bar/baz.jar", kArm, &odex_file, &error_msg)) << error_msg;
+ EXPECT_EQ("/foo/bar/arm/baz.odex", odex_file);
+
+ EXPECT_TRUE(OatFileAssistant::DexFilenameToOdexFilename(
+ "/foo/bar/baz.funnyext", kArm, &odex_file, &error_msg)) << error_msg;
+ EXPECT_EQ("/foo/bar/arm/baz.odex", odex_file);
+
+ EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
+ "nopath.jar", kArm, &odex_file, &error_msg));
+ EXPECT_FALSE(OatFileAssistant::DexFilenameToOdexFilename(
+ "/foo/bar/baz_noext", kArm, &odex_file, &error_msg));
+}
+
+
+// TODO: More Tests:
+// * Test class linker falls back to unquickened dex for DexNoOat
+// * Test class linker falls back to unquickened dex for MultiDexNoOat
+// * Test multidex files:
+// - Multidex with only classes2.dex out of date should have status
+// kOutOfDate
+// * Test using secondary isa
+// * Test with profiling info?
+// * Test for status of oat while oat is being generated (how?)
+// * Test case where 32 and 64 bit boot class paths differ,
+// and we ask IsInBootClassPath for a class in exactly one of the 32 or
+// 64 bit boot class paths.
+// * Test unexpected scenarios (?):
+// - Dex is stripped, don't have odex.
+// - Oat file corrupted after status check, before reload unexecutable
+// because it's unrelocated and no dex2oat
+
+} // namespace art
diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc
index 607569ae1a..a53aeaa3d7 100644
--- a/runtime/parsed_options.cc
+++ b/runtime/parsed_options.cc
@@ -136,6 +136,8 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize
.IntoKey(M::LongGCLogThreshold)
.Define("-XX:DumpGCPerformanceOnShutdown")
.IntoKey(M::DumpGCPerformanceOnShutdown)
+ .Define("-XX:DumpJITInfoOnShutdown")
+ .IntoKey(M::DumpJITInfoOnShutdown)
.Define("-XX:IgnoreMaxFootprint")
.IntoKey(M::IgnoreMaxFootprint)
.Define("-XX:LowMemoryMode")
@@ -620,6 +622,7 @@ void ParsedOptions::Usage(const char* fmt, ...) {
UsageMessage(stream, " -XX:LongPauseLogThreshold=integervalue\n");
UsageMessage(stream, " -XX:LongGCLogThreshold=integervalue\n");
UsageMessage(stream, " -XX:DumpGCPerformanceOnShutdown\n");
+ UsageMessage(stream, " -XX:DumpJITInfoOnShutdown\n");
UsageMessage(stream, " -XX:IgnoreMaxFootprint\n");
UsageMessage(stream, " -XX:UseTLAB\n");
UsageMessage(stream, " -XX:BackgroundGC=none\n");
diff --git a/runtime/primitive.h b/runtime/primitive.h
index 9dda144755..2d6b6b30c7 100644
--- a/runtime/primitive.h
+++ b/runtime/primitive.h
@@ -165,6 +165,10 @@ class Primitive {
}
}
+ static bool IsIntOrLongType(Type type) {
+ return type == kPrimInt || type == kPrimLong;
+ }
+
static bool Is64BitType(Type type) {
return type == kPrimLong || type == kPrimDouble;
}
diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc
index 1ddb761142..0eb8eca7d2 100644
--- a/runtime/quick_exception_handler.cc
+++ b/runtime/quick_exception_handler.cc
@@ -114,8 +114,7 @@ class CatchBlockStackVisitor FINAL : public StackVisitor {
DISALLOW_COPY_AND_ASSIGN(CatchBlockStackVisitor);
};
-void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
- mirror::Throwable* exception) {
+void QuickExceptionHandler::FindCatch(mirror::Throwable* exception) {
DCHECK(!is_deoptimization_);
if (kDebugExceptionDelivery) {
mirror::String* msg = exception->GetDetailMessage();
@@ -145,15 +144,14 @@ void QuickExceptionHandler::FindCatch(const ThrowLocation& throw_location,
DCHECK(!self_->IsExceptionPending());
} else {
// Put exception back in root set with clear throw location.
- self_->SetException(ThrowLocation(), exception_ref.Get());
+ self_->SetException(exception_ref.Get());
}
// The debugger may suspend this thread and walk its stack. Let's do this before popping
// instrumentation frames.
instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
if (instrumentation->HasExceptionCaughtListeners()
&& self_->IsExceptionThrownByCurrentMethod(exception)) {
- instrumentation->ExceptionCaughtEvent(self_, throw_location, handler_method_, handler_dex_pc_,
- exception_ref.Get());
+ instrumentation->ExceptionCaughtEvent(self_, exception_ref.Get());
}
}
@@ -283,7 +281,7 @@ void QuickExceptionHandler::DeoptimizeStack() {
visitor.WalkStack(true);
// Restore deoptimization exception
- self_->SetException(ThrowLocation(), Thread::GetDeoptimizationException());
+ self_->SetException(Thread::GetDeoptimizationException());
}
// Unwinds all instrumentation stack frame prior to catch handler or upcall.
diff --git a/runtime/quick_exception_handler.h b/runtime/quick_exception_handler.h
index a0e6a79299..8cccec87e4 100644
--- a/runtime/quick_exception_handler.h
+++ b/runtime/quick_exception_handler.h
@@ -30,7 +30,6 @@ class Throwable;
} // namespace mirror
class Context;
class Thread;
-class ThrowLocation;
class ShadowFrame;
// Manages exception delivery for Quick backend.
@@ -44,8 +43,7 @@ class QuickExceptionHandler {
UNREACHABLE();
}
- void FindCatch(const ThrowLocation& throw_location, mirror::Throwable* exception)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ void FindCatch(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void DeoptimizeStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void UpdateInstrumentationStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
NO_RETURN void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/reflection-inl.h b/runtime/reflection-inl.h
index be4d5603f9..f21c1a0eb4 100644
--- a/runtime/reflection-inl.h
+++ b/runtime/reflection-inl.h
@@ -27,7 +27,7 @@
namespace art {
-inline bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
+inline bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type srcType, Primitive::Type dstType,
const JValue& src, JValue* dst) {
DCHECK(srcType != Primitive::kPrimNot && dstType != Primitive::kPrimNot);
@@ -88,13 +88,11 @@ inline bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbo
break;
}
if (!unbox_for_result) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("Invalid primitive conversion from %s to %s",
+ ThrowIllegalArgumentException(StringPrintf("Invalid primitive conversion from %s to %s",
PrettyDescriptor(srcType).c_str(),
PrettyDescriptor(dstType).c_str()).c_str());
} else {
- ThrowClassCastException(throw_location,
- StringPrintf("Couldn't convert result of type %s to %s",
+ ThrowClassCastException(StringPrintf("Couldn't convert result of type %s to %s",
PrettyDescriptor(srcType).c_str(),
PrettyDescriptor(dstType).c_str()).c_str());
}
diff --git a/runtime/reflection.cc b/runtime/reflection.cc
index 2aeb92d49a..a54a39d5d6 100644
--- a/runtime/reflection.cc
+++ b/runtime/reflection.cc
@@ -207,7 +207,7 @@ class ArgArray {
static void ThrowIllegalPrimitiveArgumentException(const char* expected,
const char* found_descriptor)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ThrowIllegalArgumentException(nullptr,
+ ThrowIllegalArgumentException(
StringPrintf("Invalid primitive conversion from %s to %s", expected,
PrettyDescriptor(found_descriptor).c_str()).c_str());
}
@@ -227,7 +227,7 @@ class ArgArray {
mirror::Class* dst_class =
h_m->GetClassFromTypeIndex(classes->GetTypeItem(args_offset).type_idx_, true);
if (UNLIKELY(arg == nullptr || !arg->InstanceOf(dst_class))) {
- ThrowIllegalArgumentException(nullptr,
+ ThrowIllegalArgumentException(
StringPrintf("method %s argument %zd has type %s, got %s",
PrettyMethod(h_m.Get(), false).c_str(),
args_offset + 1, // Humans don't count from 0.
@@ -255,7 +255,7 @@ class ArgArray {
ThrowIllegalPrimitiveArgumentException(expected, \
arg->GetClass<>()->GetDescriptor(&temp)); \
} else { \
- ThrowIllegalArgumentException(nullptr, \
+ ThrowIllegalArgumentException(\
StringPrintf("method %s argument %zd has type %s, got %s", \
PrettyMethod(h_m.Get(), false).c_str(), \
args_offset + 1, \
@@ -366,7 +366,7 @@ static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t*
CHECK(self->IsExceptionPending());
LOG(ERROR) << "Internal error: unresolvable type for argument type in JNI invoke: "
<< h_m->GetTypeDescriptorFromTypeIdx(type_idx) << "\n"
- << self->GetException(nullptr)->Dump();
+ << self->GetException()->Dump();
self->ClearException();
++error_count;
} else if (!param_type->IsPrimitive()) {
@@ -580,8 +580,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
uint32_t classes_size = (classes == nullptr) ? 0 : classes->Size();
uint32_t arg_count = (objects != nullptr) ? objects->GetLength() : 0;
if (arg_count != classes_size) {
- ThrowIllegalArgumentException(nullptr,
- StringPrintf("Wrong number of arguments; expected %d, got %d",
+ ThrowIllegalArgumentException(StringPrintf("Wrong number of arguments; expected %d, got %d",
classes_size, arg_count).c_str());
return nullptr;
}
@@ -590,7 +589,7 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
mirror::Class* calling_class = nullptr;
if (!accessible && !VerifyAccess(soa.Self(), receiver, declaring_class, m->GetAccessFlags(),
&calling_class)) {
- ThrowIllegalAccessException(nullptr,
+ ThrowIllegalAccessException(
StringPrintf("Class %s cannot access %s method %s of class %s",
calling_class == nullptr ? "null" : PrettyClass(calling_class).c_str(),
PrettyJavaAccessFlags(m->GetAccessFlags()).c_str(),
@@ -631,13 +630,12 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM
bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
if (o == nullptr) {
- ThrowNullPointerException(nullptr, "null receiver");
+ ThrowNullPointerException("null receiver");
return false;
} else if (!o->InstanceOf(c)) {
std::string expected_class_name(PrettyDescriptor(c));
std::string actual_class_name(PrettyTypeOf(o));
- ThrowIllegalArgumentException(nullptr,
- StringPrintf("Expected receiver of type %s, but got %s",
+ ThrowIllegalArgumentException(StringPrintf("Expected receiver of type %s, but got %s",
expected_class_name.c_str(),
actual_class_name.c_str()).c_str());
return false;
@@ -718,7 +716,7 @@ static std::string UnboxingFailureKind(mirror::ArtField* f)
return "result";
}
-static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object* o,
+static bool UnboxPrimitive(mirror::Object* o,
mirror::Class* dst_class, mirror::ArtField* f,
JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -726,14 +724,12 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
if (!dst_class->IsPrimitive()) {
if (UNLIKELY(o != nullptr && !o->InstanceOf(dst_class))) {
if (!unbox_for_result) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("%s has type %s, got %s",
+ ThrowIllegalArgumentException(StringPrintf("%s has type %s, got %s",
UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyTypeOf(o).c_str()).c_str());
} else {
- ThrowClassCastException(throw_location,
- StringPrintf("Couldn't convert result of type %s to %s",
+ ThrowClassCastException(StringPrintf("Couldn't convert result of type %s to %s",
PrettyTypeOf(o).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
}
@@ -743,20 +739,17 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
return true;
}
if (UNLIKELY(dst_class->GetPrimitiveType() == Primitive::kPrimVoid)) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("Can't unbox %s to void",
+ ThrowIllegalArgumentException(StringPrintf("Can't unbox %s to void",
UnboxingFailureKind(f).c_str()).c_str());
return false;
}
if (UNLIKELY(o == nullptr)) {
if (!unbox_for_result) {
- ThrowIllegalArgumentException(throw_location,
- StringPrintf("%s has type %s, got null",
+ ThrowIllegalArgumentException(StringPrintf("%s has type %s, got null",
UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str()).c_str());
} else {
- ThrowNullPointerException(throw_location,
- StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
+ ThrowNullPointerException(StringPrintf("Expected to unbox a '%s' primitive type but was returned null",
PrettyDescriptor(dst_class).c_str()).c_str());
}
return false;
@@ -793,14 +786,14 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
boxed_value.SetS(primitive_field->GetShort(o));
} else {
std::string temp;
- ThrowIllegalArgumentException(throw_location,
+ ThrowIllegalArgumentException(
StringPrintf("%s has type %s, got %s", UnboxingFailureKind(f).c_str(),
PrettyDescriptor(dst_class).c_str(),
PrettyDescriptor(o->GetClass()->GetDescriptor(&temp)).c_str()).c_str());
return false;
}
- return ConvertPrimitiveValue(throw_location, unbox_for_result,
+ return ConvertPrimitiveValue(unbox_for_result,
src_class->GetPrimitiveType(), dst_class->GetPrimitiveType(),
boxed_value, unboxed_value);
}
@@ -808,12 +801,12 @@ static bool UnboxPrimitive(const ThrowLocation* throw_location, mirror::Object*
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
JValue* unboxed_value) {
DCHECK(f != nullptr);
- return UnboxPrimitive(nullptr, o, dst_class, f, unboxed_value);
+ return UnboxPrimitive(o, dst_class, f, unboxed_value);
}
-bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
+bool UnboxPrimitiveForResult(mirror::Object* o,
mirror::Class* dst_class, JValue* unboxed_value) {
- return UnboxPrimitive(&throw_location, o, dst_class, nullptr, unboxed_value);
+ return UnboxPrimitive(o, dst_class, nullptr, unboxed_value);
}
bool VerifyAccess(Thread* self, mirror::Object* obj, mirror::Class* declaring_class,
diff --git a/runtime/reflection.h b/runtime/reflection.h
index 1a64871e92..857d63b07c 100644
--- a/runtime/reflection.h
+++ b/runtime/reflection.h
@@ -31,18 +31,16 @@ namespace mirror {
union JValue;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
-class ThrowLocation;
mirror::Object* BoxPrimitive(Primitive::Type src_class, const JValue& value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
bool UnboxPrimitiveForField(mirror::Object* o, mirror::Class* dst_class, mirror::ArtField* f,
JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-bool UnboxPrimitiveForResult(const ThrowLocation& throw_location, mirror::Object* o,
- mirror::Class* dst_class, JValue* unboxed_value)
+bool UnboxPrimitiveForResult(mirror::Object* o, mirror::Class* dst_class, JValue* unboxed_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-ALWAYS_INLINE bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
+ALWAYS_INLINE bool ConvertPrimitiveValue(bool unbox_for_result,
Primitive::Type src_class, Primitive::Type dst_class,
const JValue& src, JValue* dst)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 35a9e6f61f..0728646f92 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -251,6 +251,7 @@ Runtime::~Runtime() {
VLOG(jit) << "Deleting jit";
jit_.reset(nullptr);
}
+ arena_pool_.reset();
// Shutdown the fault manager if it was initialized.
fault_manager.Shutdown();
@@ -306,11 +307,8 @@ struct AbortState {
DCHECK(Locks::mutator_lock_->IsExclusiveHeld(self) || Locks::mutator_lock_->IsSharedHeld(self));
self->Dump(os);
if (self->IsExceptionPending()) {
- ThrowLocation throw_location;
- mirror::Throwable* exception = self->GetException(&throw_location);
- os << "Pending exception " << PrettyTypeOf(exception)
- << " thrown by '" << throw_location.Dump() << "'\n"
- << exception->Dump();
+ mirror::Throwable* exception = self->GetException();
+ os << "Pending exception " << exception->Dump();
}
}
@@ -857,6 +855,11 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
CreateJit();
}
+ // Use MemMap arena pool for jit, malloc otherwise. Malloc arenas are faster to allocate but
+ // can't be trimmed as easily.
+ const bool use_malloc = jit_options_.get() == nullptr;
+ arena_pool_.reset(new ArenaPool(use_malloc));
+
BlockSignals();
InitPlatformSignalHandlers();
@@ -1017,17 +1020,17 @@ bool Runtime::Init(const RuntimeOptions& raw_options, bool ignore_unrecognized)
}
// Pre-allocate an OutOfMemoryError for the double-OOME case.
- self->ThrowNewException(ThrowLocation(), "Ljava/lang/OutOfMemoryError;",
+ self->ThrowNewException("Ljava/lang/OutOfMemoryError;",
"OutOfMemoryError thrown while trying to throw OutOfMemoryError; "
"no stack trace available");
- pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
+ pre_allocated_OutOfMemoryError_ = GcRoot<mirror::Throwable>(self->GetException());
self->ClearException();
// Pre-allocate a NoClassDefFoundError for the common case of failing to find a system class
// ahead of checking the application's class loader.
- self->ThrowNewException(ThrowLocation(), "Ljava/lang/NoClassDefFoundError;",
+ self->ThrowNewException("Ljava/lang/NoClassDefFoundError;",
"Class not found using the boot class loader; no stack trace available");
- pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException(NULL));
+ pre_allocated_NoClassDefFoundError_ = GcRoot<mirror::Throwable>(self->GetException());
self->ClearException();
// Look for a native bridge.
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 5078b7f1d6..4cddb5c255 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -28,6 +28,7 @@
#include "arch/instruction_set.h"
#include "base/allocator.h"
+#include "base/arena_allocator.h"
#include "base/macros.h"
#include "compiler_callbacks.h"
#include "gc_root.h"
@@ -545,6 +546,13 @@ class Runtime {
void CreateJit();
+ ArenaPool* GetArenaPool() {
+ return arena_pool_.get();
+ }
+ const ArenaPool* GetArenaPool() const {
+ return arena_pool_.get();
+ }
+
private:
static void InitPlatformSignalHandlers();
@@ -608,6 +616,8 @@ class Runtime {
gc::Heap* heap_;
+ std::unique_ptr<ArenaPool> arena_pool_;
+
// The number of spins that are done before thread suspension is used to forcibly inflate.
size_t max_spins_before_thin_lock_inflation_;
MonitorList* monitor_list_;
diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def
index d072ffa56d..8775f8dbee 100644
--- a/runtime/runtime_options.def
+++ b/runtime/runtime_options.def
@@ -59,6 +59,7 @@ RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
RUNTIME_OPTIONS_KEY (MillisecondsToNanoseconds, \
LongGCLogThreshold, gc::Heap::kDefaultLongGCLogThreshold)
RUNTIME_OPTIONS_KEY (Unit, DumpGCPerformanceOnShutdown)
+RUNTIME_OPTIONS_KEY (Unit, DumpJITInfoOnShutdown)
RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint)
RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode)
RUNTIME_OPTIONS_KEY (bool, UseTLAB, false)
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 97a8d01e05..b8ca21e867 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -28,7 +28,6 @@
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
-#include "throw_location.h"
#include "verify_object-inl.h"
#include "vmap_table.h"
@@ -57,10 +56,6 @@ mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
}
}
-ThrowLocation ShadowFrame::GetCurrentLocationForThrow() const {
- return ThrowLocation(GetThisObject(), GetMethod(), GetDexPC());
-}
-
size_t ManagedStack::NumJniShadowFrameReferences() const {
size_t count = 0;
for (const ManagedStack* current_fragment = this; current_fragment != NULL;
@@ -134,12 +129,6 @@ mirror::Object* StackVisitor::GetThisObject() const {
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
- } else if (m->IsOptimized(GetInstructionSetPointerSize(
- Runtime::Current()->GetInstructionSet()))) {
- // TODO: Implement, currently only used for exceptions when jdwp is enabled.
- UNIMPLEMENTED(WARNING)
- << "StackVisitor::GetThisObject is unimplemented with the optimizing compiler";
- return nullptr;
} else {
const DexFile::CodeItem* code_item = m->GetCodeItem();
if (code_item == nullptr) {
@@ -209,29 +198,32 @@ bool StackVisitor::GetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map,
- code_item->registers_size_);
- DexRegisterMap::LocationKind location_kind = dex_register_map.GetLocationKind(vreg);
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, code_item->registers_size_);
+ DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
switch (location_kind) {
- case DexRegisterMap::kInStack: {
+ case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
*val = *reinterpret_cast<const uint32_t*>(addr);
return true;
}
- case DexRegisterMap::kInRegister:
- case DexRegisterMap::kInFpuRegister: {
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister: {
uint32_t reg = dex_register_map.GetMachineRegister(vreg);
return GetRegisterIfAccessible(reg, kind, val);
}
- case DexRegisterMap::kConstant:
+ case DexRegisterLocation::Kind::kConstant:
*val = dex_register_map.GetConstant(vreg);
return true;
- case DexRegisterMap::kNone:
+ case DexRegisterLocation::Kind::kNone:
return false;
+ default:
+ LOG(FATAL)
+ << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(dex_register_map.GetLocationInternalKind(vreg));
+ UNREACHABLE();
}
- UNREACHABLE();
- return false;
}
bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
@@ -391,29 +383,29 @@ bool StackVisitor::SetVRegFromOptimizedCode(mirror::ArtMethod* m, uint16_t vreg,
DCHECK(code_item != nullptr) << PrettyMethod(m); // Can't be NULL or how would we compile
// its instructions?
DCHECK_LT(vreg, code_item->registers_size_);
- DexRegisterMap dex_register_map = code_info.GetDexRegisterMapOf(stack_map,
- code_item->registers_size_);
- DexRegisterMap::LocationKind location_kind = dex_register_map.GetLocationKind(vreg);
+ DexRegisterMap dex_register_map =
+ code_info.GetDexRegisterMapOf(stack_map, code_item->registers_size_);
+ DexRegisterLocation::Kind location_kind = dex_register_map.GetLocationKind(vreg);
uint32_t dex_pc = m->ToDexPc(cur_quick_frame_pc_, false);
switch (location_kind) {
- case DexRegisterMap::kInStack: {
+ case DexRegisterLocation::Kind::kInStack: {
const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg);
uint8_t* addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + offset;
*reinterpret_cast<uint32_t*>(addr) = new_value;
return true;
}
- case DexRegisterMap::kInRegister:
- case DexRegisterMap::kInFpuRegister: {
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister: {
uint32_t reg = dex_register_map.GetMachineRegister(vreg);
return SetRegisterIfAccessible(reg, new_value, kind);
}
- case DexRegisterMap::kConstant:
+ case DexRegisterLocation::Kind::kConstant:
LOG(ERROR) << StringPrintf("Cannot change value of DEX register v%u used as a constant at "
"DEX pc 0x%x (native pc 0x%x) of method %s",
vreg, dex_pc, native_pc_offset,
PrettyMethod(cur_quick_frame_->AsMirrorPtr()).c_str());
return false;
- case DexRegisterMap::kNone:
+ case DexRegisterLocation::Kind::kNone:
LOG(ERROR) << StringPrintf("No location for DEX register v%u at DEX pc 0x%x "
"(native pc 0x%x) of method %s",
vreg, dex_pc, native_pc_offset,
diff --git a/runtime/stack.h b/runtime/stack.h
index b495f0343b..13bd47fa9d 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -25,7 +25,6 @@
#include "gc_root.h"
#include "mirror/object_reference.h"
#include "read_barrier.h"
-#include "throw_location.h"
#include "utils.h"
#include "verify_object.h"
@@ -40,6 +39,7 @@ class Context;
class ShadowFrame;
class HandleScope;
class ScopedObjectAccess;
+class StackVisitor;
class Thread;
// The kind of vreg being accessed in calls to Set/GetVReg.
@@ -258,8 +258,6 @@ class ShadowFrame {
mirror::Object* GetThisObject(uint16_t num_ins) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ThrowLocation GetCurrentLocationForThrow() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
bool Contains(StackReference<mirror::Object>* shadow_frame_entry_obj) const {
if (HasReferenceArray()) {
return ((&References()[0] <= shadow_frame_entry_obj) &&
diff --git a/runtime/stack_map.h b/runtime/stack_map.h
index 6d996722b4..e88820fc79 100644
--- a/runtime/stack_map.h
+++ b/runtime/stack_map.h
@@ -23,6 +23,14 @@
namespace art {
+// Size of a frame slot, in bytes. This constant is a signed value,
+// to please the compiler in arithmetic operations involving int32_t
+// (signed) values.
+static ssize_t constexpr kFrameSlotSize = 4;
+
+// Word alignment required on ARM, in bytes.
+static constexpr size_t kWordAlignment = 4;
+
/**
* Classes in the following file are wrapper on stack map information backed
* by a MemoryRegion. As such they read and write to the region, they don't have
@@ -58,6 +66,8 @@ class InlineInfo {
}
private:
+ // TODO: Instead of plain types such as "uint8_t", introduce
+ // typedefs (and document the memory layout of InlineInfo).
static constexpr int kDepthOffset = 0;
static constexpr int kFixedSize = kDepthOffset + sizeof(uint8_t);
@@ -68,82 +78,327 @@ class InlineInfo {
friend class StackMapStream;
};
+// Dex register location container used by DexRegisterMap and StackMapStream.
+class DexRegisterLocation {
+ public:
+ /*
+ * The location kind used to populate the Dex register information in a
+ * StackMapStream can either be:
+ * - kNone: the register has no location yet, meaning it has not been set;
+ * - kConstant: value holds the constant;
+ * - kStack: value holds the stack offset;
+ * - kRegister: value holds the physical register number;
+ * - kFpuRegister: value holds the physical register number.
+ *
+ * In addition, DexRegisterMap also uses these values:
+ * - kInStackLargeOffset: value holds a "large" stack offset (greater than
+ * 128 bytes);
+ * - kConstantLargeValue: value holds a "large" constant (lower than
+ * -16, or greater than 15).
+ */
+ enum class Kind : uint8_t {
+ // Short location kinds, for entries fitting on one byte (3 bits
+ // for the kind, 5 bits for the value) in a DexRegisterMap.
+ kNone = 0, // 0b000
+ kInStack = 1, // 0b001
+ kInRegister = 2, // 0b010
+ kInFpuRegister = 3, // 0b011
+ kConstant = 4, // 0b100
+
+ // Large location kinds, requiring a 5-byte encoding (1 byte for the
+ // kind, 4 bytes for the value).
+
+ // Stack location at a large offset, meaning that the offset value
+ // divided by the stack frame slot size (4 bytes) cannot fit on a
+ // 5-bit unsigned integer (i.e., this offset value is greater than
+ // or equal to 2^5 * 4 = 128 bytes).
+ kInStackLargeOffset = 5, // 0b101
+
+ // Large constant, that cannot fit on a 5-bit signed integer (i.e.,
+ // lower than -2^(5-1) = -16, or greater than
+ // 2^(5-1) - 1 = 15).
+ kConstantLargeValue = 6, // 0b110
+
+ kLastLocationKind = kConstantLargeValue
+ };
+
+ static_assert(
+ sizeof(Kind) == 1u,
+ "art::DexRegisterLocation::Kind has a size different from one byte.");
+
+ static const char* PrettyDescriptor(Kind kind) {
+ switch (kind) {
+ case Kind::kNone:
+ return "none";
+ case Kind::kInStack:
+ return "in stack";
+ case Kind::kInRegister:
+ return "in register";
+ case Kind::kInFpuRegister:
+ return "in fpu register";
+ case Kind::kConstant:
+ return "as constant";
+ case Kind::kInStackLargeOffset:
+ return "in stack (large offset)";
+ case Kind::kConstantLargeValue:
+ return "as constant (large value)";
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ static bool IsShortLocationKind(Kind kind) {
+ switch (kind) {
+ case Kind::kNone:
+ case Kind::kInStack:
+ case Kind::kInRegister:
+ case Kind::kInFpuRegister:
+ case Kind::kConstant:
+ return true;
+
+ case Kind::kInStackLargeOffset:
+ case Kind::kConstantLargeValue:
+ return false;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ // Convert `kind` to a "surface" kind, i.e. one that doesn't include
+ // any value with a "large" qualifier.
+ // TODO: Introduce another enum type for the surface kind?
+ static Kind ConvertToSurfaceKind(Kind kind) {
+ switch (kind) {
+ case Kind::kNone:
+ case Kind::kInStack:
+ case Kind::kInRegister:
+ case Kind::kInFpuRegister:
+ case Kind::kConstant:
+ return kind;
+
+ case Kind::kInStackLargeOffset:
+ return Kind::kInStack;
+
+ case Kind::kConstantLargeValue:
+ return Kind::kConstant;
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ DexRegisterLocation(Kind kind, int32_t value)
+ : kind_(kind), value_(value) {}
+
+ // Get the "surface" kind of the location, i.e., the one that doesn't
+ // include any value with a "large" qualifier.
+ Kind GetKind() const {
+ return ConvertToSurfaceKind(kind_);
+ }
+
+ // Get the value of the location.
+ int32_t GetValue() const { return value_; }
+
+ // Get the actual kind of the location.
+ Kind GetInternalKind() const { return kind_; }
+
+ private:
+ Kind kind_;
+ int32_t value_;
+};
+
/**
* Information on dex register values for a specific PC. The information is
* of the form:
* [location_kind, register_value]+.
- *
- * The location_kind for a Dex register can either be:
- * - kConstant: register_value holds the constant,
- * - kStack: register_value holds the stack offset,
- * - kRegister: register_value holds the physical register number.
- * - kFpuRegister: register_value holds the physical register number.
- * - kNone: the register has no location yet, meaning it has not been set.
+ * either on 1 or 5 bytes (see art::DexRegisterLocation::Kind).
*/
class DexRegisterMap {
public:
explicit DexRegisterMap(MemoryRegion region) : region_(region) {}
- enum LocationKind {
- kNone,
- kInStack,
- kInRegister,
- kInFpuRegister,
- kConstant
- };
+ // Short (compressed) location, fitting on one byte.
+ typedef uint8_t ShortLocation;
+
+ void SetRegisterInfo(size_t offset, const DexRegisterLocation& dex_register_location) {
+ DexRegisterLocation::Kind kind = ComputeCompressedKind(dex_register_location);
+ int32_t value = dex_register_location.GetValue();
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Compress the kind and the value as a single byte.
+ if (kind == DexRegisterLocation::Kind::kInStack) {
+ // Instead of storing stack offsets expressed in bytes for
+ // short stack locations, store slot offsets. A stack offset
+ // is a multiple of 4 (kFrameSlotSize). This means that by
+ // dividing it by 4, we can fit values from the [0, 128)
+ // interval in a short stack location, and not just values
+ // from the [0, 32) interval.
+ DCHECK_EQ(value % kFrameSlotSize, 0);
+ value /= kFrameSlotSize;
+ }
+ DCHECK(IsUint<kValueBits>(value)) << value;
+ region_.StoreUnaligned<ShortLocation>(offset, MakeShortLocation(kind, value));
+ } else {
+ // Large location. Write the location on one byte and the value
+ // on 4 bytes.
+ DCHECK(!IsUint<kValueBits>(value)) << value;
+ if (kind == DexRegisterLocation::Kind::kInStackLargeOffset) {
+ // Also divide large stack offsets by 4 for the sake of consistency.
+ DCHECK_EQ(value % kFrameSlotSize, 0);
+ value /= kFrameSlotSize;
+ }
+ // Data can be unaligned as the written Dex register locations can
+ // either be 1-byte or 5-byte wide. Use
+ // art::MemoryRegion::StoreUnaligned instead of
+ // art::MemoryRegion::Store to prevent unaligned word accesses on ARM.
+ region_.StoreUnaligned<DexRegisterLocation::Kind>(offset, kind);
+ region_.StoreUnaligned<int32_t>(offset + sizeof(DexRegisterLocation::Kind), value);
+ }
+ }
- static const char* PrettyDescriptor(LocationKind kind) {
- switch (kind) {
- case kNone:
- return "none";
- case kInStack:
- return "in stack";
- case kInRegister:
- return "in register";
- case kInFpuRegister:
- return "in fpu register";
- case kConstant:
- return "as constant";
+ // Find the offset of the Dex register location number `dex_register_index`.
+ size_t FindLocationOffset(uint16_t dex_register_index) const {
+ size_t offset = kFixedSize;
+ // Skip the first `dex_register_index` entries.
+ for (uint16_t i = 0; i < dex_register_index; ++i) {
+ // Read the first next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterLocation::Kind kind = ExtractKindAtOffset(offset);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += SingleShortEntrySize();
+ } else {
+ // Large location. Skip the 5 next bytes.
+ offset += SingleLargeEntrySize();
+ }
}
- UNREACHABLE();
- return nullptr;
+ return offset;
}
- LocationKind GetLocationKind(uint16_t register_index) const {
- return region_.Load<LocationKind>(
- kFixedSize + register_index * SingleEntrySize());
+ // Get the surface kind.
+ DexRegisterLocation::Kind GetLocationKind(uint16_t dex_register_index) const {
+ return DexRegisterLocation::ConvertToSurfaceKind(GetLocationInternalKind(dex_register_index));
}
- void SetRegisterInfo(uint16_t register_index, LocationKind kind, int32_t value) {
- size_t entry = kFixedSize + register_index * SingleEntrySize();
- region_.Store<LocationKind>(entry, kind);
- region_.Store<int32_t>(entry + sizeof(LocationKind), value);
+ // Get the internal kind.
+ DexRegisterLocation::Kind GetLocationInternalKind(uint16_t dex_register_index) const {
+ size_t offset = FindLocationOffset(dex_register_index);
+ return ExtractKindAtOffset(offset);
}
- int32_t GetValue(uint16_t register_index) const {
- return region_.Load<int32_t>(
- kFixedSize + sizeof(LocationKind) + register_index * SingleEntrySize());
+ // TODO: Rename as GetDexRegisterLocation?
+ DexRegisterLocation GetLocationKindAndValue(uint16_t dex_register_index) const {
+ size_t offset = FindLocationOffset(dex_register_index);
+ // Read the first byte and inspect its first 3 bits to get the location.
+ ShortLocation first_byte = region_.LoadUnaligned<ShortLocation>(offset);
+ DexRegisterLocation::Kind kind = ExtractKindFromShortLocation(first_byte);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Extract the value from the remaining 5 bits.
+ int32_t value = ExtractValueFromShortLocation(first_byte);
+ if (kind == DexRegisterLocation::Kind::kInStack) {
+ // Convert the stack slot (short) offset to a byte offset value.
+ value *= kFrameSlotSize;
+ }
+ return DexRegisterLocation(kind, value);
+ } else {
+ // Large location. Read the four next bytes to get the value.
+ int32_t value = region_.LoadUnaligned<int32_t>(offset + sizeof(DexRegisterLocation::Kind));
+ if (kind == DexRegisterLocation::Kind::kInStackLargeOffset) {
+ // Convert the stack slot (large) offset to a byte offset value.
+ value *= kFrameSlotSize;
+ }
+ return DexRegisterLocation(kind, value);
+ }
}
- int32_t GetStackOffsetInBytes(uint16_t register_index) const {
- DCHECK(GetLocationKind(register_index) == kInStack);
- // We currently encode the offset in bytes.
- return GetValue(register_index);
+ int32_t GetStackOffsetInBytes(uint16_t dex_register_index) const {
+ DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kInStack);
+ // GetLocationKindAndValue returns the offset in bytes.
+ return location.GetValue();
}
- int32_t GetConstant(uint16_t register_index) const {
- DCHECK(GetLocationKind(register_index) == kConstant);
- return GetValue(register_index);
+ int32_t GetConstant(uint16_t dex_register_index) const {
+ DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ DCHECK(location.GetKind() == DexRegisterLocation::Kind::kConstant);
+ return location.GetValue();
}
- int32_t GetMachineRegister(uint16_t register_index) const {
- DCHECK(GetLocationKind(register_index) == kInRegister
- || GetLocationKind(register_index) == kInFpuRegister);
- return GetValue(register_index);
+ int32_t GetMachineRegister(uint16_t dex_register_index) const {
+ DexRegisterLocation location = GetLocationKindAndValue(dex_register_index);
+ DCHECK(location.GetInternalKind() == DexRegisterLocation::Kind::kInRegister
+ || location.GetInternalKind() == DexRegisterLocation::Kind::kInFpuRegister)
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+ return location.GetValue();
}
- static size_t SingleEntrySize() {
- return sizeof(LocationKind) + sizeof(int32_t);
+ // Compute the compressed kind of `location`.
+ static DexRegisterLocation::Kind ComputeCompressedKind(const DexRegisterLocation& location) {
+ switch (location.GetInternalKind()) {
+ case DexRegisterLocation::Kind::kNone:
+ DCHECK_EQ(location.GetValue(), 0);
+ return DexRegisterLocation::Kind::kNone;
+
+ case DexRegisterLocation::Kind::kInRegister:
+ DCHECK_GE(location.GetValue(), 0);
+ DCHECK_LT(location.GetValue(), 1 << DexRegisterMap::kValueBits);
+ return DexRegisterLocation::Kind::kInRegister;
+
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ DCHECK_GE(location.GetValue(), 0);
+ DCHECK_LT(location.GetValue(), 1 << DexRegisterMap::kValueBits);
+ return DexRegisterLocation::Kind::kInFpuRegister;
+
+ case DexRegisterLocation::Kind::kInStack:
+ DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
+ return IsUint<DexRegisterMap::kValueBits>(location.GetValue() / kFrameSlotSize)
+ ? DexRegisterLocation::Kind::kInStack
+ : DexRegisterLocation::Kind::kInStackLargeOffset;
+
+ case DexRegisterLocation::Kind::kConstant:
+ return IsUint<DexRegisterMap::kValueBits>(location.GetValue())
+ ? DexRegisterLocation::Kind::kConstant
+ : DexRegisterLocation::Kind::kConstantLargeValue;
+
+ default:
+ LOG(FATAL) << "Unexpected location kind"
+ << DexRegisterLocation::PrettyDescriptor(location.GetInternalKind());
+ UNREACHABLE();
+ }
+ }
+
+ // Can `location` be turned into a short location?
+ static bool CanBeEncodedAsShortLocation(const DexRegisterLocation& location) {
+ switch (location.GetInternalKind()) {
+ case DexRegisterLocation::Kind::kNone:
+ case DexRegisterLocation::Kind::kInRegister:
+ case DexRegisterLocation::Kind::kInFpuRegister:
+ return true;
+
+ case DexRegisterLocation::Kind::kInStack:
+ DCHECK_EQ(location.GetValue() % kFrameSlotSize, 0);
+ return IsUint<kValueBits>(location.GetValue() / kFrameSlotSize);
+
+ case DexRegisterLocation::Kind::kConstant:
+ return IsUint<kValueBits>(location.GetValue());
+
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ static size_t EntrySize(const DexRegisterLocation& location) {
+ return CanBeEncodedAsShortLocation(location)
+ ? DexRegisterMap::SingleShortEntrySize()
+ : DexRegisterMap::SingleLargeEntrySize();
+ }
+
+ static size_t SingleShortEntrySize() {
+ return sizeof(ShortLocation);
+ }
+
+ static size_t SingleLargeEntrySize() {
+ return sizeof(DexRegisterLocation::Kind) + sizeof(int32_t);
}
size_t Size() const {
@@ -153,7 +408,43 @@ class DexRegisterMap {
static constexpr int kFixedSize = 0;
private:
+ // Width of the kind "field" in a short location, in bits.
+ static constexpr size_t kKindBits = 3;
+ // Width of the value "field" in a short location, in bits.
+ static constexpr size_t kValueBits = 5;
+
+ static constexpr uint8_t kKindMask = (1 << kKindBits) - 1;
+ static constexpr int32_t kValueMask = (1 << kValueBits) - 1;
+ static constexpr size_t kKindOffset = 0;
+ static constexpr size_t kValueOffset = kKindBits;
+
+ static ShortLocation MakeShortLocation(DexRegisterLocation::Kind kind, int32_t value) {
+ DCHECK(IsUint<kKindBits>(static_cast<uint8_t>(kind))) << static_cast<uint8_t>(kind);
+ DCHECK(IsUint<kValueBits>(value)) << value;
+ return (static_cast<uint8_t>(kind) & kKindMask) << kKindOffset
+ | (value & kValueMask) << kValueOffset;
+ }
+
+ static DexRegisterLocation::Kind ExtractKindFromShortLocation(ShortLocation location) {
+ uint8_t kind = (location >> kKindOffset) & kKindMask;
+ DCHECK_LE(kind, static_cast<uint8_t>(DexRegisterLocation::Kind::kLastLocationKind));
+ return static_cast<DexRegisterLocation::Kind>(kind);
+ }
+
+ static int32_t ExtractValueFromShortLocation(ShortLocation location) {
+ return (location >> kValueOffset) & kValueMask;
+ }
+
+ // Extract a location kind from the byte at position `offset`.
+ DexRegisterLocation::Kind ExtractKindAtOffset(size_t offset) const {
+ ShortLocation first_byte = region_.LoadUnaligned<ShortLocation>(offset);
+ return ExtractKindFromShortLocation(first_byte);
+ }
+
MemoryRegion region_;
+
+ friend class CodeInfo;
+ friend class StackMapStream;
};
/**
@@ -187,7 +478,7 @@ class StackMap {
}
void SetNativePcOffset(uint32_t native_pc_offset) {
- return region_.Store<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
+ region_.Store<uint32_t>(kNativePcOffsetOffset, native_pc_offset);
}
uint32_t GetDexRegisterMapOffset() const {
@@ -195,7 +486,7 @@ class StackMap {
}
void SetDexRegisterMapOffset(uint32_t offset) {
- return region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
+ region_.Store<uint32_t>(kDexRegisterMapOffsetOffset, offset);
}
uint32_t GetInlineDescriptorOffset() const {
@@ -203,7 +494,7 @@ class StackMap {
}
void SetInlineDescriptorOffset(uint32_t offset) {
- return region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
+ region_.Store<uint32_t>(kInlineDescriptorOffsetOffset, offset);
}
uint32_t GetRegisterMask() const {
@@ -240,7 +531,7 @@ class StackMap {
static size_t ComputeAlignedStackMapSize(size_t stack_mask_size) {
// On ARM, the stack maps must be 4-byte aligned.
- return RoundUp(StackMap::kFixedSize + stack_mask_size, 4);
+ return RoundUp(StackMap::kFixedSize + stack_mask_size, kWordAlignment);
}
// Special (invalid) offset for the DexRegisterMapOffset field meaning
@@ -252,6 +543,8 @@ class StackMap {
static constexpr uint32_t kNoInlineInfo = -1;
private:
+ // TODO: Instead of plain types such as "uint32_t", introduce
+ // typedefs (and document the memory layout of StackMap).
static constexpr int kDexPcOffset = 0;
static constexpr int kNativePcOffsetOffset = kDexPcOffset + sizeof(uint32_t);
static constexpr int kDexRegisterMapOffsetOffset = kNativePcOffsetOffset + sizeof(uint32_t);
@@ -317,11 +610,15 @@ class CodeInfo {
return StackMap::ComputeAlignedStackMapSize(GetStackMaskSize());
}
+ uint32_t GetStackMapsOffset() const {
+ return kFixedSize;
+ }
+
DexRegisterMap GetDexRegisterMapOf(StackMap stack_map, uint32_t number_of_dex_registers) const {
DCHECK(stack_map.HasDexRegisterMap());
uint32_t offset = stack_map.GetDexRegisterMapOffset();
- return DexRegisterMap(region_.Subregion(offset,
- DexRegisterMap::kFixedSize + number_of_dex_registers * DexRegisterMap::SingleEntrySize()));
+ size_t size = ComputeDexRegisterMapSize(offset, number_of_dex_registers);
+ return DexRegisterMap(region_.Subregion(offset, size));
}
InlineInfo GetInlineInfoOf(StackMap stack_map) const {
@@ -356,6 +653,8 @@ class CodeInfo {
}
private:
+ // TODO: Instead of plain types such as "uint32_t", introduce
+ // typedefs (and document the memory layout of CodeInfo).
static constexpr int kOverallSizeOffset = 0;
static constexpr int kNumberOfStackMapsOffset = kOverallSizeOffset + sizeof(uint32_t);
static constexpr int kStackMaskSizeOffset = kNumberOfStackMapsOffset + sizeof(uint32_t);
@@ -367,6 +666,33 @@ class CodeInfo {
: region_.Subregion(kFixedSize, StackMapSize() * GetNumberOfStackMaps());
}
+ // Compute the size of a Dex register map starting at offset `origin` in
+ // `region_` and containing `number_of_dex_registers` locations.
+ size_t ComputeDexRegisterMapSize(uint32_t origin, uint32_t number_of_dex_registers) const {
+ // TODO: Ideally, we would like to use art::DexRegisterMap::Size or
+ // art::DexRegisterMap::FindLocationOffset, but the DexRegisterMap is not
+ // yet built. Try to factor common code.
+ size_t offset = origin + DexRegisterMap::kFixedSize;
+ // Skip the first `number_of_dex_registers - 1` entries.
+ for (uint16_t i = 0; i < number_of_dex_registers; ++i) {
+ // Read the first next byte and inspect its first 3 bits to decide
+ // whether it is a short or a large location.
+ DexRegisterMap::ShortLocation first_byte =
+ region_.LoadUnaligned<DexRegisterMap::ShortLocation>(offset);
+ DexRegisterLocation::Kind kind =
+ DexRegisterMap::ExtractKindFromShortLocation(first_byte);
+ if (DexRegisterLocation::IsShortLocationKind(kind)) {
+ // Short location. Skip the current byte.
+ offset += DexRegisterMap::SingleShortEntrySize();
+ } else {
+ // Large location. Skip the 5 next bytes.
+ offset += DexRegisterMap::SingleLargeEntrySize();
+ }
+ }
+ size_t size = offset - origin;
+ return size;
+ }
+
MemoryRegion region_;
friend class StackMapStream;
};
diff --git a/runtime/thread.cc b/runtime/thread.cc
index fdb1f9dd4c..e8e93555ac 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -1033,18 +1033,11 @@ void Thread::DumpJavaStack(std::ostream& os) const {
// assumption that there is no exception pending on entry. Thus, stash any pending exception.
// Thread::Current() instead of this in case a thread is dumping the stack of another suspended
// thread.
- StackHandleScope<3> scope(Thread::Current());
+ StackHandleScope<1> scope(Thread::Current());
Handle<mirror::Throwable> exc;
- Handle<mirror::Object> throw_location_this_object;
- Handle<mirror::ArtMethod> throw_location_method;
- uint32_t throw_location_dex_pc;
bool have_exception = false;
if (IsExceptionPending()) {
- ThrowLocation exc_location;
- exc = scope.NewHandle(GetException(&exc_location));
- throw_location_this_object = scope.NewHandle(exc_location.GetThis());
- throw_location_method = scope.NewHandle(exc_location.GetMethod());
- throw_location_dex_pc = exc_location.GetDexPc();
+ exc = scope.NewHandle(GetException());
const_cast<Thread*>(this)->ClearException();
have_exception = true;
}
@@ -1055,10 +1048,7 @@ void Thread::DumpJavaStack(std::ostream& os) const {
dumper.WalkStack();
if (have_exception) {
- ThrowLocation exc_location(throw_location_this_object.Get(),
- throw_location_method.Get(),
- throw_location_dex_pc);
- const_cast<Thread*>(this)->SetException(exc_location, exc.Get());
+ const_cast<Thread*>(this)->SetException(exc.Get());
}
}
@@ -1148,8 +1138,6 @@ void Thread::Shutdown() {
Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
- tlsPtr_.debug_invoke_req = new DebugInvokeReq;
- tlsPtr_.single_step_control = nullptr;
tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
tlsPtr_.name = new std::string(kThreadNameDuringStartup);
tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
@@ -1188,7 +1176,7 @@ void Thread::AssertPendingException() const {
void Thread::AssertNoPendingException() const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException(nullptr);
+ mirror::Throwable* exception = GetException();
LOG(FATAL) << "No pending exception expected: " << exception->Dump();
}
}
@@ -1196,7 +1184,7 @@ void Thread::AssertNoPendingException() const {
void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
if (UNLIKELY(IsExceptionPending())) {
ScopedObjectAccess soa(Thread::Current());
- mirror::Throwable* exception = GetException(nullptr);
+ mirror::Throwable* exception = GetException();
LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
<< exception->Dump();
}
@@ -1301,7 +1289,6 @@ Thread::~Thread() {
CleanupCpu();
}
- delete tlsPtr_.debug_invoke_req;
if (tlsPtr_.single_step_control != nullptr) {
delete tlsPtr_.single_step_control;
}
@@ -1715,50 +1702,44 @@ jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
return result;
}
-void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* fmt, ...) {
+void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
- ThrowNewExceptionV(throw_location, exception_class_descriptor,
- fmt, args);
+ ThrowNewExceptionV(exception_class_descriptor, fmt, args);
va_end(args);
}
-void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
+void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
const char* fmt, va_list ap) {
std::string msg;
StringAppendV(&msg, fmt, ap);
- ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
+ ThrowNewException(exception_class_descriptor, msg.c_str());
}
-void Thread::ThrowNewException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
+void Thread::ThrowNewException(const char* exception_class_descriptor,
const char* msg) {
// Callers should either clear or call ThrowNewWrappedException.
AssertNoPendingExceptionForNewException(msg);
- ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
+ ThrowNewWrappedException(exception_class_descriptor, msg);
}
-void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
+static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
+ return method != nullptr
+ ? method->GetDeclaringClass()->GetClassLoader()
+ : nullptr;
+}
+
+void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
const char* msg) {
DCHECK_EQ(this, Thread::Current());
ScopedObjectAccessUnchecked soa(this);
- StackHandleScope<5> hs(soa.Self());
- // Ensure we don't forget arguments over object allocation.
- Handle<mirror::Object> saved_throw_this(hs.NewHandle(throw_location.GetThis()));
- Handle<mirror::ArtMethod> saved_throw_method(hs.NewHandle(throw_location.GetMethod()));
- // Ignore the cause throw location. TODO: should we report this as a re-throw?
- ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException(nullptr)));
+ StackHandleScope<3> hs(soa.Self());
+ Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
+ ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
ClearException();
Runtime* runtime = Runtime::Current();
-
- mirror::ClassLoader* cl = nullptr;
- if (saved_throw_method.Get() != nullptr) {
- cl = saved_throw_method.Get()->GetDeclaringClass()->GetClassLoader();
- }
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(cl));
Handle<mirror::Class> exception_class(
hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
class_loader)));
@@ -1779,9 +1760,7 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
// If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
if (exception.Get() == nullptr) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
- throw_location.GetDexPc());
- SetException(gc_safe_throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
return;
}
@@ -1831,9 +1810,7 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
if (trace.get() != nullptr) {
exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
}
- ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
- throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.Get());
+ SetException(exception.Get());
} else {
jvalue jv_args[2];
size_t i = 0;
@@ -1848,9 +1825,7 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
}
InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
if (LIKELY(!IsExceptionPending())) {
- ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
- throw_location.GetDexPc());
- SetException(gc_safe_throw_location, exception.Get());
+ SetException(exception.Get());
}
}
}
@@ -1858,14 +1833,13 @@ void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
void Thread::ThrowOutOfMemoryError(const char* msg) {
LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
- ThrowLocation throw_location = GetCurrentLocationForThrow();
if (!tls32_.throwing_OutOfMemoryError) {
tls32_.throwing_OutOfMemoryError = true;
- ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
+ ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
tls32_.throwing_OutOfMemoryError = false;
} else {
Dump(LOG(WARNING)); // The pre-allocated OOME has no stack, so help out and log one.
- SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
+ SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
}
}
@@ -2030,8 +2004,7 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
void Thread::QuickDeliverException() {
// Get exception from thread.
- ThrowLocation throw_location;
- mirror::Throwable* exception = GetException(&throw_location);
+ mirror::Throwable* exception = GetException();
CHECK(exception != nullptr);
// Don't leave exception visible while we try to find the handler, which may cause class
// resolution.
@@ -2041,7 +2014,7 @@ void Thread::QuickDeliverException() {
if (is_deoptimization) {
exception_handler.DeoptimizeStack();
} else {
- exception_handler.FindCatch(throw_location, exception);
+ exception_handler.FindCatch(exception);
}
exception_handler.UpdateInstrumentationStack();
exception_handler.DoLongJump();
@@ -2093,14 +2066,6 @@ mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_erro
return visitor.method_;
}
-ThrowLocation Thread::GetCurrentLocationForThrow() {
- Context* context = GetLongJumpContext();
- CurrentMethodVisitor visitor(this, context, true);
- visitor.WalkStack(false);
- ReleaseLongJumpContext(context);
- return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
-}
-
bool Thread::HoldsLock(mirror::Object* object) const {
if (object == nullptr) {
return false;
@@ -2302,7 +2267,6 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg,
RootInfo(kRootNativeStack, thread_id));
}
- tlsPtr_.throw_location.VisitRoots(visitor, arg);
if (tlsPtr_.monitor_enter_object != nullptr) {
visitor(&tlsPtr_.monitor_enter_object, arg, RootInfo(kRootNativeStack, thread_id));
}
@@ -2436,4 +2400,21 @@ void Thread::DeactivateSingleStepControl() {
delete ssc;
}
+void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
+ CHECK(Dbg::IsDebuggerActive());
+ CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
+ CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
+ CHECK(req != nullptr);
+ tlsPtr_.debug_invoke_req = req;
+}
+
+void Thread::ClearDebugInvokeReq() {
+ CHECK(Dbg::IsDebuggerActive());
+ CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
+ CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
+ // We do not own the DebugInvokeReq* so we must not delete it, it is the responsibility of
+ // the owner (the JDWP thread).
+ tlsPtr_.debug_invoke_req = nullptr;
+}
+
} // namespace art
diff --git a/runtime/thread.h b/runtime/thread.h
index e4c91b72b2..2e9ae3c42d 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -41,7 +41,6 @@
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
-#include "throw_location.h"
namespace art {
@@ -326,11 +325,7 @@ class Thread {
return tlsPtr_.exception != nullptr;
}
- mirror::Throwable* GetException(ThrowLocation* throw_location) const
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- if (throw_location != nullptr) {
- *throw_location = tlsPtr_.throw_location;
- }
+ mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return tlsPtr_.exception;
}
@@ -338,17 +333,15 @@ class Thread {
void AssertNoPendingException() const;
void AssertNoPendingExceptionForNewException(const char* msg) const;
- void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
+ void SetException(mirror::Throwable* new_exception)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
CHECK(new_exception != NULL);
// TODO: DCHECK(!IsExceptionPending());
tlsPtr_.exception = new_exception;
- tlsPtr_.throw_location = throw_location;
}
void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
tlsPtr_.exception = nullptr;
- tlsPtr_.throw_location.Clear();
}
// Find catch block and perform long jump to appropriate exception handle
@@ -370,8 +363,6 @@ class Thread {
bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method) {
tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
}
@@ -386,24 +377,19 @@ class Thread {
}
// If 'msg' is NULL, no detail message is set.
- void ThrowNewException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* msg)
+ void ThrowNewException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
// used as the new exception's cause.
- void ThrowNewWrappedException(const ThrowLocation& throw_location,
- const char* exception_class_descriptor,
- const char* msg)
+ void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ThrowNewExceptionF(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* fmt, ...)
- __attribute__((format(printf, 4, 5)))
+ void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
+ __attribute__((format(printf, 3, 4)))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void ThrowNewExceptionV(const ThrowLocation& throw_location,
- const char* exception_class_descriptor, const char* fmt, va_list ap)
+ void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// OutOfMemoryError is special, because we need to pre-allocate an instance.
@@ -713,6 +699,16 @@ class Thread {
return tlsPtr_.single_step_control;
}
+ // Indicates whether this thread is ready to invoke a method for debugging. This
+ // is only true if the thread has been suspended by a debug event.
+ bool IsReadyForDebugInvoke() const {
+ return tls32_.ready_for_debug_invoke;
+ }
+
+ void SetReadyForDebugInvoke(bool ready) {
+ tls32_.ready_for_debug_invoke = ready;
+ }
+
// Activates single step control for debugging. The thread takes the
// ownership of the given SingleStepControl*. It is deleted by a call
// to DeactivateSingleStepControl or upon thread destruction.
@@ -721,6 +717,17 @@ class Thread {
// Deactivates single step control for debugging.
void DeactivateSingleStepControl();
+ // Sets debug invoke request for debugging. When the thread is resumed,
+ // it executes the method described by this request then suspends itself.
+ // The thread does not take ownership of the given DebugInvokeReq*, it is
+ // owned by the JDWP thread which is waiting for the execution of the
+ // method.
+ void SetDebugInvokeReq(DebugInvokeReq* req);
+
+ // Clears debug invoke request for debugging. When the thread completes
+ // method invocation, it clears its debug invoke request, signals the
+ // JDWP thread and suspends itself.
+ void ClearDebugInvokeReq();
// Returns the fake exception used to activate deoptimization.
static mirror::Throwable* GetDeoptimizationException() {
@@ -972,7 +979,8 @@ class Thread {
explicit tls_32bit_sized_values(bool is_daemon) :
suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
- thread_exit_check_count(0), handling_signal_(false), suspended_at_suspend_check(false) {
+ thread_exit_check_count(0), handling_signal_(false), suspended_at_suspend_check(false),
+ ready_for_debug_invoke(false) {
}
union StateAndFlags state_and_flags;
@@ -1016,6 +1024,11 @@ class Thread {
// used to distinguish runnable threads that are suspended due to
// a normal suspend check from other threads.
bool32_t suspended_at_suspend_check;
+
+ // True if the thread has been suspended by a debugger event. This is
+ // used to invoke method from the debugger which is only allowed when
+ // the thread is suspended by an event.
+ bool32_t ready_for_debug_invoke;
} tls32_;
struct PACKED(8) tls_64bit_sized_values {
@@ -1034,7 +1047,7 @@ class Thread {
struct PACKED(4) tls_ptr_sized_values {
tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
- jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
+ jpeer(nullptr), stack_begin(nullptr), stack_size(0),
stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
@@ -1084,9 +1097,6 @@ class Thread {
// Size of the stack.
size_t stack_size;
- // The location the current exception was thrown from.
- ThrowLocation throw_location;
-
// Pointer to previous stack trace captured by sampling profiler.
std::vector<mirror::ArtMethod*>* stack_trace_sample;
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index d4c1e8c39c..ddfbebd595 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -860,7 +860,8 @@ void ThreadList::SuspendAllForDebugger() {
}
void ThreadList::SuspendSelfForDebugger() {
- Thread* self = Thread::Current();
+ Thread* const self = Thread::Current();
+ self->SetReadyForDebugInvoke(true);
// The debugger thread must not suspend itself due to debugger activity!
Thread* debug_thread = Dbg::GetDebugThread();
@@ -881,11 +882,10 @@ void ThreadList::SuspendSelfForDebugger() {
VLOG(threads) << *self << " self-suspending (debugger)";
// Tell JDWP we've completed invocation and are ready to suspend.
- DebugInvokeReq* pReq = self->GetInvokeReq();
- DCHECK(pReq != NULL);
- if (pReq->invoke_needed) {
- // Clear this before signaling.
- pReq->Clear();
+ DebugInvokeReq* const pReq = self->GetInvokeReq();
+ if (pReq != nullptr) {
+ // Clear debug invoke request before signaling.
+ self->ClearDebugInvokeReq();
VLOG(jdwp) << "invoke complete, signaling";
MutexLock mu(self, pReq->lock);
@@ -916,6 +916,7 @@ void ThreadList::SuspendSelfForDebugger() {
CHECK_EQ(self->GetSuspendCount(), 0);
}
+ self->SetReadyForDebugInvoke(false);
VLOG(threads) << *self << " self-reviving (debugger)";
}
diff --git a/runtime/throw_location.cc b/runtime/throw_location.cc
deleted file mode 100644
index 4d2aec088e..0000000000
--- a/runtime/throw_location.cc
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "throw_location.h"
-
-#include "mirror/art_method-inl.h"
-#include "mirror/class-inl.h"
-#include "mirror/object-inl.h"
-#include "utils.h"
-
-namespace art {
-
-std::string ThrowLocation::Dump() const {
- if (method_ != nullptr) {
- return StringPrintf("%s:%d", PrettyMethod(method_).c_str(),
- method_->GetLineNumFromDexPC(dex_pc_));
- } else {
- return "unknown throw location";
- }
-}
-
-void ThrowLocation::VisitRoots(RootCallback* visitor, void* arg) {
- if (this_object_ != nullptr) {
- visitor(&this_object_, arg, RootInfo(kRootVMInternal));
- DCHECK(this_object_ != nullptr);
- }
- if (method_ != nullptr) {
- visitor(reinterpret_cast<mirror::Object**>(&method_), arg, RootInfo(kRootVMInternal));
- DCHECK(method_ != nullptr);
- }
-}
-
-} // namespace art
diff --git a/runtime/throw_location.h b/runtime/throw_location.h
deleted file mode 100644
index bec0da490a..0000000000
--- a/runtime/throw_location.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_RUNTIME_THROW_LOCATION_H_
-#define ART_RUNTIME_THROW_LOCATION_H_
-
-#include "object_callbacks.h"
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "gc_root.h"
-
-#include <stdint.h>
-#include <string>
-
-namespace art {
-
-namespace mirror {
-class ArtMethod;
-class Object;
-} // mirror
-
-class PACKED(4) ThrowLocation {
- public:
- ThrowLocation() {
- Clear();
- }
-
- ThrowLocation(mirror::Object* throw_this_object, mirror::ArtMethod* throw_method,
- uint32_t throw_dex_pc) :
- this_object_(throw_this_object),
- method_(throw_method),
- dex_pc_(throw_dex_pc)
-#ifdef __LP64__
- , pad_(0)
-#endif
-
- {
-#ifdef __LP64__
- UNUSED(pad_);
-#endif
- }
-
- mirror::Object* GetThis() const {
- return this_object_;
- }
-
- mirror::ArtMethod* GetMethod() const {
- return method_;
- }
-
- uint32_t GetDexPc() const {
- return dex_pc_;
- }
-
- void Clear() {
- this_object_ = NULL;
- method_ = NULL;
- dex_pc_ = -1;
- }
-
- std::string Dump() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- void VisitRoots(RootCallback* visitor, void* arg);
-
- private:
- // The 'this' reference of the throwing method.
- mirror::Object* this_object_;
- // The throwing method.
- mirror::ArtMethod* method_;
- // The instruction within the throwing method.
- uint32_t dex_pc_;
- // Ensure 8byte alignment on 64bit.
-#ifdef __LP64__
- uint32_t pad_;
-#endif
-};
-
-} // namespace art
-
-#endif // ART_RUNTIME_THROW_LOCATION_H_
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 93b3877bf4..8833a85120 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -401,9 +401,8 @@ void Trace::Start(const char* trace_filename, int trace_fd, int buffer_size, int
void Trace::Stop() {
bool stop_alloc_counting = false;
- Runtime* runtime = Runtime::Current();
- runtime->GetThreadList()->SuspendAll();
- Trace* the_trace = NULL;
+ Runtime* const runtime = Runtime::Current();
+ Trace* the_trace = nullptr;
pthread_t sampling_pthread = 0U;
{
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
@@ -415,19 +414,27 @@ void Trace::Stop() {
sampling_pthread = sampling_pthread_;
}
}
- if (the_trace != NULL) {
+ // Make sure that we join before we delete the trace since we don't want to have
+ // the sampling thread access a stale pointer. This finishes since the sampling thread exits when
+ // the_trace_ is null.
+ if (sampling_pthread != 0U) {
+ CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
+ sampling_pthread_ = 0U;
+ }
+ runtime->GetThreadList()->SuspendAll();
+ if (the_trace != nullptr) {
stop_alloc_counting = (the_trace->flags_ & kTraceCountAllocs) != 0;
the_trace->FinishTracing();
if (the_trace->sampling_enabled_) {
MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, NULL);
+ runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
} else {
runtime->GetInstrumentation()->DisableMethodTracing();
- runtime->GetInstrumentation()->RemoveListener(the_trace,
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
+ runtime->GetInstrumentation()->RemoveListener(
+ the_trace, instrumentation::Instrumentation::kMethodEntered |
+ instrumentation::Instrumentation::kMethodExited |
+ instrumentation::Instrumentation::kMethodUnwind);
}
if (the_trace->trace_file_.get() != nullptr) {
// Do not try to erase, so flush and close explicitly.
@@ -441,15 +448,9 @@ void Trace::Stop() {
delete the_trace;
}
runtime->GetThreadList()->ResumeAll();
-
if (stop_alloc_counting) {
// Can be racy since SetStatsEnabled is not guarded by any locks.
- Runtime::Current()->SetStatsEnabled(false);
- }
-
- if (sampling_pthread != 0U) {
- CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, NULL), "sampling thread shutdown");
- sampling_pthread_ = 0U;
+ runtime->SetStatsEnabled(false);
}
}
@@ -619,11 +620,9 @@ void Trace::MethodUnwind(Thread* thread, mirror::Object* this_object ATTRIBUTE_U
thread_clock_diff, wall_clock_diff);
}
-void Trace::ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+void Trace::ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- UNUSED(thread, throw_location, catch_method, catch_dex_pc, exception_object);
+ UNUSED(thread, exception_object);
LOG(ERROR) << "Unexpected exception caught event in tracing";
}
diff --git a/runtime/trace.h b/runtime/trace.h
index 9ba30d5f8f..dd8186a2cb 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -95,9 +95,7 @@ class Trace FINAL : public instrumentation::InstrumentationListener {
mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
const JValue& field_value)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
- void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
- mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
- mirror::Throwable* exception_object)
+ void ExceptionCaught(Thread* thread, mirror::Throwable* exception_object)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
void BackwardBranch(Thread* thread, mirror::ArtMethod* method, int32_t dex_pc_offset)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) OVERRIDE;
diff --git a/runtime/transaction.cc b/runtime/transaction.cc
index c0fd7a5a0b..3b708f6d8f 100644
--- a/runtime/transaction.cc
+++ b/runtime/transaction.cc
@@ -75,8 +75,7 @@ void Transaction::ThrowInternalError(Thread* self, bool rethrow) {
CHECK(IsAborted()) << "Rethrow InternalError while transaction is not aborted";
}
std::string abort_msg(GetAbortMessage());
- self->ThrowNewException(self->GetCurrentLocationForThrow(), "Ljava/lang/InternalError;",
- abort_msg.c_str());
+ self->ThrowNewWrappedException("Ljava/lang/InternalError;", abort_msg.c_str());
}
bool Transaction::IsAborted() {
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 851ecebb05..8a23ff7233 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -1508,23 +1508,6 @@ std::string GetSystemImageFilename(const char* location, const InstructionSet is
return filename;
}
-std::string DexFilenameToOdexFilename(const std::string& location, const InstructionSet isa) {
- // location = /foo/bar/baz.jar
- // odex_location = /foo/bar/<isa>/baz.odex
- std::string odex_location(location);
- InsertIsaDirectory(isa, &odex_location);
- size_t dot_index = odex_location.rfind('.');
-
- // The location must have an extension, otherwise it's not clear what we
- // should return.
- CHECK_NE(dot_index, std::string::npos) << odex_location;
- CHECK_EQ(std::string::npos, odex_location.find('/', dot_index)) << odex_location;
-
- odex_location.resize(dot_index + 1);
- odex_location += "odex";
- return odex_location;
-}
-
bool IsZipMagic(uint32_t magic) {
return (('P' == ((magic >> 0) & 0xff)) &&
('K' == ((magic >> 8) & 0xff)));
diff --git a/runtime/utils.h b/runtime/utils.h
index 9d04d35e26..9a9f51a7bc 100644
--- a/runtime/utils.h
+++ b/runtime/utils.h
@@ -173,6 +173,24 @@ static inline uint32_t High32Bits(uint64_t value) {
return static_cast<uint32_t>(value >> 32);
}
+// Traits class providing an unsigned integer type of (byte) size `n`.
+template <size_t n>
+struct UnsignedIntegerType {
+ // No defined `type`.
+};
+
+template <>
+struct UnsignedIntegerType<1> { typedef uint8_t type; };
+
+template <>
+struct UnsignedIntegerType<2> { typedef uint16_t type; };
+
+template <>
+struct UnsignedIntegerType<4> { typedef uint32_t type; };
+
+template <>
+struct UnsignedIntegerType<8> { typedef uint64_t type; };
+
// Type identity.
template <typename T>
struct TypeIdentity {
@@ -271,6 +289,12 @@ static constexpr int CTZ(T x) {
}
template<typename T>
+static inline int WhichPowerOf2(T x) {
+ DCHECK((x != 0) && IsPowerOfTwo(x));
+ return CTZ(x);
+}
+
+template<typename T>
static constexpr int POPCOUNT(T x) {
return (sizeof(T) == sizeof(uint32_t))
? __builtin_popcount(x)
@@ -309,7 +333,7 @@ std::string PrintableString(const char* utf8);
// Tests whether 's' starts with 'prefix'.
bool StartsWith(const std::string& s, const char* prefix);
-// Tests whether 's' starts with 'suffix'.
+// Tests whether 's' ends with 'suffix'.
bool EndsWith(const std::string& s, const char* suffix);
// Used to implement PrettyClass, PrettyField, PrettyMethod, and PrettyTypeOf,
@@ -516,12 +540,6 @@ std::string GetDalvikCacheFilenameOrDie(const char* file_location,
// Returns the system location for an image
std::string GetSystemImageFilename(const char* location, InstructionSet isa);
-// Returns an .odex file name adjacent to the dex location.
-// For example, for "/foo/bar/baz.jar", return "/foo/bar/<isa>/baz.odex".
-// The dex location must include a directory component and have an extension.
-// Note: does not support multidex location strings.
-std::string DexFilenameToOdexFilename(const std::string& location, InstructionSet isa);
-
// Check whether the given magic matches a known file type.
bool IsZipMagic(uint32_t magic);
bool IsDexMagic(uint32_t magic);
diff --git a/runtime/utils_test.cc b/runtime/utils_test.cc
index 5465762fd9..6b36c192e8 100644
--- a/runtime/utils_test.cc
+++ b/runtime/utils_test.cc
@@ -371,13 +371,6 @@ TEST_F(UtilsTest, GetSystemImageFilename) {
GetSystemImageFilename("/system/framework/boot.art", kArm).c_str());
}
-TEST_F(UtilsTest, DexFilenameToOdexFilename) {
- EXPECT_STREQ("/foo/bar/arm/baz.odex",
- DexFilenameToOdexFilename("/foo/bar/baz.jar", kArm).c_str());
- EXPECT_STREQ("/foo/bar/arm/baz.odex",
- DexFilenameToOdexFilename("/foo/bar/baz.funnyext", kArm).c_str());
-}
-
TEST_F(UtilsTest, ExecSuccess) {
std::vector<std::string> command;
if (kIsTargetBuild) {
diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc
index 87a29ed2f2..b3f686d5c1 100644
--- a/runtime/verifier/method_verifier.cc
+++ b/runtime/verifier/method_verifier.cc
@@ -513,7 +513,7 @@ mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(uint32_t dex_pc) {
}
const Instruction* inst = Instruction::At(code_item_->insns_ + dex_pc);
const bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- return GetQuickInvokedMethod(inst, register_line, is_range);
+ return GetQuickInvokedMethod(inst, register_line, is_range, false);
}
bool MethodVerifier::Verify() {
@@ -572,6 +572,17 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
// If we fail again at runtime, mark that this instruction would throw and force this
// method to be executed using the interpreter with checks.
have_pending_runtime_throw_failure_ = true;
+
+ // We need to save the work_line if the instruction wasn't throwing before. Otherwise we'll
+ // try to merge garbage.
+ // Note: this assumes that Fail is called before we do any work_line modifications.
+ const uint16_t* insns = code_item_->insns_ + work_insn_idx_;
+ const Instruction* inst = Instruction::At(insns);
+ int opcode_flags = Instruction::FlagsOf(inst->Opcode());
+
+ if ((opcode_flags & Instruction::kThrow) == 0 && CurrentInsnFlags()->IsInTry()) {
+ saved_line_->CopyFromLine(work_line_.get());
+ }
}
break;
// Indication that verification should be retried at runtime.
@@ -3431,10 +3442,14 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgs(const Instruction* inst,
}
mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst,
- RegisterLine* reg_line, bool is_range) {
- DCHECK(inst->Opcode() == Instruction::INVOKE_VIRTUAL_QUICK ||
- inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
- const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range);
+ RegisterLine* reg_line, bool is_range,
+ bool allow_failure) {
+ if (is_range) {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_RANGE_QUICK);
+ } else {
+ DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_QUICK);
+ }
+ const RegType& actual_arg_type = reg_line->GetInvocationThis(this, inst, is_range, allow_failure);
if (!actual_arg_type.HasClass()) {
VLOG(verifier) << "Failed to get mirror::Class* from '" << actual_arg_type << "'";
return nullptr;
@@ -3445,29 +3460,29 @@ mirror::ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst
// Derive Object.class from Class.class.getSuperclass().
mirror::Class* object_klass = klass->GetClass()->GetSuperClass();
if (FailOrAbort(this, object_klass->IsObjectClass(),
- "Failed to find Object class in quickened invoke receiver",
- work_insn_idx_)) {
+ "Failed to find Object class in quickened invoke receiver", work_insn_idx_)) {
return nullptr;
}
dispatch_class = object_klass;
} else {
dispatch_class = klass;
}
- if (FailOrAbort(this, dispatch_class->HasVTable(),
- "Receiver class has no vtable for quickened invoke at ",
- work_insn_idx_)) {
+ if (!dispatch_class->HasVTable()) {
+ FailOrAbort(this, allow_failure, "Receiver class has no vtable for quickened invoke at ",
+ work_insn_idx_);
return nullptr;
}
uint16_t vtable_index = is_range ? inst->VRegB_3rc() : inst->VRegB_35c();
- if (FailOrAbort(this, static_cast<int32_t>(vtable_index) < dispatch_class->GetVTableLength(),
- "Receiver class has not enough vtable slots for quickened invoke at ",
- work_insn_idx_)) {
+ if (static_cast<int32_t>(vtable_index) >= dispatch_class->GetVTableLength()) {
+ FailOrAbort(this, allow_failure,
+ "Receiver class has not enough vtable slots for quickened invoke at ",
+ work_insn_idx_);
return nullptr;
}
mirror::ArtMethod* res_method = dispatch_class->GetVTableEntry(vtable_index);
- if (FailOrAbort(this, !self_->IsExceptionPending(),
- "Unexpected exception pending for quickened invoke at ",
- work_insn_idx_)) {
+ if (self_->IsExceptionPending()) {
+ FailOrAbort(this, allow_failure, "Unexpected exception pending for quickened invoke at ",
+ work_insn_idx_);
return nullptr;
}
return res_method;
@@ -3478,8 +3493,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_)
<< PrettyMethod(dex_method_idx_, *dex_file_, true) << "@" << work_insn_idx_;
- mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(),
- is_range);
+ mirror::ArtMethod* res_method = GetQuickInvokedMethod(inst, work_line_.get(), is_range, false);
if (res_method == nullptr) {
Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name();
return nullptr;
diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h
index bdd62596a6..d7c2071cbc 100644
--- a/runtime/verifier/method_verifier.h
+++ b/runtime/verifier/method_verifier.h
@@ -244,7 +244,7 @@ class MethodVerifier {
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the method of a quick invoke or nullptr if it cannot be found.
mirror::ArtMethod* GetQuickInvokedMethod(const Instruction* inst, RegisterLine* reg_line,
- bool is_range)
+ bool is_range, bool allow_failure)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns the access field of a quick field access (iget/iput-quick) or nullptr
// if it cannot be found.
diff --git a/runtime/verifier/register_line.cc b/runtime/verifier/register_line.cc
index 3b098718db..ed588fcdac 100644
--- a/runtime/verifier/register_line.cc
+++ b/runtime/verifier/register_line.cc
@@ -81,18 +81,23 @@ bool RegisterLine::CheckConstructorReturn(MethodVerifier* verifier) const {
}
const RegType& RegisterLine::GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
- bool is_range) {
+ bool is_range, bool allow_failure) {
const size_t args_count = is_range ? inst->VRegA_3rc() : inst->VRegA_35c();
if (args_count < 1) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+ if (!allow_failure) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invoke lacks 'this'";
+ }
return verifier->GetRegTypeCache()->Conflict();
}
/* Get the element type of the array held in vsrc */
const uint32_t this_reg = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
const RegType& this_type = GetRegisterType(verifier, this_reg);
if (!this_type.IsReferenceTypes()) {
- verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "tried to get class from non-reference register v"
- << this_reg << " (type=" << this_type << ")";
+ if (!allow_failure) {
+ verifier->Fail(VERIFY_ERROR_BAD_CLASS_HARD)
+ << "tried to get class from non-reference register v" << this_reg
+ << " (type=" << this_type << ")";
+ }
return verifier->GetRegTypeCache()->Conflict();
}
return this_type;
diff --git a/runtime/verifier/register_line.h b/runtime/verifier/register_line.h
index ca61a0b8f0..376dbf1fad 100644
--- a/runtime/verifier/register_line.h
+++ b/runtime/verifier/register_line.h
@@ -188,9 +188,11 @@ class RegisterLine {
*
* The argument count is in vA, and the first argument is in vC, for both "simple" and "range"
* versions. We just need to make sure vA is >= 1 and then return vC.
+ * allow_failure will return Conflict() instead of causing a verification failure if there is an
+ * error.
*/
const RegType& GetInvocationThis(MethodVerifier* verifier, const Instruction* inst,
- bool is_range)
+ bool is_range, bool allow_failure = false)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
/*