1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
|
/*
* Copyright (C) 2011 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_
#include <jni.h>
#include <stdio.h>
#include <iosfwd>
#include <set>
#include <string>
#include <utility>
#include <memory>
#include <vector>
#include "base/file_utils.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc/space/image_space_loading_order.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"
namespace art {
namespace gc {
class AbstractSystemWeakHolder;
class Heap;
} // namespace gc
namespace hiddenapi {
enum class EnforcementPolicy;
} // namespace hiddenapi
namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
} // namespace jit
namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
} // namespace mirror
namespace ti {
class Agent;
class AgentSpec;
} // namespace ti
namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
} // namespace verifier
class ArenaPool;
class ArtMethod;
enum class CalleeSaveType: uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
enum class InstructionSet;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileAssistantTest;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;
// Raw runtime options: (option string, opaque extra-info pointer) pairs, as
// consumed by Runtime::ParseOptions() and Runtime::Create().
using RuntimeOptions = std::vector<std::pair<std::string, const void*>>;
class Runtime {
public:
// Parse raw runtime options.
static bool ParseOptions(const RuntimeOptions& raw_options,
bool ignore_unrecognized,
RuntimeArgumentMap* runtime_options);
// Creates and initializes a new runtime.
static bool Create(RuntimeArgumentMap&& runtime_options)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
// Creates and initializes a new runtime.
static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
// IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
// True when this runtime hosts a compiler (compiler_callbacks_ is set, see
// IsCompiler()) but JIT compilation is disabled — i.e. an ahead-of-time run.
bool IsAotCompiler() const {
return !UseJitCompilation() && IsCompiler();
}
// IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
bool IsCompiler() const {
return compiler_callbacks_ != nullptr;
}
// If a compiler, are we compiling a boot image?
bool IsCompilingBootImage() const;
bool CanRelocate() const;
bool ShouldRelocate() const {
return must_relocate_ && CanRelocate();
}
bool MustRelocateIfPossible() const {
return must_relocate_;
}
bool IsImageDex2OatEnabled() const {
return image_dex2oat_enabled_;
}
CompilerCallbacks* GetCompilerCallbacks() {
return compiler_callbacks_;
}
void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
CHECK(callbacks != nullptr);
compiler_callbacks_ = callbacks;
}
bool IsZygote() const {
return is_zygote_;
}
bool IsSystemServer() const {
return is_system_server_;
}
void SetSystemServer(bool value) {
is_system_server_ = value;
}
bool IsExplicitGcDisabled() const {
return is_explicit_gc_disabled_;
}
std::string GetCompilerExecutable() const;
const std::vector<std::string>& GetCompilerOptions() const {
return compiler_options_;
}
void AddCompilerOption(const std::string& option) {
compiler_options_.push_back(option);
}
const std::vector<std::string>& GetImageCompilerOptions() const {
return image_compiler_options_;
}
const std::string& GetImageLocation() const {
return image_location_;
}
bool IsUsingApexBootImageLocation() const {
return is_using_apex_boot_image_location_;
}
// Starts a runtime, which may cause threads to be started and code to run.
bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);
bool IsShuttingDown(Thread* self);
bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
return shutting_down_;
}
size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
return threads_being_born_;
}
void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
threads_being_born_++;
}
void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);
bool IsStarted() const {
return started_;
}
bool IsFinishedStarting() const {
return finished_starting_;
}
void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
static Runtime* Current() {
return instance_;
}
// Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
// callers should prefer.
NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);
// Returns the "main" ThreadGroup, used when attaching user threads.
jobject GetMainThreadGroup() const;
// Returns the "system" ThreadGroup, used when attaching our internal threads.
jobject GetSystemThreadGroup() const;
// Returns the system ClassLoader which represents the CLASSPATH.
jobject GetSystemClassLoader() const;
// Attaches the calling native thread to the runtime.
bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
bool create_peer);
void CallExitHook(jint status);
// Detaches the current native thread from the runtime.
void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);
void DumpDeoptimizations(std::ostream& os);
void DumpForSigQuit(std::ostream& os);
void DumpLockHolders(std::ostream& os);
~Runtime();
const std::vector<std::string>& GetBootClassPath() const {
return boot_class_path_;
}
// Returns boot_class_path_locations_ when non-empty, otherwise falls back to
// boot_class_path_ itself. The DCHECK enforces the invariant that, when
// locations are provided, they correspond one-to-one with the boot class path.
const std::vector<std::string>& GetBootClassPathLocations() const {
DCHECK(boot_class_path_locations_.empty() ||
boot_class_path_locations_.size() == boot_class_path_.size());
return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
}
const std::string& GetClassPathString() const {
return class_path_string_;
}
ClassLinker* GetClassLinker() const {
return class_linker_;
}
size_t GetDefaultStackSize() const {
return default_stack_size_;
}
unsigned int GetFinalizerTimeoutMs() const {
return finalizer_timeout_ms_;
}
gc::Heap* GetHeap() const {
return heap_;
}
InternTable* GetInternTable() const {
DCHECK(intern_table_ != nullptr);
return intern_table_;
}
JavaVMExt* GetJavaVM() const {
return java_vm_.get();
}
size_t GetMaxSpinsBeforeThinLockInflation() const {
return max_spins_before_thin_lock_inflation_;
}
MonitorList* GetMonitorList() const {
return monitor_list_;
}
MonitorPool* GetMonitorPool() const {
return monitor_pool_;
}
// Is the given object the special object used to mark a cleared JNI weak global?
bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);
// Get the special object used to mark a cleared JNI weak global.
mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
REQUIRES_SHARED(Locks::mutator_lock_);
const std::vector<std::string>& GetProperties() const {
return properties_;
}
ThreadList* GetThreadList() const {
return thread_list_;
}
static const char* GetVersion() {
return "2.1.0";
}
bool IsMethodHandlesEnabled() const {
return true;
}
void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
// broadcast_for_checkpoint is true when we broadcast to make blocking threads respond to
// checkpoint requests. It is false when we broadcast to unblock blocking threads after
// system weak access is re-enabled.
void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);
// Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
// clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Visit image roots, only used for hprof since the GC uses the image space mod union table
// instead.
void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the roots we can safely visit concurrently.
void VisitConcurrentRoots(RootVisitor* visitor,
VisitRootFlags flags = kVisitRootFlagAllRoots)
REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all of the non thread roots, we can do this with mutators unpaused.
void VisitNonThreadRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
void VisitTransactionRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
// system weak is updated to the visitor's returned value.
void SweepSystemWeaks(IsMarkedVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime method resolution
ArtMethod* GetResolutionMethod();
bool HasResolutionMethod() const {
return resolution_method_ != nullptr;
}
void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
void ClearResolutionMethod() {
resolution_method_ = nullptr;
}
ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a special method that calls into a trampoline for runtime imt conflicts.
ArtMethod* GetImtConflictMethod();
ArtMethod* GetImtUnimplementedMethod();
bool HasImtConflictMethod() const {
return imt_conflict_method_ != nullptr;
}
void ClearImtConflictMethod() {
imt_conflict_method_ = nullptr;
}
void FixupConflictTables();
void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
REQUIRES_SHARED(Locks::mutator_lock_);
void ClearImtUnimplementedMethod() {
imt_unimplemented_method_ = nullptr;
}
bool HasCalleeSaveMethod(CalleeSaveType type) const {
return callee_save_methods_[static_cast<size_t>(type)] != 0u;
}
ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
REQUIRES_SHARED(Locks::mutator_lock_);
QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
REQUIRES_SHARED(Locks::mutator_lock_);
// Byte offset of the callee-save method slot for |type| within Runtime.
// The slots are uint64_t (see callee_save_methods_) so 32- and 64-bit targets
// share the same offsets; constexpr so the value is usable as a compile-time
// constant.
static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
}
InstructionSet GetInstructionSet() const {
return instruction_set_;
}
void SetInstructionSet(InstructionSet instruction_set);
void ClearInstructionSet();
void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
void ClearCalleeSaveMethods();
ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);
int32_t GetStat(int kind);
RuntimeStats* GetStats() {
return &stats_;
}
bool HasStatsEnabled() const {
return stats_enabled_;
}
void ResetStats(int kinds);
void SetStatsEnabled(bool new_state)
REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);
enum class NativeBridgeAction { // private
kUnload,
kInitialize
};
jit::Jit* GetJit() const {
return jit_.get();
}
// Returns true if JIT compilations are enabled. GetJit() will be not null in this case.
bool UseJitCompilation() const;
void PreZygoteFork();
void PostZygoteFork();
void InitNonZygoteOrPostFork(
JNIEnv* env,
bool is_system_server,
NativeBridgeAction action,
const char* isa,
bool profile_system_server = false);
const instrumentation::Instrumentation* GetInstrumentation() const {
return &instrumentation_;
}
instrumentation::Instrumentation* GetInstrumentation() {
return &instrumentation_;
}
void RegisterAppInfo(const std::vector<std::string>& code_paths,
const std::string& profile_output_filename);
// Transaction support.
bool IsActiveTransaction() const;
void EnterTransactionMode();
void EnterTransactionMode(bool strict, mirror::Class* root);
void ExitTransactionMode();
void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
// Transaction rollback and exiting transaction mode are always done together, so it is
// convenient to do them in one function.
void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
bool IsTransactionAborted() const;
const std::unique_ptr<Transaction>& GetTransaction() const;
bool IsActiveStrictTransactionMode() const;
void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
REQUIRES_SHARED(Locks::mutator_lock_);
void ThrowTransactionAbortError(Thread* self)
REQUIRES_SHARED(Locks::mutator_lock_);
void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
bool is_volatile) const;
void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
bool is_volatile) const;
void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
bool is_volatile) const;
void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
bool is_volatile) const;
void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
bool is_volatile) const;
void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
bool is_volatile) const;
void RecordWriteFieldReference(mirror::Object* obj,
MemberOffset field_offset,
ObjPtr<mirror::Object> value,
bool is_volatile) const
REQUIRES_SHARED(Locks::mutator_lock_);
void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
REQUIRES_SHARED(Locks::mutator_lock_);
void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
REQUIRES(Locks::intern_table_lock_);
void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
REQUIRES(Locks::intern_table_lock_);
void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
REQUIRES(Locks::intern_table_lock_);
void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
REQUIRES_SHARED(Locks::mutator_lock_);
void SetFaultMessage(const std::string& message);
void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;
bool ExplicitStackOverflowChecks() const {
return !implicit_so_checks_;
}
void DisableVerifier();
bool IsVerificationEnabled() const;
bool IsVerificationSoftFail() const;
void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
hidden_api_policy_ = policy;
}
hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
return hidden_api_policy_;
}
void SetCorePlatformApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
core_platform_api_policy_ = policy;
}
hiddenapi::EnforcementPolicy GetCorePlatformApiEnforcementPolicy() const {
return core_platform_api_policy_;
}
void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
hidden_api_exemptions_ = exemptions;
}
const std::vector<std::string>& GetHiddenApiExemptions() {
return hidden_api_exemptions_;
}
void SetDedupeHiddenApiWarnings(bool value) {
dedupe_hidden_api_warnings_ = value;
}
bool ShouldDedupeHiddenApiWarnings() {
return dedupe_hidden_api_warnings_;
}
void SetHiddenApiEventLogSampleRate(uint32_t rate) {
hidden_api_access_event_log_rate_ = rate;
}
uint32_t GetHiddenApiEventLogSampleRate() const {
return hidden_api_access_event_log_rate_;
}
const std::string& GetProcessPackageName() const {
return process_package_name_;
}
// Records the package name of the current process; passing null resets the
// stored name to the empty string.
void SetProcessPackageName(const char* package_name) {
  process_package_name_ =
      (package_name == nullptr) ? std::string() : std::string(package_name);
}
const std::string& GetProcessDataDirectory() const {
return process_data_directory_;
}
// Records the data directory of the current process; passing null resets the
// stored path to the empty string.
void SetProcessDataDirectory(const char* data_dir) {
  process_data_directory_ =
      (data_dir == nullptr) ? std::string() : std::string(data_dir);
}
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
}
const std::vector<std::string>& GetCpuAbilist() const {
return cpu_abilist_;
}
bool IsRunningOnMemoryTool() const {
return is_running_on_memory_tool_;
}
void SetTargetSdkVersion(uint32_t version) {
target_sdk_version_ = version;
}
uint32_t GetTargetSdkVersion() const {
return target_sdk_version_;
}
uint32_t GetZygoteMaxFailedBoots() const {
return zygote_max_failed_boots_;
}
bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
}
void CreateJitCodeCache(bool rwx_memory_allowed);
// Create the JIT and instrumentation and code cache.
void CreateJit();
ArenaPool* GetArenaPool() {
return arena_pool_.get();
}
ArenaPool* GetJitArenaPool() {
return jit_arena_pool_.get();
}
const ArenaPool* GetArenaPool() const {
return arena_pool_.get();
}
void ReclaimArenaPoolMemory();
LinearAlloc* GetLinearAlloc() {
return linear_alloc_.get();
}
jit::JitOptions* GetJITOptions() {
return jit_options_.get();
}
bool IsJavaDebuggable() const {
return is_java_debuggable_;
}
void SetJavaDebuggable(bool value);
// Deoptimize the boot image, called for Java debuggable apps.
void DeoptimizeBootImage() REQUIRES(Locks::mutator_lock_);
bool IsNativeDebuggable() const {
return is_native_debuggable_;
}
void SetNativeDebuggable(bool value) {
is_native_debuggable_ = value;
}
bool AreNonStandardExitsEnabled() const {
return non_standard_exits_enabled_;
}
void SetNonStandardExitsEnabled() {
DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
}
bool AreAsyncExceptionsThrown() const {
return async_exceptions_thrown_;
}
void SetAsyncExceptionsThrown() {
DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
}
// Change state and re-check which interpreter should be used.
//
// This must be called whenever there is an event that forces
// us to use a different interpreter (e.g. debugger is attached).
//
// Changing the state using the lambda gives us some multithreading safety.
// It ensures that two calls do not interfere with each other and
// it makes it possible to DCHECK that the thread-local flag is correct.
template<typename Action>
static void DoAndMaybeSwitchInterpreter(Action lambda);
// Returns the build fingerprint, if set. Otherwise an empty string is returned.
std::string GetFingerprint() {
return fingerprint_;
}
// Called from class linker.
void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
// For testing purpose only.
// TODO: Remove this when this is no longer needed (b/116087961).
GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);
// Create a normal LinearAlloc or low 4gb version if we are 64 bit AOT compiler.
LinearAlloc* CreateLinearAlloc();
OatFileManager& GetOatFileManager() const {
DCHECK(oat_file_manager_ != nullptr);
return *oat_file_manager_;
}
double GetHashTableMinLoadFactor() const;
double GetHashTableMaxLoadFactor() const;
bool IsSafeMode() const {
return safe_mode_;
}
void SetSafeMode(bool mode) {
safe_mode_ = mode;
}
bool GetDumpNativeStackOnSigQuit() const {
return dump_native_stack_on_sig_quit_;
}
bool GetPrunedDalvikCache() const {
return pruned_dalvik_cache_;
}
void SetPrunedDalvikCache(bool pruned) {
pruned_dalvik_cache_ = pruned;
}
void UpdateProcessState(ProcessState process_state);
// Returns true if we currently care about long mutator pause.
bool InJankPerceptibleProcessState() const {
return process_state_ == kProcessStateJankPerceptible;
}
void RegisterSensitiveThread() const;
void SetZygoteNoThreadSection(bool val) {
zygote_no_threads_ = val;
}
bool IsZygoteNoThreadSection() const {
return zygote_no_threads_;
}
// Returns if the code can be deoptimized asynchronously. Code may be compiled with some
// optimization that makes it impossible to deoptimize.
bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);
// Returns a saved copy of the environment (getenv/setenv values).
// Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
char** GetEnvSnapshot() const {
return env_snapshot_.GetSnapshot();
}
void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);
const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
return agents_;
}
RuntimeCallbacks* GetRuntimeCallbacks();
bool HasLoadedPlugins() const {
return !plugins_.empty();
}
void InitThreadGroups(Thread* self);
void SetDumpGCPerformanceOnShutdown(bool value) {
dump_gc_performance_on_shutdown_ = value;
}
bool GetDumpGCPerformanceOnShutdown() const {
return dump_gc_performance_on_shutdown_;
}
// Bumps the per-kind deoptimization counter. |kind| must be a valid
// DeoptimizationKind (<= kLast, DCHECKed).
void IncrementDeoptimizationCount(DeoptimizationKind kind) {
DCHECK_LE(kind, DeoptimizationKind::kLast);
deoptimization_counts_[static_cast<size_t>(kind)]++;
}
// Returns the total number of deoptimizations recorded, summed over every
// DeoptimizationKind bucket.
uint32_t GetNumberOfDeoptimizations() const {
  constexpr size_t kNumKinds = static_cast<size_t>(DeoptimizationKind::kLast) + 1;
  uint32_t total = 0;
  for (size_t kind = 0; kind != kNumKinds; ++kind) {
    total += deoptimization_counts_[kind];
  }
  return total;
}
// Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
// This is beneficial for low RAM devices since it reduces page cache thrashing.
bool MAdviseRandomAccess() const {
return madvise_random_access_;
}
const std::string& GetJdwpOptions() {
return jdwp_options_;
}
JdwpProvider GetJdwpProvider() const {
return jdwp_provider_;
}
uint32_t GetVerifierLoggingThresholdMs() const {
return verifier_logging_threshold_ms_;
}
// Atomically delete the thread pool if the reference count is 0.
bool DeleteThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
// Wait for all the thread workers to be attached.
void WaitForThreadPoolWorkersToStart() REQUIRES(!Locks::runtime_thread_pool_lock_);
// Scoped usage of the runtime thread pool. Prevents the pool from being
// deleted. Note that the thread pool is only for startup and gets deleted after.
class ScopedThreadPoolUsage {
public:
// Defined out of line; presumably pins the pool via AcquireThreadPool — confirm in the .cc.
ScopedThreadPoolUsage();
// Defined out of line; presumably drops the pin via ReleaseThreadPool — confirm in the .cc.
~ScopedThreadPoolUsage();
// Return the thread pool.
ThreadPool* GetThreadPool() const {
return thread_pool_;
}
private:
// Pointer captured at construction; const so it cannot be reseated within the scope.
ThreadPool* const thread_pool_;
};
bool LoadAppImageStartupCache() const {
return load_app_image_startup_cache_;
}
void SetLoadAppImageStartupCacheEnabled(bool enabled) {
load_app_image_startup_cache_ = enabled;
}
// Notify the runtime that application startup is considered completed. Only has effect for the
// first call.
void NotifyStartupCompleted();
// Return true if startup is already completed.
bool GetStartupCompleted() const;
gc::space::ImageSpaceLoadingOrder GetImageSpaceLoadingOrder() const {
return image_space_loading_order_;
}
private:
static void InitPlatformSignalHandlers();
Runtime();
void BlockSignals();
bool Init(RuntimeArgumentMap&& runtime_options)
SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
void RegisterRuntimeNativeMethods(JNIEnv* env);
void StartDaemonThreads();
void StartSignalCatcher();
void MaybeSaveJitProfilingInfo();
// Visit all of the thread roots.
void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES_SHARED(Locks::mutator_lock_);
// Visit all other roots which must be done with mutators suspended.
void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
REQUIRES_SHARED(Locks::mutator_lock_);
// Constant roots are the roots which never change after the runtime is initialized, they only
// need to be visited once per GC cycle.
void VisitConstantRoots(RootVisitor* visitor)
REQUIRES_SHARED(Locks::mutator_lock_);
// Note: To be lock-free, GetFaultMessage temporarily replaces the lock message with null.
// As such, there is a window where a call will return an empty string. In general,
// only aborting code should retrieve this data (via GetFaultMessageForAbortLogging
// friend).
std::string GetFaultMessage();
ThreadPool* AcquireThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
void ReleaseThreadPool() REQUIRES(!Locks::runtime_thread_pool_lock_);
// A pointer to the active runtime or null.
static Runtime* instance_;
// NOTE: these must match the gc::ProcessState values as they come directly from the framework.
static constexpr int kProfileForground = 0;
static constexpr int kProfileBackground = 1;
static constexpr uint32_t kCalleeSaveSize = 6u;
// 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
uint64_t callee_save_methods_[kCalleeSaveSize];
// Pre-allocated exceptions (see Runtime::Init).
GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
ArtMethod* resolution_method_;
ArtMethod* imt_conflict_method_;
// Unresolved method has the same behavior as the conflict method, it is used by the class linker
// for differentiating between unfilled imt slots vs conflict slots in superclasses.
ArtMethod* imt_unimplemented_method_;
// Special sentinel object used to mark invalid conditions in JNI (cleared weak references)
// and JDWP (invalid references).
GcRoot<mirror::Object> sentinel_;
InstructionSet instruction_set_;
CompilerCallbacks* compiler_callbacks_;
bool is_zygote_;
bool is_system_server_;
bool must_relocate_;
bool is_concurrent_gc_enabled_;
bool is_explicit_gc_disabled_;
bool image_dex2oat_enabled_;
std::string compiler_executable_;
std::vector<std::string> compiler_options_;
std::vector<std::string> image_compiler_options_;
std::string image_location_;
bool is_using_apex_boot_image_location_;
std::vector<std::string> boot_class_path_;
std::vector<std::string> boot_class_path_locations_;
std::string class_path_string_;
std::vector<std::string> properties_;
std::list<ti::AgentSpec> agent_specs_;
std::list<std::unique_ptr<ti::Agent>> agents_;
std::vector<Plugin> plugins_;
// The default stack size for managed threads created by the runtime.
size_t default_stack_size_;
// Finalizers running for longer than this many milliseconds abort the runtime.
unsigned int finalizer_timeout_ms_;
gc::Heap* heap_;
std::unique_ptr<ArenaPool> jit_arena_pool_;
std::unique_ptr<ArenaPool> arena_pool_;
// Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
// compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
// since the field arrays are int arrays in this case.
std::unique_ptr<ArenaPool> low_4gb_arena_pool_;
// Shared linear alloc for now.
std::unique_ptr<LinearAlloc> linear_alloc_;
// The number of spins that are done before thread suspension is used to forcibly inflate.
size_t max_spins_before_thin_lock_inflation_;
MonitorList* monitor_list_;
MonitorPool* monitor_pool_;
ThreadList* thread_list_;
InternTable* intern_table_;
ClassLinker* class_linker_;
SignalCatcher* signal_catcher_;
std::unique_ptr<JavaVMExt> java_vm_;
std::unique_ptr<jit::Jit> jit_;
std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
std::unique_ptr<jit::JitOptions> jit_options_;
// Runtime thread pool. The pool is only for startup and gets deleted after.
std::unique_ptr<ThreadPool> thread_pool_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
size_t thread_pool_ref_count_ GUARDED_BY(Locks::runtime_thread_pool_lock_);
// Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
// lock-free, so needs to be atomic.
std::atomic<std::string*> fault_message_;
// A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
// the shutdown lock so that threads aren't born while we're shutting down.
size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);
// Waited upon until no threads are being born.
std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);
// Set when runtime shutdown is past the point that new threads may attach.
bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);
// The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);
bool started_;
// Tells us whether the runtime has finished starting. If
// this flag is set then the daemon threads and the system class loader
// have been created. This flag is needed for knowing if it's safe to request CMS.
bool finished_starting_;
// Hooks supported by JNI_CreateJavaVM
jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
void (*exit_)(jint status);
void (*abort_)();
bool stats_enabled_;
RuntimeStats stats_;
const bool is_running_on_memory_tool_;
std::unique_ptr<TraceConfig> trace_config_;
instrumentation::Instrumentation instrumentation_;
jobject main_thread_group_;
jobject system_thread_group_;
// As returned by ClassLoader.getSystemClassLoader().
jobject system_class_loader_;
// If true, then we dump the GC cumulative timings on shutdown.
bool dump_gc_performance_on_shutdown_;
// Transactions used for pre-initializing classes at compilation time.
// To support nested transactions, a list containing all transactions is maintained.
// Transactions are handled under a stack discipline. Because the GC needs to visit
// all transactions, a list is used as the underlying data structure instead of a stack.
std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;
// If kNone, verification is disabled. kEnable by default.
verifier::VerifyMode verify_;
// If true, the runtime may use dex files directly with the interpreter if an oat file is not
// available/usable.
bool allow_dex_file_fallback_;
// List of supported cpu abis.
std::vector<std::string> cpu_abilist_;
// Specifies target SDK version to allow workarounds for certain API levels.
uint32_t target_sdk_version_;
// Implicit checks flags.
bool implicit_null_checks_; // NullPointer checks are implicit.
bool implicit_so_checks_; // StackOverflow checks are implicit.
bool implicit_suspend_checks_; // Thread suspension checks are implicit.
// Whether or not the sig chain (and implicitly the fault handler) should be
// disabled. Tools like dex2oat don't need them. This enables
// building a statically link version of dex2oat.
bool no_sig_chain_;
// Force the use of native bridge even if the app ISA matches the runtime ISA.
bool force_native_bridge_;
// Whether or not a native bridge has been loaded.
//
// The native bridge allows running native code compiled for a foreign ISA. The way it works is,
// if standard dlopen fails to load native library associated with native activity, it calls to
// the native bridge to load it and then gets the trampoline for the entry to native activity.
//
// The option 'native_bridge_library_filename' specifies the name of the native bridge.
// When non-empty the native bridge will be loaded from the given file. An empty value means
// that there's no native bridge.
bool is_native_bridge_loaded_;
// Whether we are running under native debugger.
bool is_native_debuggable_;
// Whether or not any async exceptions have ever been thrown. This is used to speed up the
// MterpShouldSwitchInterpreters function.
bool async_exceptions_thrown_;
// Whether anything is going to be using the shadow-frame APIs to force a function to return
// early. Doing this requires that (1) we be debuggable and (2) that mterp is exited.
bool non_standard_exits_enabled_;
// Whether Java code needs to be debuggable.
bool is_java_debuggable_;
// The maximum number of failed boots we allow before pruning the dalvik cache
// and trying again. This option is only inspected when we're running as a
// zygote.
uint32_t zygote_max_failed_boots_;
// Enable experimental opcodes that aren't fully specified yet. The intent is to
// eventually publish them as public-usable opcodes, but they aren't ready yet.
//
// Experimental opcodes should not be used by other production code.
ExperimentalFlags experimental_flags_;
// Contains the build fingerprint, if given as a parameter.
std::string fingerprint_;
// Oat file manager, keeps track of what oat files are open.
OatFileManager* oat_file_manager_;
// Whether or not we are on a low RAM device.
bool is_low_memory_mode_;
// Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
// This is beneficial for low RAM devices since it reduces page cache thrashing.
bool madvise_random_access_;
// Whether the application should run in safe mode, that is, interpreter only.
bool safe_mode_;
// Whether access checks on hidden API should be performed.
hiddenapi::EnforcementPolicy hidden_api_policy_;
// Whether access checks on core platform API should be performed.
hiddenapi::EnforcementPolicy core_platform_api_policy_;
// List of signature prefixes of methods that have been removed from the blacklist, and treated
// as if whitelisted.
std::vector<std::string> hidden_api_exemptions_;
// Do not warn about the same hidden API access violation twice.
// This is only used for testing.
bool dedupe_hidden_api_warnings_;
// How often to log hidden API access to the event log. An integer between 0
// (never) and 0x10000 (always).
uint32_t hidden_api_access_event_log_rate_;
// The package of the app running in this process.
std::string process_package_name_;
// The data directory of the app running in this process.
std::string process_data_directory_;
// Whether threads should dump their native stack on SIGQUIT.
bool dump_native_stack_on_sig_quit_;
// Whether the dalvik cache was pruned when initializing the runtime.
bool pruned_dalvik_cache_;
// Whether or not we currently care about pause times.
ProcessState process_state_;
// Whether zygote code is in a section that should not start threads.
bool zygote_no_threads_;
// The string containing requested jdwp options
std::string jdwp_options_;
// The jdwp provider we were configured with.
JdwpProvider jdwp_provider_;
// Saved environment.
// Snapshot of the process environment captured via TakeSnapshot().
// NOTE(review): method definitions live outside this header (runtime.cc);
// presumably TakeSnapshot() copies `environ` — confirm there.
class EnvSnapshot {
public:
EnvSnapshot() = default;
// Captures the current process environment into this snapshot.
void TakeSnapshot();
// Returns the saved environment as a char* vector (same shape as `environ`).
// NOTE(review): presumably only meaningful after TakeSnapshot() — confirm.
char** GetSnapshot() const;
private:
// Owning array of C-string pointers handed out by GetSnapshot().
std::unique_ptr<char*[]> c_env_vector_;
// Backing storage for the "NAME=value" strings referenced by c_env_vector_.
std::vector<std::unique_ptr<std::string>> name_value_pairs_;
DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
} env_snapshot_;
// Generic system-weak holders.
std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;
std::unique_ptr<RuntimeCallbacks> callbacks_;
std::atomic<uint32_t> deoptimization_counts_[
static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];
MemMap protected_fault_page_;
uint32_t verifier_logging_threshold_ms_;
bool load_app_image_startup_cache_ = false;
// Whether startup has completed. Transitions to true at most once.
std::atomic<bool> startup_completed_ = false;
gc::space::ImageSpaceLoadingOrder image_space_loading_order_ =
gc::space::ImageSpaceLoadingOrder::kSystemFirst;
// Note: See comments on GetFaultMessage.
friend std::string GetFaultMessageForAbortLogging();
friend class ScopedThreadPoolUsage;
friend class OatFileAssistantTest;
DISALLOW_COPY_AND_ASSIGN(Runtime);
};
} // namespace art
#endif // ART_RUNTIME_RUNTIME_H_
|