/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_STACK_H_
#define ART_RUNTIME_STACK_H_

#include <stdint.h>
#include <string>

#include "base/locks.h"
#include "base/macros.h"
#include "quick/quick_method_frame_info.h"
#include "stack_map.h"

namespace art {

namespace mirror {
class Object;
}  // namespace mirror

class ArtMethod;
class Context;
class HandleScope;
class OatQuickMethodHeader;
class ShadowFrame;
class Thread;
union JValue;

// The kind of vreg being accessed in calls to Set/GetVReg.
enum VRegKind {
  kReferenceVReg,
  kIntVReg,
  kFloatVReg,
  kLongLoVReg,
  kLongHiVReg,
  kDoubleLoVReg,
  kDoubleHiVReg,
  kConstant,
  kImpreciseConstant,
  kUndefined,
};
std::ostream& operator<<(std::ostream& os, const VRegKind& rhs);
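
// A 64-bit long or double occupies two adjacent vregs, described by a lo/hi
// pair of kinds. Illustrative sketch (hypothetical 'visitor', 'm', and 'vreg';
// GetVRegPair is declared on StackVisitor below):
//
//   uint64_t wide;
//   if (visitor->GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &wide)) {
//     int64_t as_long = static_cast<int64_t>(wide);  // Reconstructed value.
//   }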

// Size in bytes of the should_deoptimize flag on the stack.
// We need only 4 bytes regardless of the architecture; the frame size
// calculation automatically aligns the final frame size.
static constexpr size_t kShouldDeoptimizeFlagSize = 4;

/*
 * Our current stack layout.
 * The Dalvik registers come first, followed by the
 * Method*, followed by other special temporaries if any, followed by
 * regular compiler temporaries. As of now the Method* is the only
 * special compiler temporary.
 * A compiler temporary can be thought of as a virtual register that
 * does not exist in the dex file but holds intermediate values to help
 * optimizations and code generation. A special compiler temporary is
 * one whose location in the frame is well known, while non-special ones
 * have no fixed location in the frame as long as the code generator
 * itself knows how to access them.
 *
 * TODO: Update this documentation?
 *
 *     +-------------------------------+
 *     | IN[ins-1]                     |  {Note: resides in caller's frame}
 *     |       .                       |
 *     | IN[0]                         |
 *     | caller's ArtMethod            |  ... ArtMethod*
 *     +===============================+  {Note: start of callee's frame}
 *     | core callee-save spill        |  {variable sized}
 *     +-------------------------------+
 *     | fp callee-save spill          |
 *     +-------------------------------+
 *     | filler word                   |  {For compatibility, if V[locals-1] is used as wide}
 *     +-------------------------------+
 *     | V[locals-1]                   |
 *     | V[locals-2]                   |
 *     |      .                        |
 *     |      .                        |  ... (reg == 2)
 *     | V[1]                          |  ... (reg == 1)
 *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
 *     +-------------------------------+
 *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
 *     +-------------------------------+
 *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
 *     |      .                        |
 *     |      .                        |
 *     | V[max_num_special_temps + 1]  |
 *     | V[max_num_special_temps + 0]  |
 *     +-------------------------------+
 *     | OUT[outs-1]                   |
 *     | OUT[outs-2]                   |
 *     |       .                       |
 *     | OUT[0]                        |
 *     | ArtMethod*                    |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
 *     +===============================+
 */

class StackVisitor {
 public:
  // This enum defines a flag to control whether inlined frames are included
  // when walking the stack.
  enum class StackWalkKind {
    kIncludeInlinedFrames,
    kSkipInlinedFrames,
  };

 protected:
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               bool check_suspended = true);

  bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

 public:
  virtual ~StackVisitor() {}
  StackVisitor(const StackVisitor&) = default;
  StackVisitor(StackVisitor&&) = default;

  // Return 'true' if we should continue to visit more frames, 'false' to stop.
  virtual bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) = 0;
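
  // Illustrative subclass sketch (hypothetical, not part of this header):
  // counts visited frames and keeps walking until the stack is exhausted.
  //
  //   class FrameCounter : public StackVisitor {
  //    public:
  //     explicit FrameCounter(Thread* thread)
  //         : StackVisitor(thread, /* context= */ nullptr,
  //                        StackWalkKind::kIncludeInlinedFrames) {}
  //     bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
  //       ++count_;
  //       return true;  // Returning true continues to the next frame.
  //     }
  //     size_t count_ = 0;
  //   };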

  enum class CountTransitions {
    kYes,
    kNo,
  };

  template <CountTransitions kCount = CountTransitions::kYes>
  void WalkStack(bool include_transitions = false) REQUIRES_SHARED(Locks::mutator_lock_);

  // Convenience helper function to walk the stack with a lambda as a visitor.
  template <CountTransitions kCountTransitions = CountTransitions::kYes,
            typename T>
  ALWAYS_INLINE static void WalkStack(const T& fn,
                                      Thread* thread,
                                      Context* context,
                                      StackWalkKind walk_kind,
                                      bool check_suspended = true,
                                      bool include_transitions = false)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    class LambdaStackVisitor : public StackVisitor {
     public:
      LambdaStackVisitor(const T& fn,
                         Thread* thread,
                         Context* context,
                         StackWalkKind walk_kind,
                         bool check_suspended = true)
          : StackVisitor(thread, context, walk_kind, check_suspended), fn_(fn) {}

      bool VisitFrame() override REQUIRES_SHARED(Locks::mutator_lock_) {
        return fn_(this);
      }

     private:
      T fn_;
    };
    LambdaStackVisitor visitor(fn, thread, context, walk_kind, check_suspended);
    visitor.template WalkStack<kCountTransitions>(include_transitions);
  }
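
  // Illustrative use of the lambda helper (a sketch; 'self' is assumed to be
  // the current Thread, and the Context may be null when register values are
  // not needed):
  //
  //   StackVisitor::WalkStack(
  //       [](const StackVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_) {
  //         LOG(INFO) << visitor->GetMethod()->PrettyMethod();
  //         return true;  // Keep walking.
  //       },
  //       self,
  //       /* context= */ nullptr,
  //       StackWalkKind::kSkipInlinedFrames);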

  Thread* GetThread() const {
    return thread_;
  }

  ArtMethod* GetMethod() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Sets this stack frame's method pointer. This requires the mutator lock to be held
  // exclusively. This does not work with inlined methods.
  void SetMethod(ArtMethod* method) REQUIRES(Locks::mutator_lock_);

  ArtMethod* GetOuterMethod() const {
    return *GetCurrentQuickFrame();
  }

  bool IsShadowFrame() const {
    return cur_shadow_frame_ != nullptr;
  }

  uint32_t GetDexPc(bool abort_on_failure = true) const REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Object* GetThisObject() const REQUIRES_SHARED(Locks::mutator_lock_);

  size_t GetNativePcOffset() const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the height of the stack within the managed stack frames, including transitions.
  size_t GetFrameHeight() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetNumFrames() - cur_depth_ - 1;
  }

  // Returns a frame ID for JDWP use, starting from 1.
  size_t GetFrameId() REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFrameHeight() + 1;
  }

  size_t GetNumFrames() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (num_frames_ == 0) {
      num_frames_ = ComputeNumFrames(thread_, walk_kind_);
    }
    return num_frames_;
  }

  size_t GetFrameDepth() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return cur_depth_;
  }

  // Get the method and dex pc immediately after the one that's currently being visited.
  bool GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. The debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value, VRegKind kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Values will be set in debugger shadow frames. The debugger will make sure deoptimization
  // is triggered to make the values effective.
  bool SetVRegPair(ArtMethod* m,
                   uint16_t vreg,
                   uint64_t new_value,
                   VRegKind kind_lo,
                   VRegKind kind_hi)
      REQUIRES_SHARED(Locks::mutator_lock_);
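
  // Illustrative read-modify-write sketch (hypothetical 'm' and 'vreg'; the
  // written value only takes effect once the frame is deoptimized and executed
  // by the interpreter):
  //
  //   uint32_t value;
  //   if (visitor->GetVReg(m, vreg, kIntVReg, &value)) {
  //     visitor->SetVReg(m, vreg, value + 1, kIntVReg);
  //   }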

  uintptr_t* GetGPRAddress(uint32_t reg) const;

  uintptr_t GetReturnPc() const REQUIRES_SHARED(Locks::mutator_lock_);

  void SetReturnPc(uintptr_t new_ret_pc) REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInInlinedFrame() const {
    return !current_inline_frames_.empty();
  }

  InlineInfo GetCurrentInlinedFrame() const {
    return current_inline_frames_.back();
  }

  uintptr_t GetCurrentQuickFramePc() const {
    return cur_quick_frame_pc_;
  }

  ArtMethod** GetCurrentQuickFrame() const {
    return cur_quick_frame_;
  }

  ShadowFrame* GetCurrentShadowFrame() const {
    return cur_shadow_frame_;
  }

  HandleScope* GetCurrentHandleScope(size_t pointer_size) const {
    ArtMethod** sp = GetCurrentQuickFrame();
    // Skip the ArtMethod*; the handle scope comes next.
    return reinterpret_cast<HandleScope*>(reinterpret_cast<uintptr_t>(sp) + pointer_size);
  }
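
  // For example, with 64-bit pointers (pointer_size == 8) the handle scope
  // starts at sp + 8, immediately after the ArtMethod* slot.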

  std::string DescribeLocation() const REQUIRES_SHARED(Locks::mutator_lock_);

  static size_t ComputeNumFrames(Thread* thread, StackWalkKind walk_kind)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static void DescribeStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_);

  const OatQuickMethodHeader* GetCurrentOatQuickMethodHeader() const {
    return cur_oat_quick_method_header_;
  }

  QuickMethodFrameInfo GetCurrentQuickFrameInfo() const REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  // Private constructor for use when num_frames_ has already been computed.
  StackVisitor(Thread* thread,
               Context* context,
               StackWalkKind walk_kind,
               size_t num_frames,
               bool check_suspended = true)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
    return is_float ? IsAccessibleFPR(reg) : IsAccessibleGPR(reg);
  }
  uintptr_t GetRegister(uint32_t reg, bool is_float) const {
    DCHECK(IsAccessibleRegister(reg, is_float));
    return is_float ? GetFPR(reg) : GetGPR(reg);
  }

  bool IsAccessibleGPR(uint32_t reg) const;
  uintptr_t GetGPR(uint32_t reg) const;

  bool IsAccessibleFPR(uint32_t reg) const;
  uintptr_t GetFPR(uint32_t reg) const;

  bool GetVRegFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind, uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                uint32_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool GetVRegPairFromDebuggerShadowFrame(uint16_t vreg, VRegKind kind_lo, VRegKind kind_hi,
                                          uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                    VRegKind kind_lo, VRegKind kind_hi,
                                    uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi, VRegKind kind_lo,
                                   uint64_t* val) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SanityCheckFrame() const REQUIRES_SHARED(Locks::mutator_lock_);

  Thread* const thread_;
  const StackWalkKind walk_kind_;
  ShadowFrame* cur_shadow_frame_;
  ArtMethod** cur_quick_frame_;
  uintptr_t cur_quick_frame_pc_;
  const OatQuickMethodHeader* cur_oat_quick_method_header_;
  // Lazily computed number of frames in the stack.
  size_t num_frames_;
  // Depth of the frame we're currently at.
  size_t cur_depth_;
  // Current inlined frames of the method we are currently at.
  // We keep popping frames from the end as we visit the frames.
  CodeInfo current_code_info_;
  BitTableRange<InlineInfo> current_inline_frames_;

 protected:
  Context* const context_;
  const bool check_suspended_;
};

}  // namespace art

#endif  // ART_RUNTIME_STACK_H_