/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_
#define ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_

#include <stdint.h>
#include <vector>

#include "base/logging.h"
#include "constants_arm64.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/assembler.h"
#include "offsets.h"
#include "utils.h"
#include "UniquePtr.h"
#include "a64/macro-assembler-a64.h"
#include "a64/disasm-a64.h"

namespace art {
namespace arm64 {

#define MEM_OP(x...)      vixl::MemOperand(x)
#define COND_OP(x)        static_cast<vixl::Condition>(x)

enum Condition {
  kNoCondition = -1,
  EQ = 0,
  NE = 1,
  HS = 2,
  LO = 3,
  MI = 4,
  PL = 5,
  VS = 6,
  VC = 7,
  HI = 8,
  LS = 9,
  GE = 10,
  LT = 11,
  GT = 12,
  LE = 13,
  AL = 14,    // Always.
  NV = 15,    // Behaves as always/al.
  kMaxCondition = 16,
};
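
// The enumerators above use the AArch64 condition-code encoding (EQ = 0,
// NE = 1, ..., NV = 15), which is what makes the plain static_cast in
// COND_OP well defined: COND_OP(EQ) is exactly vixl::eq. A minimal usage
// sketch for the two macros (masm, done and offset below are hypothetical,
// not part of this header):
//
//   vixl::Label done;
//   masm->b(&done, COND_OP(EQ));                    // Branch on vixl::eq.
//   masm->ldr(vixl::x0, MEM_OP(vixl::sp, offset));  // MemOperand(sp, offset).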

enum LoadOperandType {
  kLoadSignedByte,
  kLoadUnsignedByte,
  kLoadSignedHalfword,
  kLoadUnsignedHalfword,
  kLoadWord,
  kLoadCoreWord,
  kLoadSWord,
  kLoadDWord
};

enum StoreOperandType {
  kStoreByte,
  kStoreHalfword,
  kStoreWord,
  kStoreCoreWord,
  kStoreSWord,
  kStoreDWord
};
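
// Note: the plain Word variants operate on 32-bit W registers, the CoreWord
// variants on full 64-bit X registers, and the SWord/DWord variants on
// single- and double-precision FP registers; this is how the private
// Load*FromOffset/Store*ToOffset helpers below interpret these types.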

class Arm64Exception;

class Arm64Assembler : public Assembler {
 public:
  Arm64Assembler() : vixl_buf_(new byte[BUF_SIZE]),
                     vixl_masm_(new vixl::MacroAssembler(vixl_buf_, BUF_SIZE)) {}

  virtual ~Arm64Assembler() {
    if (kIsDebugBuild) {
      // In debug builds, dump a disassembly of the generated code to stdout
      // for inspection. The decoder and printer are stack-allocated so they
      // are released when the destructor returns.
      vixl::Decoder decoder;
      vixl::PrintDisassembler disassembler(stdout);
      decoder.AppendVisitor(&disassembler);

      // Walk the buffer one instruction at a time and print each one.
      for (size_t i = 0; i < CodeSize() / vixl::kInstructionSize; ++i) {
        vixl::Instruction* instr =
            reinterpret_cast<vixl::Instruction*>(vixl_buf_ + i * vixl::kInstructionSize);
        decoder.Decode(instr);
      }
    }
    delete[] vixl_buf_;
  }

  // Emit slow paths queued during assembly.
  void EmitSlowPaths();

  // Size of generated code.
  size_t CodeSize() const;

  // Copy instructions out of assembly buffer into the given region of memory.
  void FinalizeInstructions(const MemoryRegion& region);

  // Emit code that will create an activation on the stack.
  void BuildFrame(size_t frame_size, ManagedRegister method_reg,
                  const std::vector<ManagedRegister>& callee_save_regs,
                  const std::vector<ManagedRegister>& entry_spills);

  // Emit code that will remove an activation from the stack.
  void RemoveFrame(size_t frame_size,
                   const std::vector<ManagedRegister>& callee_save_regs);

  void IncreaseFrameSize(size_t adjust);
  void DecreaseFrameSize(size_t adjust);

  // Store routines.
  void Store(FrameOffset offs, ManagedRegister src, size_t size);
  void StoreRef(FrameOffset dest, ManagedRegister src);
  void StoreRawPtr(FrameOffset dest, ManagedRegister src);
  void StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                             ManagedRegister scratch);
  void StoreImmediateToThread(ThreadOffset dest, uint32_t imm,
                              ManagedRegister scratch);
  void StoreStackOffsetToThread(ThreadOffset thr_offs,
                                FrameOffset fr_offs,
                                ManagedRegister scratch);
  void StoreStackPointerToThread(ThreadOffset thr_offs);
  void StoreSpanning(FrameOffset dest, ManagedRegister src,
                     FrameOffset in_off, ManagedRegister scratch);

  // Load routines.
  void Load(ManagedRegister dest, FrameOffset src, size_t size);
  void Load(ManagedRegister dest, ThreadOffset src, size_t size);
  void LoadRef(ManagedRegister dest, FrameOffset src);
  void LoadRef(ManagedRegister dest, ManagedRegister base,
               MemberOffset offs);
  void LoadRawPtr(ManagedRegister dest, ManagedRegister base,
                  Offset offs);
  void LoadRawPtrFromThread(ManagedRegister dest,
                            ThreadOffset offs);

  // Copying routines.
  void Move(ManagedRegister dest, ManagedRegister src, size_t size);
  void CopyRawPtrFromThread(FrameOffset fr_offs, ThreadOffset thr_offs,
                            ManagedRegister scratch);
  void CopyRawPtrToThread(ThreadOffset thr_offs, FrameOffset fr_offs,
                          ManagedRegister scratch);
  void CopyRef(FrameOffset dest, FrameOffset src,
               ManagedRegister scratch);
  void Copy(FrameOffset dest, FrameOffset src, ManagedRegister scratch, size_t size);
  void Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
            ManagedRegister scratch, size_t size);
  void Copy(FrameOffset dest, FrameOffset src_base, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void Copy(ManagedRegister dest, Offset dest_offset,
            ManagedRegister src, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void Copy(FrameOffset dest, Offset dest_offset, FrameOffset src, Offset src_offset,
            ManagedRegister scratch, size_t size);
  void MemoryBarrier(ManagedRegister scratch);

  // Sign extension.
  void SignExtend(ManagedRegister mreg, size_t size);

  // Zero extension.
  void ZeroExtend(ManagedRegister mreg, size_t size);

  // Exploit fast access in managed code to Thread::Current().
  void GetCurrentThread(ManagedRegister tr);
  void GetCurrentThread(FrameOffset dest_offset,
                        ManagedRegister scratch);

  // Set up out_reg to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed. in_reg holds a possibly stale reference
  // that can be used to avoid loading the SIRT entry to see if the value is
  // NULL.
  void CreateSirtEntry(ManagedRegister out_reg, FrameOffset sirt_offset,
                       ManagedRegister in_reg, bool null_allowed);

  // Set up out_off to hold an Object** into the SIRT, or to be NULL if the
  // value is null and null_allowed.
  void CreateSirtEntry(FrameOffset out_off, FrameOffset sirt_offset,
                       ManagedRegister scratch, bool null_allowed);

  // src holds a SIRT entry (Object**); load it into dst.
  void LoadReferenceFromSirt(ManagedRegister dst,
                             ManagedRegister src);

  // Heap::VerifyObject on src. In some cases (such as a reference to this) we
  // know that src cannot be null.
  void VerifyObject(ManagedRegister src, bool could_be_null);
  void VerifyObject(FrameOffset src, bool could_be_null);

  // Call to address held at [base+offset].
  void Call(ManagedRegister base, Offset offset, ManagedRegister scratch);
  void Call(FrameOffset base, Offset offset, ManagedRegister scratch);
  void Call(ThreadOffset offset, ManagedRegister scratch);

  // Generate code to check if Thread::Current()->exception_ is non-null
  // and branch to an exception slow path if it is.
  void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust);

 private:
  static vixl::Register reg_x(int code) {
    CHECK(code < kNumberOfCoreRegisters) << code;
    if (code == SP) {
      return vixl::sp;
    }
    return vixl::Register::XRegFromCode(code);
  }

  static vixl::Register reg_w(int code) {
    return vixl::Register::WRegFromCode(code);
  }

  static vixl::FPRegister reg_d(int code) {
    return vixl::FPRegister::DRegFromCode(code);
  }

  static vixl::FPRegister reg_s(int code) {
    return vixl::FPRegister::SRegFromCode(code);
  }

  // Emits the exception block for a queued Arm64Exception.
  void EmitExceptionPoll(Arm64Exception* exception);

  void StoreWToOffset(StoreOperandType type, WRegister source,
                      Register base, int32_t offset);
  void StoreToOffset(Register source, Register base, int32_t offset);
  void StoreSToOffset(SRegister source, Register base, int32_t offset);
  void StoreDToOffset(DRegister source, Register base, int32_t offset);

  void LoadImmediate(Register dest, int32_t value, Condition cond = AL);
  void Load(Arm64ManagedRegister dst, Register src, int32_t src_offset, size_t size);
  void LoadWFromOffset(LoadOperandType type, WRegister dest,
                       Register base, int32_t offset);
  void LoadFromOffset(Register dest, Register base, int32_t offset);
  void LoadSFromOffset(SRegister dest, Register base, int32_t offset);
  void LoadDFromOffset(DRegister dest, Register base, int32_t offset);
  void AddConstant(Register rd, int32_t value, Condition cond = AL);
  void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL);

  // Vixl buffer size.
  static constexpr size_t BUF_SIZE = 4096;

  // Vixl buffer.
  byte* vixl_buf_;

  // The vixl macro assembler, owned via UniquePtr.
  UniquePtr<vixl::MacroAssembler> vixl_masm_;

  // List of exception blocks to generate at the end of the code cache.
  std::vector<Arm64Exception*> exception_blocks_;
};
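
// A minimal lifecycle sketch for the class above (illustrative only; the
// frame_size, method_reg, callee_saves and entry_spills values are
// hypothetical inputs a caller such as the JNI compiler would supply):
//
//   Arm64Assembler assembler;
//   assembler.BuildFrame(frame_size, method_reg, callee_saves, entry_spills);
//   ... emit loads, stores, copies and calls for the method body ...
//   assembler.RemoveFrame(frame_size, callee_saves);
//   assembler.EmitSlowPaths();  // Flush queued exception blocks.
//   std::vector<uint8_t> code(assembler.CodeSize());
//   MemoryRegion region(&code[0], code.size());
//   assembler.FinalizeInstructions(region);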

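// Bookkeeping for one pending exception check: ExceptionPoll() queues an
// Arm64Exception on exception_blocks_, and EmitSlowPaths() later emits the
// queued blocks, starting at Entry(), after the main body of the method.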
class Arm64Exception {
 private:
  Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
      : scratch_(scratch), stack_adjust_(stack_adjust) {
  }

  vixl::Label* Entry() { return &exception_entry_; }

  // Register used for passing Thread::Current()->exception_.
  const Arm64ManagedRegister scratch_;

  // Stack adjust for ExceptionPoll.
  const size_t stack_adjust_;

  vixl::Label exception_entry_;

  friend class Arm64Assembler;
  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
};

}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_UTILS_ARM64_ASSEMBLER_ARM64_H_