author     Wei Wang <wangw@codeaurora.org>  2014-09-30 11:46:29 -0700
committer  Linux Build Service Account <lnxbuild@localhost>  2014-11-04 08:24:02 -0700
commit     a9df2579886bc3b5184020dca5a5155dc6a2b6c6 (patch)
tree       864c96a0fdfa17d47eb4495fd13c26529ac4a822
parent     2fc46c371de174b664e4690dbf586fd92cad9c70 (diff)
more ART Extension (staging/cm-12.0-caf)

Vendor extension hooks.

Change-Id: I9f03153bba698e82d1a1e992baf2bc1c48a5d059
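The patch threads vendor extension hooks through the Arm64 quick-compiler backend. Each hook is declared QC_WEAK, which expands to __attribute__((weak)), so the no-op defaults added below are compiled into ART but can be displaced at link time by a strong definition from a vendor object file; defining QC_STRONG turns the macro into a no-op and makes the defaults ordinary strong symbols. A minimal stand-alone sketch of the weak-override pattern (names hypothetical, not from the ART tree):

// vendor_hook_sketch.cc: illustrative only; VendorPostInit is a made-up name.
// Build: g++ vendor_hook_sketch.cc -o sketch
#include <cstdio>

#ifdef QC_STRONG
#define QC_WEAK
#else
// A weak symbol is used only when no strong definition exists at link time.
#define QC_WEAK __attribute__((weak))
#endif

void VendorPostInit() QC_WEAK;
void VendorPostInit() {
  // Default hook: effectively a no-op, like Arm64Mir2LirPostInit() below.
  std::printf("default (weak) hook\n");
}

int main() {
  // Linking in another object that defines a strong VendorPostInit()
  // silently retargets this call; nothing in ART itself has to change.
  VendorPostInit();
  return 0;
}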
-rw-r--r--  compiler/dex/quick/arm/codegen_arm.h        |  2
-rw-r--r--  compiler/dex/quick/arm64/arm64_lir.h        |  1
-rw-r--r--  compiler/dex/quick/arm64/assemble_arm64.cc  | 32
-rw-r--r--  compiler/dex/quick/arm64/codegen_arm64.h    | 25
-rw-r--r--  compiler/dex/quick/arm64/target_arm64.cc    | 29
-rw-r--r--  compiler/dex/quick/arm64/utility_arm64.cc   | 24
-rw-r--r--  compiler/dex/reg_storage.h                  | 13
7 files changed, 97 insertions(+), 29 deletions(-)
diff --git a/compiler/dex/quick/arm/codegen_arm.h b/compiler/dex/quick/arm/codegen_arm.h
index 32149c900f..d9e6c01102 100644
--- a/compiler/dex/quick/arm/codegen_arm.h
+++ b/compiler/dex/quick/arm/codegen_arm.h
@@ -194,7 +194,7 @@ class ArmMir2Lir FINAL : public Mir2Lir {
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
size_t GetInstructionOffset(LIR* lir);
- void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir);
+ void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
void GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) QC_WEAK;
//void MachineSpecificPreprocessMIR(BasicBlock* bb, MIR* mir);
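The one-line change above makes the hook's override explicit: OVERRIDE is ART's macro for C++11 override, so the compiler now verifies that GenMachineSpecificExtendedMethodMIR really replaces a Mir2Lir virtual rather than silently declaring a fresh overload. A small sketch with stand-in types (Mir2LirBase and ArmBackend are not ART classes) showing what the keyword buys:

// override_sketch.cc
#include <cstdio>

struct Mir2LirBase {
  virtual ~Mir2LirBase() = default;
  virtual void GenExtendedMIR(int bb, int mir) { std::printf("base %d %d\n", bb, mir); }
};

struct ArmBackend : Mir2LirBase {
  // Matches the base signature, so "override" compiles.
  void GenExtendedMIR(int bb, int mir) override { std::printf("arm %d %d\n", bb, mir); }
  // void GenExtendedMIR(long, long) override;  // would fail: overrides nothing.
};

int main() {
  ArmBackend b;
  Mir2LirBase* p = &b;
  p->GenExtendedMIR(1, 2);  // virtual dispatch reaches ArmBackend.
  return 0;
}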
diff --git a/compiler/dex/quick/arm64/arm64_lir.h b/compiler/dex/quick/arm64/arm64_lir.h
index a449cbd4f7..83d0590e32 100644
--- a/compiler/dex/quick/arm64/arm64_lir.h
+++ b/compiler/dex/quick/arm64/arm64_lir.h
@@ -317,6 +317,7 @@ enum ArmOpcode {
kA64Mov2rr, // mov [00101010000] rm[20-16] [000000] [11111] rd[4-0].
kA64Mvn2rr, // mvn [00101010001] rm[20-16] [000000] [11111] rd[4-0].
kA64Mul3rrr, // mul [00011011000] rm[20-16] [011111] rn[9-5] rd[4-0].
+ kA64Madd4rrrr, // madd[s0011011000] rm[20-16] [0] ra[14-10] rn[9-5] rd[4-0].
kA64Msub4rrrr, // msub[s0011011000] rm[20-16] [1] ra[14-10] rn[9-5] rd[4-0].
kA64Neg3rro, // neg alias of "sub arg0, rzr, arg1, arg2".
kA64Orr3Rrl, // orr [s01100100] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
diff --git a/compiler/dex/quick/arm64/assemble_arm64.cc b/compiler/dex/quick/arm64/assemble_arm64.cc
index 15c89f2f18..0898f7fa41 100644
--- a/compiler/dex/quick/arm64/assemble_arm64.cc
+++ b/compiler/dex/quick/arm64/assemble_arm64.cc
@@ -462,6 +462,10 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 20, 16,
kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE12,
"mul", "!0r, !1r, !2r", kFixupNone),
+ ENCODING_MAP(WIDE(kA64Madd4rrrr), SF_VARIANTS(0x1b000000),
+ kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10,
+ kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123,
+ "madd", "!0r, !1r, !3r, !2r", kFixupNone),
ENCODING_MAP(WIDE(kA64Msub4rrrr), SF_VARIANTS(0x1b008000),
kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtRegR, 14, 10,
kFmtRegR, 20, 16, IS_QUAD_OP | REG_DEF0_USE123,
@@ -646,20 +650,33 @@ void Arm64Mir2Lir::InsertFixupBefore(LIR* prev_lir, LIR* orig_lir, LIR* new_lir)
}
}
+const ArmEncodingMap* Arm64Mir2Lir::GetEncoder(int opcode) {
+ const ArmEncodingMap* encoder = &EncodingMap[opcode];
+ return encoder;
+}
+
/* Nop, used for aligning code. Nop is an alias for hint #0. */
#define PADDING_NOP (UINT32_C(0xd503201f))
+uint32_t Arm64Mir2Lir::ProcessMoreEncodings(const ArmEncodingMap *encoder,
+ int i, uint32_t operand) {
+ LOG(FATAL) << "Bad fmt:" << encoder->field_loc[i].kind;
+ uint32_t value = 0;
+ return value;
+}
+
uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
for (; lir != nullptr; lir = NEXT_LIR(lir)) {
bool opcode_is_wide = IS_WIDE(lir->opcode);
ArmOpcode opcode = UNWIDE(lir->opcode);
+ bool extendedOpcode = false;
if (UNLIKELY(IsPseudoLirOp(opcode))) {
continue;
}
if (LIKELY(!lir->flags.is_nop)) {
- const ArmEncodingMap *encoder = &EncodingMap[opcode];
+ const ArmEncodingMap *encoder = GetEncoder(opcode);
// Select the right variant of the skeleton.
uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
@@ -788,8 +805,9 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
bits |= value;
break;
default:
- LOG(FATAL) << "Bad fmt for arg. " << i << " in " << encoder->name
- << " (" << kind << ")";
+ bits |= ProcessMoreEncodings(encoder, i, operand);
+ extendedOpcode = true;
+ break;
}
}
}
@@ -903,7 +921,7 @@ void Arm64Mir2Lir::AssembleLIR() {
break;
}
default:
- LOG(FATAL) << "Unexpected case " << lir->flags.fixup;
+ LOG(FATAL) << "Unexpected case: opcode: " << lir->opcode << ", fixup: " << lir->flags.fixup;
}
prev_lir = lir;
lir = lir->u.a.pcrel_next;
@@ -953,7 +971,7 @@ void Arm64Mir2Lir::AssembleLIR() {
size_t Arm64Mir2Lir::GetInsnSize(LIR* lir) {
ArmOpcode opcode = UNWIDE(lir->opcode);
DCHECK(!IsPseudoLirOp(opcode));
- return EncodingMap[opcode].size;
+ return GetEncoder(opcode)->size;
}
// Encode instruction bit pattern and assign offsets.
@@ -966,8 +984,8 @@ uint32_t Arm64Mir2Lir::LinkFixupInsns(LIR* head_lir, LIR* tail_lir, uint32_t off
if (!lir->flags.is_nop) {
if (lir->flags.fixup != kFixupNone) {
if (!IsPseudoLirOp(opcode)) {
- lir->flags.size = EncodingMap[opcode].size;
- lir->flags.fixup = EncodingMap[opcode].fixup;
+ lir->flags.size = GetEncoder(opcode)->size;
+ lir->flags.fixup = GetEncoder(opcode)->fixup;
} else {
DCHECK_NE(static_cast<int>(opcode), kPseudoPseudoAlign4);
lir->flags.size = 0;
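Two extension points land in the assembler: GetEncoder() funnels every EncodingMap lookup through one weak accessor, and ProcessMoreEncodings() converts the formerly fatal default case of the operand loop into a vendor hook. The new kA64Madd4rrrr row reuses the msub layout with bit 15 clear (skeleton 0x1b000000 rather than 0x1b008000) and packs rd[4-0], rn[9-5], ra[14-10], rm[20-16]. A stand-alone sketch of that field packing, sanity-checked against the architectural encoding of madd w0, w1, w2, w3:

// madd_encode_sketch.cc: packs fields per the ENCODING_MAP entry above.
#include <cassert>
#include <cstdint>
#include <cstdio>

// madd wd, wn, wm, wa computes wd = wa + wn * wm.
uint32_t EncodeMadd(uint32_t rd, uint32_t rn, uint32_t rm, uint32_t ra) {
  return UINT32_C(0x1b000000) | (rm << 16) | (ra << 10) | (rn << 5) | rd;
}

int main() {
  // "madd w0, w1, w2, w3" assembles to 0x1b020c20 on AArch64.
  assert(EncodeMadd(0, 1, 2, 3) == UINT32_C(0x1b020c20));
  // Setting bit 15 on the same fields would give the msub encoding.
  std::printf("madd encoding ok\n");
  return 0;
}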
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 3e1c18baf4..0edafe0884 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -22,8 +22,14 @@
#include <map>
-namespace art {
+#ifdef QC_STRONG
+#define QC_WEAK
+#else
+#define QC_WEAK __attribute__((weak))
+#endif
+namespace art {
+class QCArm64Mir2Lir;
class Arm64Mir2Lir FINAL : public Mir2Lir {
protected:
// TODO: consolidate 64-bit target support.
@@ -259,7 +265,10 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
- private:
+ virtual void GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) OVERRIDE;
+ void GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) QC_WEAK;
+
+private:
/**
* @brief Given register xNN (dNN), returns register wNN (sNN).
* @param reg #RegStorage containing a Solo64 input register (e.g. @c x1 or @c d2).
@@ -394,6 +403,18 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
InToRegStorageMapping in_to_reg_storage_mapping_;
static const ArmEncodingMap EncodingMap[kA64Last];
+
+private:
+ static uint32_t ProcessMoreEncodings(const ArmEncodingMap* encoder, int i, uint32_t operand) QC_WEAK;
+ static const ArmEncodingMap* GetEncoder(int opcode) QC_WEAK;
+
+ virtual void ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) QC_WEAK;
+
+ void CompilerPostInitializeRegAlloc() QC_WEAK;
+ void Arm64Mir2LirPostInit(Arm64Mir2Lir* mir_to_lir) QC_WEAK;
+
+ friend class QCArm64Mir2Lir;
+ QCArm64Mir2Lir* qcm2l;
};
} // namespace art
diff --git a/compiler/dex/quick/arm64/target_arm64.cc b/compiler/dex/quick/arm64/target_arm64.cc
index 9b4546a94b..8368c5b7bc 100644
--- a/compiler/dex/quick/arm64/target_arm64.cc
+++ b/compiler/dex/quick/arm64/target_arm64.cc
@@ -138,6 +138,11 @@ RegStorage Arm64Mir2Lir::TargetReg(SpecialTargetRegister reg) {
return res_reg;
}
+void Arm64Mir2Lir::CompilerPostInitializeRegAlloc()
+{
+ //nothing here
+}
+
/*
* Decode the register id. This routine makes assumptions on the encoding made by RegStorage.
*/
@@ -587,6 +592,12 @@ Arm64Mir2Lir::Arm64Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAlloca
<< static_cast<int>(Arm64Mir2Lir::EncodingMap[i].opcode);
}
}
+
+ qcm2l = nullptr;
+ Arm64Mir2LirPostInit(this);
+}
+
+void Arm64Mir2Lir::Arm64Mir2LirPostInit(Arm64Mir2Lir* mir_to_lir) {
}
Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
@@ -633,6 +644,8 @@ void Arm64Mir2Lir::CompilerInitializeRegAlloc() {
reg_pool_->next_core_reg_ = 2;
reg_pool_->next_sp_reg_ = 0;
reg_pool_->next_dp_reg_ = 0;
+
+ CompilerPostInitializeRegAlloc();
}
/*
@@ -772,17 +785,17 @@ LIR* Arm64Mir2Lir::CheckSuspendUsingLoad() {
uint64_t Arm64Mir2Lir::GetTargetInstFlags(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
- return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].flags;
+ return GetEncoder(UNWIDE(opcode))->flags;
}
const char* Arm64Mir2Lir::GetTargetInstName(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
- return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].name;
+ return GetEncoder(UNWIDE(opcode))->name;
}
const char* Arm64Mir2Lir::GetTargetInstFmt(int opcode) {
DCHECK(!IsPseudoLirOp(opcode));
- return Arm64Mir2Lir::EncodingMap[UNWIDE(opcode)].fmt;
+ return GetEncoder(UNWIDE(opcode))->fmt;
}
RegStorage Arm64Mir2Lir::InToRegStorageArm64Mapper::GetNextReg(bool is_double_or_float,
@@ -1199,4 +1212,14 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
return call_state;
}
+void Arm64Mir2Lir::GenMoreMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+}
+
+void Arm64Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
+ GenMoreMachineSpecificExtendedMethodMIR(bb, mir);
+}
+
+void Arm64Mir2Lir::ApplyArchOptimizations(LIR* head_lir, LIR* tail_lir, BasicBlock* bb) {
+}
+
} // namespace art
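With GetTargetInstFlags/Name/Fmt also routed through GetEncoder(), the indirection is complete: a strong vendor GetEncoder() can serve opcodes beyond kA64Last from its own table, while base opcodes keep resolving to EncodingMap. A sketch of that two-table dispatch with stand-in types (the real ArmEncodingMap lives in arm64_lir.h):

// encoder_dispatch_sketch.cc: Encoding and the tables are stand-ins, not ART types.
#include <cstdio>

struct Encoding { const char* name; };

static const Encoding kBaseTable[] = {{"mul"}, {"madd"}, {"msub"}};
static const int kBaseLast = 3;  // plays the role of kA64Last.
static const Encoding kVendorTable[] = {{"vendor-op"}};

// ART's weak default is just "return &EncodingMap[opcode];"; a strong
// vendor definition can fall through to an extension table instead.
const Encoding* GetEncoder(int opcode) {
  if (opcode < kBaseLast) return &kBaseTable[opcode];
  return &kVendorTable[opcode - kBaseLast];
}

int main() {
  std::printf("%s %s\n", GetEncoder(1)->name, GetEncoder(3)->name);  // madd vendor-op
  return 0;
}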
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index f58f83070b..070175ac9f 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -91,7 +91,7 @@ size_t Arm64Mir2Lir::GetLoadStoreSize(LIR* lir) {
bool opcode_is_wide = IS_WIDE(lir->opcode);
ArmOpcode opcode = UNWIDE(lir->opcode);
DCHECK(!IsPseudoLirOp(opcode));
- const ArmEncodingMap *encoder = &EncodingMap[opcode];
+ const ArmEncodingMap *encoder = GetEncoder(opcode);
uint32_t bits = opcode_is_wide ? encoder->xskeleton : encoder->wskeleton;
return (bits >> 30);
}
@@ -617,11 +617,11 @@ LIR* Arm64Mir2Lir::OpRegRegShift(OpKind op, RegStorage r_dest_src1, RegStorage r
}
DCHECK(!IsPseudoLirOp(opcode));
- if (EncodingMap[opcode].flags & IS_BINARY_OP) {
+ if (GetEncoder(opcode)->flags & IS_BINARY_OP) {
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
return NewLIR2(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg());
- } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
- ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
+ } else if (GetEncoder(opcode)->flags & IS_TERTIARY_OP) {
+ ArmEncodingKind kind = GetEncoder(opcode)->field_loc[2].kind;
if (kind == kFmtShift) {
return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(), shift);
}
@@ -654,8 +654,8 @@ LIR* Arm64Mir2Lir::OpRegRegExtend(OpKind op, RegStorage r_dest_src1, RegStorage
}
DCHECK(!IsPseudoLirOp(opcode));
- if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
- ArmEncodingKind kind = EncodingMap[opcode].field_loc[2].kind;
+ if (GetEncoder(opcode)->flags & IS_TERTIARY_OP) {
+ ArmEncodingKind kind = GetEncoder(opcode)->field_loc[2].kind;
if (kind == kFmtExtend) {
return NewLIR3(opcode | wide, r_dest_src1.GetReg(), r_src2.GetReg(),
EncodeExtend(ext, amount));
@@ -750,11 +750,11 @@ LIR* Arm64Mir2Lir::OpRegRegRegShift(OpKind op, RegStorage r_dest, RegStorage r_s
ArmOpcode widened_opcode = r_dest.Is64Bit() ? WIDE(opcode) : opcode;
CHECK_EQ(r_dest.Is64Bit(), r_src1.Is64Bit());
CHECK_EQ(r_dest.Is64Bit(), r_src2.Is64Bit());
- if (EncodingMap[opcode].flags & IS_QUAD_OP) {
+ if (GetEncoder(opcode)->flags & IS_QUAD_OP) {
DCHECK(!IsExtendEncoding(shift));
return NewLIR4(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg(), shift);
} else {
- DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
+ DCHECK(GetEncoder(opcode)->flags & IS_TERTIARY_OP);
DCHECK_EQ(shift, ENCODE_NO_SHIFT);
return NewLIR3(widened_opcode, r_dest.GetReg(), r_src1.GetReg(), r_src2.GetReg());
}
@@ -924,7 +924,7 @@ LIR* Arm64Mir2Lir::OpRegRegImm64(OpKind op, RegStorage r_dest, RegStorage r_src1
r_scratch = AllocTemp();
LoadConstant(r_scratch, value);
}
- if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
+ if (GetEncoder(alt_opcode)->flags & IS_QUAD_OP)
res = NewLIR4(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg(), info);
else
res = NewLIR3(alt_opcode | wide, r_dest.GetReg(), r_src1.GetReg(), r_scratch.GetReg());
@@ -998,7 +998,7 @@ LIR* Arm64Mir2Lir::OpRegImm64(OpKind op, RegStorage r_dest_src1, int64_t value)
if (UNLIKELY(neg))
opcode = neg_opcode;
- if (EncodingMap[opcode].flags & IS_QUAD_OP)
+ if (GetEncoder(opcode)->flags & IS_QUAD_OP)
return NewLIR4(opcode | wide, r_dest_src1.GetReg(), r_dest_src1.GetReg(), abs_value,
(shift) ? 1 : 0);
else
@@ -1092,7 +1092,7 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
if (UNLIKELY(expected_scale == 0)) {
// This is a tertiary op (e.g. ldrb, ldrsb); it does not support scale.
- DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
+ DCHECK_NE(GetEncoder(UNWIDE(opcode))->flags & IS_TERTIARY_OP, 0U);
DCHECK_EQ(scale, 0);
load = NewLIR3(opcode, r_dest.GetReg(), r_base.GetReg(), r_index.GetReg());
} else {
@@ -1173,7 +1173,7 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
if (UNLIKELY(expected_scale == 0)) {
// This is a tertiary op (e.g. strb); it does not support scale.
- DCHECK_NE(EncodingMap[UNWIDE(opcode)].flags & IS_TERTIARY_OP, 0U);
+ DCHECK_NE(GetEncoder(UNWIDE(opcode))->flags & IS_TERTIARY_OP, 0U);
DCHECK_EQ(scale, 0);
store = NewLIR3(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg());
} else {
diff --git a/compiler/dex/reg_storage.h b/compiler/dex/reg_storage.h
index 94c051838b..6178bee570 100644
--- a/compiler/dex/reg_storage.h
+++ b/compiler/dex/reg_storage.h
@@ -258,13 +258,13 @@ class RegStorage {
// Create a stand-alone RegStorage from the low 32bit of 64bit float solo.
RegStorage GetLowFromFloatSolo64() const {
DCHECK(IsFloat() && Is64BitSolo());
- return RegStorage(k32BitSolo, ((reg_ & kRegNumMask)<<1) | kFloatingPoint);
+ return RegStorage(k32BitSolo, ((reg_ & kRegNumMask) << 1) | kFloatingPoint);
}
// Create a stand-alone RegStorage from the low 64bit of 128bit float solo.
RegStorage GetLowFromFloatSolo128() const {
DCHECK(IsFloat() && Is128BitSolo());
- return RegStorage(k64BitSolo, ((reg_ & kRegNumMask)<<1) | kFloatingPoint);
+ return RegStorage(k64BitSolo, ((reg_ & kRegNumMask) << 1) | kFloatingPoint);
}
// Retrieve the most significant register of a pair.
@@ -288,13 +288,13 @@ class RegStorage {
// Create a stand-alone RegStorage from the high 32bit of 64bit float solo.
RegStorage GetHighFromFloatSolo64() const {
DCHECK(IsFloat() && Is64BitSolo());
- return RegStorage(k32BitSolo, (((reg_ & kRegNumMask)<<1) +1) | kFloatingPoint);
+ return RegStorage(k32BitSolo, (((reg_ & kRegNumMask) << 1) +1) | kFloatingPoint);
}
// Create a stand-alone RegStorage from the high 64bit of 128bit float solo.
RegStorage GetHighFromFloatSolo128() const {
DCHECK(IsFloat() && Is128BitSolo());
- return RegStorage(k64BitSolo, (((reg_ & kRegNumMask)<<1) +1) | kFloatingPoint);
+ return RegStorage(k64BitSolo, (((reg_ & kRegNumMask) << 1) +1) | kFloatingPoint);
}
void SetHighReg(int reg) {
@@ -359,6 +359,11 @@ class RegStorage {
return RegStorage(k64BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
}
+ // Create a floating point 128-bit solo.
+ static RegStorage FloatSolo128(int reg_num) {
+ return RegStorage(k128BitSolo, (reg_num & kRegNumMask) | kFloatingPoint);
+ }
+
static constexpr RegStorage InvalidReg() {
return RegStorage(kInvalid);
}
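FloatSolo128 simply extends the numbering scheme already visible above: a wide float solo N aliases the two narrower solos 2N and 2N + 1, which is the arithmetic GetLowFromFloatSolo64() and GetHighFromFloatSolo64() perform, matching ARM32 VFP where dN overlays s(2N) and s(2N + 1). In plain integers:

// reg_alias_sketch.cc: the shift-and-add behind the Get{Low,High}FromFloatSolo helpers.
#include <cassert>

int LowHalf(int n)  { return (n << 1); }      // e.g. d2 -> s4
int HighHalf(int n) { return (n << 1) + 1; }  // e.g. d2 -> s5

int main() {
  assert(LowHalf(2) == 4 && HighHalf(2) == 5);
  assert(LowHalf(0) == 0 && HighHalf(0) == 1);  // d0 overlays s0, s1.
  return 0;
}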