summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndreas Gampe <agampe@google.com>2015-01-20 23:44:21 +0000
committerGerrit Code Review <noreply-gerritcodereview@google.com>2015-01-20 23:44:21 +0000
commite355ab6374e546bcd388a7d54b67d3df493fdadb (patch)
tree34789eac002e806be3796ab91cc62a48d1cf130a
parentc9aa8f88b7e710e90ac161654f1f5052aac44823 (diff)
parentf681570077563bb529a30f9e7c572b837cecfb83 (diff)
downloadandroid_art-e355ab6374e546bcd388a7d54b67d3df493fdadb.tar.gz
android_art-e355ab6374e546bcd388a7d54b67d3df493fdadb.tar.bz2
android_art-e355ab6374e546bcd388a7d54b67d3df493fdadb.zip
Merge "ART: Make some helpers non-virtual in Mir2Lir"
-rw-r--r--compiler/dex/quick/arm64/codegen_arm64.h7
-rw-r--r--compiler/dex/quick/arm64/utility_arm64.cc44
-rwxr-xr-xcompiler/dex/quick/gen_invoke.cc4
-rw-r--r--compiler/dex/quick/mir_to_lir.h20
4 files changed, 26 insertions, 49 deletions
diff --git a/compiler/dex/quick/arm64/codegen_arm64.h b/compiler/dex/quick/arm64/codegen_arm64.h
index 55866e2eb8..c68b1d0a37 100644
--- a/compiler/dex/quick/arm64/codegen_arm64.h
+++ b/compiler/dex/quick/arm64/codegen_arm64.h
@@ -66,21 +66,14 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
RegStorage LoadHelper(QuickEntrypointEnum trampoline) OVERRIDE;
LIR* LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_dest,
OpSize size, VolatileKind is_volatile) OVERRIDE;
- LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile) OVERRIDE;
LIR* LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale,
OpSize size) OVERRIDE;
- LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest, int scale)
- OVERRIDE;
LIR* LoadConstantNoClobber(RegStorage r_dest, int value) OVERRIDE;
LIR* LoadConstantWide(RegStorage r_dest, int64_t value) OVERRIDE;
LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
VolatileKind is_volatile) OVERRIDE;
- LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, VolatileKind is_volatile)
- OVERRIDE;
LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
OpSize size) OVERRIDE;
- LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) OVERRIDE;
/// @copydoc Mir2Lir::UnconditionallyMarkGCCard(RegStorage)
void UnconditionallyMarkGCCard(RegStorage tgt_addr_reg) OVERRIDE;
diff --git a/compiler/dex/quick/arm64/utility_arm64.cc b/compiler/dex/quick/arm64/utility_arm64.cc
index 78a6df8a1c..1c29ab8bcb 100644
--- a/compiler/dex/quick/arm64/utility_arm64.cc
+++ b/compiler/dex/quick/arm64/utility_arm64.cc
@@ -1062,9 +1062,11 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
opcode = WIDE(kA64Ldr4rXxG);
expected_scale = 3;
break;
- case kSingle: // Intentional fall-through.
- case k32: // Intentional fall-through.
case kReference:
+ r_dest = As32BitReg(r_dest);
+ FALLTHROUGH_INTENDED;
+ case kSingle: // Intentional fall-through.
+ case k32:
r_dest = Check32BitReg(r_dest);
opcode = kA64Ldr4rXxG;
expected_scale = 2;
@@ -1105,11 +1107,6 @@ LIR* Arm64Mir2Lir::LoadBaseIndexed(RegStorage r_base, RegStorage r_index, RegSto
return load;
}
-LIR* Arm64Mir2Lir::LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
- int scale) {
- return LoadBaseIndexed(r_base, r_index, As32BitReg(r_dest), scale, kReference);
-}
-
LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale, OpSize size) {
LIR* store;
@@ -1150,9 +1147,11 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
opcode = WIDE(kA64Str4rXxG);
expected_scale = 3;
break;
- case kSingle: // Intentional fall-trough.
- case k32: // Intentional fall-trough.
case kReference:
+ r_src = As32BitReg(r_src);
+ FALLTHROUGH_INTENDED;
+      case kSingle:  // Intentional fall-through.
+ case k32:
r_src = Check32BitReg(r_src);
opcode = kA64Str4rXxG;
expected_scale = 2;
@@ -1185,11 +1184,6 @@ LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegSt
return store;
}
-LIR* Arm64Mir2Lir::StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
- int scale) {
- return StoreBaseIndexed(r_base, r_index, As32BitReg(r_src), scale, kReference);
-}
-
/*
* Load value from base + displacement. Optionally perform null check
* on base (which must have an associated s_reg and MIR). If not
@@ -1217,9 +1211,11 @@ LIR* Arm64Mir2Lir::LoadBaseDispBody(RegStorage r_base, int displacement, RegStor
alt_opcode = WIDE(kA64Ldur3rXd);
}
break;
- case kSingle: // Intentional fall-through.
- case k32: // Intentional fall-trough.
case kReference:
+ r_dest = As32BitReg(r_dest);
+ FALLTHROUGH_INTENDED;
+ case kSingle: // Intentional fall-through.
+ case k32:
r_dest = Check32BitReg(r_dest);
scale = 2;
if (r_dest.IsFloat()) {
@@ -1287,11 +1283,6 @@ LIR* Arm64Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage
return load;
}
-LIR* Arm64Mir2Lir::LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
- VolatileKind is_volatile) {
- return LoadBaseDisp(r_base, displacement, As32BitReg(r_dest), kReference, is_volatile);
-}
-
LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src,
OpSize size) {
LIR* store = NULL;
@@ -1314,9 +1305,11 @@ LIR* Arm64Mir2Lir::StoreBaseDispBody(RegStorage r_base, int displacement, RegSto
alt_opcode = WIDE(kA64Stur3rXd);
}
break;
- case kSingle: // Intentional fall-through.
- case k32: // Intentional fall-trough.
case kReference:
+ r_src = As32BitReg(r_src);
+ FALLTHROUGH_INTENDED;
+ case kSingle: // Intentional fall-through.
+ case k32:
r_src = Check32BitReg(r_src);
scale = 2;
if (r_src.IsFloat()) {
@@ -1385,11 +1378,6 @@ LIR* Arm64Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage
return store;
}
-LIR* Arm64Mir2Lir::StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
- VolatileKind is_volatile) {
- return StoreBaseDisp(r_base, displacement, As32BitReg(r_src), kReference, is_volatile);
-}
-
LIR* Arm64Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) {
UNUSED(r_dest, r_src);
LOG(FATAL) << "Unexpected use of OpFpRegCopy for Arm64";
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 73629e8b7c..2a6dfefe22 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -918,8 +918,8 @@ bool Mir2Lir::GenInlinedReferenceGetReferent(CallInfo* info) {
RegStorage reg_slow_path = AllocTemp();
RegStorage reg_disabled = AllocTemp();
- Load8Disp(reg_class, slow_path_flag_offset, reg_slow_path);
- Load8Disp(reg_class, disable_flag_offset, reg_disabled);
+ LoadBaseDisp(reg_class, slow_path_flag_offset, reg_slow_path, kSignedByte, kNotVolatile);
+ LoadBaseDisp(reg_class, disable_flag_offset, reg_disabled, kSignedByte, kNotVolatile);
FreeTemp(reg_class);
LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
FreeTemp(reg_disabled);
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index c3e9bb5964..fabf941e4f 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -959,24 +959,20 @@ class Mir2Lir : public Backend {
void LoadCurrMethodDirect(RegStorage r_tgt);
virtual LIR* LoadConstant(RegStorage r_dest, int value);
// Natural word size.
- virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
}
- // Load 8 bits, regardless of target.
- virtual LIR* Load8Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
- return LoadBaseDisp(r_base, displacement, r_dest, kSignedByte, kNotVolatile);
- }
// Load 32 bits, regardless of target.
- virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
+ LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
}
// Load a reference at base + displacement and decompress into register.
- virtual LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
+ LIR* LoadRefDisp(RegStorage r_base, int displacement, RegStorage r_dest,
VolatileKind is_volatile) {
return LoadBaseDisp(r_base, displacement, r_dest, kReference, is_volatile);
}
// Load a reference at base + index and decompress into register.
- virtual LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
+ LIR* LoadRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_dest,
int scale) {
return LoadBaseIndexed(r_base, r_index, r_dest, scale, kReference);
}
@@ -993,21 +989,21 @@ class Mir2Lir : public Backend {
// Load Dalvik value with 64-bit memory storage.
virtual void LoadValueDirectWideFixed(RegLocation rl_src, RegStorage r_dest);
// Store an item of natural word size.
- virtual LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
+ LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) {
return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile);
}
// Store an uncompressed reference into a compressed 32-bit container.
- virtual LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
+ LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src,
VolatileKind is_volatile) {
return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile);
}
// Store an uncompressed reference into a compressed 32-bit container by index.
- virtual LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
+ LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
int scale) {
return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference);
}
// Store 32 bits, regardless of target.
- virtual LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
+ LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) {
return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile);
}