authorDmitry Petrochenko <dmitry.petrochenko@intel.com>2014-04-02 17:27:59 +0700
committerDmitry Petrochenko <dmitry.petrochenko@intel.com>2014-04-03 13:06:55 +0700
commit6a58cb16d803c9a7b3a75ccac8be19dd9d4e520d (patch)
treec142777f40178fd9b9090cd7316be694befb3f21 /compiler/dex/quick/local_optimizations.cc
parent8549cf9d83688f7decbbea2a8de761ce29e95f3c (diff)
art: Handle the x86_64 architecture the same as x86
This patch forces the FE/ME (frontend/middle-end) to treat x86_64 exactly like x86. The x86_64-specific logic will be revised later, once the assembly support is ready.
Change-Id: I4a92477a6eeaa9a11fd710d35c602d8d6f88cbb6
Signed-off-by: Dmitry Petrochenko <dmitry.petrochenko@intel.com>
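The change is mechanical: each `cu_->instruction_set == kX86` check in the local optimization passes gains an `|| cu_->instruction_set == kX86_64` clause, so x86_64 follows the existing x86 code paths. A minimal sketch of that idea, assuming a hypothetical `IsX86Family()` helper and a simplified `InstructionSet` enum (neither is introduced by this patch; the patch repeats the explicit comparison at each call site):

// Hypothetical sketch, not part of the ART codebase.
enum InstructionSet { kArm, kMips, kX86, kX86_64 };

// Returns true for any instruction set that should take the x86 code paths,
// capturing the "treat x86_64 exactly like x86" intent of this patch.
static inline bool IsX86Family(InstructionSet isa) {
  return isa == kX86 || isa == kX86_64;
}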
Diffstat (limited to 'compiler/dex/quick/local_optimizations.cc')
-rw-r--r--  compiler/dex/quick/local_optimizations.cc  |  8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 8f64408b46..4bdc9fa4bb 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -100,7 +100,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
}
int native_reg_id;
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
// If x86, location differs depending on whether memory/reg operation.
native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
} else {
@@ -121,7 +121,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
uint64_t stop_use_reg_mask;
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
stop_use_reg_mask = (IS_BRANCH | this_lir->u.m.use_mask) & ~ENCODE_MEM;
} else {
/*
@@ -241,7 +241,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
}
if (stop_here == true) {
- if (cu_->instruction_set == kX86) {
+ if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
// Prevent stores from being sunk between ops that generate ccodes and
// ops that use them.
uint64_t flags = GetTargetInstFlags(check_lir->opcode);
@@ -306,7 +306,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {
uint64_t stop_use_all_mask = this_lir->u.m.use_mask;
- if (cu_->instruction_set != kX86) {
+ if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
/*
* Branches for null/range checks are marked with the true resource
* bits, and loads to Dalvik registers, constant pools, and non-alias