author     Mark Mendell <mark.p.mendell@intel.com>    2015-01-13 17:32:55 -0500
committer  Mark Mendell <mark.p.mendell@intel.com>    2015-01-23 12:53:45 -0500
commit     3d2c8e74c27efee58e24ec31441124f3f21384b9 (patch)
tree       416a60f70414b026395e3660edeee5e1cb10b6f7 /compiler/jni
parent     d834380c94af85b498560f3b5feae21ef7fab1ed (diff)
ART: Implement X86 hard float (Quick/JNI/Baseline)
Use XMM0-XMM3 as parameter registers for float/double on X86. X86_64 already uses XMM0-XMM7 for parameters.

Change the 'hidden' argument register from XMM0 to XMM7 to avoid a conflict.

Add support for FPR save/restore in runtime/arch/x86.

Minimal support for the Optimizing baseline compiler.

Bump the version in runtime/oat.h because this is an ABI change.

Change-Id: Ia6fe150e8488b9e582b0178c0dda65fc81d5a8ba
Signed-off-by: Mark Mendell <mark.p.mendell@intel.com>
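For orientation, here is a small standalone sketch (plain C++, not ART code) of the parameter-register assignment this change introduces for X86 managed code: the first three non-floating-point arguments land in ECX, EDX and EBX (a long taking two of them for its low and high halves), the first four float/double arguments land in XMM0..XMM3, and everything else stays on the stack. The function name AssignParamRegisters and the shorty handling are invented for illustration, the iterator state is reduced to two counters, and the receiver of an instance method (which would take the first GPR slot) is ignored.

#include <cstdio>
#include <string>
#include <vector>

// Shorty characters: 'J' = long, 'F' = float, 'D' = double; anything else counts as a 32-bit GPR value.
std::vector<std::string> AssignParamRegisters(const std::string& shorty_args) {
  static const char* const kGprs[] = { "ECX", "EDX", "EBX" };  // mirrors the switch in CurrentParamRegister()
  std::vector<std::string> out;
  int gpr_count = 0;  // plays the role of gpr_arg_count_
  int fp_count = 0;   // plays the role of itr_float_and_doubles_
  for (char c : shorty_args) {
    if (c == 'F' || c == 'D') {
      // The first four float/double arguments ride in XMM0..XMM3, the rest on the stack.
      out.push_back(fp_count < 4 ? "XMM" + std::to_string(fp_count) : std::string("stack"));
      ++fp_count;
    } else if (c == 'J') {
      // A long occupies two GPRs when they are available: low half, then high half.
      std::string low  = gpr_count < 3 ? kGprs[gpr_count]     : "stack";
      std::string high = gpr_count < 2 ? kGprs[gpr_count + 1] : "stack";
      out.push_back(low + "/" + high);
      gpr_count += 2;  // simplified; the real code only counts registers actually handed out
    } else {
      out.push_back(gpr_count < 3 ? std::string(kGprs[gpr_count]) : std::string("stack"));
      ++gpr_count;
    }
  }
  return out;
}

int main() {
  // (int, long, float, double) -> ECX, EDX/EBX, XMM0, XMM1
  for (const std::string& reg : AssignParamRegisters("IJFD")) {
    std::printf("%s\n", reg.c_str());
  }
  return 0;
}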
Diffstat (limited to 'compiler/jni')
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.cc  | 59
-rw-r--r--  compiler/jni/quick/x86/calling_convention_x86.h   |  6
2 files changed, 54 insertions(+), 11 deletions(-)
diff --git a/compiler/jni/quick/x86/calling_convention_x86.cc b/compiler/jni/quick/x86/calling_convention_x86.cc
index a5686e1ac7..fc72e88c00 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.cc
+++ b/compiler/jni/quick/x86/calling_convention_x86.cc
@@ -77,12 +77,34 @@ bool X86ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
}
bool X86ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
- return true; // Everything is passed by stack
+ // We assume all parameters are on the stack; arguments arriving in registers are spilled as entry_spills.
+ return true;
}
ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamRegister() {
- LOG(FATAL) << "Should not reach here";
- return ManagedRegister::NoRegister();
+ ManagedRegister res = ManagedRegister::NoRegister();
+ if (!IsCurrentParamAFloatOrDouble()) {
+ switch (gpr_arg_count_) {
+ case 0: res = X86ManagedRegister::FromCpuRegister(ECX); break;
+ case 1: res = X86ManagedRegister::FromCpuRegister(EDX); break;
+ case 2: res = X86ManagedRegister::FromCpuRegister(EBX); break;
+ }
+ } else if (itr_float_and_doubles_ < 4) {
+ // The first four float/double parameters are passed in XMM0..XMM3.
+ res = X86ManagedRegister::FromXmmRegister(
+ static_cast<XmmRegister>(XMM0 + itr_float_and_doubles_));
+ }
+ return res;
+}
+
+ManagedRegister X86ManagedRuntimeCallingConvention::CurrentParamHighLongRegister() {
+ ManagedRegister res = ManagedRegister::NoRegister();
+ DCHECK(IsCurrentParamALong());
+ switch (gpr_arg_count_) {
+ case 0: res = X86ManagedRegister::FromCpuRegister(EDX); break;
+ case 1: res = X86ManagedRegister::FromCpuRegister(EBX); break;
+ }
+ return res;
}
FrameOffset X86ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
@@ -95,15 +117,32 @@ const ManagedRegisterEntrySpills& X86ManagedRuntimeCallingConvention::EntrySpills() {
// We spill the argument registers on X86 to free them up for scratch use; we then assume
// all arguments are on the stack.
if (entry_spills_.size() == 0) {
- size_t num_spills = NumArgs() + NumLongOrDoubleArgs();
- if (num_spills > 0) {
- entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(ECX));
- if (num_spills > 1) {
- entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(EDX));
- if (num_spills > 2) {
- entry_spills_.push_back(X86ManagedRegister::FromCpuRegister(EBX));
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ ManagedRegister in_reg = CurrentParamRegister();
+ if (!in_reg.IsNoRegister()) {
+ int32_t size = IsParamADouble(itr_args_) ? 8 : 4;
+ int32_t spill_offset = CurrentParamStackOffset().Uint32Value();
+ ManagedRegisterSpill spill(in_reg, size, spill_offset);
+ entry_spills_.push_back(spill);
+ if (IsCurrentParamALong() && !IsCurrentParamAReference()) { // Long.
+ // Special case: a long may need a second register for its high half.
+ in_reg = CurrentParamHighLongRegister();
+ if (!in_reg.IsNoRegister()) {
+ // We have to spill the second half of the long.
+ ManagedRegisterSpill spill2(in_reg, size, spill_offset + 4);
+ entry_spills_.push_back(spill2);
+ // Long was allocated in 2 registers.
+ gpr_arg_count_++;
+ }
+ }
+
+ // Keep track of the number of GPRs allocated.
+ if (!IsCurrentParamAFloatOrDouble()) {
+ gpr_arg_count_++;
}
}
+ Next();
}
}
return entry_spills_;
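
To make the new EntrySpills() logic concrete, here is a hand trace (not part of the commit, and ignoring the exact stack offsets) for a hypothetical static method whose arguments are (int, long, float):

- int: CurrentParamRegister() returns ECX; one 4-byte entry spill; gpr_arg_count_ becomes 1.
- long: the low half gets EDX and CurrentParamHighLongRegister() returns EBX for the high half; two 4-byte spills at consecutive offsets; gpr_arg_count_ becomes 3.
- float: CurrentParamRegister() returns XMM0 (itr_float_and_doubles_ is 0); one 4-byte spill.

A fourth integer argument would find all three GPRs used, get no register, and produce no entry spill, since it already arrives on the stack.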
diff --git a/compiler/jni/quick/x86/calling_convention_x86.h b/compiler/jni/quick/x86/calling_convention_x86.h
index 025eb6d40e..b1b3598a8e 100644
--- a/compiler/jni/quick/x86/calling_convention_x86.h
+++ b/compiler/jni/quick/x86/calling_convention_x86.h
@@ -28,7 +28,8 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
public:
explicit X86ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized,
const char* shorty)
- : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize),
+ gpr_arg_count_(0) {}
~X86ManagedRuntimeCallingConvention() OVERRIDE {}
// Calling convention
ManagedRegister ReturnRegister() OVERRIDE;
@@ -40,7 +41,10 @@ class X86ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
ManagedRegister CurrentParamRegister() OVERRIDE;
FrameOffset CurrentParamStackOffset() OVERRIDE;
const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
private:
+ int gpr_arg_count_;
+ ManagedRegister CurrentParamHighLongRegister();
ManagedRegisterEntrySpills entry_spills_;
DISALLOW_COPY_AND_ASSIGN(X86ManagedRuntimeCallingConvention);
};