author      Maja Gagic <maja.gagic@imgtec.com>          2015-02-24 16:55:04 +0100
committer   Andreas Gampe <agampe@google.com>           2015-03-06 13:05:47 -0800
commit      6ea651f0f4c7de4580beb2e887d86802c1ae0738 (patch)
tree        fd97dcbd7301892cb785ca34aee21ad86437c3b3 /compiler/jni
parent      0e242b5cad3c0b68b72f28c1e5fd3fdd4c05bfd8 (diff)
Initial support for quick compiler on MIPS64r6.
Change-Id: I6f43027b84e4a98ea320cddb972d9cf39bf7c4f8
Diffstat (limited to 'compiler/jni')
-rw-r--r--  compiler/jni/quick/calling_convention.cc                 |   5
-rw-r--r--  compiler/jni/quick/jni_compiler.cc                       |   4
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.cc   | 201
-rw-r--r--  compiler/jni/quick/mips64/calling_convention_mips64.h    |  91
4 files changed, 300 insertions(+), 1 deletion(-)
diff --git a/compiler/jni/quick/calling_convention.cc b/compiler/jni/quick/calling_convention.cc
index 95c2d402b3..d25acc74e2 100644
--- a/compiler/jni/quick/calling_convention.cc
+++ b/compiler/jni/quick/calling_convention.cc
@@ -20,6 +20,7 @@
#include "jni/quick/arm/calling_convention_arm.h"
#include "jni/quick/arm64/calling_convention_arm64.h"
#include "jni/quick/mips/calling_convention_mips.h"
+#include "jni/quick/mips64/calling_convention_mips64.h"
#include "jni/quick/x86/calling_convention_x86.h"
#include "jni/quick/x86_64/calling_convention_x86_64.h"
#include "utils.h"
@@ -38,6 +39,8 @@ ManagedRuntimeCallingConvention* ManagedRuntimeCallingConvention::Create(
return new arm64::Arm64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
+ case kMips64:
+ return new mips64::Mips64ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty);
case kX86_64:
@@ -111,6 +114,8 @@ JniCallingConvention* JniCallingConvention::Create(bool is_static, bool is_synch
return new arm64::Arm64JniCallingConvention(is_static, is_synchronized, shorty);
case kMips:
return new mips::MipsJniCallingConvention(is_static, is_synchronized, shorty);
+ case kMips64:
+ return new mips64::Mips64JniCallingConvention(is_static, is_synchronized, shorty);
case kX86:
return new x86::X86JniCallingConvention(is_static, is_synchronized, shorty);
case kX86_64:
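
Both factory switches above gain a kMips64 case that forwards to the new mips64 classes. A minimal caller sketch, assuming Create() also takes the target InstructionSet as its last parameter (as the hunk headers suggest) and that the headers included in this file are available; the variable names are illustrative only:

    // Hypothetical usage: pick the MIPS64 JNI convention for a static,
    // non-synchronized native method with shorty "DJ" (returns double, takes a long).
    JniCallingConvention* jni_conv =
        JniCallingConvention::Create(/* is_static */ true,
                                     /* is_synchronized */ false,
                                     "DJ",
                                     kMips64);
    // jni_conv now points at a mips64::Mips64JniCallingConvention.
    delete jni_conv;
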
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index ba73828931..2d9e03a718 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -36,6 +36,7 @@
#include "utils/arm/managed_register_arm.h"
#include "utils/arm64/managed_register_arm64.h"
#include "utils/mips/managed_register_mips.h"
+#include "utils/mips64/managed_register_mips64.h"
#include "utils/x86/managed_register_x86.h"
#include "thread.h"
@@ -329,7 +330,8 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
// 11. Save return value
FrameOffset return_save_location = main_jni_conv->ReturnValueSaveLocation();
if (main_jni_conv->SizeOfReturnValue() != 0 && !reference_return) {
- if (instruction_set == kMips && main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
+ if ((instruction_set == kMips || instruction_set == kMips64) &&
+ main_jni_conv->GetReturnType() == Primitive::kPrimDouble &&
return_save_location.Uint32Value() % 8 != 0) {
// Ensure doubles are 8-byte aligned for MIPS
return_save_location = FrameOffset(return_save_location.Uint32Value() + kMipsPointerSize);
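
The extra kMips64 check above reuses the MIPS rule that a double return value must be saved at an 8-byte aligned frame offset; when the computed location is only 4-byte aligned, one pointer-sized slot is skipped. A standalone sketch of that adjustment (hypothetical helper, not part of the patch):

    #include <cstdint>

    // Sketch: skip one 4-byte slot so a double lands on an 8-byte boundary,
    // mirroring the kMipsPointerSize bump in the hunk above.
    uint32_t AlignDoubleSaveOffset(uint32_t offset) {
      if (offset % 8 != 0) {
        offset += 4;  // e.g. 20 -> 24; an already-aligned 16 stays 16.
      }
      return offset;
    }
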
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.cc b/compiler/jni/quick/mips64/calling_convention_mips64.cc
new file mode 100644
index 0000000000..17325d6d49
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.cc
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "calling_convention_mips64.h"
+
+#include "base/logging.h"
+#include "handle_scope-inl.h"
+#include "utils/mips64/managed_register_mips64.h"
+
+namespace art {
+namespace mips64 {
+
+static const GpuRegister kGpuArgumentRegisters[] = {
+ A0, A1, A2, A3, A4, A5, A6, A7
+};
+
+static const FpuRegister kFpuArgumentRegisters[] = {
+ F12, F13, F14, F15, F16, F17, F18, F19
+};
+
+// Calling convention
+ManagedRegister Mips64ManagedRuntimeCallingConvention::InterproceduralScratchRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+ManagedRegister Mips64JniCallingConvention::InterproceduralScratchRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(T9);
+}
+
+static ManagedRegister ReturnRegisterForShorty(const char* shorty) {
+ if (shorty[0] == 'F' || shorty[0] == 'D') {
+ return Mips64ManagedRegister::FromFpuRegister(F0);
+ } else if (shorty[0] == 'V') {
+ return Mips64ManagedRegister::NoRegister();
+ } else {
+ return Mips64ManagedRegister::FromGpuRegister(V0);
+ }
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::ReturnRegister() {
+ return ReturnRegisterForShorty(GetShorty());
+}
+
+ManagedRegister Mips64JniCallingConvention::IntReturnRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(V0);
+}
+
+// Managed runtime calling convention
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::MethodRegister() {
+ return Mips64ManagedRegister::FromGpuRegister(A0);
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamInRegister() {
+ return false; // Everything moved to stack on entry.
+}
+
+bool Mips64ManagedRuntimeCallingConvention::IsCurrentParamOnStack() {
+ return true;
+}
+
+ManagedRegister Mips64ManagedRuntimeCallingConvention::CurrentParamRegister() {
+ LOG(FATAL) << "Should not reach here";
+ return ManagedRegister::NoRegister();
+}
+
+FrameOffset Mips64ManagedRuntimeCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ FrameOffset result =
+ FrameOffset(displacement_.Int32Value() + // displacement
+ sizeof(StackReference<mirror::ArtMethod>) + // Method ref
+ (itr_slots_ * sizeof(uint32_t))); // offset into in args
+ return result;
+}
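
In the managed runtime convention every argument is read from the stack: the offset is the frame displacement, plus the StackReference<ArtMethod> slot, plus one 32-bit slot per argument position consumed so far. A standalone sketch of that formula, with the 4-byte method slot stated as an assumption:

    #include <cstddef>
    #include <cstdint>

    // Sketch: managed-frame offset of the current argument.
    size_t ManagedParamOffsetSketch(size_t displacement, size_t itr_slots) {
      const size_t method_ref_size = 4;  // assumed sizeof(StackReference<mirror::ArtMethod>)
      return displacement + method_ref_size + itr_slots * sizeof(uint32_t);
    }
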
+
+const ManagedRegisterEntrySpills& Mips64ManagedRuntimeCallingConvention::EntrySpills() {
+ // We spill the argument registers on MIPS64 to free them up for scratch use;
+ // we then assume all arguments are on the stack.
+ if ((entry_spills_.size() == 0) && (NumArgs() > 0)) {
+ int reg_index = 1; // We start from A1; A0 holds the ArtMethod*.
+
+ // We need to choose the correct register size since the managed
+ // stack uses 32-bit stack slots.
+ ResetIterator(FrameOffset(0));
+ while (HasNext()) {
+ if (reg_index < 8) {
+ if (IsCurrentParamAFloatOrDouble()) { // FP regs.
+ FpuRegister arg = kFpuArgumentRegisters[reg_index];
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromFpuRegister(arg);
+ entry_spills_.push_back(reg, IsCurrentParamADouble() ? 8 : 4);
+ } else { // GP regs.
+ GpuRegister arg = kGpuArgumentRegisters[reg_index];
+ Mips64ManagedRegister reg = Mips64ManagedRegister::FromGpuRegister(arg);
+ entry_spills_.push_back(reg,
+ (IsCurrentParamALong() && (!IsCurrentParamAReference())) ? 8 : 4);
+ }
+ // e.g. A1, A2, F3, A4, F5, F6, A7
+ reg_index++;
+ }
+
+ Next();
+ }
+ }
+ return entry_spills_;
+}
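
EntrySpills() walks the arguments once and, for each of the first seven (A0 is taken by the ArtMethod*), picks the register at the same index from either the FPU bank (F12..F19) or the GPU bank (A0..A7), recording an 8-byte spill for doubles and non-reference longs and a 4-byte spill otherwise. A standalone sketch of that selection rule, with hypothetical names and plain types:

    // Sketch: bank, index, and spill size chosen for one argument, following the
    // rule in EntrySpills() above (register index 0 is A0, the ArtMethod*).
    struct SpillChoice {
      bool use_fpu_bank;  // true -> F12..F19, false -> A0..A7
      int reg_index;      // same index in either bank
      int spill_bytes;    // 8 for double / non-reference long, else 4
    };

    SpillChoice PickEntrySpill(int reg_index, bool is_fp, bool is_wide) {
      return SpillChoice{is_fp, reg_index, is_wide ? 8 : 4};
    }
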
+
+// JNI calling convention
+
+Mips64JniCallingConvention::Mips64JniCallingConvention(bool is_static, bool is_synchronized,
+ const char* shorty)
+ : JniCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S0));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S1));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S2));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S3));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S4));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S5));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S6));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S7));
+
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(GP));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(SP));
+ callee_save_regs_.push_back(Mips64ManagedRegister::FromGpuRegister(S8));
+}
+
+uint32_t Mips64JniCallingConvention::CoreSpillMask() const {
+ // Compute spill mask to agree with callee saves initialized in the constructor
+ uint32_t result = 0;
+ result = 1 << S0 | 1 << S1 | 1 << S2 | 1 << S3 | 1 << S4 | 1 << S5 | 1 << S6 |
+ 1 << S7 | 1 << GP | 1 << SP | 1 << S8;
+ return result;
+}
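
The spill mask is one bit per callee-save register number, so it must list exactly the registers pushed in the constructor; its population count equals callee_save_regs_.size(), 11 here. A generic sketch of how such a mask is assembled, with a hypothetical helper name:

    #include <cstdint>
    #include <vector>

    // Sketch: build a core spill mask from a list of register numbers.
    uint32_t BuildCoreSpillMask(const std::vector<int>& reg_numbers) {
      uint32_t mask = 0;
      for (int reg : reg_numbers) {
        mask |= 1u << reg;  // one bit per saved register, as in CoreSpillMask() above
      }
      return mask;
    }
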
+
+ManagedRegister Mips64JniCallingConvention::ReturnScratchRegister() const {
+ return Mips64ManagedRegister::FromGpuRegister(AT);
+}
+
+size_t Mips64JniCallingConvention::FrameSize() {
+ // Method* and callee-save area size, plus local reference segment state
+ size_t frame_data_size = sizeof(StackReference<mirror::ArtMethod>) +
+ CalleeSaveRegisters().size() * kFramePointerSize + sizeof(uint32_t);
+ // References plus 2 words for HandleScope header
+ size_t handle_scope_size = HandleScope::SizeOf(kFramePointerSize, ReferenceCount());
+ // Plus return value spill area size
+ return RoundUp(frame_data_size + handle_scope_size + SizeOfReturnValue(), kStackAlignment);
+}
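
FrameSize() adds up the method reference slot, the callee-save area, the 32-bit local reference segment state, the handle scope, and the return value spill area, then rounds the total up to the stack alignment. A worked sketch of that arithmetic with assumed constants (4-byte StackReference, the 11 callee saves from the constructor, 16-byte kStackAlignment):

    #include <cstddef>
    #include <cstdint>

    constexpr size_t RoundUpTo(size_t value, size_t alignment) {
      return (value + alignment - 1) / alignment * alignment;
    }

    // Sketch: frame size under the assumptions named above.
    size_t FrameSizeSketch(size_t handle_scope_size, size_t return_value_size) {
      const size_t method_ref = 4;                    // assumed StackReference size
      const size_t callee_save_area = 11 * 8;         // 11 registers * kFramePointerSize
      const size_t segment_state = sizeof(uint32_t);  // local reference segment state
      return RoundUpTo(method_ref + callee_save_area + segment_state +
                       handle_scope_size + return_value_size,
                       16);                           // assumed kStackAlignment
    }
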
+
+size_t Mips64JniCallingConvention::OutArgSize() {
+ return RoundUp(NumberOfOutgoingStackArgs() * kFramePointerSize, kStackAlignment);
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamInRegister() {
+ return itr_args_ < 8;
+}
+
+bool Mips64JniCallingConvention::IsCurrentParamOnStack() {
+ return !IsCurrentParamInRegister();
+}
+
+ManagedRegister Mips64JniCallingConvention::CurrentParamRegister() {
+ CHECK(IsCurrentParamInRegister());
+ if (IsCurrentParamAFloatOrDouble()) {
+ return Mips64ManagedRegister::FromFpuRegister(kFpuArgumentRegisters[itr_args_]);
+ } else {
+ return Mips64ManagedRegister::FromGpuRegister(kGpuArgumentRegisters[itr_args_]);
+ }
+}
+
+FrameOffset Mips64JniCallingConvention::CurrentParamStackOffset() {
+ CHECK(IsCurrentParamOnStack());
+ size_t offset = displacement_.Int32Value() - OutArgSize() + ((itr_args_ - 8) * kFramePointerSize);
+ CHECK_LT(offset, OutArgSize());
+ return FrameOffset(offset);
+}
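
For the JNI convention the first eight arguments travel in registers, so CurrentParamStackOffset() only ever sees argument index 8 and up: the offset is measured from the base of the caller's out-arg area (displacement_ minus OutArgSize()) and each further argument adds one 8-byte slot. A standalone sketch of the computation with hypothetical parameter names:

    #include <cstddef>

    // Sketch: stack offset of JNI argument number itr_args (>= 8), mirroring
    // CurrentParamStackOffset() above with kFramePointerSize == 8.
    size_t JniParamStackOffsetSketch(size_t displacement, size_t out_arg_size,
                                     size_t itr_args) {
      return displacement - out_arg_size + (itr_args - 8) * 8;
    }
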
+
+size_t Mips64JniCallingConvention::NumberOfOutgoingStackArgs() {
+ // All arguments, including the extra JNI arguments.
+ size_t all_args = NumArgs() + NumberOfExtraArgumentsForJni();
+
+ // Nothing on the stack unless there are more than 8 arguments
+ return (all_args > 8) ? all_args - 8 : 0;
+}
+} // namespace mips64
+} // namespace art
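
NumberOfOutgoingStackArgs() counts the managed arguments plus the implicit JNI arguments and keeps only those beyond the eight that ride in registers. A sketch under the assumption that the extra JNI arguments are JNIEnv* plus a jclass for static methods (2 extra when static, 1 otherwise):

    #include <cstddef>

    // Sketch: how many JNI call arguments spill to the outgoing stack area.
    size_t OutgoingStackArgsSketch(size_t num_managed_args, bool is_static) {
      const size_t extra_jni_args = is_static ? 2 : 1;  // assumed JNIEnv* (+ jclass)
      const size_t all_args = num_managed_args + extra_jni_args;
      return (all_args > 8) ? all_args - 8 : 0;  // first 8 go in A0..A7 / F12..F19
    }
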
diff --git a/compiler/jni/quick/mips64/calling_convention_mips64.h b/compiler/jni/quick/mips64/calling_convention_mips64.h
new file mode 100644
index 0000000000..dc9273b92a
--- /dev/null
+++ b/compiler/jni/quick/mips64/calling_convention_mips64.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+#define ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_
+
+#include "jni/quick/calling_convention.h"
+
+namespace art {
+namespace mips64 {
+
+constexpr size_t kFramePointerSize = 8;
+
+class Mips64ManagedRuntimeCallingConvention FINAL : public ManagedRuntimeCallingConvention {
+ public:
+ Mips64ManagedRuntimeCallingConvention(bool is_static, bool is_synchronized, const char* shorty)
+ : ManagedRuntimeCallingConvention(is_static, is_synchronized, shorty, kFramePointerSize) {}
+ ~Mips64ManagedRuntimeCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // Managed runtime calling convention
+ ManagedRegister MethodRegister() OVERRIDE;
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+ const ManagedRegisterEntrySpills& EntrySpills() OVERRIDE;
+
+ private:
+ ManagedRegisterEntrySpills entry_spills_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64ManagedRuntimeCallingConvention);
+};
+
+class Mips64JniCallingConvention FINAL : public JniCallingConvention {
+ public:
+ explicit Mips64JniCallingConvention(bool is_static, bool is_synchronized, const char* shorty);
+ ~Mips64JniCallingConvention() OVERRIDE {}
+ // Calling convention
+ ManagedRegister ReturnRegister() OVERRIDE;
+ ManagedRegister IntReturnRegister() OVERRIDE;
+ ManagedRegister InterproceduralScratchRegister() OVERRIDE;
+ // JNI calling convention
+ size_t FrameSize() OVERRIDE;
+ size_t OutArgSize() OVERRIDE;
+ const std::vector<ManagedRegister>& CalleeSaveRegisters() const OVERRIDE {
+ return callee_save_regs_;
+ }
+ ManagedRegister ReturnScratchRegister() const OVERRIDE;
+ uint32_t CoreSpillMask() const OVERRIDE;
+ uint32_t FpSpillMask() const OVERRIDE {
+ return 0; // Floats aren't spilled in JNI down call
+ }
+ bool IsCurrentParamInRegister() OVERRIDE;
+ bool IsCurrentParamOnStack() OVERRIDE;
+ ManagedRegister CurrentParamRegister() OVERRIDE;
+ FrameOffset CurrentParamStackOffset() OVERRIDE;
+
+ // Mips64 does not need to extend small return types.
+ bool RequiresSmallResultTypeExtension() const OVERRIDE {
+ return false;
+ }
+
+ protected:
+ size_t NumberOfOutgoingStackArgs() OVERRIDE;
+
+ private:
+ // TODO: these values aren't unique and can be shared amongst instances
+ std::vector<ManagedRegister> callee_save_regs_;
+
+ DISALLOW_COPY_AND_ASSIGN(Mips64JniCallingConvention);
+};
+
+} // namespace mips64
+} // namespace art
+
+#endif // ART_COMPILER_JNI_QUICK_MIPS64_CALLING_CONVENTION_MIPS64_H_