author     Serban Constantinescu <serban.constantinescu@arm.com>  2015-02-22 20:51:33 +0000
committer  Serban Constantinescu <serban.constantinescu@arm.com>  2015-03-02 14:16:56 +0000
commit     579885a26d761f5ba9550f2a1cd7f0f598c2e1e3
tree       58d144157b7a24bbdf7f8892631a15abeefa2c9f  /compiler/optimizing/codegen_test.cc
parent     2eb5168bd9e43b80452eaee5be32c063e124886e
Opt Compiler: ARM64: Enable explicit memory barriers over acquire/release
Implement remaining explicit memory barrier code paths and temporarily
enable the use of explicit memory barriers for testing.

This CL also enables the use of instruction set features in the ARM64
backend. kUseAcquireRelease has been replaced with PreferAcquireRelease(),
which for now is statically set to false (prefer explicit memory barriers).

Please note that we still prefer acquire-release for the ARM64 Optimizing
Compiler, but we would like to exercise the explicit memory barrier code
path too.

Change-Id: I84e047ecd43b6fbefc5b82cf532e3f5c59076458
Signed-off-by: Serban Constantinescu <serban.constantinescu@arm.com>
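To make the tradeoff concrete, the sketch below contrasts the two lowering
strategies for a volatile load on ARM64. It is illustrative only: it prints
pseudo-assembly instead of going through ART's real assembler layer, and the
function name is invented for the example.

#include <iostream>

// Illustrative sketch, not ART's actual lowering code: a volatile load
// can be compiled either to a load-acquire instruction or to a plain
// load followed by an explicit memory barrier.
void GenerateVolatileLoad(bool prefer_acquire_release) {
  if (prefer_acquire_release) {
    // Acquire path: the ordering guarantee is built into the instruction.
    std::cout << "ldar x0, [x1]\n";
  } else {
    // Explicit-barrier path (the one this CL exercises): a plain load
    // ordered against later memory accesses by a dmb.
    std::cout << "ldr  x0, [x1]\n"
              << "dmb  ishld\n";
  }
}

int main() {
  // PreferAcquireRelease() is statically false for now, so the
  // explicit-barrier sequence is the one actually emitted.
  GenerateVolatileLoad(false);
  return 0;
}

Both sequences give acquire semantics on ARMv8; keeping both paths selectable
through the feature query is what lets the same tests cover the
explicit-barrier variant.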
Diffstat (limited to 'compiler/optimizing/codegen_test.cc')
 compiler/optimizing/codegen_test.cc | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index e0e0b4c3e8..868fc5b867 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -18,6 +18,7 @@
 
 #include "arch/instruction_set.h"
 #include "arch/arm/instruction_set_features_arm.h"
+#include "arch/arm64/instruction_set_features_arm64.h"
 #include "base/macros.h"
 #include "builder.h"
 #include "code_generator_arm.h"
@@ -115,9 +116,9 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
     Run(allocator, codegenX86, has_result, expected);
   }
 
-  std::unique_ptr<const ArmInstructionSetFeatures> features(
+  std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
       ArmInstructionSetFeatures::FromCppDefines());
-  TestCodeGeneratorARM codegenARM(graph, *features.get(), compiler_options);
+  TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
   codegenARM.CompileBaseline(&allocator, true);
   if (kRuntimeISA == kArm || kRuntimeISA == kThumb2) {
     Run(allocator, codegenARM, has_result, expected);
@@ -129,7 +130,9 @@ static void RunCodeBaseline(HGraph* graph, bool has_result, Expected expected) {
     Run(allocator, codegenX86_64, has_result, expected);
   }
 
-  arm64::CodeGeneratorARM64 codegenARM64(graph, compiler_options);
+  std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
+      Arm64InstructionSetFeatures::FromCppDefines());
+  arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
   codegenARM64.CompileBaseline(&allocator, true);
   if (kRuntimeISA == kArm64) {
     Run(allocator, codegenARM64, has_result, expected);
@@ -166,7 +169,9 @@ static void RunCodeOptimized(HGraph* graph,
                                     compiler_options);
     RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kArm64) {
-    arm64::CodeGeneratorARM64 codegenARM64(graph, compiler_options);
+    arm64::CodeGeneratorARM64 codegenARM64(graph,
+                                           *Arm64InstructionSetFeatures::FromCppDefines(),
+                                           compiler_options);
     RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
   } else if (kRuntimeISA == kX86) {
     x86::CodeGeneratorX86 codegenX86(graph, compiler_options);
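For reference, the feature query the commit message describes can be pictured
roughly as below. This is a sketch assuming only what the message states; the
actual declarations live in arch/arm64/instruction_set_features_arm64.h and
may differ in detail.

// Rough sketch based on the commit message, not the real header.
class Arm64InstructionSetFeatures {
 public:
  // Builds a feature set from compile-time (C preprocessor) defines;
  // this is what the tests above call to construct features_arm64.
  static const Arm64InstructionSetFeatures* FromCppDefines();

  // Replaces the old kUseAcquireRelease constant. Statically false for
  // now, so code generation prefers explicit memory barriers and that
  // path gets test coverage.
  bool PreferAcquireRelease() const { return false; }
};

Threading a features object through the code generator's constructor, rather
than consulting a global constant like kUseAcquireRelease, is what the test
changes above set up: each ARM64 code generator instance now carries the
instruction set features it should compile for.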