summaryrefslogtreecommitdiffstats
path: root/compiler/dex/quick/arm/call_arm.cc
diff options
context:
space:
mode:
authorHiroshi Yamauchi <yamauchi@google.com>2015-02-09 17:11:42 -0800
committerHiroshi Yamauchi <yamauchi@google.com>2015-03-03 17:33:18 -0800
commite15ea086439b41a805d164d2beb07b4ba96aaa97 (patch)
tree465ee3780acd8b7cb35c8a7f42a1f3c5df3d26ec /compiler/dex/quick/arm/call_arm.cc
parent0b25c71ac93fb10c484dbacb9e23db505a8e2353 (diff)
downloadart-e15ea086439b41a805d164d2beb07b4ba96aaa97.tar.gz
art-e15ea086439b41a805d164d2beb07b4ba96aaa97.tar.bz2
art-e15ea086439b41a805d164d2beb07b4ba96aaa97.zip
Reserve bits in the lock word for read barriers.
This prepares for the CC collector to use the standard object header model by storing the read barrier state in the lock word. Bug: 19355854 Bug: 12687968 Change-Id: Ia7585662dd2cebf0479a3e74f734afe5059fb70f
Diffstat (limited to 'compiler/dex/quick/arm/call_arm.cc')
-rw-r--r--compiler/dex/quick/arm/call_arm.cc103
1 file changed, 80 insertions, 23 deletions
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 9cf005bc48..1a9dbeae0f 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -161,7 +161,11 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
MarkPossibleNullPointerException(opt_flags);
- LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
+ // Zero out the read barrier bits.
+ OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
+ LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r3, 0, NULL);
+ // r1 is zero except for the rb bits here. Copy the read barrier bits into r2.
+ OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);
@@ -189,7 +193,14 @@ void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
MarkPossibleNullPointerException(opt_flags);
- OpRegImm(kOpCmp, rs_r1, 0);
+ // Zero out the read barrier bits.
+ OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
+ // r1 will be zero except for the rb bits if the following
+ // cmp-and-branch branches to eq where r2 will be used. Copy the
+ // read barrier bits into r2.
+ OpRegRegReg(kOpOr, rs_r2, rs_r2, rs_r1);
+ OpRegImm(kOpCmp, rs_r3, 0);
+
LIR* it = OpIT(kCondEq, "");
NewLIR4(kThumb2Strex/*eq*/, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
mirror::Object::MonitorOffset().Int32Value() >> 2);
@@ -228,14 +239,28 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
}
}
- Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+ if (!kUseReadBarrier) {
+ Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock
+ } else {
+ NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
+ mirror::Object::MonitorOffset().Int32Value() >> 2);
+ }
MarkPossibleNullPointerException(opt_flags);
- LoadConstantNoClobber(rs_r3, 0);
- LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
+ // Zero out the read barrier bits.
+ OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
+ // Zero out except the read barrier bits.
+ OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
+ LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r3, rs_r2, NULL);
GenMemBarrier(kAnyStore);
- Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
- LIR* unlock_success_branch = OpUnconditionalBranch(NULL);
-
+ LIR* unlock_success_branch;
+ if (!kUseReadBarrier) {
+ Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+ unlock_success_branch = OpUnconditionalBranch(NULL);
+ } else {
+ NewLIR4(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
+ mirror::Object::MonitorOffset().Int32Value() >> 2);
+ unlock_success_branch = OpCmpImmBranch(kCondEq, rs_r2, 0, NULL);
+ }
LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
slow_unlock_branch->target = slow_path_target;
if (null_check_branch != nullptr) {
@@ -253,25 +278,57 @@ void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
} else {
// Explicit null-check as slow-path is entered using an IT.
GenNullCheck(rs_r0, opt_flags);
- Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock
+ if (!kUseReadBarrier) {
+ Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1); // Get lock
+ } else {
+ // If we use read barriers, we need to use atomic instructions.
+ NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
+ mirror::Object::MonitorOffset().Int32Value() >> 2);
+ }
MarkPossibleNullPointerException(opt_flags);
Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
- LoadConstantNoClobber(rs_r3, 0);
+ // Zero out the read barrier bits.
+ OpRegRegImm(kOpAnd, rs_r3, rs_r1, LockWord::kReadBarrierStateMaskShiftedToggled);
+ // Zero out except the read barrier bits.
+ OpRegRegImm(kOpAnd, rs_r1, rs_r1, LockWord::kReadBarrierStateMaskShifted);
// Is lock unheld on lock or held by us (==thread_id) on unlock?
- OpRegReg(kOpCmp, rs_r1, rs_r2);
-
- LIR* it = OpIT(kCondEq, "EE");
- if (GenMemBarrier(kAnyStore)) {
- UpdateIT(it, "TEE");
+ OpRegReg(kOpCmp, rs_r3, rs_r2);
+ if (!kUseReadBarrier) {
+ LIR* it = OpIT(kCondEq, "EE");
+ if (GenMemBarrier(kAnyStore)) {
+ UpdateIT(it, "TEE");
+ }
+ Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
+ rs_rARM_LR);
+ ClobberCallerSave();
+ LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
+ OpEndIT(it);
+ MarkSafepointPC(call_inst);
+ } else {
+ // If we use read barriers, we need to use atomic instructions.
+ LIR* it = OpIT(kCondEq, "");
+ if (GenMemBarrier(kAnyStore)) {
+ UpdateIT(it, "T");
+ }
+ NewLIR4/*eq*/(kThumb2Strex, rs_r2.GetReg(), rs_r1.GetReg(), rs_r0.GetReg(),
+ mirror::Object::MonitorOffset().Int32Value() >> 2);
+ OpEndIT(it);
+ // Since we know r2 wasn't zero before the above it instruction,
+ // if r2 is zero here, we know r3 was equal to r2 and the strex
+ // succeeded (we're done). Otherwise (either r3 wasn't equal to r2
+ // or the strex failed), call the entrypoint.
+ OpRegImm(kOpCmp, rs_r2, 0);
+ LIR* it2 = OpIT(kCondNe, "T");
+ // Go expensive route - UnlockObjectFromCode(obj);
+ LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
+ rs_rARM_LR);
+ ClobberCallerSave();
+ LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
+ OpEndIT(it2);
+ MarkSafepointPC(call_inst);
}
- Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
- // Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
- rs_rARM_LR);
- ClobberCallerSave();
- LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
- OpEndIT(it);
- MarkSafepointPC(call_inst);
}
}