-rw-r--r--  arch/powerpc/include/asm/spinlock.h | 36
1 file changed, 25 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index 0d04d468f660..e9a960e28f3c 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -103,11 +103,9 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
/* We only yield to the hypervisor if we are in shared processor mode */
void splpar_spin_yield(arch_spinlock_t *lock);
void splpar_rw_yield(arch_rwlock_t *lock);
-#define __spin_yield(x) splpar_spin_yield(x)
-#define __rw_yield(x) splpar_rw_yield(x)
#else /* SPLPAR */
-#define __spin_yield(x) barrier()
-#define __rw_yield(x) barrier()
+static inline void splpar_spin_yield(arch_spinlock_t *lock) {}
+static inline void splpar_rw_yield(arch_rwlock_t *lock) {}
#endif

static inline bool is_shared_processor(void)
@@ -124,6 +122,22 @@ static inline bool is_shared_processor(void)
#endif
}

+static inline void spin_yield(arch_spinlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_spin_yield(lock);
+ else
+ barrier();
+}
+
+static inline void rw_yield(arch_rwlock_t *lock)
+{
+ if (is_shared_processor())
+ splpar_rw_yield(lock);
+ else
+ barrier();
+}
+
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
while (1) {
@@ -132,7 +146,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
do {
HMT_low();
if (is_shared_processor())
- __spin_yield(lock);
+ splpar_spin_yield(lock);
} while (unlikely(lock->slock != 0));
HMT_medium();
}
@@ -151,7 +165,7 @@ void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
do {
HMT_low();
if (is_shared_processor())
- __spin_yield(lock);
+ splpar_spin_yield(lock);
} while (unlikely(lock->slock != 0));
HMT_medium();
local_irq_restore(flags_dis);
@@ -241,7 +255,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
do {
HMT_low();
if (is_shared_processor())
- __rw_yield(rw);
+ splpar_rw_yield(rw);
} while (unlikely(rw->lock < 0));
HMT_medium();
}
@@ -255,7 +269,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
do {
HMT_low();
if (is_shared_processor())
- __rw_yield(rw);
+ splpar_rw_yield(rw);
} while (unlikely(rw->lock != 0));
HMT_medium();
}
@@ -295,9 +309,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
rw->lock = 0;
}

-#define arch_spin_relax(lock) __spin_yield(lock)
-#define arch_read_relax(lock) __rw_yield(lock)
-#define arch_write_relax(lock) __rw_yield(lock)
+#define arch_spin_relax(lock) spin_yield(lock)
+#define arch_read_relax(lock) rw_yield(lock)
+#define arch_write_relax(lock) rw_yield(lock)

/* See include/linux/spinlock.h */
#define smp_mb__after_spinlock() smp_mb()
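
For readers outside the kernel tree, here is a minimal user-space C sketch of the control flow this patch introduces. It is illustration only, not kernel code: is_shared_processor() and splpar_spin_yield() below are hypothetical stubs standing in for the real CONFIG_PPC_SPLPAR facilities (which consult the lppaca and confer cycles to the lock holder via the hypervisor). The point it demonstrates is that the shared-processor check now lives inside the spin_yield()/rw_yield() wrappers, so the arch_*_relax() macros no longer need to expand to a bare __spin_yield()/__rw_yield().

/*
 * Minimal user-space model of the patched helpers (illustration only;
 * not kernel code). is_shared_processor() and splpar_spin_yield() are
 * hypothetical stand-ins for the real PPC_SPLPAR facilities.
 */
#include <stdio.h>
#include <stdbool.h>

typedef struct { unsigned int slock; } arch_spinlock_t;

/* Compiler barrier, as the kernel's barrier() expands to. */
#define barrier() asm volatile("" ::: "memory")

/* Stub: the kernel reads shared-processor state from the lppaca. */
static bool is_shared_processor(void)
{
	return true;
}

/* Stub: the kernel yields the virtual CPU to the lock holder here. */
static void splpar_spin_yield(arch_spinlock_t *lock)
{
	(void)lock;
	printf("yield to hypervisor\n");
}

/*
 * The new wrapper from this patch: the shared-processor check is made
 * once here, so arch_spin_relax() callers pick it up automatically.
 */
static void spin_yield(arch_spinlock_t *lock)
{
	if (is_shared_processor())
		splpar_spin_yield(lock);
	else
		barrier();
}

int main(void)
{
	arch_spinlock_t lock = { 0 };
	spin_yield(&lock);	/* prints "yield to hypervisor" with the stub above */
	return 0;
}

Splitting the SPLPAR-only yield (splpar_spin_yield) from the checked wrapper (spin_yield) also lets the slowpath loops in arch_spin_lock() and the rwlock paths, which already test is_shared_processor() themselves, call the bare splpar_* helper directly and avoid a redundant check.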