author:    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-11-26 09:23:57 +1100
committer: Benjamin Herrenschmidt <benh@kernel.crashing.org>  2012-11-26 09:23:57 +1100
commit:    2a859ab07b6ab66f4134c4fffc341398bd3d328c
tree:      c5e7eaf3bffbc18feb326940e39794328d98dc07 /include/linux/percpu-rwsem.h
parent:    cedddd812a79a4fda3885a15711aee3de78c4a24
parent:    e716e014384688d1a50d1aa5213ee74748c6d4e0
Merge branch 'merge' into next
Merge my own merge branch to get various fixes from there and upstream, especially the hvc console tty refcounting fixes, without which testing is quite a bit harder...
Diffstat (limited to 'include/linux/percpu-rwsem.h')
-rw-r--r--  include/linux/percpu-rwsem.h | 28
1 file changed, 11 insertions(+), 17 deletions(-)
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index cf80f7e5277f..250a4acddb2b 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -12,34 +12,27 @@ struct percpu_rw_semaphore {
 	struct mutex mtx;
 };
 
+#define light_mb() barrier()
+#define heavy_mb() synchronize_sched()
+
 static inline void percpu_down_read(struct percpu_rw_semaphore *p)
 {
-	rcu_read_lock();
+	rcu_read_lock_sched();
 	if (unlikely(p->locked)) {
-		rcu_read_unlock();
+		rcu_read_unlock_sched();
 		mutex_lock(&p->mtx);
 		this_cpu_inc(*p->counters);
 		mutex_unlock(&p->mtx);
 		return;
 	}
 	this_cpu_inc(*p->counters);
-	rcu_read_unlock();
+	rcu_read_unlock_sched();
+	light_mb(); /* A, between read of p->locked and read of data, paired with D */
 }
 
 static inline void percpu_up_read(struct percpu_rw_semaphore *p)
 {
-	/*
-	 * On X86, write operation in this_cpu_dec serves as a memory unlock
-	 * barrier (i.e. memory accesses may be moved before the write, but
-	 * no memory accesses are moved past the write).
-	 * On other architectures this may not be the case, so we need smp_mb()
-	 * there.
-	 */
-#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
-	barrier();
-#else
-	smp_mb();
-#endif
+	light_mb(); /* B, between read of the data and write to p->counter, paired with C */
 	this_cpu_dec(*p->counters);
 }
 
@@ -58,14 +51,15 @@ static inline void percpu_down_write(struct percpu_rw_semaphore *p)
 {
 	mutex_lock(&p->mtx);
 	p->locked = true;
-	synchronize_rcu();
+	synchronize_sched(); /* make sure that all readers exit the rcu_read_lock_sched region */
 	while (__percpu_count(p->counters))
 		msleep(1);
-	smp_rmb(); /* paired with smp_mb() in percpu_sem_up_read() */
+	heavy_mb(); /* C, between read of p->counter and write to data, paired with B */
 }
 
 static inline void percpu_up_write(struct percpu_rw_semaphore *p)
 {
+	heavy_mb(); /* D, between write to data and write to p->locked, paired with A */
 	p->locked = false;
 	mutex_unlock(&p->mtx);
 }
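
For readers tracing the barrier pairing in the comments above (A pairs with D, B pairs with C), a minimal usage sketch may help. The sketch below is not part of this commit: my_sem, my_data, reader() and writer() are invented names, and the semaphore is assumed to have been initialized elsewhere (the header's init/free helpers are not shown in these hunks).

/* Hypothetical usage sketch, not from this commit. */
#include <linux/percpu-rwsem.h>

static struct percpu_rw_semaphore my_sem;	/* assumed initialized elsewhere */
static int my_data;				/* protected by my_sem */

static int reader(void)
{
	int v;

	percpu_down_read(&my_sem);	/* fast path: per-cpu increment, no shared lock */
	v = my_data;			/* barrier A orders this read after the read of p->locked */
	percpu_up_read(&my_sem);	/* barrier B orders the data read before the decrement */
	return v;
}

static void writer(int v)
{
	percpu_down_write(&my_sem);	/* waits for all per-cpu reader counts to drain */
	my_data = v;			/* barrier C orders this write after the counter reads */
	percpu_up_write(&my_sem);	/* barrier D publishes the data before clearing p->locked */
}

The asymmetry is the point of the patch: readers pay only a compiler barrier (light_mb()), while the writer pays a full synchronize_sched() (heavy_mb()) on both sides, keeping the read-side fast path cheap.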