author     Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2010-01-04 15:09:00 -0800
committer  Ingo Molnar <mingo@elte.hu>                    2010-01-13 09:06:01 +0100
commit     f96e9232e04856c781d4f71923a46dd3f7b429fa (patch)
tree       3c320a0a2bc03f54fea5c115b6c0fa1e4bcc7e27
parent     7284ce6c9f6153d1777df5f310c959724d1bd446 (diff)
rcu: Adjust force_quiescent_state() locking, step 1
This causes rnp->lock to be held on entry to force_quiescent_state()'s
switch statement.  This is a first step towards prohibiting starting
grace periods while force_quiescent_state() is executing, which will
reduce the number and complexity of races that force_quiescent_state()
is involved in.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <12626465501455-git-send-email->
Signed-off-by: Ingo Molnar <mingo@elte.hu>
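
For context, the locking pattern this step moves toward can be sketched
outside the kernel.  The following is an illustrative, minimal sketch
only, not the patched function itself: the names (fqs_state, node_lock,
fqs_sketch) are invented for the example, and a pthread mutex stands in
for the rnp->lock raw spinlock.  It shows the structure the diff below
introduces: the lock is held on entry to the state switch, and every
case, including the new default arm, releases it on its own exit path.

	/* Illustrative sketch only; all names and the pthread mutex are stand-ins. */
	#include <pthread.h>
	#include <stdio.h>

	enum fqs_state { FQS_IDLE, FQS_SAVE_DYNTICK, FQS_FORCE };

	static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;

	static void fqs_sketch(enum fqs_state state)
	{
		pthread_mutex_lock(&node_lock);		/* lock held on entry to the switch */
		switch (state) {
		case FQS_IDLE:
			pthread_mutex_unlock(&node_lock);	/* nothing to force; drop lock */
			break;
		case FQS_SAVE_DYNTICK:
			pthread_mutex_unlock(&node_lock);
			/* ... scan CPUs' dyntick-idle state without the lock ... */
			break;
		case FQS_FORCE:
			pthread_mutex_unlock(&node_lock);
			/* ... prod CPUs that have not yet reported a quiescent state ... */
			break;
		default:
			pthread_mutex_unlock(&node_lock);
			fprintf(stderr, "unexpected state %d\n", state);	/* stands in for WARN_ON_ONCE(1) */
			break;
		}
	}

	int main(void)
	{
		fqs_sketch(FQS_IDLE);	/* exercise one arm of the sketch */
		return 0;
	}

With every exit path owning the unlock, the changelog's stated goal
becomes reachable in later steps: keeping rnp->lock held across the
sections that must not race with grace-period initialization.
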
-rw-r--r--  kernel/rcutree.c  27
1 files changed, 18 insertions, 9 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 53ae9598f79..eae331da6be 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1204,7 +1204,7 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	if (relaxed &&
 	    (long)(rsp->jiffies_force_qs - jiffies) >= 0)
-		goto unlock_ret; /* no emergency and done recently. */
+		goto unlock_fqs_ret; /* no emergency and done recently. */
 	rsp->n_force_qs++;
 	spin_lock(&rnp->lock);
 	lastcomp = rsp->gpnum - 1;
@@ -1213,31 +1213,32 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	if (!rcu_gp_in_progress(rsp)) {
 		rsp->n_force_qs_ngp++;
 		spin_unlock(&rnp->lock);
-		goto unlock_ret; /* no GP in progress, time updated. */
+		goto unlock_fqs_ret; /* no GP in progress, time updated. */
 	}
-	spin_unlock(&rnp->lock);
 	switch (signaled) {
 	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
+		spin_unlock(&rnp->lock);
 		break; /* grace period idle or initializing, ignore. */
 	case RCU_SAVE_DYNTICK:
+		spin_unlock(&rnp->lock);
 		if (RCU_SIGNAL_INIT != RCU_SAVE_DYNTICK)
 			break; /* So gcc recognizes the dead code. */
 		/* Record dyntick-idle state. */
 		if (rcu_process_dyntick(rsp, lastcomp,
 					dyntick_save_progress_counter))
-			goto unlock_ret;
+			goto unlock_fqs_ret;
+		spin_lock(&rnp->lock);
 		/* fall into next case. */
 	case RCU_SAVE_COMPLETED:
 		/* Update state, record completion counter. */
 		forcenow = 0;
-		spin_lock(&rnp->lock);
 		if (lastcomp + 1 == rsp->gpnum &&
 		    lastcomp == rsp->completed &&
 		    rsp->signaled == signaled) {
@@ -1245,23 +1246,31 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 			rsp->completed_fqs = lastcomp;
 			forcenow = signaled == RCU_SAVE_COMPLETED;
 		}
-		spin_unlock(&rnp->lock);
-		if (!forcenow)
+		if (!forcenow) {
+			spin_unlock(&rnp->lock);
 			break;
+		}
 		/* fall into next case. */
 	case RCU_FORCE_QS:
 		/* Check dyntick-idle state, send IPI to laggarts. */
+		spin_unlock(&rnp->lock);
 		if (rcu_process_dyntick(rsp, rsp->completed_fqs,
 					rcu_implicit_dynticks_qs))
-			goto unlock_ret;
+			goto unlock_fqs_ret;
 		/* Leave state in case more forcing is required. */
 		break;
+
+	default:
+
+		spin_unlock(&rnp->lock);
+		WARN_ON_ONCE(1);
+		break;
 	}
-unlock_ret:
+unlock_fqs_ret:
 	spin_unlock_irqrestore(&rsp->fqslock, flags);
 }