aboutsummaryrefslogtreecommitdiff
path: root/kernel
diff options
context:
space:
mode:
authorLai Jiangshan <laijs@cn.fujitsu.com>2012-02-22 13:06:51 -0800
committerPaul E. McKenney <paulmck@linux.vnet.ibm.com>2012-04-30 10:48:20 -0700
commit4b7a3e9e32114a09c61995048f055615b5d4c26d (patch)
tree562ff1829abb99e830b5e3b3719834b059376b03 /kernel
parentcef50120b61c2af4ce34bc165e19cad66296f93d (diff)
downloadlinux-4b7a3e9e32114a09c61995048f055615b5d4c26d.tar.gz
rcu: Remove fast check path from __synchronize_srcu()
The fastpath in __synchronize_srcu() is designed to handle cases where there are a large number of concurrent calls for the same srcu_struct structure. However, the Linux kernel currently does not use SRCU in this manner, so remove the fastpath checks for simplicity. Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com> Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/srcu.c25
1 file changed, 1 insertion, 24 deletions
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 84c9b97dc3d..17e95bcc901 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -308,7 +308,7 @@ static void flip_idx_and_wait(struct srcu_struct *sp, bool expedited)
*/
static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
{
- int idx;
+ int idx = 0;
rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
!lock_is_held(&rcu_bh_lock_map) &&
@@ -316,32 +316,9 @@ static void __synchronize_srcu(struct srcu_struct *sp, bool expedited)
!lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");
- smp_mb(); /* Ensure prior action happens before grace period. */
- idx = ACCESS_ONCE(sp->completed);
- smp_mb(); /* Access to ->completed before lock acquisition. */
mutex_lock(&sp->mutex);
/*
- * Check to see if someone else did the work for us while we were
- * waiting to acquire the lock. We need -three- advances of
- * the counter, not just one. If there was but one, we might have
- * shown up -after- our helper's first synchronize_sched(), thus
- * having failed to prevent CPU-reordering races with concurrent
- * srcu_read_unlock()s on other CPUs (see comment below). If there
- * was only two, we are guaranteed to have waited through only one
- * full index-flip phase. So we either (1) wait for three or
- * (2) supply the additional ones we need.
- */
-
- if (sp->completed == idx + 2)
- idx = 1;
- else if (sp->completed == idx + 3) {
- mutex_unlock(&sp->mutex);
- return;
- } else
- idx = 0;
-
- /*
* If there were no helpers, then we need to do two flips of
* the index. The first flip is required if there are any
* outstanding SRCU readers even if there are no new readers