// SPDX-License-Identifier: GPL-2.0+
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

enum { GP_IDLE = 0, GP_ENTER, GP_PASSED, GP_EXIT, GP_REPLAY };
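
/*
 * Rough sketch of the gp_state transitions driven by rcu_sync_enter(),
 * rcu_sync_exit() and the rcu_sync_func() callback below (see their
 * comments for the details):
 *
 *	GP_IDLE   --enter-->          GP_ENTER  --GP elapses--> GP_PASSED
 *	GP_PASSED --last exit-->      GP_EXIT   --GP elapses--> GP_IDLE
 *	GP_EXIT   --new enter+exit--> GP_REPLAY --GP elapses--> GP_EXIT
 *	GP_EXIT/GP_REPLAY --new enter, GP elapses--> GP_PASSED
 */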

#define	rss_lock	gp_wait.lock

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 */
void rcu_sync_init(struct rcu_sync *rsp)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
}
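
/*
 * For illustration only: a minimal sketch of how a user might embed and
 * initialize an rcu_sync.  The "foo" structure and foo_init() helper are
 * hypothetical, not part of this file.
 *
 *	struct foo {
 *		struct rcu_sync rss;
 *		// reader-fastpath state guarded via rss goes here
 *	};
 *
 *	static void foo_init(struct foo *foo)
 *	{
 *		rcu_sync_init(&foo->rss);	// starts out GP_IDLE
 *	}
 */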

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}
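
/*
 * Illustrative sketch (hypothetical "foo" object): pre-bias readers onto
 * the slow path from early boot, before a real grace-period wait would be
 * possible or desirable.
 *
 *	static int __init foo_early_init(void)
 *	{
 *		rcu_sync_init(&foo.rss);
 *		rcu_sync_enter_start(&foo.rss);	// readers see !rcu_sync_is_idle()
 *		return 0;
 *	}
 *
 * The count taken here can later be dropped by an unpaired rcu_sync_exit(),
 * which (after a grace period) lets readers back onto their fastpaths.
 */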

static void rcu_sync_func(struct rcu_head *rhp);

static void rcu_sync_call(struct rcu_sync *rsp)
{
	call_rcu_hurry(&rsp->cb_head, rcu_sync_func);
}
/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to call_rcu() by rcu_sync_enter() and
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of enter/exit.
 *
 * If it is called by rcu_sync_enter() it signals that all the readers were
 * switched onto the slow path.
 *
 * If it is called by rcu_sync_exit() it takes action based on events that
 * have taken place in the meantime, so that closely spaced rcu_sync_enter()
 * and rcu_sync_exit() pairs need not wait for a grace period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
	struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
	unsigned long flags;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * We're at least a GP after the GP_IDLE->GP_ENTER transition.
		 */
		WRITE_ONCE(rsp->gp_state, GP_PASSED);
		wake_up_locked(&rsp->gp_wait);
	} else if (rsp->gp_state == GP_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback to
		 * catch a later GP.
		 */
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
		rcu_sync_call(rsp);
	} else {
		/*
		 * We're at least a GP after the last rcu_sync_exit(); everybody
		 * will now have observed the write side critical section.
		 * Let 'em rip!
		 */
		WRITE_ONCE(rsp->gp_state, GP_IDLE);
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period.  However, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	int gp_state;

	spin_lock_irq(&rsp->rss_lock);
	gp_state = rsp->gp_state;
	if (gp_state == GP_IDLE) {
		WRITE_ONCE(rsp->gp_state, GP_ENTER);
		WARN_ON_ONCE(rsp->gp_count);
		/*
		 * Note that we could simply do rcu_sync_call(rsp) here and
		 * avoid the "if (gp_state == GP_IDLE)" block below.
		 *
		 * However, synchronize_rcu() can be faster if rcu_expedited
		 * or rcu_blocking_is_gp() is true.
		 *
		 * Another reason is that we can't wait for an RCU callback
		 * if we are called at early boot time, but this shouldn't
		 * happen.
		 */
	}
	rsp->gp_count++;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state == GP_IDLE) {
		/*
		 * See the comment above, this simply does the "synchronous"
		 * call_rcu(rcu_sync_func) which does GP_ENTER -> GP_PASSED.
		 */
		synchronize_rcu();
		rcu_sync_func(&rsp->cb_head);
		/* Not really needed, wait_event() would see GP_PASSED. */
		return;
	}

	wait_event(rsp->gp_wait, READ_ONCE(rsp->gp_state) >= GP_PASSED);
}
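
/*
 * Illustrative updater-side sketch (hypothetical "foo" object and
 * do_the_update() helper): bracket the update with rcu_sync_enter() and
 * rcu_sync_exit() so that readers see rcu_sync_is_idle() return false,
 * and hence stay on their slowpath, for the whole update.
 *
 *	static void foo_update(struct foo *foo)
 *	{
 *		rcu_sync_enter(&foo->rss);	// may block for a grace period
 *		do_the_update(foo);		// readers are on the slowpath now
 *		rcu_sync_exit(&foo->rss);	// fastpaths resume after a GP
 *	}
 */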

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_IDLE);
	WARN_ON_ONCE(READ_ONCE(rsp->gp_count) == 0);

	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->gp_state == GP_PASSED) {
			WRITE_ONCE(rsp->gp_state, GP_EXIT);
			rcu_sync_call(rsp);
		} else if (rsp->gp_state == GP_EXIT) {
			WRITE_ONCE(rsp->gp_state, GP_REPLAY);
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}
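
/*
 * Illustrative reader-side sketch (hypothetical "foo" object and helpers):
 * rcu_sync_is_idle() is the check this state machine drives.  A real user
 * such as percpu-rwsem also marks the reader (e.g. in a per-CPU counter)
 * inside the RCU read-side section, so that the updater's grace period in
 * rcu_sync_enter() is guaranteed to observe it.
 *
 *	static void foo_read(struct foo *foo)
 *	{
 *		rcu_read_lock();
 *		if (rcu_sync_is_idle(&foo->rss)) {
 *			fast_read(foo);		// hypothetical lock-free path
 *			rcu_read_unlock();
 *			return;
 *		}
 *		rcu_read_unlock();
 *		slow_read(foo);			// hypothetical locked path
 *	}
 */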

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int gp_state;

	WARN_ON_ONCE(READ_ONCE(rsp->gp_count));
	WARN_ON_ONCE(READ_ONCE(rsp->gp_state) == GP_PASSED);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->gp_state == GP_REPLAY)
		WRITE_ONCE(rsp->gp_state, GP_EXIT);
	gp_state = rsp->gp_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (gp_state != GP_IDLE) {
		rcu_barrier();
		WARN_ON_ONCE(rsp->gp_state != GP_IDLE);
	}
}
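
/*
 * Illustrative teardown sketch (hypothetical "foo" object): all
 * rcu_sync_enter() calls must have been matched by rcu_sync_exit() before
 * destruction; rcu_sync_dtor() then waits, via rcu_barrier(), for any
 * still-pending rcu_sync_func() callback to finish.
 *
 *	static void foo_destroy(struct foo *foo)
 *	{
 *		rcu_sync_dtor(&foo->rss);
 *		kfree(foo);
 *	}
 */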