/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)	.held = func,
#else
#define __INIT_HELD(func)
#endif

static const struct {
	void (*sync)(void);
	void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
	void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
	int  (*held)(void);
#endif
} gp_ops[] = {
	[RCU_SYNC] = {
		.sync = synchronize_rcu,
		.call = call_rcu,
		.wait = rcu_barrier,
		__INIT_HELD(rcu_read_lock_held)
	},
	[RCU_SCHED_SYNC] = {
		.sync = synchronize_sched,
		.call = call_rcu_sched,
		.wait = rcu_barrier_sched,
		__INIT_HELD(rcu_read_lock_sched_held)
	},
	[RCU_BH_SYNC] = {
		.sync = synchronize_rcu_bh,
		.call = call_rcu_bh,
		.wait = rcu_barrier_bh,
		__INIT_HELD(rcu_read_lock_bh_held)
	},
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
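
/*
 * State overview (the transitions are implemented by the functions
 * below):
 *
 *	GP_IDLE    - readers may use their fastpath (rcu_sync_is_idle())
 *	GP_PENDING - rcu_sync_enter() is waiting for a grace period
 *	GP_PASSED  - grace period elapsed; readers are on their slowpath
 *
 *	CB_IDLE    - no rcu_sync_func() callback is pending
 *	CB_PENDING - rcu_sync_exit() queued rcu_sync_func() via call_rcu()
 *	CB_REPLAY  - rcu_sync_exit() ran again before the callback fired;
 *		     rcu_sync_func() requeues itself for one more GP
 */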

#define	rss_lock	gp_wait.lock

#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
	RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
			 "suspicious rcu_sync_is_idle() usage");
}
EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
	memset(rsp, 0, sizeof(*rsp));
	init_waitqueue_head(&rsp->gp_wait);
	rsp->gp_type = type;
}

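/*
 * Usage sketch (illustrative, not part of this file): a reader calls
 * rcu_sync_is_idle() from <linux/rcu_sync.h> inside the matching RCU
 * read-side critical section to choose between its paths.  The names
 * my_rss, my_fast() and my_slow() are hypothetical.
 *
 *	static struct rcu_sync my_rss;
 *
 *	void my_init(void)
 *	{
 *		rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
 *	}
 *
 *	void my_reader(void)
 *	{
 *		rcu_read_lock_sched();
 *		if (rcu_sync_is_idle(&my_rss))
 *			my_fast();	// no writer active; lockless path
 *		else
 *			my_slow();	// writer active; take the slow path
 *		rcu_read_unlock_sched();
 *	}
 */
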
/**
 * rcu_sync_enter_start() - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
	rsp->gp_count++;
	rsp->gp_state = GP_PASSED;
}

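/*
 * Sketch (hypothetical my_rss as above): force the slow path from
 * initialization code that runs before any reader can execute, so no
 * grace-period wait is needed:
 *
 *	rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
 *	rcu_sync_enter_start(&my_rss);	// as if rcu_sync_enter() had run
 *	...
 *	rcu_sync_exit(&my_rss);		// later: re-enable the fastpath
 */
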
/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period; however, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
	bool need_wait, need_sync;

	spin_lock_irq(&rsp->rss_lock);
	need_wait = rsp->gp_count++;
	need_sync = rsp->gp_state == GP_IDLE;
	if (need_sync)
		rsp->gp_state = GP_PENDING;
	spin_unlock_irq(&rsp->rss_lock);

	BUG_ON(need_wait && need_sync);

	if (need_sync) {
		gp_ops[rsp->gp_type].sync();
		rsp->gp_state = GP_PASSED;
		wake_up_all(&rsp->gp_wait);
	} else if (need_wait) {
		wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
	} else {
		/*
		 * Possible when there's a pending CB from an rcu_sync_exit().
		 * Nobody has yet been allowed the 'fast' path and thus we can
		 * avoid doing any sync(). The callback will get 'dropped'.
		 */
		BUG_ON(rsp->gp_state != GP_PASSED);
	}
}

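/*
 * Illustrative timeline (derived from the code above): two updaters
 * entering concurrently share a single grace period.
 *
 *	CPU0: rcu_sync_enter()		CPU1: rcu_sync_enter()
 *	  gp_count 0->1, need_sync	  gp_count 1->2, need_wait
 *	  gp_state = GP_PENDING
 *	  sync() ...grace period...	  wait_event(gp_state == GP_PASSED)
 *	  gp_state = GP_PASSED
 *	  wake_up_all()			  returns
 */
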
/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rcu: Pointer to rcu_head within the rcu_sync structure being synchronized
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ends, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ends, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rcu)
{
	struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head);
	unsigned long flags;

	BUG_ON(rsp->gp_state != GP_PASSED);
	BUG_ON(rsp->cb_state == CB_IDLE);

	spin_lock_irqsave(&rsp->rss_lock, flags);
	if (rsp->gp_count) {
		/*
		 * A new rcu_sync_enter() has happened; drop the callback.
		 */
		rsp->cb_state = CB_IDLE;
	} else if (rsp->cb_state == CB_REPLAY) {
		/*
		 * A new rcu_sync_exit() has happened; requeue the callback
		 * to catch a later GP.
		 */
		rsp->cb_state = CB_PENDING;
		gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
	} else {
		/*
		 * We're at least a GP after rcu_sync_exit(); everybody will
		 * now have observed the write-side critical section. Let 'em
		 * rip!
		 */
		rsp->cb_state = CB_IDLE;
		rsp->gp_state = GP_IDLE;
	}
	spin_unlock_irqrestore(&rsp->rss_lock, flags);
}

/**
 * rcu_sync_exit() - Allow readers back onto fastpath after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
	spin_lock_irq(&rsp->rss_lock);
	if (!--rsp->gp_count) {
		if (rsp->cb_state == CB_IDLE) {
			rsp->cb_state = CB_PENDING;
			gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
		} else if (rsp->cb_state == CB_PENDING) {
			rsp->cb_state = CB_REPLAY;
		}
	}
	spin_unlock_irq(&rsp->rss_lock);
}

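/*
 * Writer-side sketch matching my_reader() above (names hypothetical;
 * how my_slow() synchronizes with the update is up to the caller,
 * e.g. a lock, as in percpu-rwsem):
 *
 *	void my_writer(void)
 *	{
 *		rcu_sync_enter(&my_rss);	// all readers now in my_slow()
 *		do_my_update();
 *		rcu_sync_exit(&my_rss);		// fastpath restored after a GP
 *	}
 */
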
/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
	int cb_state;

	BUG_ON(rsp->gp_count);

	spin_lock_irq(&rsp->rss_lock);
	if (rsp->cb_state == CB_REPLAY)
		rsp->cb_state = CB_PENDING;
	cb_state = rsp->cb_state;
	spin_unlock_irq(&rsp->rss_lock);

	if (cb_state != CB_IDLE) {
		gp_ops[rsp->gp_type].wait();
		BUG_ON(rsp->cb_state != CB_IDLE);
	}
}
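
/*
 * Teardown sketch (hypothetical context): ensure no rcu_sync_func()
 * callback is still in flight before freeing the enclosing object.
 * gp_ops[...].wait() maps to rcu_barrier()/rcu_barrier_sched()/
 * rcu_barrier_bh() depending on gp_type.
 *
 *	rcu_sync_dtor(&my_obj->rss);
 *	kfree(my_obj);
 */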