/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H

#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>

/*
 * Closure is perhaps the most overused and abused term in computer science, but
 * since I've been unable to come up with anything better you're stuck with it
 * again.
 *
 * What are closures?
 *
 * They embed a refcount. The basic idea is they count "things that are in
 * progress" - in flight bios, some other thread that's doing something else -
 * anything you might want to wait on.
 *
 * The refcount may be manipulated with closure_get() and closure_put().
 * closure_put() is where many of the interesting things happen, when it causes
 * the refcount to go to 0.
 *
 * Closures can be used to wait on things both synchronously and asynchronously,
 * and synchronous and asynchronous use can be mixed without restriction. To
 * wait synchronously, use closure_sync() - you will sleep until your closure's
 * refcount hits 1.
 *
 * To wait asynchronously, use
 *   continue_at(cl, next_function, workqueue);
 *
 * passing it, as you might expect, the function to run when nothing is pending
 * and the workqueue to run that function out of.
 *
 * continue_at() also, critically, requires a 'return' immediately following the
 * location where this macro is referenced, to return to the calling function.
 * There's good reason for this.
 *
 * To safely use closures asynchronously, they must always have a refcount while
 * they are running, owned by the thread that is running them. Otherwise, suppose
 * you submit some bios and wish to have a function run when they all complete:
 *
 * foo_endio(struct bio *bio)
 * {
 *	closure_put(cl);
 * }
 *
 * closure_init(cl);
 *
 * do_stuff();
 * closure_get(cl);
 * bio1->bi_endio = foo_endio;
 * bio_submit(bio1);
 *
 * do_more_stuff();
 * closure_get(cl);
 * bio2->bi_endio = foo_endio;
 * bio_submit(bio2);
 *
 * continue_at(cl, complete_some_read, system_wq);
 *
 * If the closure's refcount started at 0, complete_some_read() could run before
 * the second bio was submitted - which is almost always not what you want! More
 * importantly, it wouldn't be possible to say whether the original thread or
 * complete_some_read()'s thread owned the closure - and whatever state it was
 * associated with!
 *
 * So, closure_init() initializes a closure's refcount to 1 - and when a
 * closure_fn is run, the refcount will be reset to 1 first.
 *
 * Then, the rule is - if you got the refcount with closure_get(), release it
 * with closure_put() (i.e., in a bio->bi_endio function). If you have a refcount
 * on a closure because you called closure_init() or you were run out of a
 * closure - _always_ use continue_at(). Doing so consistently will help
 * eliminate an entire class of particularly pernicious races.
 *
 * Lastly, you might have a wait list dedicated to a specific event, and have no
 * need for specifying the condition - you just want to wait until someone runs
 * closure_wake_up() on the appropriate wait list. In that case, just use
 * closure_wait(). It will return either true or false, depending on whether the
 * closure was already on a wait list or not - a closure can only be on one wait
 * list at a time.
 *
 * Parents:
 *
 * closure_init() takes two arguments - it takes the closure to initialize, and
 * a (possibly NULL) parent.
 *
 * If parent is non-NULL, the new closure will have a refcount for its lifetime;
 * a closure is considered to be "finished" when its refcount hits 0 and the
 * function to run is NULL. Hence
 *
 * continue_at(cl, NULL, NULL);
 *
 * returns up the (spaghetti) stack of closures, precisely like a normal return
 * returns up the C stack. continue_at() with a non-NULL fn is better thought of
 * as doing a tail call.
 *
 * All this implies that a closure should typically be embedded in a particular
 * struct (which its refcount will normally control the lifetime of), and that
 * struct can very much be thought of as a stack frame.
 */
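
/*
 * As an illustrative sketch of the parent mechanism (struct bar, bar_work()
 * and bar_done() are hypothetical, not part of this API):
 *
 *	static void bar_done(struct closure *cl)
 *	{
 *		closure_return(cl);	(all refs dropped; resume the parent)
 *	}
 *
 *	static void bar_work(struct bar *b, struct closure *parent)
 *	{
 *		closure_init(&b->cl, parent);	(parent now waits on b->cl)
 *		... take refs with closure_get(&b->cl), submit io ...
 *		continue_at(&b->cl, bar_done, system_wq);
 *	}
 */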

struct closure;
struct closure_syncer;
typedef void (closure_fn) (struct closure *);
extern struct dentry *bcache_debug;

struct closure_waitlist {
	struct llist_head	list;
};

enum closure_state {
	/*
	 * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
	 * the thread that owns the closure, and cleared by the thread that's
	 * waking up the closure.
	 *
	 * The rest are for debugging and don't affect behaviour:
	 *
	 * CLOSURE_RUNNING: Set when a closure is running (i.e. by
	 * closure_init() and when closure_put() runs the next function), and
	 * must be cleared before remaining hits 0. Primarily to help guard
	 * against incorrect usage and accidentally transferring references.
	 * continue_at() and closure_return() clear it for you; if you're doing
	 * something unusual you can use closure_set_dead() which also helps
	 * annotate where references are being transferred.
	 */

	CLOSURE_BITS_START	= (1U << 26),
	CLOSURE_DESTRUCTOR	= (1U << 26),
	CLOSURE_WAITING		= (1U << 28),
	CLOSURE_RUNNING		= (1U << 30),
};

#define CLOSURE_GUARD_MASK					\
	((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)

#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)

struct closure {
	union {
		struct {
			struct workqueue_struct *wq;
			struct closure_syncer	*s;
			struct llist_node	list;
			closure_fn		*fn;
		};
		struct work_struct	work;
	};

	struct closure		*parent;

	atomic_t		remaining;

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
#define CLOSURE_MAGIC_DEAD	0xc054dead
#define CLOSURE_MAGIC_ALIVE	0xc054a11e

	unsigned int		magic;
	struct list_head	all;
	unsigned long		ip;
	unsigned long		waiting_on;
#endif
};

void closure_sub(struct closure *cl, int v);
void closure_put(struct closure *cl);
void __closure_wake_up(struct closure_waitlist *list);
bool closure_wait(struct closure_waitlist *list, struct closure *cl);
void __closure_sync(struct closure *cl);

/**
 * closure_sync - sleep until a closure has nothing left to wait on
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
static inline void closure_sync(struct closure *cl)
{
	if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
		__closure_sync(cl);
}
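
/*
 * A minimal sketch of synchronous use, assuming a completion such as the
 * hypothetical foo_endio() above that drops a ref with closure_put():
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *
 *	closure_get(&cl);
 *	bio->bi_endio = foo_endio;
 *	bio_submit(bio);
 *
 *	closure_sync(&cl);	(sleeps until only our initial ref remains)
 */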

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

void closure_debug_init(void);
void closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);

#else

static inline void closure_debug_init(void) {}
static inline void closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}

#endif

static inline void closure_set_ip(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->ip = _THIS_IP_;
#endif
}

static inline void closure_set_ret_ip(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->ip = _RET_IP_;
#endif
}

static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->waiting_on = f;
#endif
}

static inline void closure_set_stopped(struct closure *cl)
{
	atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}

static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
				  struct workqueue_struct *wq)
{
	closure_set_ip(cl);
	cl->fn = fn;
	cl->wq = wq;
	/* order the stores to fn/wq before the atomic_dec() in closure_put() */
	smp_mb__before_atomic();
}

static inline void closure_queue(struct closure *cl)
{
	struct workqueue_struct *wq = cl->wq;
	/*
	 * Changes to the layout of struct closure or struct work_struct may
	 * leave work.func no longer aliasing fn, in which case work.func would
	 * not point at the right function.
	 */
	BUILD_BUG_ON(offsetof(struct closure, fn)
		     != offsetof(struct work_struct, func));
	if (wq) {
		INIT_WORK(&cl->work, cl->work.func);
		BUG_ON(!queue_work(wq, &cl->work));
	} else
		cl->fn(cl);
}

/**
 * closure_get - increment a closure's refcount
 */
static inline void closure_get(struct closure *cl)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	BUG_ON((atomic_inc_return(&cl->remaining) &
		CLOSURE_REMAINING_MASK) <= 1);
#else
	atomic_inc(&cl->remaining);
#endif
}

/**
 * closure_init - Initialize a closure, setting the refcount to 1
 * @cl:		closure to initialize
 * @parent:	parent of the new closure. cl will take a refcount on it for its
 *		lifetime; may be NULL.
 */
static inline void closure_init(struct closure *cl, struct closure *parent)
{
	memset(cl, 0, sizeof(struct closure));
	cl->parent = parent;
	if (parent)
		closure_get(parent);

	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);

	closure_debug_create(cl);
	closure_set_ip(cl);
}

static inline void closure_init_stack(struct closure *cl)
{
	memset(cl, 0, sizeof(struct closure));
	atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
}

/**
 * closure_wake_up - wake up all closures on a wait list,
 *		     with memory barrier
 */
static inline void closure_wake_up(struct closure_waitlist *list)
{
	/* Memory barrier for the wait list */
	smp_mb();
	__closure_wake_up(list);
}
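
/*
 * Sketch of event-style waiting (the waitlist d->event_wait and the function
 * handle_event() are hypothetical):
 *
 *	closure_wait(&d->event_wait, cl);	(park cl on the wait list)
 *	continue_at(cl, handle_event, system_wq);
 *
 * and, in whatever code triggers the event:
 *
 *	closure_wake_up(&d->event_wait);
 */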

/**
 * continue_at - jump to another function with barrier
 *
 * After @cl is no longer waiting on anything (i.e. all outstanding refs have
 * been dropped with closure_put()), it will resume execution at @fn running out
 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
 *
 * This is because after calling continue_at() you no longer have a ref on @cl,
 * and whatever @cl owns may be freed out from under you - a running closure fn
 * has a ref on its own closure which continue_at() drops.
 *
 * Note you are expected to immediately return after using this macro.
 */
#define continue_at(_cl, _fn, _wq)					\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
} while (0)
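
/*
 * For example (read_done() is hypothetical):
 *
 *	continue_at(cl, read_done, system_wq);
 *	return;		(mandatory - we no longer own a ref on cl)
 */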

/**
 * closure_return - finish execution of a closure
 *
 * This is used to indicate that @cl is finished: when all outstanding refs on
 * @cl have been dropped @cl's ref on its parent closure (as passed to
 * closure_init()) will be dropped, if one was specified - thus this can be
 * thought of as returning to the parent closure.
 */
#define closure_return(_cl)	continue_at((_cl), NULL, NULL)

/**
 * continue_at_nobarrier - jump to another function without barrier
 *
 * Causes @fn to be executed out of @cl, in @wq context (or called directly if
 * @wq is NULL).
 *
 * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
 * thus it's not safe to touch anything protected by @cl after a
 * continue_at_nobarrier().
 */
#define continue_at_nobarrier(_cl, _fn, _wq)				\
do {									\
	set_closure_fn(_cl, _fn, _wq);					\
	closure_queue(_cl);						\
} while (0)
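
/*
 * Sketch (next_stage() is hypothetical) - with a NULL workqueue the next
 * function runs immediately, in the caller's context:
 *
 *	continue_at_nobarrier(cl, next_stage, NULL);
 *	return;
 */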

/**
 * closure_return_with_destructor - finish execution of a closure,
 *				    with destructor
 *
 * Works like closure_return(), except @destructor will be called when all
 * outstanding refs on @cl have been dropped; @destructor may be used to safely
 * free the memory occupied by @cl, and it is called with the ref on the parent
 * closure still held - so @destructor could safely return an item to a
 * freelist protected by @cl's parent.
 */
#define closure_return_with_destructor(_cl, _destructor)		\
do {									\
	set_closure_fn(_cl, _destructor, NULL);				\
	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
} while (0)
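
/*
 * Sketch of freeing a heap-allocated closure when it finishes (struct foo and
 * foo_free() are hypothetical):
 *
 *	static void foo_free(struct closure *cl)
 *	{
 *		kfree(container_of(cl, struct foo, cl));
 *	}
 *
 *	closure_return_with_destructor(cl, foo_free);
 */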

/**
 * closure_call - execute @fn out of a new, uninitialized closure
 *
 * Typically used when running out of one closure, and we want to run @fn
 * asynchronously out of a new closure - @parent will then wait for @cl to
 * finish.
 */
static inline void closure_call(struct closure *cl, closure_fn fn,
				struct workqueue_struct *wq,
				struct closure *parent)
{
	closure_init(cl, parent);
	continue_at_nobarrier(cl, fn, wq);
}
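
/*
 * Sketch (struct bar and bar_start() are hypothetical): start bar_start() in a
 * fresh closure; the closure we're running out of, passed as @parent, won't
 * finish until b->cl does:
 *
 *	closure_call(&b->cl, bar_start, system_wq, cl);
 */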

#endif /* _LINUX_CLOSURE_H */