xref: /openbmc/linux/drivers/md/bcache/closure.c (revision 12eb4683)
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

#include "closure.h"

#define CL_FIELD(type, field)					\
	case TYPE_ ## type:					\
	return &container_of(cl, struct type, cl)->field

static struct closure_waitlist *closure_waitlist(struct closure *cl)
{
	switch (cl->type) {
		CL_FIELD(closure_with_waitlist, wait);
	default:
		return NULL;
	}
}
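
/*
 * For reference (a sketch inferred from the CL_FIELD() expansion above, not
 * a definition copied from closure.h): the closure types handled here embed
 * a struct closure member named 'cl' next to the named field, roughly
 *
 *	struct closure_with_waitlist {
 *		struct closure		cl;
 *		struct closure_waitlist	wait;
 *	};
 *
 * so container_of() can walk from the embedded closure back to its waitlist.
 */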

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	/* Must deliver precisely one wakeup */
	if (r == 1 && (flags & CLOSURE_SLEEPING))
		wake_up_process(cl->task);

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			struct closure_waitlist *wait = closure_waitlist(cl);
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			smp_mb();
			atomic_set(&cl->remaining, -1);

			if (wait)
				closure_wake_up(wait);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}
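
/*
 * Worked example of the encoding the helper above decodes (derived from this
 * file, not new behaviour): __closure_wake_up() below does
 *
 *	closure_sub(cl, CLOSURE_WAITING + 1);
 *
 * which, in one atomic_sub_return(), clears the CLOSURE_WAITING flag and
 * drops the ref taken in closure_wait().  closure_put_after_sub() then sees
 * the post-subtraction value: the flag already cleared in 'flags' and the
 * refcount part isolated in 'r' via CLOSURE_REMAINING_MASK.
 */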

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
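
/*
 * Hypothetical usage sketch (not from this file): a submitter pins a closure
 * with closure_get() before kicking off async work, and the completion path
 * drops that ref with closure_put().  The function and argument names below
 * are illustrative only.
 *
 *	closure_get(cl);		// one ref per outstanding operation
 *	submit_async_work(op, cl);	// completion calls closure_put(cl)
 *
 * When the last ref goes away, closure_put_after_sub() either requeues
 * cl->fn or runs the destructor path above.
 */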

static void set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_BCACHE_CLOSURES_DEBUG
	cl->waiting_on = f;
#endif
}

void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */

	while (list) {
		struct llist_node *t = list;
		list = llist_next(list);

		t->next = reverse;
		reverse = t;
	}

	/* Then do the wakeups */

	while (reverse) {
		cl = container_of(reverse, struct closure, list);
		reverse = llist_next(reverse);

		set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

bool closure_wait(struct closure_waitlist *list, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &list->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
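
/*
 * Illustrative pairing of the two halves above (a sketch, assuming the
 * closure_wake_up() wrapper from closure.h that is also used in
 * closure_put_after_sub(); the waitlist name is hypothetical):
 *
 *	// waiter: parks cl and adds CLOSURE_WAITING + 1 to cl->remaining
 *	if (!closure_wait(&d->full_wait, cl))
 *		return;			// already on a waitlist
 *
 *	// waker: wakes everything parked on the list, dropping
 *	// CLOSURE_WAITING + 1 from each waiter via closure_sub()
 *	closure_wake_up(&d->full_wait);
 */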

/**
 * closure_sync() - sleep until a closure has nothing left to wait on
 * @cl:		closure to sync
 *
 * Sleeps until the refcount hits 1 - the thread that's running the closure owns
 * the last refcount.
 */
void closure_sync(struct closure *cl)
{
	while (1) {
		__closure_start_sleep(cl);
		closure_set_ret_ip(cl);

		if ((atomic_read(&cl->remaining) &
		     CLOSURE_REMAINING_MASK) == 1)
			break;

		schedule();
	}

	__closure_end_sleep(cl);
}
EXPORT_SYMBOL(closure_sync);
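
/*
 * Minimal synchronous-wait sketch (illustrative; closure_init_stack() and
 * closure_get() come from closure.h and are used elsewhere in this file,
 * start_async_thing() is hypothetical):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	closure_get(&cl);	// ref dropped by the completion's closure_put()
 *	start_async_thing(&cl);
 *	closure_sync(&cl);	// sleeps until only our ref remains
 */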

/**
 * closure_trylock() - try to acquire the closure, without waiting
 * @cl:		closure to lock
 * @parent:	parent closure, if any; a ref is taken on it when locking succeeds
 *
 * Returns true if the closure was successfully locked.
 */
bool closure_trylock(struct closure *cl, struct closure *parent)
{
	if (atomic_cmpxchg(&cl->remaining, -1,
			   CLOSURE_REMAINING_INITIALIZER) != -1)
		return false;

	smp_mb();

	cl->parent = parent;
	if (parent)
		closure_get(parent);

	closure_set_ret_ip(cl);
	closure_debug_create(cl);
	return true;
}
EXPORT_SYMBOL(closure_trylock);

void __closure_lock(struct closure *cl, struct closure *parent,
		    struct closure_waitlist *wait_list)
{
	struct closure wait;
	closure_init_stack(&wait);

	while (1) {
		if (closure_trylock(cl, parent))
			return;

		closure_wait_event(wait_list, &wait,
				   atomic_read(&cl->remaining) == -1);
	}
}
EXPORT_SYMBOL(__closure_lock);
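
/*
 * Illustrative locking sketch (not from this file; the object layout is
 * hypothetical and closure_return() is assumed from closure.h): a
 * closure_with_waitlist embedded in a longer-lived object can serve as a
 * lock.  __closure_lock() spins on closure_trylock() and parks on the
 * waitlist in between; the unlock happens when the final put drops
 * cl->remaining back to -1, at which point closure_put_after_sub() also
 * wakes the waitlist.
 *
 *	__closure_lock(&obj->cl.cl, parent, &obj->cl.wait);
 *	... critical section ...
 *	closure_return(&obj->cl.cl);	// final put: unlock and wake waiters
 */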

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *debug;

#define work_data_bits(work) ((unsigned long *)(&(work)->data))

static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;
	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s%s%s\n",
			   test_bit(WORK_STRUCT_PENDING,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "",
			   r & CLOSURE_STACK	? "S" : "",
			   r & CLOSURE_SLEEPING	? "Sl" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};

void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}
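
/*
 * With CONFIG_BCACHE_CLOSURES_DEBUG enabled, the file created above can be
 * read to dump every live closure, e.g. (assuming debugfs is mounted at the
 * usual location):
 *
 *	# cat /sys/kernel/debug/closures
 *
 * Each entry prints the closure pointer, its fn and parent, the masked
 * refcount, and the Q/R/S/Sl state flags emitted by debug_seq_show().
 */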

#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");