xref: /openbmc/linux/drivers/md/bcache/closure.c (revision 44c6dc94)
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

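/*
 * Called with the result of the atomic sub on cl->remaining: if the count of
 * outstanding refs dropped to zero, either requeue the closure to run its
 * pending function, or tear it down - run the destructor (if any) and drop
 * the ref held on the parent closure.
 */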
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	BUG_ON(flags & CLOSURE_GUARD_MASK);
	BUG_ON(!r && (flags & ~CLOSURE_DESTRUCTOR));

	if (!r) {
		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

/**
 * closure_put - decrement a closure's refcount
 * @cl: closure to put
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);

/**
 * __closure_wake_up - wake up all closures on a wait list, without memory barrier
 * @wait_list: wait list to wake up
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 *	      closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);

struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

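/*
 * Completion callback for __closure_sync(): marks the on-stack syncer as
 * done and wakes the task sleeping in __closure_sync().
 */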
static void closure_sync_fn(struct closure *cl)
{
	cl->s->done = 1;
	wake_up_process(cl->s->task);
}

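/*
 * Sleep until all outstanding refs on @cl have been dropped: point cl->s at
 * an on-stack closure_syncer, continue into closure_sync_fn() once the last
 * ref is put, and sleep uninterruptibly until it marks us done.
 */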
void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

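/*
 * Debug instrumentation: every live closure is kept on closure_list so it
 * can be dumped via debugfs, and cl->magic is used to catch double
 * initialisation and use after free.
 */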
void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static struct dentry *debug;

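/*
 * Dump every live closure: its address, the saved ip (cl->ip), the pending
 * fn, its parent and remaining count, plus flags - Q (queued on a
 * workqueue), R (running) and W (waiting, with the address it called
 * closure_wait() from).
 */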
static int debug_seq_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pF\n",
				   (void *) cl->waiting_on);

		seq_printf(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

static int debug_seq_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_seq_show, NULL);
}

static const struct file_operations debug_ops = {
	.owner		= THIS_MODULE,
	.open		= debug_seq_open,
	.read		= seq_read,
	.release	= single_release
};

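/*
 * Register the debugfs entry; the closure state dump above ends up at
 * <debugfs>/closures.
 */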
void __init closure_debug_init(void)
{
	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
}

#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");