// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

#include "closure.h"

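/*
 * A closure's ->remaining word packs a refcount into its low bits
 * (CLOSURE_REMAINING_MASK) and state flags (CLOSURE_RUNNING,
 * CLOSURE_WAITING, CLOSURE_DESTRUCTOR) into the high bits; the guard
 * bits in CLOSURE_GUARD_MASK sit next to the flag bits so that a flag
 * over- or underflowing into a neighbour trips the WARN below. See
 * closure.h for the authoritative bit layout.
 */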
static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	if (WARN(flags & CLOSURE_GUARD_MASK,
		 "closure has guard bits set: %x (%u)",
		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
		r &= ~CLOSURE_GUARD_MASK;

	if (!r) {
		WARN(flags & ~CLOSURE_DESTRUCTOR,
		     "closure ref hit 0 with incorrect flags set: %x (%u)",
		     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));

		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/*
			 * A function was set with continue_at(): take a
			 * fresh ref and queue it.
			 */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/* Save these now: the destructor may free cl */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(cl);

			if (parent)
				closure_put(parent);
		}
	}
}
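
/*
 * How the requeue path above is typically reached: continue_at() (in
 * closure.h) stores the next function, then drops the running ref; once
 * the last ref is put, closure_put_after_sub() resets the count and
 * queues the stored function. A sketch, with example_stage2() and
 * example_wq standing in for caller-supplied names:
 *
 *	static void example_stage1(struct closure *cl)
 *	{
 *		... start async work that holds refs on cl ...
 *		continue_at(cl, example_stage2, example_wq);
 *	}
 */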

/* For clearing flags with the same atomic op as a put */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return(v, &cl->remaining));
}
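
/*
 * For an example of this flag-clearing idiom, see __closure_wake_up()
 * below, where closure_sub(cl, CLOSURE_WAITING + 1) clears
 * CLOSURE_WAITING and drops a ref in a single atomic operation.
 */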

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return(&cl->remaining));
}
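
/*
 * Typical ref lifecycle, sketched with hypothetical names (closure_init(),
 * closure_get() and continue_at() are real closure.h API; example_submit(),
 * example_done() and example_wq are stand-ins):
 *
 *	closure_init(cl, parent);
 *	closure_get(cl);		// one ref per outstanding request
 *	example_submit(req);		// completion calls closure_put(cl)
 *	continue_at(cl, example_done, example_wq);
 */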

/*
 * closure_wake_up - wake up all closures on a wait list, without memory barrier
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		/* Clear CLOSURE_WAITING and drop the ref taken in closure_wait() */
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
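
/*
 * Most callers want the closure_wake_up() wrapper from closure.h, which
 * issues smp_mb() before calling this; the underscored version leaves
 * that ordering to call sites that already provide it.
 */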

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Returns true if @cl was added, false if it was already on a waitlist
 * (CLOSURE_WAITING already set).
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
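
/*
 * The two halves of the wait protocol, sketched (closure_wait(),
 * closure_sync() and closure_wake_up() are real API; the resource and
 * its availability check are hypothetical):
 *
 *	// Waiter, in process context:
 *	while (!example_resource_available(r)) {
 *		closure_wait(&r->wait, cl);	// takes CLOSURE_WAITING + 1
 *		closure_sync(cl);		// sleep until woken
 *	}
 *
 *	// Waker, once the resource frees up:
 *	closure_wake_up(&r->wait);
 */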

/* Bookkeeping for closure_sync(): the sleeping task and its wakeup flag */
struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

static void closure_sync_fn(struct closure *cl)
{
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	/*
	 * Once s->done is set, the waiter in __closure_sync() may wake and
	 * return, taking its on-stack closure_syncer with it; grab the task
	 * pointer first and rely on RCU to keep the task_struct valid for
	 * the wake_up_process() call.
	 */
	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}

void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	/* Arrange for closure_sync_fn() to run once the last ref is put */
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		/* Set task state before testing done to avoid a lost wakeup */
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
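
/*
 * Blocking from process context, sketched (closure_init_stack() and
 * closure_sync() come from closure.h; example_submit() is a stand-in
 * for work that takes refs and puts them on completion):
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	example_submit(&cl);
 *	closure_sync(&cl);		// returns once all refs are put
 */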

#ifdef CONFIG_BCACHE_CLOSURES_DEBUG

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}

static struct dentry *closure_debug;

static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING ? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}
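
/*
 * Roughly what one entry of the resulting output looks like (the
 * addresses and symbol names below are invented):
 *
 *	000000006ae2b1e8: example_stage1+0x51/0xd0 -> example_stage2+0x0/0x80 p 0000000000000000 r 1 R
 *	 W example_stage1+0x71/0xd0
 */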

DEFINE_SHOW_ATTRIBUTE(debug);

void __init closure_debug_init(void)
{
	if (!IS_ERR_OR_NULL(bcache_debug))
		/*
		 * No need to check the return value of
		 * debugfs_create_file(): debugfs failures are not
		 * something callers should act on.
		 */
		closure_debug = debugfs_create_file(
			"closures", 0400, bcache_debug, NULL, &debug_fops);
}
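
/*
 * With CONFIG_BCACHE_CLOSURES_DEBUG set and debugfs mounted in the usual
 * place, the list of live closures can then be read with:
 *
 *	cat /sys/kernel/debug/bcache/closures
 */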
#endif

MODULE_AUTHOR("Kent Overstreet <koverstreet@google.com>");
MODULE_LICENSE("GPL");