/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 * Copyright (C) 2014 Fujitsu.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/freezer.h>
#include "async-thread.h"
#include "ctree.h"

/* Bits in btrfs_work->flags */
#define WORK_DONE_BIT 0
#define WORK_ORDER_DONE_BIT 1
#define WORK_HIGH_PRIO_BIT 2

/* Sentinel: thresholding disabled for this queue */
#define NO_THRESHOLD (-1)
/* Default used when a caller passes thresh == 0 */
#define DFT_THRESHOLD (32)

struct __btrfs_workqueue {
	struct workqueue_struct *normal_wq;
	/* List head pointing to ordered work list */
	struct list_head ordered_list;

	/* Spinlock for ordered_list */
	spinlock_t list_lock;

	/* Thresholding related variables */
	atomic_t pending;	/* works queued but not yet started */
	int max_active;		/* upper bound requested by the caller */
	int current_max;	/* limit currently applied to normal_wq */
	int thresh;		/* threshold, or NO_THRESHOLD */
	unsigned int count;	/* executions since the last adjustment */
	spinlock_t thres_lock;
};

struct btrfs_workqueue {
	struct __btrfs_workqueue *normal;
	struct __btrfs_workqueue *high;	/* only if WQ_HIGHPRI was requested */
};

static void normal_work_helper(struct btrfs_work *work);

#define BTRFS_WORK_HELPER(name)					\
void btrfs_##name(struct work_struct *arg)				\
{									\
	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
					       normal_work);		\
	normal_work_helper(work);					\
}

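/*
 * Each BTRFS_WORK_HELPER() line below stamps out one wrapper function.
 * For illustration, BTRFS_WORK_HELPER(endio_helper) expands to roughly:
 *
 *	void btrfs_endio_helper(struct work_struct *arg)
 *	{
 *		struct btrfs_work *work = container_of(arg,
 *				struct btrfs_work, normal_work);
 *		normal_work_helper(work);
 *	}
 *
 * Every work type therefore gets its own uniquely named function, which
 * keeps the different queues distinguishable in stack traces.
 */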
BTRFS_WORK_HELPER(worker_helper);
BTRFS_WORK_HELPER(delalloc_helper);
BTRFS_WORK_HELPER(flush_delalloc_helper);
BTRFS_WORK_HELPER(cache_helper);
BTRFS_WORK_HELPER(submit_helper);
BTRFS_WORK_HELPER(fixup_helper);
BTRFS_WORK_HELPER(endio_helper);
BTRFS_WORK_HELPER(endio_meta_helper);
BTRFS_WORK_HELPER(endio_meta_write_helper);
BTRFS_WORK_HELPER(endio_raid56_helper);
BTRFS_WORK_HELPER(rmw_helper);
BTRFS_WORK_HELPER(endio_write_helper);
BTRFS_WORK_HELPER(freespace_write_helper);
BTRFS_WORK_HELPER(delayed_meta_helper);
BTRFS_WORK_HELPER(readahead_helper);
BTRFS_WORK_HELPER(qgroup_rescan_helper);
BTRFS_WORK_HELPER(extent_refs_helper);
BTRFS_WORK_HELPER(scrub_helper);
BTRFS_WORK_HELPER(scrubwrc_helper);
BTRFS_WORK_HELPER(scrubnc_helper);

static struct __btrfs_workqueue *
__btrfs_alloc_workqueue(const char *name, int flags, int max_active,
			 int thresh)
{
	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->max_active = max_active;
	atomic_set(&ret->pending, 0);
	if (thresh == 0)
		thresh = DFT_THRESHOLD;
	/* For low threshold, disabling threshold is a better choice */
	if (thresh < DFT_THRESHOLD) {
		ret->current_max = max_active;
		ret->thresh = NO_THRESHOLD;
	} else {
		ret->current_max = 1;
		ret->thresh = thresh;
	}

	if (flags & WQ_HIGHPRI)
		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
						 ret->max_active,
						 "btrfs", name);
	else
		ret->normal_wq = alloc_workqueue("%s-%s", flags,
						 ret->max_active, "btrfs",
						 name);
	if (unlikely(!ret->normal_wq)) {
		kfree(ret);
		return NULL;
	}

	INIT_LIST_HEAD(&ret->ordered_list);
	spin_lock_init(&ret->list_lock);
	spin_lock_init(&ret->thres_lock);
	trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI);
	return ret;
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);

struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
					      int flags,
					      int max_active,
					      int thresh)
{
	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);

	if (unlikely(!ret))
		return NULL;

	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
					      max_active, thresh);
	if (unlikely(!ret->normal)) {
		kfree(ret);
		return NULL;
	}

	if (flags & WQ_HIGHPRI) {
		ret->high = __btrfs_alloc_workqueue(name, flags, max_active,
						    thresh);
		if (unlikely(!ret->high)) {
			__btrfs_destroy_workqueue(ret->normal);
			kfree(ret);
			return NULL;
		}
	}
	return ret;
}
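
/*
 * Illustrative sketch, not part of the original file: a minimal caller of
 * btrfs_alloc_workqueue()/btrfs_destroy_workqueue().  The queue name,
 * flags and limits are made-up example values; real callers live in
 * places such as disk-io.c.
 */
static void __maybe_unused example_workqueue_lifetime(void)
{
	struct btrfs_workqueue *wq;

	/* thresh == 0 selects DFT_THRESHOLD; WQ_HIGHPRI also creates ->high */
	wq = btrfs_alloc_workqueue("example", WQ_UNBOUND | WQ_HIGHPRI, 8, 0);
	if (!wq)
		return;

	btrfs_destroy_workqueue(wq);
}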

/*
 * Hook for the thresholding mechanism, called from btrfs_queue_work().
 * This hook WILL be called from IRQ handler context, so
 * workqueue_set_max_active() MUST NOT be called here.
 */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
{
	if (wq->thresh == NO_THRESHOLD)
		return;
	atomic_inc(&wq->pending);
}

/*
 * Hook for the thresholding mechanism, called before executing the work.
 * This hook runs in kthread context, so workqueue_set_max_active() may
 * be called here.
 */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
{
	int new_max_active;
	long pending;
	int need_change = 0;

	if (wq->thresh == NO_THRESHOLD)
		return;

	atomic_dec(&wq->pending);
	spin_lock(&wq->thres_lock);
	/*
	 * Use wq->count to limit the calling frequency of
	 * workqueue_set_max_active().
	 */
	wq->count++;
	wq->count %= (wq->thresh / 4);
	if (!wq->count)
		goto out;
	new_max_active = wq->current_max;

	/*
	 * pending may change under us, but that is fine: new_max_active
	 * does not need a perfectly accurate value.
	 */
	pending = atomic_read(&wq->pending);
	if (pending > wq->thresh)
		new_max_active++;
	if (pending < wq->thresh / 2)
		new_max_active--;
	new_max_active = clamp_val(new_max_active, 1, wq->max_active);
	if (new_max_active != wq->current_max) {
		need_change = 1;
		wq->current_max = new_max_active;
	}
out:
	spin_unlock(&wq->thres_lock);

	if (need_change)
		workqueue_set_max_active(wq->normal_wq, wq->current_max);
}
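
/*
 * Worked example of the tuning above, assuming the default thresh of 32:
 * the recalculation is skipped whenever wq->count wraps to zero, i.e.
 * once every thresh / 4 == 8 executions.  When it does run, a pending
 * count above 32 raises current_max by one and a pending count below 16
 * lowers it by one, with clamp_val() keeping the result in
 * [1, max_active].  workqueue_set_max_active() is only called when the
 * value actually changed.
 */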

/*
 * Run the ordered_func of each completed work item at the head of the
 * ordered list, strictly in queueing order.  An item whose WORK_DONE_BIT
 * is not yet set acts as a barrier for everything queued behind it.
 */
static void run_ordered_work(struct __btrfs_workqueue *wq)
{
	struct list_head *list = &wq->ordered_list;
	struct btrfs_work *work;
	spinlock_t *lock = &wq->list_lock;
	unsigned long flags;

	while (1) {
		spin_lock_irqsave(lock, flags);
		if (list_empty(list))
			break;
		work = list_entry(list->next, struct btrfs_work,
				  ordered_list);
		if (!test_bit(WORK_DONE_BIT, &work->flags))
			break;

		/*
		 * We are going to call the ordered done function, but
		 * we leave the work item on the list as a barrier so
		 * that later work items that are done don't have their
		 * functions called before this one returns.
		 */
		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
			break;
		trace_btrfs_ordered_sched(work);
		spin_unlock_irqrestore(lock, flags);
		work->ordered_func(work);

		/* Now take the lock again and drop our item from the list */
		spin_lock_irqsave(lock, flags);
		list_del(&work->ordered_list);
		spin_unlock_irqrestore(lock, flags);

		/*
		 * We don't want to call the ordered free functions with
		 * the lock held.
		 */
		work->ordered_free(work);
		trace_btrfs_all_work_done(work);
	}
	spin_unlock_irqrestore(lock, flags);
}

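/*
 * Common body shared by every BTRFS_WORK_HELPER() wrapper: run the
 * threshold hook, call the real work->func(), then process the ordered
 * list if this work has an ordered_func.
 */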
static void normal_work_helper(struct btrfs_work *work)
{
	struct __btrfs_workqueue *wq;
	int need_order = 0;

	/*
	 * We should not touch things inside work in the following cases:
	 * 1) after work->func(), if it has no ordered_free, since the
	 *    struct is freed in work->func();
	 * 2) after setting WORK_DONE_BIT, because the work may be freed
	 *    by another thread almost instantly.
	 * So save everything we need from the work item here.
	 */
	if (work->ordered_func)
		need_order = 1;
	wq = work->wq;

	trace_btrfs_work_sched(work);
	thresh_exec_hook(wq);
	work->func(work);
	if (need_order) {
		set_bit(WORK_DONE_BIT, &work->flags);
		run_ordered_work(wq);
	}
	if (!need_order)
		trace_btrfs_all_work_done(work);
}

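/*
 * Initialize a btrfs_work before queueing it.  @uniq_func must be one of
 * the btrfs_*_helper wrappers generated above; @ordered_func and
 * @ordered_free may be NULL when no ordering is needed.
 */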
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
		     btrfs_func_t func,
		     btrfs_func_t ordered_func,
		     btrfs_func_t ordered_free)
{
	work->func = func;
	work->ordered_func = ordered_func;
	work->ordered_free = ordered_free;
	INIT_WORK(&work->normal_work, uniq_func);
	INIT_LIST_HEAD(&work->ordered_list);
	work->flags = 0;
}
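
/*
 * Illustrative sketch, not part of the original file: allocating,
 * initializing and queueing one work item.  example_fn, example_ordered,
 * example_free and the choice of btrfs_worker_helper are all hypothetical;
 * any helper generated by BTRFS_WORK_HELPER() above would do.
 */
static void example_fn(struct btrfs_work *work) { }
static void example_ordered(struct btrfs_work *work) { }
static void example_free(struct btrfs_work *work)
{
	kfree(work);
}

static void __maybe_unused example_submit(struct btrfs_workqueue *wq)
{
	struct btrfs_work *work = kzalloc(sizeof(*work), GFP_NOFS);

	if (!work)
		return;
	btrfs_init_work(work, btrfs_worker_helper, example_fn,
			example_ordered, example_free);
	/* example_free() will kfree(work) once the ordered pass is done */
	btrfs_queue_work(wq, work);
}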

static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
				      struct btrfs_work *work)
{
	unsigned long flags;

	work->wq = wq;
	thresh_queue_hook(wq);
	if (work->ordered_func) {
		spin_lock_irqsave(&wq->list_lock, flags);
		list_add_tail(&work->ordered_list, &wq->ordered_list);
		spin_unlock_irqrestore(&wq->list_lock, flags);
	}
	queue_work(wq->normal_wq, &work->normal_work);
	trace_btrfs_work_queued(work);
}

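/*
 * Route the work to the high-priority sub-queue when WORK_HIGH_PRIO_BIT
 * is set (see btrfs_set_work_high_priority()) and the queue was created
 * with WQ_HIGHPRI; otherwise use the normal sub-queue.
 */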
void btrfs_queue_work(struct btrfs_workqueue *wq,
		      struct btrfs_work *work)
{
	struct __btrfs_workqueue *dest_wq;

	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
		dest_wq = wq->high;
	else
		dest_wq = wq->normal;
	__btrfs_queue_work(dest_wq, work);
}

static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
	destroy_workqueue(wq->normal_wq);
	trace_btrfs_workqueue_destroy(wq);
	kfree(wq);
}

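/*
 * Destroy both sub-queues.  NULL is tolerated so callers can pass
 * pointers that were never allocated.
 */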
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{
	if (!wq)
		return;
	if (wq->high)
		__btrfs_destroy_workqueue(wq->high);
	__btrfs_destroy_workqueue(wq->normal);
	kfree(wq);
}

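/*
 * Record a new max_active cap.  For thresholded queues the underlying
 * workqueue limit is then adjusted lazily by thresh_exec_hook(); for
 * NO_THRESHOLD queues the limit chosen at allocation time stays in
 * effect.
 */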
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
{
	if (!wq)
		return;
	wq->normal->max_active = max;
	if (wq->high)
		wq->high->max_active = max;
}

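/*
 * Mark a work item for the high-priority sub-queue.  Must be called
 * before btrfs_queue_work(), which is where the bit is checked.
 */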
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
}