/* xref: /openbmc/linux/mm/backing-dev.c (revision cfd6ed45) */

#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

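/*
 * Render the per-bdi debugfs "stats" file: walk the dirty inode lists
 * under wb->list_lock to count them and print them together with the
 * bdi's thresholds, counters and state.
 */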
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long wb_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_io_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_io_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_io_list)
		nr_more_io++;
	list_for_each_entry(inode, &wb->b_dirty_time, i_io_list)
		if (inode->i_state & I_DIRTY_TIME)
			nr_dirty_time++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	wb_thresh = wb_calc_thresh(wb, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "b_dirty_time:       %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
		   K(wb_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
		   (unsigned long) K(wb->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   nr_dirty_time,
		   !list_empty(&bdi->bdi_list), bdi->wb.state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

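/*
 * Create this bdi's debugfs directory and its "stats" file under the
 * top-level "bdi" directory.
 */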
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

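/* sysfs store: set the readahead window, converting kilobytes to pages */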
static ssize_t read_ahead_kb_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}									\
static DEVICE_ATTR_RW(name);

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

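/*
 * sysfs stores for the per-bdi min_ratio/max_ratio dirty limits; the parsed
 * percentage is validated by bdi_set_min_ratio()/bdi_set_max_ratio().
 */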
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
static DEVICE_ATTR_RO(stable_pages_required);

static struct attribute *bdi_dev_attrs[] = {
	&dev_attr_read_ahead_kb.attr,
	&dev_attr_min_ratio.attr,
	&dev_attr_max_ratio.attr,
	&dev_attr_stable_pages_required.attr,
	NULL,
};
ATTRIBUTE_GROUPS(bdi_dev);

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_groups = bdi_dev_groups;
	bdi_debug_init();

	return 0;
}
postcore_initcall(bdi_class_init);

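/*
 * Allocate the global writeback workqueue and initialize the noop bdi.
 * Runs at subsys_initcall time.
 */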
static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

/*
 * This function is used when the first inode for this wb is marked dirty. It
 * wakes up the corresponding bdi thread which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 *
 * We have to be careful not to postpone flush work if it is scheduled for
 * earlier. Thus we use queue_delayed_work().
 */
void wb_wakeup_delayed(struct bdi_writeback *wb)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	spin_lock_bh(&wb->work_lock);
	if (test_bit(WB_registered, &wb->state))
		queue_delayed_work(bdi_wq, &wb->dwork, timeout);
	spin_unlock_bh(&wb->work_lock);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW		(100 << (20 - PAGE_SHIFT))

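/*
 * Initialize a bdi_writeback: zero the structure, set up the dirty inode
 * lists, bandwidth estimates, work machinery, the congested node and the
 * per-cpu statistics.  Returns 0 on success or a negative errno.
 */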
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
		   int blkcg_id, gfp_t gfp)
{
	int i, err;

	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	INIT_LIST_HEAD(&wb->b_dirty_time);
	spin_lock_init(&wb->list_lock);

	wb->bw_time_stamp = jiffies;
	wb->balanced_dirty_ratelimit = INIT_BW;
	wb->dirty_ratelimit = INIT_BW;
	wb->write_bandwidth = INIT_BW;
	wb->avg_write_bandwidth = INIT_BW;

	spin_lock_init(&wb->work_lock);
	INIT_LIST_HEAD(&wb->work_list);
	INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
	wb->dirty_sleep = jiffies;

	wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
	if (!wb->congested)
		return -ENOMEM;

	err = fprop_local_init_percpu(&wb->completions, gfp);
	if (err)
		goto out_put_cong;

	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
		err = percpu_counter_init(&wb->stat[i], 0, gfp);
		if (err)
			goto out_destroy_stat;
	}

	return 0;

out_destroy_stat:
	while (i--)
		percpu_counter_destroy(&wb->stat[i]);
	fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
	wb_congested_put(wb->congested);
	return err;
}

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void wb_shutdown(struct bdi_writeback *wb)
{
	/* Make sure nobody queues further work */
	spin_lock_bh(&wb->work_lock);
	if (!test_and_clear_bit(WB_registered, &wb->state)) {
		spin_unlock_bh(&wb->work_lock);
		return;
	}
	spin_unlock_bh(&wb->work_lock);

	/*
	 * Drain work list and shutdown the delayed_work.  !WB_registered
	 * tells wb_workfn() that @wb is dying and its work_list needs to
	 * be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &wb->dwork, 0);
	flush_delayed_work(&wb->dwork);
	WARN_ON(!list_empty(&wb->work_list));
}

static void wb_exit(struct bdi_writeback *wb)
{
	int i;

	WARN_ON(delayed_work_pending(&wb->dwork));

	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
		percpu_counter_destroy(&wb->stat[i]);

	fprop_local_destroy_percpu(&wb->completions);
	wb_congested_put(wb->congested);
}

#ifdef CONFIG_CGROUP_WRITEBACK

#include <linux/memcontrol.h>

/*
 * cgwb_lock protects bdi->cgwb_tree, bdi->cgwb_congested_tree,
 * blkcg->cgwb_list, and memcg->cgwb_list.  bdi->cgwb_tree is also RCU
 * protected.  cgwb_release_wait is used to wait for the completion of cgwb
 * releases from bdi destruction path.
 */
static DEFINE_SPINLOCK(cgwb_lock);
static DECLARE_WAIT_QUEUE_HEAD(cgwb_release_wait);

/**
 * wb_congested_get_create - get or create a wb_congested
 * @bdi: associated bdi
 * @blkcg_id: ID of the associated blkcg
 * @gfp: allocation mask
 *
 * Look up the wb_congested for @blkcg_id on @bdi.  If missing, create one.
 * The returned wb_congested has its reference count incremented.  Returns
 * NULL on failure.
 */
struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
	struct bdi_writeback_congested *new_congested = NULL, *congested;
	struct rb_node **node, *parent;
	unsigned long flags;
retry:
	spin_lock_irqsave(&cgwb_lock, flags);

	node = &bdi->cgwb_congested_tree.rb_node;
	parent = NULL;

	while (*node != NULL) {
		parent = *node;
		congested = rb_entry(parent, struct bdi_writeback_congested,
				     rb_node);
		if (congested->blkcg_id < blkcg_id)
			node = &parent->rb_left;
		else if (congested->blkcg_id > blkcg_id)
			node = &parent->rb_right;
		else
			goto found;
	}

	if (new_congested) {
		/* !found and storage for new one already allocated, insert */
		congested = new_congested;
		new_congested = NULL;
		rb_link_node(&congested->rb_node, parent, node);
		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
		goto found;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);

	/* allocate storage for new one and retry */
	new_congested = kzalloc(sizeof(*new_congested), gfp);
	if (!new_congested)
		return NULL;

	atomic_set(&new_congested->refcnt, 0);
	new_congested->bdi = bdi;
	new_congested->blkcg_id = blkcg_id;
	goto retry;

found:
	atomic_inc(&congested->refcnt);
	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(new_congested);
	return congested;
}

/**
 * wb_congested_put - put a wb_congested
 * @congested: wb_congested to put
 *
 * Put @congested and destroy it if the refcnt reaches zero.
 */
void wb_congested_put(struct bdi_writeback_congested *congested)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
		local_irq_restore(flags);
		return;
	}

	/* bdi might already have been destroyed leaving @congested unlinked */
	if (congested->bdi) {
		rb_erase(&congested->rb_node,
			 &congested->bdi->cgwb_congested_tree);
		congested->bdi = NULL;
	}

	spin_unlock_irqrestore(&cgwb_lock, flags);
	kfree(congested);
}

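/*
 * Final release of a cgroup wb: unlink it from the bdi, shut it down, drop
 * the css references and free it.  Runs from process context via
 * release_work since the percpu_ref release callback cannot sleep.
 */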
static void cgwb_release_workfn(struct work_struct *work)
{
	struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
						release_work);
	struct backing_dev_info *bdi = wb->bdi;

	spin_lock_irq(&cgwb_lock);
	list_del_rcu(&wb->bdi_node);
	spin_unlock_irq(&cgwb_lock);

	wb_shutdown(wb);

	css_put(wb->memcg_css);
	css_put(wb->blkcg_css);

	fprop_local_destroy_percpu(&wb->memcg_completions);
	percpu_ref_exit(&wb->refcnt);
	wb_exit(wb);
	kfree_rcu(wb, rcu);

	if (atomic_dec_and_test(&bdi->usage_cnt))
		wake_up_all(&cgwb_release_wait);
}

static void cgwb_release(struct percpu_ref *refcnt)
{
	struct bdi_writeback *wb = container_of(refcnt, struct bdi_writeback,
						refcnt);
	schedule_work(&wb->release_work);
}

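/*
 * Unlink @wb from its bdi, memcg and blkcg and start its destruction by
 * killing the percpu ref.  Caller must hold cgwb_lock.
 */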
static void cgwb_kill(struct bdi_writeback *wb)
{
	lockdep_assert_held(&cgwb_lock);

	WARN_ON(!radix_tree_delete(&wb->bdi->cgwb_tree, wb->memcg_css->id));
	list_del(&wb->memcg_node);
	list_del(&wb->blkcg_node);
	percpu_ref_kill(&wb->refcnt);
}

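/*
 * Create and install the wb for @memcg_css on @bdi.  A stale wb with a
 * mismatched blkcg is killed first; the new wb is only linked if the bdi
 * and both cgroups are still online.  Returns 0 on success (including the
 * case where another instance raced us and installed one), -errno otherwise.
 */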
static int cgwb_create(struct backing_dev_info *bdi,
		       struct cgroup_subsys_state *memcg_css, gfp_t gfp)
{
	struct mem_cgroup *memcg;
	struct cgroup_subsys_state *blkcg_css;
	struct blkcg *blkcg;
	struct list_head *memcg_cgwb_list, *blkcg_cgwb_list;
	struct bdi_writeback *wb;
	unsigned long flags;
	int ret = 0;

	memcg = mem_cgroup_from_css(memcg_css);
	blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys);
	blkcg = css_to_blkcg(blkcg_css);
	memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	blkcg_cgwb_list = &blkcg->cgwb_list;

	/* look up again under lock and discard on blkcg mismatch */
	spin_lock_irqsave(&cgwb_lock, flags);
	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
	if (wb && wb->blkcg_css != blkcg_css) {
		cgwb_kill(wb);
		wb = NULL;
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (wb)
		goto out_put;

	/* need to create a new one */
	wb = kmalloc(sizeof(*wb), gfp);
	if (!wb)
		return -ENOMEM;

	ret = wb_init(wb, bdi, blkcg_css->id, gfp);
	if (ret)
		goto err_free;

	ret = percpu_ref_init(&wb->refcnt, cgwb_release, 0, gfp);
	if (ret)
		goto err_wb_exit;

	ret = fprop_local_init_percpu(&wb->memcg_completions, gfp);
	if (ret)
		goto err_ref_exit;

	wb->memcg_css = memcg_css;
	wb->blkcg_css = blkcg_css;
	INIT_WORK(&wb->release_work, cgwb_release_workfn);
	set_bit(WB_registered, &wb->state);

	/*
	 * The root wb determines the registered state of the whole bdi and
	 * memcg_cgwb_list and blkcg_cgwb_list's next pointers indicate
	 * whether they're still online.  Don't link @wb if any is dead.
	 * See wb_memcg_offline() and wb_blkcg_offline().
	 */
	ret = -ENODEV;
	spin_lock_irqsave(&cgwb_lock, flags);
	if (test_bit(WB_registered, &bdi->wb.state) &&
	    blkcg_cgwb_list->next && memcg_cgwb_list->next) {
		/* we might have raced another instance of this function */
		ret = radix_tree_insert(&bdi->cgwb_tree, memcg_css->id, wb);
		if (!ret) {
			atomic_inc(&bdi->usage_cnt);
			list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
			list_add(&wb->memcg_node, memcg_cgwb_list);
			list_add(&wb->blkcg_node, blkcg_cgwb_list);
			css_get(memcg_css);
			css_get(blkcg_css);
		}
	}
	spin_unlock_irqrestore(&cgwb_lock, flags);
	if (ret) {
		if (ret == -EEXIST)
			ret = 0;
		goto err_fprop_exit;
	}
	goto out_put;

err_fprop_exit:
	fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit:
	percpu_ref_exit(&wb->refcnt);
err_wb_exit:
	wb_exit(wb);
err_free:
	kfree(wb);
out_put:
	css_put(blkcg_css);
	return ret;
}

/**
 * wb_get_create - get wb for a given memcg, create if necessary
 * @bdi: target bdi
 * @memcg_css: cgroup_subsys_state of the target memcg (must have positive ref)
 * @gfp: allocation mask to use
 *
 * Try to get the wb for @memcg_css on @bdi.  If it doesn't exist, try to
 * create one.  The returned wb has its refcount incremented.
 *
 * This function uses css_get() on @memcg_css and thus expects its refcnt
 * to be positive on invocation.  IOW, rcu_read_lock() protection on
 * @memcg_css isn't enough.  try_get it before calling this function.
 *
 * A wb is keyed by its associated memcg.  As blkcg implicitly enables
 * memcg on the default hierarchy, memcg association is guaranteed to be
 * more specific (equal or descendant to the associated blkcg) and thus can
 * identify both the memcg and blkcg associations.
 *
 * Because the blkcg associated with a memcg may change as blkcg is enabled
 * and disabled closer to root in the hierarchy, each wb keeps track of
 * both the memcg and blkcg associated with it and verifies the blkcg on
 * each lookup.  On mismatch, the existing wb is discarded and a new one is
 * created.
 */
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp)
{
	struct bdi_writeback *wb;

	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (!memcg_css->parent)
		return &bdi->wb;

	do {
		rcu_read_lock();
		wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
		if (wb) {
			struct cgroup_subsys_state *blkcg_css;

			/* see whether the blkcg association has changed */
			blkcg_css = cgroup_get_e_css(memcg_css->cgroup,
						     &io_cgrp_subsys);
			if (unlikely(wb->blkcg_css != blkcg_css ||
				     !wb_tryget(wb)))
				wb = NULL;
			css_put(blkcg_css);
		}
		rcu_read_unlock();
	} while (!wb && !cgwb_create(bdi, memcg_css, gfp));

	return wb;
}

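/*
 * Set up the cgroup-writeback state of a new bdi: the cgwb radix tree, the
 * congested rb-tree and the root wb (attached to the root memcg and blkcg).
 */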
static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
	bdi->cgwb_congested_tree = RB_ROOT;
	atomic_set(&bdi->usage_cnt, 1);

	ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (!ret) {
		bdi->wb.memcg_css = &root_mem_cgroup->css;
		bdi->wb.blkcg_css = blkcg_root_css;
	}
	return ret;
}

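/*
 * Kill all cgwbs on @bdi and wait until every one of them has been
 * released.  The usage counter is dropped so the waiters can make progress
 * and taken back afterwards so the bdi can be re-registered.
 */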
static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{
	struct radix_tree_iter iter;
	void **slot;

	WARN_ON(test_bit(WB_registered, &bdi->wb.state));

	spin_lock_irq(&cgwb_lock);
	radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
		cgwb_kill(*slot);
	spin_unlock_irq(&cgwb_lock);

	/*
	 * All cgwb's must be shutdown and released before returning.  Drain
	 * the usage counter to wait for all cgwb's ever created on @bdi.
	 */
	atomic_dec(&bdi->usage_cnt);
	wait_event(cgwb_release_wait, !atomic_read(&bdi->usage_cnt));
	/*
	 * Grab back our reference so that we hold it when @bdi gets
	 * re-registered.
	 */
	atomic_inc(&bdi->usage_cnt);
}

/**
 * wb_memcg_offline - kill all wb's associated with a memcg being offlined
 * @memcg: memcg being offlined
 *
 * Also prevents creation of any new wb's associated with @memcg.
 */
void wb_memcg_offline(struct mem_cgroup *memcg)
{
	LIST_HEAD(to_destroy);
	struct list_head *memcg_cgwb_list = mem_cgroup_cgwb_list(memcg);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, memcg_cgwb_list, memcg_node)
		cgwb_kill(wb);
	memcg_cgwb_list->next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

/**
 * wb_blkcg_offline - kill all wb's associated with a blkcg being offlined
 * @blkcg: blkcg being offlined
 *
 * Also prevents creation of any new wb's associated with @blkcg.
 */
void wb_blkcg_offline(struct blkcg *blkcg)
{
	LIST_HEAD(to_destroy);
	struct bdi_writeback *wb, *next;

	spin_lock_irq(&cgwb_lock);
	list_for_each_entry_safe(wb, next, &blkcg->cgwb_list, blkcg_node)
		cgwb_kill(wb);
	blkcg->cgwb_list.next = NULL;	/* prevent new wb's */
	spin_unlock_irq(&cgwb_lock);
}

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	struct rb_node *rbn;

	spin_lock_irq(&cgwb_lock);
	while ((rbn = rb_first(&bdi->cgwb_congested_tree))) {
		struct bdi_writeback_congested *congested =
			rb_entry(rbn, struct bdi_writeback_congested, rb_node);

		rb_erase(rbn, &bdi->cgwb_congested_tree);
		congested->bdi = NULL;	/* mark @congested unlinked */
	}
	spin_unlock_irq(&cgwb_lock);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
	int err;

	bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
	if (!bdi->wb_congested)
		return -ENOMEM;

	atomic_set(&bdi->wb_congested->refcnt, 1);

	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
	if (err) {
		wb_congested_put(bdi->wb_congested);
		return err;
	}
	return 0;
}

static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }

static void cgwb_bdi_exit(struct backing_dev_info *bdi)
{
	wb_congested_put(bdi->wb_congested);
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

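/*
 * Initialize the common fields of a backing_dev_info and its embedded root
 * writeback.  Registration with sysfs happens separately in bdi_register().
 */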
int bdi_init(struct backing_dev_info *bdi)
{
	int ret;

	bdi->dev = NULL;

	kref_init(&bdi->refcnt);
	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	init_waitqueue_head(&bdi->wb_waitq);

	ret = cgwb_bdi_init(bdi);

	list_add_tail_rcu(&bdi->wb.bdi_node, &bdi->wb_list);

	return ret;
}
EXPORT_SYMBOL(bdi_init);

struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id)
{
	struct backing_dev_info *bdi;

	bdi = kmalloc_node(sizeof(struct backing_dev_info),
			   gfp_mask | __GFP_ZERO, node_id);
	if (!bdi)
		return NULL;

	if (bdi_init(bdi)) {
		kfree(bdi);
		return NULL;
	}
	return bdi;
}

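/*
 * Create the bdi's device under the "bdi" class, register its debugfs
 * entries, mark the root wb as registered and add the bdi to the global
 * bdi_list.
 */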
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(WB_registered, &bdi->wb.state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner)
{
	int rc;

	rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt),
			MINOR(owner->devt));
	if (rc)
		return rc;
	/* Leaking owner reference... */
	WARN_ON(bdi->owner);
	bdi->owner = owner;
	get_device(owner);
	return 0;
}
EXPORT_SYMBOL(bdi_register_owner);

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	/* make sure nobody finds us on the bdi_list anymore */
	bdi_remove_from_list(bdi);
	wb_shutdown(&bdi->wb);
	cgwb_bdi_destroy(bdi);

	if (bdi->dev) {
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}

	if (bdi->owner) {
		put_device(bdi->owner);
		bdi->owner = NULL;
	}
}

static void bdi_exit(struct backing_dev_info *bdi)
{
	WARN_ON_ONCE(bdi->dev);
	wb_exit(&bdi->wb);
	cgwb_bdi_exit(bdi);
}

static void release_bdi(struct kref *ref)
{
	struct backing_dev_info *bdi =
			container_of(ref, struct backing_dev_info, refcnt);

	bdi_exit(bdi);
	kfree(bdi);
}

void bdi_put(struct backing_dev_info *bdi)
{
	kref_put(&bdi->refcnt, release_bdi);
}

void bdi_destroy(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);
	bdi_exit(bdi);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name)
{
	int err;

	bdi->name = name;
	bdi->capabilities = 0;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_wb_congested[2];

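/*
 * set_wb_congested()/clear_wb_congested() flip the sync or async congested
 * bit of a wb_congested and maintain a global count of congested wbs;
 * clearing congestion wakes anybody sleeping on the corresponding
 * congestion_wqh queue.
 */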
void clear_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	wait_queue_head_t *wqh = &congestion_wqh[sync];
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (test_and_clear_bit(bit, &congested->state))
		atomic_dec(&nr_wb_congested[sync]);
	smp_mb__after_atomic();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_wb_congested);

void set_wb_congested(struct bdi_writeback_congested *congested, int sync)
{
	enum wb_congested_state bit;

	bit = sync ? WB_sync_congested : WB_async_congested;
	if (!test_and_set_bit(bit, &congested->state))
		atomic_inc(&nr_wb_congested[sync]);
}
EXPORT_SYMBOL(set_wb_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a pgdat to complete writes
 * @pgdat: A pgdat to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If any backing_dev is congested and the given @pgdat has experienced
 * recent congestion, this waits for up to @timeout jiffies for either a
 * BDI to exit congestion of the given @sync queue or a write to complete.
 *
 * In the absence of pgdat congestion, cond_resched() is called to yield
 * the processor if necessary, but the function otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct pglist_data *pgdat, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current pgdat, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_wb_congested[sync]) == 0 ||
	    !test_bit(PGDAT_CONGESTED, &pgdat->flags)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

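/*
 * Compatibility stub for the removed pdflush tunables in /proc: always
 * reports "0" and warns once that the file is scheduled for removal.
 */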
int pdflush_proc_obsolete(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos || *lenp < sizeof(kbuf)) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	pr_warn_once("%s exported in /proc is scheduled for removal\n",
		     table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}