backing-dev.c: 74d83b7eedab14e4b963a2220ff76f98fa6d4cb8 → d46db3d58233be4be980eb1e42eebe7808bcabab
(unchanged context is shown once; lines removed by the newer revision are prefixed with -, lines it adds with +)
1
2#include <linux/wait.h>
3#include <linux/backing-dev.h>
4#include <linux/kthread.h>
5#include <linux/freezer.h>
6#include <linux/fs.h>
7#include <linux/pagemap.h>
8#include <linux/mm.h>

--- 31 unchanged lines hidden ---

40LIST_HEAD(bdi_pending_list);
41
42static struct task_struct *sync_supers_tsk;
43static struct timer_list sync_supers_timer;
44
45static int bdi_sync_supers(void *);
46static void sync_supers_timer_fn(unsigned long);
47
+ 48void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+ 49{
+ 50	if (wb1 < wb2) {
+ 51		spin_lock(&wb1->list_lock);
+ 52		spin_lock_nested(&wb2->list_lock, 1);
+ 53	} else {
+ 54		spin_lock(&wb2->list_lock);
+ 55		spin_lock_nested(&wb1->list_lock, 1);
+ 56	}
+ 57}
+ 58
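bdi_lock_two() is new in this revision: it acquires the two writeback list locks in a fixed order, ascending by address, so concurrent callers handing in the same pair in either order can never deadlock (the classic ABBA pattern); spin_lock_nested(..., 1) tells lockdep that taking a second lock of the same class is intentional. A minimal userspace sketch of the same address-ordering idiom using pthread mutexes; the struct and function names below are illustrative, not kernel API:

#include <pthread.h>
#include <stdio.h>

struct wb { pthread_mutex_t list_lock; };

/* Lock two instances in ascending address order, so callers
 * passing (a, b) and (b, a) agree on the order and cannot
 * deadlock against each other. */
static void lock_two(struct wb *wb1, struct wb *wb2)
{
	if (wb1 < wb2) {
		pthread_mutex_lock(&wb1->list_lock);
		pthread_mutex_lock(&wb2->list_lock);
	} else {
		pthread_mutex_lock(&wb2->list_lock);
		pthread_mutex_lock(&wb1->list_lock);
	}
}

static void unlock_two(struct wb *wb1, struct wb *wb2)
{
	pthread_mutex_unlock(&wb1->list_lock);
	pthread_mutex_unlock(&wb2->list_lock);
}

int main(void)
{
	struct wb a = { PTHREAD_MUTEX_INITIALIZER };
	struct wb b = { PTHREAD_MUTEX_INITIALIZER };

	lock_two(&a, &b);	/* acquires in the same order as lock_two(&b, &a) */
	unlock_two(&a, &b);
	puts("locked and unlocked both in a stable order");
	return 0;
}

Unlock order does not matter for deadlock avoidance; only a consistent acquisition order does.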
59#ifdef CONFIG_DEBUG_FS
60#include <linux/debugfs.h>
61#include <linux/seq_file.h>
62
63static struct dentry *bdi_debug_root;
64
65static void bdi_debug_init(void)
66{

--- 6 unchanged lines hidden ---

73 struct bdi_writeback *wb = &bdi->wb;
74 unsigned long background_thresh;
75 unsigned long dirty_thresh;
76 unsigned long bdi_thresh;
77 unsigned long nr_dirty, nr_io, nr_more_io;
78 struct inode *inode;
79
80 nr_dirty = nr_io = nr_more_io = 0;
- 70	spin_lock(&inode_wb_list_lock);
+ 81	spin_lock(&wb->list_lock);
82 list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
83 nr_dirty++;
84 list_for_each_entry(inode, &wb->b_io, i_wb_list)
85 nr_io++;
86 list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
87 nr_more_io++;
- 77	spin_unlock(&inode_wb_list_lock);
+ 88	spin_unlock(&wb->list_lock);
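This hunk is the core of the lock split: the debugfs statistics walk now serializes on the per-bdi wb->list_lock instead of the single global inode_wb_list_lock, so different backing devices can manipulate their b_dirty/b_io/b_more_io lists concurrently instead of contending on one system-wide spinlock.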
89
90 global_dirty_limits(&background_thresh, &dirty_thresh);
91 bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
92
93#define K(x) ((x) << (PAGE_SHIFT - 10))
94 seq_printf(m,
95 "BdiWriteback: %8lu kB\n"
96 "BdiReclaimable: %8lu kB\n"

--- 158 unchanged lines hidden ---
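The K() macro above converts page counts to the kilobytes that the seq_printf() format strings advertise: with the common 4 KiB page size, PAGE_SHIFT is 12, so K(x) is x << 2, e.g. K(256) prints as 1024 kB.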

255}
256subsys_initcall(default_bdi_init);
257
258int bdi_has_dirty_io(struct backing_dev_info *bdi)
259{
260 return wb_has_dirty_io(&bdi->wb);
261}
262
- 252static void bdi_flush_io(struct backing_dev_info *bdi)
- 253{
- 254	struct writeback_control wbc = {
- 255		.sync_mode		= WB_SYNC_NONE,
- 256		.older_than_this	= NULL,
- 257		.range_cyclic		= 1,
- 258		.nr_to_write		= 1024,
- 259	};
- 260
- 261	writeback_inodes_wb(&bdi->wb, &wbc);
- 262}
- 263
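bdi_flush_io() and its on-stack writeback_control go away in the newer revision; the sole caller now hands the 1024-page budget straight to writeback_inodes_wb(), whose signature changed to take a page count rather than a writeback_control (see line 451 below).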
263/*
264 * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
265 * or we risk deadlocking on ->s_umount. The longer term solution would be
266 * to implement sync_supers_bdi() or similar and simply do it from the
267 * bdi writeback thread individually.
268 */
269static int bdi_sync_supers(void *unused)
270{

--- 169 unchanged lines hidden ---

440 switch (action) {
441 case FORK_THREAD:
442 __set_current_state(TASK_RUNNING);
443 task = kthread_create(bdi_writeback_thread, &bdi->wb,
444 "flush-%s", dev_name(bdi->dev));
445 if (IS_ERR(task)) {
446 /*
447 * If thread creation fails, force writeout of
- 449			 * the bdi from the thread.
+ 448			 * the bdi from the thread. Hopefully 1024 is
+ 449			 * large enough for efficient IO.
 450			 */
- 451			bdi_flush_io(bdi);
+ 451			writeback_inodes_wb(&bdi->wb, 1024);
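When kthread_create() fails, the forker falls back to doing a bounded chunk of writeback synchronously rather than dropping the work. A userspace sketch of that "do the work inline if the worker cannot be spawned" pattern; every name in it is illustrative:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the real writeback work. */
static void flush_items(long budget)
{
	printf("flushing up to %ld items\n", budget);
}

static void *flusher_thread(void *arg)
{
	flush_items((long)(intptr_t)arg);
	return NULL;
}

int main(void)
{
	pthread_t t;
	long budget = 1024;

	if (pthread_create(&t, NULL, flusher_thread,
			   (void *)(intptr_t)budget) != 0) {
		/* No worker: do one bounded pass ourselves, the way
		 * the forker calls writeback_inodes_wb(&bdi->wb, 1024). */
		flush_items(budget);
		return 0;
	}
	pthread_join(t, NULL);
	return 0;
}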
452 } else {
453 /*
454 * The spinlock makes sure we do not lose
455 * wake-ups when racing with 'bdi_queue_work()'.
456 * And as soon as the bdi thread is visible, we
457 * can start it.
458 */
459 spin_lock_bh(&bdi->wb_lock);

--- 163 unchanged lines hidden ---
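The comment above spin_lock_bh(&bdi->wb_lock) describes the classic lost-wakeup race: the new thread must be published, and any wakeup issued, under the same lock that bdi_queue_work() takes, or a wakeup sent in the window between queuing work and the thread going to sleep is silently dropped. The same rule in userspace terms, where the condition variable's mutex plays the role of wb_lock (all names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;

static void *sleeper(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	/* Re-check the predicate under the lock; a signal sent
	 * while we hold it cannot be lost. */
	while (!work_pending)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	puts("woken with work pending");
	return NULL;
}

static void queue_work(void)
{
	pthread_mutex_lock(&lock);	/* like bdi->wb_lock */
	work_pending = true;		/* publish the predicate */
	pthread_cond_signal(&cond);	/* cannot race the sleeper's check */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, sleeper, NULL);
	queue_work();
	pthread_join(t, NULL);
	return 0;
}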

623{
624 memset(wb, 0, sizeof(*wb));
625
626 wb->bdi = bdi;
627 wb->last_old_flush = jiffies;
628 INIT_LIST_HEAD(&wb->b_dirty);
629 INIT_LIST_HEAD(&wb->b_io);
630 INIT_LIST_HEAD(&wb->b_more_io);
+ 631	spin_lock_init(&wb->list_lock);
632 setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
633}
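setup_timer() here is the pre-4.15 timer API: the callback takes an unsigned long, so the bdi pointer is smuggled through as the timer's data argument. Modern kernels would use timer_setup() and recover the containing structure with from_timer() instead.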
634
635int bdi_init(struct backing_dev_info *bdi)
636{
637 int i, err;
638
639 bdi->dev = NULL;

--- 32 unchanged lines hidden ---

672
673 /*
674 * Splice our entries to the default_backing_dev_info, if this
675 * bdi disappears
676 */
677 if (bdi_has_dirty_io(bdi)) {
678 struct bdi_writeback *dst = &default_backing_dev_info.wb;
679
- 679		spin_lock(&inode_wb_list_lock);
+ 680		bdi_lock_two(&bdi->wb, dst);
681 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
682 list_splice(&bdi->wb.b_io, &dst->b_io);
683 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
- 683		spin_unlock(&inode_wb_list_lock);
+ 684		spin_unlock(&bdi->wb.list_lock);
+ 685		spin_unlock(&dst->list_lock);
686 }
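bdi_destroy() rescues any remaining dirty inodes by splicing the whole b_dirty/b_io/b_more_io lists onto the default bdi while holding both list locks, taken in a safe order via bdi_lock_two(). A freestanding sketch of what list_splice() does to these circular doubly-linked lists, with a minimal stand-in for <linux/list.h> (not the kernel implementation):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Move every entry of 'src' to the front of 'dst'; 'src' itself
 * is left untouched (the kernel caller is about to free it). */
static void list_splice(struct list_head *src, struct list_head *dst)
{
	if (src->next == src)	/* empty source list */
		return;
	src->prev->next = dst->next;
	dst->next->prev = src->prev;
	dst->next = src->next;
	src->next->prev = dst;
}

int main(void)
{
	struct list_head a, b, n1, n2;
	struct list_head *p;
	int count = 0;

	INIT_LIST_HEAD(&a);
	INIT_LIST_HEAD(&b);
	list_add_tail(&n1, &a);
	list_add_tail(&n2, &a);
	list_splice(&a, &b);	/* both entries now hang off b */
	for (p = b.next; p != &b; p = p->next)
		count++;
	printf("spliced %d entries\n", count);	/* prints 2 */
	return 0;
}

The kernel's list_splice() likewise leaves the source head stale rather than reinitializing it, which is fine here because the bdi is being torn down.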
687
688 bdi_unregister(bdi);
689
690 for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
691 percpu_counter_destroy(&bdi->bdi_stat[i]);
692
693 prop_local_destroy_percpu(&bdi->completions);

--- 141 unchanged lines hidden ---