/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

/* Cache in-use thresholds (percent) above which writeback caching is refused */
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

/* Sum of dirty sectors across all stripes of a bcache device */
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

/* Total dirty sectors across all flash-only volumes in a cache set */
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

/* Map a sector offset on the device to its stripe index */
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

/* True if any stripe covered by [offset, offset + nr_sectors) has dirty data */
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

/*
 * Decide whether a write bio should be cached in writeback mode: never when
 * the device is detaching or the cache is too full; always for writes that
 * touch already-dirty stripes when partial stripe writes are expensive;
 * otherwise only for sync/metadata/priority writes or while cache usage is
 * below CUTOFF_WRITEBACK.
 */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= CUTOFF_WRITEBACK);
}

/* Wake the per-device writeback thread if it has been started */
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

/*
 * Note that the device now holds dirty data: take a ref on the cached_dev,
 * persist the dirty state in the backing device superblock, and kick the
 * writeback thread.
 */
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		refcount_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif
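
/*
 * Usage sketch (illustrative only, not part of this header): the bcache
 * write path in request.c consults should_writeback() per bio to choose
 * between writeback caching and bypass/write-through. Exact helper and
 * field names (cache_mode(), struct search "s") vary by kernel version,
 * so treat the snippet below as an assumption-laden sketch:
 *
 *	if (should_writeback(dc, bio, cache_mode(dc, bio), s->iop.bypass)) {
 *		s->iop.bypass = false;
 *		s->iop.writeback = true;
 *	}
 *
 * When a write is accepted for writeback, bch_writeback_add(dc) is called
 * so the dirty state reaches the backing superblock and the writeback
 * thread is woken.
 */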