/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

/*
 * Cache in-use percentages above which new writes bypass the writeback
 * cache: CUTOFF_WRITEBACK applies to normal writes, CUTOFF_WRITEBACK_SYNC
 * to sync writes.
 */
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

/* Sum of dirty sectors over all stripes of a bcache device. */
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

/* Total dirty sectors across all flash-only volumes in the cache set. */
static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
{
	uint64_t i, ret = 0;

	mutex_lock(&bch_register_lock);

	for (i = 0; i < c->nr_uuids; i++) {
		struct bcache_device *d = c->devices[i];

		if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
			continue;
		ret += bcache_dev_sectors_dirty(d);
	}

	mutex_unlock(&bch_register_lock);

	return ret;
}

/* Map a sector offset on the backing device to its stripe index. */
static inline unsigned offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

/*
 * True if any stripe touched by the nr_sectors starting at offset already
 * has dirty data.
 */
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	unsigned stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

/* Decide whether a write bio should go through the writeback cache. */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return op_is_sync(bio->bi_opf) || in_use <= CUTOFF_WRITEBACK;
}

/* Wake the per-device writeback thread, if it is running. */
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

/*
 * Mark the backing device as having dirty data (persisting the state in
 * the superblock) and kick the writeback thread.
 */
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		atomic_inc(&dc->count);

		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);

void bch_sectors_dirty_init(struct bcache_device *);
void bch_cached_dev_writeback_init(struct cached_dev *);
int bch_cached_dev_writeback_start(struct cached_dev *);

#endif