/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

#define CUTOFF_WRITEBACK		40
#define CUTOFF_WRITEBACK_SYNC		70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90

#define MAX_WRITEBACKS_IN_PASS		5
#define MAX_WRITESIZE_IN_PASS		5000	/* in units of 512-byte sectors */

#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5

#define BCH_AUTO_GC_DIRTY_THRESHOLD	50

/*
 * 14 (16384ths) is chosen so that each backing device gets a reasonable
 * fraction of the share, and the accounting does not blow up until
 * individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14

/* Sum of dirty sectors over all of the device's stripes. */
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}

/* Map a sector offset to the index of the stripe containing it. */
static inline unsigned int offset_to_stripe(struct bcache_device *d,
					    uint64_t offset)
{
	do_div(offset, d->stripe_size);
	return offset;
}

/*
 * True if any stripe spanned by [offset, offset + nr_sectors) already
 * holds dirty sectors.
 */
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	unsigned int stripe = offset_to_stripe(&dc->disk, offset);

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}

extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;

/* Decide whether a write bio should go through the writeback cache. */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use; /* in percent */

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

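	/*
	 * Discard requests carry no data worth caching; let them go
	 * straight to the backing device.
	 */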
	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	/*
	 * If partial stripe writes are expensive on the backing device,
	 * cache writes touching stripes that already hold dirty data,
	 * so writeback can later issue full-stripe writes.
	 */
	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	/*
	 * Cache synchronous and metadata/priority writes, and anything
	 * else while cache utilisation is below the writeback cutoff.
	 */
	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}

/* Wake the writeback thread, if it has been started. */
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}

/*
 * Note that the device now holds dirty data: persist BDEV_STATE_DIRTY
 * in the superblock and kick the writeback thread.
 */
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}

void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif /* _BCACHE_WRITEBACK_H */