1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
2279afbadSKent Overstreet #ifndef _BCACHE_WRITEBACK_H
3279afbadSKent Overstreet #define _BCACHE_WRITEBACK_H
4279afbadSKent Overstreet
572c27061SKent Overstreet #define CUTOFF_WRITEBACK 40
672c27061SKent Overstreet #define CUTOFF_WRITEBACK_SYNC 70
772c27061SKent Overstreet
89aaf5165SColy Li #define CUTOFF_WRITEBACK_MAX 70
99aaf5165SColy Li #define CUTOFF_WRITEBACK_SYNC_MAX 90
109aaf5165SColy Li
11539d39ebSTang Junhui #define MAX_WRITEBACKS_IN_PASS 5
12539d39ebSTang Junhui #define MAX_WRITESIZE_IN_PASS 5000 /* *512b */
13539d39ebSTang Junhui
147a5e3ecbSColy Li #define WRITEBACK_RATE_UPDATE_SECS_MAX 60
157a5e3ecbSColy Li #define WRITEBACK_RATE_UPDATE_SECS_DEFAULT 5
167a5e3ecbSColy Li
177a671d8eSColy Li #define BCH_AUTO_GC_DIRTY_THRESHOLD 50
187a671d8eSColy Li
1971dda2a5Sdongdong tao #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
2071dda2a5Sdongdong tao #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
2171dda2a5Sdongdong tao #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
2271dda2a5Sdongdong tao
23df973468SColy Li #define BCH_DIRTY_INIT_THRD_MAX 12
24616486abSMichael Lyle /*
25616486abSMichael Lyle * 14 (16384ths) is chosen here as something that each backing device
26616486abSMichael Lyle * should be a reasonable fraction of the share, and not to blow up
27616486abSMichael Lyle * until individual backing devices are a petabyte.
28616486abSMichael Lyle */
29616486abSMichael Lyle #define WRITEBACK_SHARE_SHIFT 14
30616486abSMichael Lyle
struct bch_dirty_init_state;

/*
 * Per-thread bookkeeping for the dirty-sector init workers (see
 * bch_sectors_dirty_init()). Each worker keeps a back-pointer to the
 * shared bch_dirty_init_state it cooperates through.
 */
struct dirty_init_thrd_info {
	struct bch_dirty_init_state *state;	/* shared state for all workers */
	struct task_struct *thread;		/* this worker's kthread */
};
36b144e45fSColy Li
/*
 * Shared coordination state for the (up to BCH_DIRTY_INIT_THRD_MAX)
 * threads that initialize the dirty sector counters of a bcache device.
 */
struct bch_dirty_init_state {
	struct cache_set *c;		/* cache set being scanned */
	struct bcache_device *d;	/* backing device whose counters we build */
	int total_threads;		/* number of worker threads started */
	int key_idx;			/* next key index to hand out; presumably guarded by idx_lock — confirm in writeback.c */
	spinlock_t idx_lock;
	atomic_t started;		/* workers that have begun running */
	atomic_t enough;		/* flag: no more work to distribute */
	wait_queue_head_t wait;		/* waited on until workers finish */
	struct dirty_init_thrd_info infos[BCH_DIRTY_INIT_THRD_MAX];
};
48b144e45fSColy Li
bcache_dev_sectors_dirty(struct bcache_device * d)49279afbadSKent Overstreet static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
50279afbadSKent Overstreet {
51279afbadSKent Overstreet uint64_t i, ret = 0;
52279afbadSKent Overstreet
53279afbadSKent Overstreet for (i = 0; i < d->nr_stripes; i++)
54279afbadSKent Overstreet ret += atomic_read(d->stripe_sectors_dirty + i);
55279afbadSKent Overstreet
56279afbadSKent Overstreet return ret;
57279afbadSKent Overstreet }
58279afbadSKent Overstreet
/*
 * Map a sector offset on the backing device to its stripe index.
 * Returns the stripe number, or -EINVAL if the offset maps past the
 * last stripe.
 */
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	uint64_t stripe = offset;

	do_div(stripe, d->stripe_size);

	/*
	 * d->nr_stripes is in range [1, INT_MAX], so a valid stripe is
	 * definitely <= INT_MAX and the narrowing to int cannot overflow.
	 */
	if (likely(stripe < d->nr_stripes))
		return stripe;

	pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
		stripe, d->nr_stripes);
	return -EINVAL;
}
7748a915a8SKent Overstreet
bcache_dev_stripe_dirty(struct cached_dev * dc,uint64_t offset,unsigned int nr_sectors)7848a915a8SKent Overstreet static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
7972c27061SKent Overstreet uint64_t offset,
806f10f7d1SColy Li unsigned int nr_sectors)
8172c27061SKent Overstreet {
827a148126SColy Li int stripe = offset_to_stripe(&dc->disk, offset);
837a148126SColy Li
847a148126SColy Li if (stripe < 0)
857a148126SColy Li return false;
8672c27061SKent Overstreet
8772c27061SKent Overstreet while (1) {
8848a915a8SKent Overstreet if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
8972c27061SKent Overstreet return true;
9072c27061SKent Overstreet
9148a915a8SKent Overstreet if (nr_sectors <= dc->disk.stripe_size)
9272c27061SKent Overstreet return false;
9372c27061SKent Overstreet
9448a915a8SKent Overstreet nr_sectors -= dc->disk.stripe_size;
9572c27061SKent Overstreet stripe++;
9672c27061SKent Overstreet }
9772c27061SKent Overstreet }
9872c27061SKent Overstreet
999aaf5165SColy Li extern unsigned int bch_cutoff_writeback;
1009aaf5165SColy Li extern unsigned int bch_cutoff_writeback_sync;
1019aaf5165SColy Li
/*
 * Decide whether this bio should go through writeback caching rather
 * than being bypassed/written through.
 */
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	/* Writeback must be enabled, attached, and under the hard cutoff. */
	if (cache_mode != CACHE_MODE_WRITEBACK)
		return false;
	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
		return false;
	if (in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	/*
	 * If partial stripe writes are expensive and this range already
	 * has dirty stripes, keep writing back regardless of would_skip.
	 */
	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	/* Sync/metadata/priority I/O, or cache usage below the soft cutoff. */
	if (op_is_sync(bio->bi_opf))
		return true;
	if (bio->bi_opf & (REQ_META|REQ_PRIO))
		return true;
	return in_use <= bch_cutoff_writeback;
}
12772c27061SKent Overstreet
/*
 * Kick the writeback kthread, if it exists (it may be NULL or an
 * ERR_PTR before/if bch_cached_dev_writeback_start() succeeds).
 */
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}
1335e6926daSKent Overstreet
bch_writeback_add(struct cached_dev * dc)1345e6926daSKent Overstreet static inline void bch_writeback_add(struct cached_dev *dc)
1355e6926daSKent Overstreet {
1365e6926daSKent Overstreet if (!atomic_read(&dc->has_dirty) &&
1375e6926daSKent Overstreet !atomic_xchg(&dc->has_dirty, 1)) {
1385e6926daSKent Overstreet if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
1395e6926daSKent Overstreet SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
1405e6926daSKent Overstreet /* XXX: should do this synchronously */
1415e6926daSKent Overstreet bch_write_bdev_super(dc, NULL);
1425e6926daSKent Overstreet }
1435e6926daSKent Overstreet
1445e6926daSKent Overstreet bch_writeback_queue(dc);
1455e6926daSKent Overstreet }
1465e6926daSKent Overstreet }
1475e6926daSKent Overstreet
148fc2d5988SColy Li void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
149fc2d5988SColy Li uint64_t offset, int nr_sectors);
150279afbadSKent Overstreet
151fc2d5988SColy Li void bch_sectors_dirty_init(struct bcache_device *d);
152fc2d5988SColy Li void bch_cached_dev_writeback_init(struct cached_dev *dc);
153fc2d5988SColy Li int bch_cached_dev_writeback_start(struct cached_dev *dc);
154279afbadSKent Overstreet
155279afbadSKent Overstreet #endif
156