// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;

static const char bcache_magic[] = {
	0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
	0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
	0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
	0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_flush_wq;
struct workqueue_struct *bch_journal_wq;


#define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
/* limitation of partitions number on single bcache device */
#define BCACHE_MINORS		128
/* limitation of bcache devices number on single system */
#define BCACHE_DEVICE_IDX_MAX	((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */

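/*
 * Decode the bucket size stored in the on-disk superblock. With the
 * large_bucket feature the field holds log2 of the size in sectors;
 * with the obsolete large_bucket layout the high 16 bits live in
 * obso_bucket_size_hi. Older superblocks store the size directly.
 */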
static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
{
	unsigned int bucket_size = le16_to_cpu(s->bucket_size);

	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
		if (bch_has_feature_large_bucket(sb)) {
			unsigned int max, order;

			max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
			order = le16_to_cpu(s->bucket_size);
			/*
			 * bcache tool will make sure the overflow won't
			 * happen, an error message here is enough.
			 */
			if (order > max)
				pr_err("Bucket size (1 << %u) overflows\n",
					order);
			bucket_size = 1 << order;
		} else if (bch_has_feature_obso_large_bucket(sb)) {
			bucket_size +=
				le16_to_cpu(s->obso_bucket_size_hi) << 16;
		}
	}

	return bucket_size;
}

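/*
 * Validate the superblock fields shared by all cache-device versions:
 * block/bucket size sanity, bucket counts, the set UUID and the journal
 * bucket layout. Returns NULL on success or an error string.
 */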
static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
				     struct cache_sb_disk *s)
{
	const char *err;
	unsigned int i;

	sb->first_bucket = le16_to_cpu(s->first_bucket);
	sb->nbuckets	 = le64_to_cpu(s->nbuckets);
	sb->bucket_size	 = get_bucket_size(sb, s);

	sb->nr_in_set	 = le16_to_cpu(s->nr_in_set);
	sb->nr_this_dev	 = le16_to_cpu(s->nr_this_dev);

	err = "Too many journal buckets";
	if (sb->keys > SB_JOURNAL_BUCKETS)
		goto err;

	err = "Too many buckets";
	if (sb->nbuckets > LONG_MAX)
		goto err;

	err = "Not enough buckets";
	if (sb->nbuckets < 1 << 7)
		goto err;

	err = "Bad block size (not power of 2)";
	if (!is_power_of_2(sb->block_size))
		goto err;

	err = "Bad block size (larger than page size)";
	if (sb->block_size > PAGE_SECTORS)
		goto err;

	err = "Bad bucket size (not power of 2)";
	if (!is_power_of_2(sb->bucket_size))
		goto err;

	err = "Bad bucket size (smaller than page size)";
	if (sb->bucket_size < PAGE_SECTORS)
		goto err;

	err = "Invalid superblock: device too small";
	if (get_capacity(bdev->bd_disk) <
	    sb->bucket_size * sb->nbuckets)
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->set_uuid, 16))
		goto err;

	err = "Bad cache device number in set";
	if (!sb->nr_in_set ||
	    sb->nr_in_set <= sb->nr_this_dev ||
	    sb->nr_in_set > MAX_CACHES_PER_SET)
		goto err;

	err = "Journal buckets not sequential";
	for (i = 0; i < sb->keys; i++)
		if (sb->d[i] != sb->first_bucket + i)
			goto err;

	err = "Too many journal buckets";
	if (sb->first_bucket + sb->keys > sb->nbuckets)
		goto err;

	err = "Invalid superblock: first bucket comes before end of super";
	if (sb->first_bucket * sb->bucket_size < 16)
		goto err;

	err = NULL;
err:
	return err;
}

static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
			      struct cache_sb_disk **res)
{
	const char *err;
	struct cache_sb_disk *s;
	struct page *page;
	unsigned int i;

	page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
				   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
	if (IS_ERR(page))
		return "IO error";
	s = page_address(page) + offset_in_page(SB_OFFSET);

	sb->offset		= le64_to_cpu(s->offset);
	sb->version		= le64_to_cpu(s->version);

	memcpy(sb->magic,	s->magic, 16);
	memcpy(sb->uuid,	s->uuid, 16);
	memcpy(sb->set_uuid,	s->set_uuid, 16);
	memcpy(sb->label,	s->label, SB_LABEL_SIZE);

	sb->flags		= le64_to_cpu(s->flags);
	sb->seq			= le64_to_cpu(s->seq);
	sb->last_mount		= le32_to_cpu(s->last_mount);
	sb->keys		= le16_to_cpu(s->keys);

	for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
		sb->d[i] = le64_to_cpu(s->d[i]);

	pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
		 sb->version, sb->flags, sb->seq, sb->keys);

	err = "Not a bcache superblock (bad offset)";
	if (sb->offset != SB_SECTOR)
		goto err;

	err = "Not a bcache superblock (bad magic)";
	if (memcmp(sb->magic, bcache_magic, 16))
		goto err;

	err = "Bad checksum";
	if (s->csum != csum_set(s))
		goto err;

	err = "Bad UUID";
	if (bch_is_zero(sb->uuid, 16))
		goto err;

	sb->block_size	= le16_to_cpu(s->block_size);

	err = "Superblock block size smaller than device block size";
	if (sb->block_size << 9 < bdev_logical_block_size(bdev))
		goto err;

	switch (sb->version) {
	case BCACHE_SB_VERSION_BDEV:
		sb->data_offset	= BDEV_DATA_START_DEFAULT;
		break;
	case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
	case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
		sb->data_offset	= le64_to_cpu(s->data_offset);

		err = "Bad data offset";
		if (sb->data_offset < BDEV_DATA_START_DEFAULT)
			goto err;

		break;
	case BCACHE_SB_VERSION_CDEV:
	case BCACHE_SB_VERSION_CDEV_WITH_UUID:
		err = read_super_common(sb, bdev, s);
		if (err)
			goto err;
		break;
	case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
		/*
		 * Feature bits are needed in read_super_common(),
		 * convert them first.
		 */
		sb->feature_compat = le64_to_cpu(s->feature_compat);
		sb->feature_incompat = le64_to_cpu(s->feature_incompat);
		sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);

		/* Check incompatible features */
		err = "Unsupported compatible feature found";
		if (bch_has_unknown_compat_features(sb))
			goto err;

		err = "Unsupported read-only compatible feature found";
		if (bch_has_unknown_ro_compat_features(sb))
			goto err;

		err = "Unsupported incompatible feature found";
		if (bch_has_unknown_incompat_features(sb))
			goto err;

		err = read_super_common(sb, bdev, s);
		if (err)
			goto err;
		break;
	default:
		err = "Unsupported superblock version";
		goto err;
	}

	sb->last_mount = (u32)ktime_get_real_seconds();
	*res = s;
	return NULL;
err:
	put_page(page);
	return err;
}

static void write_bdev_super_endio(struct bio *bio)
{
	struct cached_dev *dc = bio->bi_private;

	if (bio->bi_status)
		bch_count_backing_io_errors(dc, bio);

	closure_put(&dc->sb_write);
}

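/*
 * Encode @sb into the on-disk buffer @out (cpu to little-endian),
 * recompute the checksum and submit @bio as a REQ_SYNC|REQ_META write
 * to SB_SECTOR. The caller holds a closure reference for completion.
 */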
static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
		struct bio *bio)
{
	unsigned int i;

	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
	bio->bi_iter.bi_sector	= SB_SECTOR;
	__bio_add_page(bio, virt_to_page(out), SB_SIZE,
			offset_in_page(out));

	out->offset		= cpu_to_le64(sb->offset);

	memcpy(out->uuid,	sb->uuid, 16);
	memcpy(out->set_uuid,	sb->set_uuid, 16);
	memcpy(out->label,	sb->label, SB_LABEL_SIZE);

	out->flags		= cpu_to_le64(sb->flags);
	out->seq		= cpu_to_le64(sb->seq);

	out->last_mount		= cpu_to_le32(sb->last_mount);
	out->first_bucket	= cpu_to_le16(sb->first_bucket);
	out->keys		= cpu_to_le16(sb->keys);

	for (i = 0; i < sb->keys; i++)
		out->d[i] = cpu_to_le64(sb->d[i]);

	if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
		out->feature_compat    = cpu_to_le64(sb->feature_compat);
		out->feature_incompat  = cpu_to_le64(sb->feature_incompat);
		out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
	}

	out->version		= cpu_to_le64(sb->version);
	out->csum = csum_set(out);

	pr_debug("ver %llu, flags %llu, seq %llu\n",
		 sb->version, sb->flags, sb->seq);

	submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
	struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

	up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
	struct closure *cl = &dc->sb_write;
	struct bio *bio = &dc->sb_bio;

	down(&dc->sb_write_mutex);
	closure_init(cl, parent);

	bio_init(bio, dc->bdev, dc->sb_bv, 1, 0);
	bio->bi_end_io	= write_bdev_super_endio;
	bio->bi_private = dc;

	closure_get(cl);
	/* I/O request sent to backing device */
	__write_super(&dc->sb, dc->sb_disk, bio);

	closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	/* is_read = 0 */
	bch_count_io_errors(ca, bio->bi_status, 0,
			    "writing superblock");
	closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, sb_write);

	up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
	struct closure *cl = &c->sb_write;
	struct cache *ca = c->cache;
	struct bio *bio = &ca->sb_bio;
	unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;

	down(&c->sb_write_mutex);
	closure_init(cl, &c->cl);

	ca->sb.seq++;

	if (ca->sb.version < version)
		ca->sb.version = version;

	bio_init(bio, ca->bdev, ca->sb_bv, 1, 0);
	bio->bi_end_io	= write_super_endio;
	bio->bi_private = ca;

	closure_get(cl);
	__write_super(&ca->sb, ca->sb_disk, bio);

	closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	cache_set_err_on(bio->bi_status, c, "accessing uuids");
	bch_bbio_free(bio, c);
	closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
	struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

	up(&c->uuid_write_mutex);
}

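/*
 * Read or write the uuid_entry array (c->uuids) at the bucket(s)
 * pointed to by @k, serialized by c->uuid_write_mutex. A write is
 * submitted to every pointer in the key; a read stops after the first
 * replica.
 */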
static void uuid_io(struct cache_set *c, blk_opf_t opf, struct bkey *k,
		    struct closure *parent)
{
	struct closure *cl = &c->uuid_write;
	struct uuid_entry *u;
	unsigned int i;
	char buf[80];

	BUG_ON(!parent);
	down(&c->uuid_write_mutex);
	closure_init(cl, parent);

	for (i = 0; i < KEY_PTRS(k); i++) {
		struct bio *bio = bch_bbio_alloc(c);

		bio->bi_opf = opf | REQ_SYNC | REQ_META;
		bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

		bio->bi_end_io	= uuid_endio;
		bio->bi_private = cl;
		bch_bio_map(bio, c->uuids);

		bch_submit_bbio(bio, c, k, i);

		if ((opf & REQ_OP_MASK) != REQ_OP_WRITE)
			break;
	}

	bch_extent_to_text(buf, sizeof(buf), k);
	pr_debug("%s UUIDs at %s\n", (opf & REQ_OP_MASK) == REQ_OP_WRITE ?
		 "wrote" : "read", buf);

	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
		if (!bch_is_zero(u->uuid, 16))
			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
				 u - c->uuids, u->uuid, u->label,
				 u->first_reg, u->last_reg, u->invalidated);

	closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
	struct bkey *k = &j->uuid_bucket;

	if (__bch_btree_ptr_invalid(c, k))
		return "bad uuid pointer";

	bkey_copy(&c->uuid_bucket, k);
	uuid_io(c, REQ_OP_READ, k, cl);

	if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
		struct uuid_entry_v0	*u0 = (void *) c->uuids;
		struct uuid_entry	*u1 = (void *) c->uuids;
		int i;

		closure_sync(cl);

		/*
		 * Since the new uuid entry is bigger than the old, we have to
		 * convert starting at the highest memory address and work down
		 * in order to do it in place
		 */

		for (i = c->nr_uuids - 1;
		     i >= 0;
		     --i) {
			memcpy(u1[i].uuid,	u0[i].uuid, 16);
			memcpy(u1[i].label,	u0[i].label, 32);

			u1[i].first_reg		= u0[i].first_reg;
			u1[i].last_reg		= u0[i].last_reg;
			u1[i].invalidated	= u0[i].invalidated;

			u1[i].flags	= 0;
			u1[i].sectors	= 0;
		}
	}

	return NULL;
}

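/*
 * Allocate a fresh meta bucket, write the current uuid entries to it
 * and remember the new location in c->uuid_bucket. Returns 0 on
 * success, 1 if no bucket could be allocated.
 */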
static int __uuid_write(struct cache_set *c)
{
	BKEY_PADDED(key) k;
	struct closure cl;
	struct cache *ca = c->cache;
	unsigned int size;

	closure_init_stack(&cl);
	lockdep_assert_held(&bch_register_lock);

	if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
		return 1;

	size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
	SET_KEY_SIZE(&k.key, size);
	uuid_io(c, REQ_OP_WRITE, &k.key, &cl);
	closure_sync(&cl);

	/* Only one bucket used for uuid write */
	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

	bkey_copy(&c->uuid_bucket, &k.key);
	bkey_put(c, &k.key);
	return 0;
}

int bch_uuid_write(struct cache_set *c)
{
	int ret = __uuid_write(c);

	if (!ret)
		bch_journal_meta(c, NULL);

	return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
	struct uuid_entry *u;

	for (u = c->uuids;
	     u < c->uuids + c->nr_uuids; u++)
		if (!memcmp(u->uuid, uuid, 16))
			return u;

	return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
	static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

	return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *    8 bit gen
 *   16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * lru (and in the future other) cache replacement policies; for most purposes
 * it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

static void prio_endio(struct bio *bio)
{
	struct cache *ca = bio->bi_private;

	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
	bch_bbio_free(bio, ca->set);
	closure_put(&ca->prio);
}

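/*
 * Synchronously read or write one bucket worth of prio_set data
 * (ca->disk_buckets) at @bucket, waiting on ca->prio for completion.
 */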
static void prio_io(struct cache *ca, uint64_t bucket, blk_opf_t opf)
{
	struct closure *cl = &ca->prio;
	struct bio *bio = bch_bbio_alloc(ca->set);

	closure_init_stack(cl);

	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
	bio_set_dev(bio, ca->bdev);
	bio->bi_iter.bi_size	= meta_bucket_bytes(&ca->sb);

	bio->bi_end_io	= prio_endio;
	bio->bi_private = ca;
	bio->bi_opf = opf | REQ_SYNC | REQ_META;
	bch_bio_map(bio, ca->disk_buckets);

	closure_bio_submit(ca->set, bio, &ca->prio);
	closure_sync(cl);
}

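/*
 * Write out the priorities/gens of every bucket: pack them into
 * prio_set buckets from the last towards the first, chain the buckets
 * via next_bucket, then journal the update. With wait == false this
 * fails with -ENOMEM instead of blocking for free buckets.
 */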
int bch_prio_write(struct cache *ca, bool wait)
{
	int i;
	struct bucket *b;
	struct closure cl;

	pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
		 fifo_used(&ca->free[RESERVE_PRIO]),
		 fifo_used(&ca->free[RESERVE_NONE]),
		 fifo_used(&ca->free_inc));

	/*
	 * Pre-check if there are enough free buckets. In the non-blocking
	 * scenario it's better to fail early rather than starting to allocate
	 * buckets and do a cleanup later in case of failure.
	 */
	if (!wait) {
		size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
			       fifo_used(&ca->free[RESERVE_NONE]);
		if (prio_buckets(ca) > avail)
			return -ENOMEM;
	}

	closure_init_stack(&cl);

	lockdep_assert_held(&ca->set->bucket_lock);

	ca->disk_buckets->seq++;

	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
			&ca->meta_sectors_written);

	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
		long bucket;
		struct prio_set *p = ca->disk_buckets;
		struct bucket_disk *d = p->data;
		struct bucket_disk *end = d + prios_per_bucket(ca);

		for (b = ca->buckets + i * prios_per_bucket(ca);
		     b < ca->buckets + ca->sb.nbuckets && d < end;
		     b++, d++) {
			d->prio = cpu_to_le16(b->prio);
			d->gen = b->gen;
		}

		p->next_bucket	= ca->prio_buckets[i + 1];
		p->magic	= pset_magic(&ca->sb);
		p->csum		= bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);

		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
		BUG_ON(bucket == -1);

		mutex_unlock(&ca->set->bucket_lock);
		prio_io(ca, bucket, REQ_OP_WRITE);
		mutex_lock(&ca->set->bucket_lock);

		ca->prio_buckets[i] = bucket;
		atomic_dec_bug(&ca->buckets[bucket].pin);
	}

	mutex_unlock(&ca->set->bucket_lock);

	bch_journal_meta(ca->set, &cl);
	closure_sync(&cl);

	mutex_lock(&ca->set->bucket_lock);

	/*
	 * Don't want the old priorities to get garbage collected until after we
	 * finish writing the new ones, and they're journalled
	 */
	for (i = 0; i < prio_buckets(ca); i++) {
		if (ca->prio_last_buckets[i])
			__bch_bucket_free(ca,
				&ca->buckets[ca->prio_last_buckets[i]]);

		ca->prio_last_buckets[i] = ca->prio_buckets[i];
	}
	return 0;
}

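/*
 * Walk the on-disk chain of prio_set buckets starting at @bucket and
 * restore each bucket's prio and gen, verifying the checksum and magic
 * of every bucket read. Returns 0 on success, -EIO on a bad bucket.
 */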
static int prio_read(struct cache *ca, uint64_t bucket)
{
	struct prio_set *p = ca->disk_buckets;
	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
	struct bucket *b;
	unsigned int bucket_nr = 0;
	int ret = -EIO;

	for (b = ca->buckets;
	     b < ca->buckets + ca->sb.nbuckets;
	     b++, d++) {
		if (d == end) {
			ca->prio_buckets[bucket_nr] = bucket;
			ca->prio_last_buckets[bucket_nr] = bucket;
			bucket_nr++;

			prio_io(ca, bucket, REQ_OP_READ);

			if (p->csum !=
			    bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
				pr_warn("bad csum reading priorities\n");
				goto out;
			}

			if (p->magic != pset_magic(&ca->sb)) {
				pr_warn("bad magic reading priorities\n");
				goto out;
			}

			bucket = p->next_bucket;
			d = p->data;
		}

		b->prio = le16_to_cpu(d->prio);
		b->gen = b->last_gc = d->gen;
	}

	ret = 0;
out:
	return ret;
}

/* Bcache device */

static int open_dev(struct gendisk *disk, blk_mode_t mode)
{
	struct bcache_device *d = disk->private_data;

	if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
		return -ENXIO;

	closure_get(&d->cl);
	return 0;
}

static void release_dev(struct gendisk *b)
{
	struct bcache_device *d = b->private_data;

	closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, blk_mode_t mode,
		     unsigned int cmd, unsigned long arg)
{
	struct bcache_device *d = b->bd_disk->private_data;

	return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_cached_ops = {
	.submit_bio	= cached_dev_submit_bio,
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

static const struct block_device_operations bcache_flash_ops = {
	.submit_bio	= flash_dev_submit_bio,
	.open		= open_dev,
	.release	= release_dev,
	.ioctl		= ioctl_dev,
	.owner		= THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
	if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
		/*
		 * closure_fn set to
		 * - cached device: cached_dev_flush()
		 * - flash dev: flash_dev_flush()
		 */
		closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
		struct cache *ca = d->c->cache;

		sysfs_remove_link(&d->c->kobj, d->name);
		sysfs_remove_link(&d->kobj, "cache");

		bd_unlink_disk_holder(ca->bdev, d->disk);
	}
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
			       const char *name)
{
	struct cache *ca = c->cache;
	int ret;

	bd_link_disk_holder(ca->bdev, d->disk);

	snprintf(d->name, BCACHEDEVNAME_SIZE,
		 "%s%u", name, d->id);

	ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
	if (ret < 0)
		pr_err("Couldn't create device -> cache set symlink\n");

	ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
	if (ret < 0)
		pr_err("Couldn't create cache set -> device symlink\n");

	clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
	lockdep_assert_held(&bch_register_lock);

	atomic_dec(&d->c->attached_dev_nr);

	if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
		struct uuid_entry *u = d->c->uuids + d->id;

		SET_UUID_FLASH_ONLY(u, 0);
		memcpy(u->uuid, invalid_uuid, 16);
		u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
		bch_uuid_write(d->c);
	}

	bcache_device_unlink(d);

	d->c->devices[d->id] = NULL;
	closure_put(&d->c->caching);
	d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
				 unsigned int id)
{
	d->id = id;
	d->c = c;
	c->devices[id] = d;

	if (id >= c->devices_max_used)
		c->devices_max_used = id + 1;

	closure_get(&c->caching);
}

static inline int first_minor_to_idx(int first_minor)
{
	return (first_minor/BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
	return (idx * BCACHE_MINORS);
}

static void bcache_device_free(struct bcache_device *d)
{
	struct gendisk *disk = d->disk;

	lockdep_assert_held(&bch_register_lock);

	if (disk)
		pr_info("%s stopped\n", disk->disk_name);
	else
		pr_err("bcache device (NULL gendisk) stopped\n");

	if (d->c)
		bcache_device_detach(d);

	if (disk) {
		ida_simple_remove(&bcache_device_idx,
				  first_minor_to_idx(disk->first_minor));
		put_disk(disk);
	}

	bioset_exit(&d->bio_split);
	kvfree(d->full_dirty_stripes);
	kvfree(d->stripe_sectors_dirty);

	closure_debug_destroy(&d->cl);
}

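/*
 * Common setup for both cached and flash-only bcache devices: allocate
 * the dirty-stripe bookkeeping, reserve a device index, create the
 * gendisk and apply the queue limits. On failure all partially
 * allocated resources are unwound via the out_* labels.
 */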
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
			      sector_t sectors, struct block_device *cached_bdev,
			      const struct block_device_operations *ops)
{
	struct request_queue *q;
	const size_t max_stripes = min_t(size_t, INT_MAX,
					 SIZE_MAX / sizeof(atomic_t));
	uint64_t n;
	int idx;

	if (!d->stripe_size)
		d->stripe_size = 1 << 31;
	else if (d->stripe_size < BCH_MIN_STRIPE_SZ)
		d->stripe_size = roundup(BCH_MIN_STRIPE_SZ, d->stripe_size);

	n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
	if (!n || n > max_stripes) {
		pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
		       n);
		return -ENOMEM;
	}
	d->nr_stripes = n;

	n = d->nr_stripes * sizeof(atomic_t);
	d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
	if (!d->stripe_sectors_dirty)
		return -ENOMEM;

	n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
	d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
	if (!d->full_dirty_stripes)
		goto out_free_stripe_sectors_dirty;

	idx = ida_simple_get(&bcache_device_idx, 0,
			     BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
	if (idx < 0)
		goto out_free_full_dirty_stripes;

	if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
			BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
		goto out_ida_remove;

	d->disk = blk_alloc_disk(NUMA_NO_NODE);
	if (!d->disk)
		goto out_bioset_exit;

	set_capacity(d->disk, sectors);
	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

	d->disk->major		= bcache_major;
	d->disk->first_minor	= idx_to_first_minor(idx);
	d->disk->minors		= BCACHE_MINORS;
	d->disk->fops		= ops;
	d->disk->private_data	= d;

	q = d->disk->queue;
	q->limits.max_hw_sectors	= UINT_MAX;
	q->limits.max_sectors		= UINT_MAX;
	q->limits.max_segment_size	= UINT_MAX;
	q->limits.max_segments		= BIO_MAX_VECS;
	blk_queue_max_discard_sectors(q, UINT_MAX);
	q->limits.discard_granularity	= 512;
	q->limits.io_min		= block_size;
	q->limits.logical_block_size	= block_size;
	q->limits.physical_block_size	= block_size;

	if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
		/*
		 * This should only happen with BCACHE_SB_VERSION_BDEV.
		 * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
		 */
		pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
			d->disk->disk_name, q->limits.logical_block_size,
			PAGE_SIZE, bdev_logical_block_size(cached_bdev));

		/* This also adjusts physical block size/min io size if needed */
		blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
	}

	blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);

	blk_queue_write_cache(q, true, true);

	return 0;

out_bioset_exit:
	bioset_exit(&d->bio_split);
out_ida_remove:
	ida_simple_remove(&bcache_device_idx, idx);
out_free_full_dirty_stripes:
	kvfree(d->full_dirty_stripes);
out_free_stripe_sectors_dirty:
	kvfree(d->stripe_sectors_dirty);
	return -ENOMEM;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
	uint64_t sectors = 0;
	struct cached_dev *dc;

	list_for_each_entry(dc, &c->cached_devs, list)
		sectors += bdev_nr_sectors(dc->bdev);

	c->cached_dev_sectors = sectors;
}

#define BACKING_DEV_OFFLINE_TIMEOUT 5
static int cached_dev_status_update(void *arg)
{
	struct cached_dev *dc = arg;
	struct request_queue *q;

	/*
	 * If this kthread is being stopped from elsewhere, quit here
	 * directly. dc->io_disable might be set via the sysfs interface,
	 * so check it here too.
	 */
	while (!kthread_should_stop() && !dc->io_disable) {
		q = bdev_get_queue(dc->bdev);
		if (blk_queue_dying(q))
			dc->offline_seconds++;
		else
			dc->offline_seconds = 0;

		if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
			pr_err("%pg: device offline for %d seconds\n",
			       dc->bdev,
			       BACKING_DEV_OFFLINE_TIMEOUT);
			pr_err("%s: disable I/O request due to backing device offline\n",
			       dc->disk.name);
			dc->io_disable = true;
			/* let others know earlier that io_disable is true */
			smp_mb();
			bcache_device_stop(&dc->disk);
			break;
		}
		schedule_timeout_interruptible(HZ);
	}

	wait_for_kthread_stop();
	return 0;
}

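/*
 * Bring the bcache device for @dc online: mark a detached backing
 * device as stale, add the gendisk, emit the CACHED_UUID/CACHED_LABEL
 * uevent, create the sysfs links and start the backing device status
 * monitor kthread.
 */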
bch_cached_dev_run(struct cached_dev * dc)10440b13efecSColy Li int bch_cached_dev_run(struct cached_dev *dc)
1045cafe5635SKent Overstreet {
104613e1db65SZhiqiang Liu int ret = 0;
1047cafe5635SKent Overstreet struct bcache_device *d = &dc->disk;
1048792732d9SGeliang Tang char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
1049a25c32beSGabriel de Perthuis char *env[] = {
1050a25c32beSGabriel de Perthuis "DRIVER=bcache",
1051a25c32beSGabriel de Perthuis kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
1052792732d9SGeliang Tang kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
1053ab9e1400SGabriel de Perthuis NULL,
1054a25c32beSGabriel de Perthuis };
1055cafe5635SKent Overstreet
1056e0faa3d7SColy Li if (dc->io_disable) {
10570f5cd781SChristoph Hellwig pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
105813e1db65SZhiqiang Liu ret = -EIO;
105913e1db65SZhiqiang Liu goto out;
1060e0faa3d7SColy Li }
10610b13efecSColy Li
10624d4d8573SAl Viro if (atomic_xchg(&dc->running, 1)) {
10630f5cd781SChristoph Hellwig pr_info("cached dev %pg is running already\n", dc->bdev);
106413e1db65SZhiqiang Liu ret = -EBUSY;
106513e1db65SZhiqiang Liu goto out;
10664d4d8573SAl Viro }
1067cafe5635SKent Overstreet
1068cafe5635SKent Overstreet if (!d->c &&
1069cafe5635SKent Overstreet BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
1070cafe5635SKent Overstreet struct closure cl;
10711fae7cf0SColy Li
1072cafe5635SKent Overstreet closure_init_stack(&cl);
1073cafe5635SKent Overstreet
1074cafe5635SKent Overstreet SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
1075cafe5635SKent Overstreet bch_write_bdev_super(dc, &cl);
1076cafe5635SKent Overstreet closure_sync(&cl);
1077cafe5635SKent Overstreet }
1078cafe5635SKent Overstreet
10792961c3bbSLuis Chamberlain ret = add_disk(d->disk);
10802961c3bbSLuis Chamberlain if (ret)
10812961c3bbSLuis Chamberlain goto out;
1082ee668506SKent Overstreet bd_link_disk_holder(dc->bdev, dc->disk.disk);
10833be11dbaSColy Li /*
10843be11dbaSColy Li * won't show up in the uevent file, use udevadm monitor -e instead
10853be11dbaSColy Li * only class / kset properties are persistent
10863be11dbaSColy Li */
1087cafe5635SKent Overstreet kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
1088a25c32beSGabriel de Perthuis
1089cafe5635SKent Overstreet if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
10900b13efecSColy Li sysfs_create_link(&disk_to_dev(d->disk)->kobj,
10910b13efecSColy Li &d->kobj, "bcache")) {
109246f5aa88SJoe Perches pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
109313e1db65SZhiqiang Liu ret = -ENOMEM;
109413e1db65SZhiqiang Liu goto out;
10950b13efecSColy Li }
10960f0709e6SColy Li
10970f0709e6SColy Li dc->status_update_thread = kthread_run(cached_dev_status_update,
10980f0709e6SColy Li dc, "bcache_status_update");
10990f0709e6SColy Li if (IS_ERR(dc->status_update_thread)) {
110046f5aa88SJoe Perches pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");
11010f0709e6SColy Li }
11020b13efecSColy Li
110313e1db65SZhiqiang Liu out:
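/*
 * env[0] is a string literal and env[3] is the NULL terminator; only the
 * kasprintf()'d strings and the duplicated label were allocated.
 */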
110413e1db65SZhiqiang Liu kfree(env[1]);
110513e1db65SZhiqiang Liu kfree(env[2]);
110613e1db65SZhiqiang Liu kfree(buf);
110713e1db65SZhiqiang Liu return ret;
1108cafe5635SKent Overstreet }
1109cafe5635SKent Overstreet
11103fd47bfeSColy Li /*
11113fd47bfeSColy Li * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
11123fd47bfeSColy Li * work dc->writeback_rate_update is running. Wait until the routine
11133fd47bfeSColy Li * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
11143fd47bfeSColy Li * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is still set after
11153fd47bfeSColy Li * WRITEBACK_RATE_UPDATE_SECS_MAX seconds, give up waiting and cancel it anyway.
11163fd47bfeSColy Li */
11173fd47bfeSColy Li static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
11183fd47bfeSColy Li {
11193fd47bfeSColy Li int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
11203fd47bfeSColy Li
11213fd47bfeSColy Li do {
11223fd47bfeSColy Li if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
11233fd47bfeSColy Li &dc->disk.flags))
11243fd47bfeSColy Li break;
11253fd47bfeSColy Li time_out--;
11263fd47bfeSColy Li schedule_timeout_interruptible(1);
11273fd47bfeSColy Li } while (time_out > 0);
11283fd47bfeSColy Li
11293fd47bfeSColy Li if (time_out == 0)
113046f5aa88SJoe Perches pr_warn("give up waiting for dc->writeback_rate_update to quit\n");
11313fd47bfeSColy Li
11323fd47bfeSColy Li cancel_delayed_work_sync(&dc->writeback_rate_update);
11333fd47bfeSColy Li }
11343fd47bfeSColy Li
1135cafe5635SKent Overstreet static void cached_dev_detach_finish(struct work_struct *w)
1136cafe5635SKent Overstreet {
1137cafe5635SKent Overstreet struct cached_dev *dc = container_of(w, struct cached_dev, detach);
1138aa97f6cdSLin Feng struct cache_set *c = dc->disk.c;
1139cafe5635SKent Overstreet
1140c4d951ddSKent Overstreet BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
11413b304d24SElena Reshetova BUG_ON(refcount_read(&dc->count));
1142cafe5635SKent Overstreet
1143cafe5635SKent Overstreet
11443fd47bfeSColy Li if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
11453fd47bfeSColy Li cancel_writeback_rate_update_dwork(dc);
11463fd47bfeSColy Li
11478d29c442STang Junhui if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
11488d29c442STang Junhui kthread_stop(dc->writeback_thread);
11498d29c442STang Junhui dc->writeback_thread = NULL;
11508d29c442STang Junhui }
11518d29c442STang Junhui
115297ba3b81SColy Li mutex_lock(&bch_register_lock);
115397ba3b81SColy Li
1154cafe5635SKent Overstreet bcache_device_detach(&dc->disk);
1155cafe5635SKent Overstreet list_move(&dc->list, &uncached_devices);
1156aa97f6cdSLin Feng calc_cached_dev_sectors(c);
1157cafe5635SKent Overstreet
1158c4d951ddSKent Overstreet clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
11595b1016e6SKent Overstreet clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
1160c4d951ddSKent Overstreet
1161cafe5635SKent Overstreet mutex_unlock(&bch_register_lock);
1162cafe5635SKent Overstreet
11630f5cd781SChristoph Hellwig pr_info("Caching disabled for %pg\n", dc->bdev);
1164cafe5635SKent Overstreet
1165cafe5635SKent Overstreet /* Drop ref we took in cached_dev_detach() */
1166cafe5635SKent Overstreet closure_put(&dc->disk.cl);
1167cafe5635SKent Overstreet }
1168cafe5635SKent Overstreet
1169cafe5635SKent Overstreet void bch_cached_dev_detach(struct cached_dev *dc)
1170cafe5635SKent Overstreet {
1171cafe5635SKent Overstreet lockdep_assert_held(&bch_register_lock);
1172cafe5635SKent Overstreet
1173c4d951ddSKent Overstreet if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1174cafe5635SKent Overstreet return;
1175cafe5635SKent Overstreet
1176c4d951ddSKent Overstreet if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1177cafe5635SKent Overstreet return;
1178cafe5635SKent Overstreet
1179cafe5635SKent Overstreet /*
1180cafe5635SKent Overstreet * Block the device from being closed and freed until we're finished
1181cafe5635SKent Overstreet * detaching
1182cafe5635SKent Overstreet */
1183cafe5635SKent Overstreet closure_get(&dc->disk.cl);
1184cafe5635SKent Overstreet
1185cafe5635SKent Overstreet bch_writeback_queue(dc);
11863fd47bfeSColy Li
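/*
 * Once the last count reference is dropped, cached_dev_put() schedules
 * dc->detach, i.e. cached_dev_detach_finish().
 */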
1187cafe5635SKent Overstreet cached_dev_put(dc);
1188cafe5635SKent Overstreet }
1189cafe5635SKent Overstreet
119073ac105bSTang Junhui int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
119173ac105bSTang Junhui uint8_t *set_uuid)
1192cafe5635SKent Overstreet {
119375cbb3f1SArnd Bergmann uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
1194cafe5635SKent Overstreet struct uuid_entry *u;
119586755b7aSMichael Lyle struct cached_dev *exist_dc, *t;
11960b13efecSColy Li int ret = 0;
1197cafe5635SKent Overstreet
11981132e56eSColy Li if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
11991132e56eSColy Li (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
1200cafe5635SKent Overstreet return -ENOENT;
1201cafe5635SKent Overstreet
1202cafe5635SKent Overstreet if (dc->disk.c) {
12030f5cd781SChristoph Hellwig pr_err("Can't attach %pg: already attached\n", dc->bdev);
1204cafe5635SKent Overstreet return -EINVAL;
1205cafe5635SKent Overstreet }
1206cafe5635SKent Overstreet
1207cafe5635SKent Overstreet if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
12080f5cd781SChristoph Hellwig pr_err("Can't attach %pg: shutting down\n", dc->bdev);
1209cafe5635SKent Overstreet return -EINVAL;
1210cafe5635SKent Overstreet }
1211cafe5635SKent Overstreet
12124a784266SColy Li if (dc->sb.block_size < c->cache->sb.block_size) {
1213cafe5635SKent Overstreet /* Will die */
12140f5cd781SChristoph Hellwig pr_err("Couldn't attach %pg: block size less than set's block size\n",
12150f5cd781SChristoph Hellwig dc->bdev);
1216cafe5635SKent Overstreet return -EINVAL;
1217cafe5635SKent Overstreet }
1218cafe5635SKent Overstreet
121986755b7aSMichael Lyle /* Check whether already attached */
122086755b7aSMichael Lyle list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
122186755b7aSMichael Lyle if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
12220f5cd781SChristoph Hellwig pr_err("Tried to attach %pg but duplicate UUID already attached\n",
12230f5cd781SChristoph Hellwig dc->bdev);
122486755b7aSMichael Lyle
122586755b7aSMichael Lyle return -EINVAL;
122686755b7aSMichael Lyle }
122786755b7aSMichael Lyle }
122886755b7aSMichael Lyle
1229cafe5635SKent Overstreet u = uuid_find(c, dc->sb.uuid);
1230cafe5635SKent Overstreet
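/*
 * If the backing device was last seen stale or never attached
 * (BDEV_STATE_NONE), its old UUID entry cannot be reused: invalidate it
 * and fall through to allocate a fresh slot below.
 */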
1231cafe5635SKent Overstreet if (u &&
1232cafe5635SKent Overstreet (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
1233cafe5635SKent Overstreet BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
1234cafe5635SKent Overstreet memcpy(u->uuid, invalid_uuid, 16);
123575cbb3f1SArnd Bergmann u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
1236cafe5635SKent Overstreet u = NULL;
1237cafe5635SKent Overstreet }
1238cafe5635SKent Overstreet
1239cafe5635SKent Overstreet if (!u) {
1240cafe5635SKent Overstreet if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
12410f5cd781SChristoph Hellwig pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
1242cafe5635SKent Overstreet return -ENOENT;
1243cafe5635SKent Overstreet }
1244cafe5635SKent Overstreet
1245cafe5635SKent Overstreet u = uuid_find_empty(c);
1246cafe5635SKent Overstreet if (!u) {
12470f5cd781SChristoph Hellwig pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
1248cafe5635SKent Overstreet return -EINVAL;
1249cafe5635SKent Overstreet }
1250cafe5635SKent Overstreet }
1251cafe5635SKent Overstreet
12523be11dbaSColy Li /*
12533be11dbaSColy Li * Deadlocks since we're called via sysfs...
12543be11dbaSColy Li * sysfs_remove_file(&dc->kobj, &sysfs_attach);
1255cafe5635SKent Overstreet */
1256cafe5635SKent Overstreet
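/*
 * An all-zero uuid means this is a fresh slot: record the backing
 * device's identity in the cache set and bind the set uuid into the
 * backing superblock; otherwise only the registration time is refreshed.
 */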
1257169ef1cfSKent Overstreet if (bch_is_zero(u->uuid, 16)) {
1258cafe5635SKent Overstreet struct closure cl;
12591fae7cf0SColy Li
1260cafe5635SKent Overstreet closure_init_stack(&cl);
1261cafe5635SKent Overstreet
1262cafe5635SKent Overstreet memcpy(u->uuid, dc->sb.uuid, 16);
1263cafe5635SKent Overstreet memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
1264cafe5635SKent Overstreet u->first_reg = u->last_reg = rtime;
1265cafe5635SKent Overstreet bch_uuid_write(c);
1266cafe5635SKent Overstreet
12671132e56eSColy Li memcpy(dc->sb.set_uuid, c->set_uuid, 16);
1268cafe5635SKent Overstreet SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
1269cafe5635SKent Overstreet
1270cafe5635SKent Overstreet bch_write_bdev_super(dc, &cl);
1271cafe5635SKent Overstreet closure_sync(&cl);
1272cafe5635SKent Overstreet } else {
1273cafe5635SKent Overstreet u->last_reg = rtime;
1274cafe5635SKent Overstreet bch_uuid_write(c);
1275cafe5635SKent Overstreet }
1276cafe5635SKent Overstreet
1277cafe5635SKent Overstreet bcache_device_attach(&dc->disk, c, u - c->uuids);
1278cafe5635SKent Overstreet list_move(&dc->list, &c->cached_devs);
1279cafe5635SKent Overstreet calc_cached_dev_sectors(c);
1280cafe5635SKent Overstreet
1281cafe5635SKent Overstreet /*
1282cafe5635SKent Overstreet * dc->c must be set before dc->count != 0 - paired with the mb in
1283cafe5635SKent Overstreet * cached_dev_get()
1284cafe5635SKent Overstreet */
1285eb2b3d03SColy Li smp_wmb();
12863b304d24SElena Reshetova refcount_set(&dc->count, 1);
1287cafe5635SKent Overstreet
128807cc6ef8SEric Wheeler /* Block writeback thread, but spawn it */
128907cc6ef8SEric Wheeler down_write(&dc->writeback_lock);
129007cc6ef8SEric Wheeler if (bch_cached_dev_writeback_start(dc)) {
129107cc6ef8SEric Wheeler up_write(&dc->writeback_lock);
129246f5aa88SJoe Perches pr_err("Couldn't start writeback facilities for %s\n",
1293633bb2ceSColy Li dc->disk.disk->disk_name);
12949e5c3535SSlava Pestov return -ENOMEM;
129507cc6ef8SEric Wheeler }
12969e5c3535SSlava Pestov
1297cafe5635SKent Overstreet if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1298cafe5635SKent Overstreet atomic_set(&dc->has_dirty, 1);
1299cafe5635SKent Overstreet bch_writeback_queue(dc);
1300cafe5635SKent Overstreet }
1301cafe5635SKent Overstreet
13022e17a262STang Junhui bch_sectors_dirty_init(&dc->disk);
13032e17a262STang Junhui
13040b13efecSColy Li ret = bch_cached_dev_run(dc);
13050b13efecSColy Li if (ret && (ret != -EBUSY)) {
13060b13efecSColy Li up_write(&dc->writeback_lock);
13075c2a634cSColy Li /*
13085c2a634cSColy Li * bch_register_lock is held, so bcache_device_stop() cannot
13095c2a634cSColy Li * be called directly. The kthread and kworker
13105c2a634cSColy Li * created previously in bch_cached_dev_writeback_start()
13115c2a634cSColy Li * have to be stopped manually here.
13125c2a634cSColy Li */
13135c2a634cSColy Li kthread_stop(dc->writeback_thread);
13145c2a634cSColy Li cancel_writeback_rate_update_dwork(dc);
13150f5cd781SChristoph Hellwig pr_err("Couldn't run cached device %pg\n", dc->bdev);
13160b13efecSColy Li return ret;
13170b13efecSColy Li }
13180b13efecSColy Li
1319ee668506SKent Overstreet bcache_device_link(&dc->disk, c, "bdev");
1320ea8c5356SColy Li atomic_inc(&c->attached_dev_nr);
1321cafe5635SKent Overstreet
13225342fd42SColy Li if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
13235342fd42SColy Li pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
13245342fd42SColy Li pr_err("Please update to the latest bcache-tools to create the cache device\n");
13255342fd42SColy Li set_disk_ro(dc->disk.disk, 1);
13265342fd42SColy Li }
13275342fd42SColy Li
132807cc6ef8SEric Wheeler /* Allow the writeback thread to proceed */
132907cc6ef8SEric Wheeler up_write(&dc->writeback_lock);
133007cc6ef8SEric Wheeler
13310f5cd781SChristoph Hellwig pr_info("Caching %pg as %s on set %pU\n",
13320f5cd781SChristoph Hellwig dc->bdev,
13336e916a7eSColy Li dc->disk.disk->disk_name,
13341132e56eSColy Li dc->disk.c->set_uuid);
1335cafe5635SKent Overstreet return 0;
1336cafe5635SKent Overstreet }
1337cafe5635SKent Overstreet
13382d17456eSColy Li /* when dc->disk.kobj released */
1339cafe5635SKent Overstreet void bch_cached_dev_release(struct kobject *kobj)
1340cafe5635SKent Overstreet {
1341cafe5635SKent Overstreet struct cached_dev *dc = container_of(kobj, struct cached_dev,
1342cafe5635SKent Overstreet disk.kobj);
1343cafe5635SKent Overstreet kfree(dc);
1344cafe5635SKent Overstreet module_put(THIS_MODULE);
1345cafe5635SKent Overstreet }
1346cafe5635SKent Overstreet
1347cafe5635SKent Overstreet static void cached_dev_free(struct closure *cl)
1348cafe5635SKent Overstreet {
1349cafe5635SKent Overstreet struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1350cafe5635SKent Overstreet
13513fd47bfeSColy Li if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
13523fd47bfeSColy Li cancel_writeback_rate_update_dwork(dc);
13533fd47bfeSColy Li
1354a664d0f0SSlava Pestov if (!IS_ERR_OR_NULL(dc->writeback_thread))
13555e6926daSKent Overstreet kthread_stop(dc->writeback_thread);
13560f0709e6SColy Li if (!IS_ERR_OR_NULL(dc->status_update_thread))
13570f0709e6SColy Li kthread_stop(dc->status_update_thread);
1358cafe5635SKent Overstreet
135980265d8dSColy Li mutex_lock(&bch_register_lock);
136080265d8dSColy Li
1361b75f4aedSChristoph Hellwig if (atomic_read(&dc->running)) {
1362ee668506SKent Overstreet bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1363b75f4aedSChristoph Hellwig del_gendisk(dc->disk.disk);
1364b75f4aedSChristoph Hellwig }
1365cafe5635SKent Overstreet bcache_device_free(&dc->disk);
1366cafe5635SKent Overstreet list_del(&dc->list);
1367cafe5635SKent Overstreet
1368cafe5635SKent Overstreet mutex_unlock(&bch_register_lock);
1369cafe5635SKent Overstreet
1370475389aeSChristoph Hellwig if (dc->sb_disk)
1371475389aeSChristoph Hellwig put_page(virt_to_page(dc->sb_disk));
1372e8547d42SLiang Chen
13730781c874SKent Overstreet if (!IS_ERR_OR_NULL(dc->bdev))
13742c555598SJan Kara blkdev_put(dc->bdev, dc);
1375cafe5635SKent Overstreet
1376cafe5635SKent Overstreet wake_up(&unregister_wait);
1377cafe5635SKent Overstreet
1378cafe5635SKent Overstreet kobject_put(&dc->disk.kobj);
1379cafe5635SKent Overstreet }
1380cafe5635SKent Overstreet
1381cafe5635SKent Overstreet static void cached_dev_flush(struct closure *cl)
1382cafe5635SKent Overstreet {
1383cafe5635SKent Overstreet struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1384cafe5635SKent Overstreet struct bcache_device *d = &dc->disk;
1385cafe5635SKent Overstreet
1386c9502ea4SKent Overstreet mutex_lock(&bch_register_lock);
1387c9502ea4SKent Overstreet bcache_device_unlink(d);
1388c9502ea4SKent Overstreet mutex_unlock(&bch_register_lock);
1389c9502ea4SKent Overstreet
1390cafe5635SKent Overstreet bch_cache_accounting_destroy(&dc->accounting);
1391cafe5635SKent Overstreet kobject_del(&d->kobj);
1392cafe5635SKent Overstreet
1393cafe5635SKent Overstreet continue_at(cl, cached_dev_free, system_wq);
1394cafe5635SKent Overstreet }
1395cafe5635SKent Overstreet
13966f10f7d1SColy Li static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
1397cafe5635SKent Overstreet {
1398f59fce84SKent Overstreet int ret;
1399cafe5635SKent Overstreet struct io *io;
1400f59fce84SKent Overstreet struct request_queue *q = bdev_get_queue(dc->bdev);
1401cafe5635SKent Overstreet
1402cafe5635SKent Overstreet __module_get(THIS_MODULE);
1403cafe5635SKent Overstreet INIT_LIST_HEAD(&dc->list);
1404f59fce84SKent Overstreet closure_init(&dc->disk.cl, NULL);
1405f59fce84SKent Overstreet set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1406cafe5635SKent Overstreet kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1407cafe5635SKent Overstreet INIT_WORK(&dc->detach, cached_dev_detach_finish);
1408cb7a583eSKent Overstreet sema_init(&dc->sb_write_mutex, 1);
1409f59fce84SKent Overstreet INIT_LIST_HEAD(&dc->io_lru);
1410f59fce84SKent Overstreet spin_lock_init(&dc->io_lock);
1411f59fce84SKent Overstreet bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1412cafe5635SKent Overstreet
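/* Sequential streams larger than this threshold (4 MiB by default) bypass the cache. */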
1413cafe5635SKent Overstreet dc->sequential_cutoff = 4 << 20;
1414cafe5635SKent Overstreet
1415cafe5635SKent Overstreet for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1416cafe5635SKent Overstreet list_add(&io->lru, &dc->io_lru);
1417cafe5635SKent Overstreet hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1418cafe5635SKent Overstreet }
1419cafe5635SKent Overstreet
1420c78afc62SKent Overstreet dc->disk.stripe_size = q->limits.io_opt >> 9;
1421c78afc62SKent Overstreet
1422c78afc62SKent Overstreet if (dc->disk.stripe_size)
1423c78afc62SKent Overstreet dc->partial_stripes_expensive =
1424c78afc62SKent Overstreet q->limits.raid_partial_stripes_expensive;
1425c78afc62SKent Overstreet
1426279afbadSKent Overstreet ret = bcache_device_init(&dc->disk, block_size,
1427a782483cSChristoph Hellwig bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
1428c62b37d9SChristoph Hellwig dc->bdev, &bcache_cached_ops);
1429f59fce84SKent Overstreet if (ret)
1430f59fce84SKent Overstreet return ret;
1431f59fce84SKent Overstreet
14325d4ce78bSChristoph Hellwig blk_queue_io_opt(dc->disk.disk->queue,
14335d4ce78bSChristoph Hellwig max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));
1434f59fce84SKent Overstreet
1435c7b7bd07SColy Li atomic_set(&dc->io_errors, 0);
1436c7b7bd07SColy Li dc->io_disable = false;
1437c7b7bd07SColy Li dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
14387e027ca4SColy Li /* default to auto */
14397e027ca4SColy Li dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
14407e027ca4SColy Li
1441f59fce84SKent Overstreet bch_cached_dev_request_init(dc);
1442f59fce84SKent Overstreet bch_cached_dev_writeback_init(dc);
1443cafe5635SKent Overstreet return 0;
1444cafe5635SKent Overstreet }
1445cafe5635SKent Overstreet
1446cafe5635SKent Overstreet /* Cached device - bcache superblock */
1447cafe5635SKent Overstreet
1448cfa0c56dSChristoph Hellwig static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
1449cafe5635SKent Overstreet struct block_device *bdev,
1450cafe5635SKent Overstreet struct cached_dev *dc)
1451cafe5635SKent Overstreet {
1452cafe5635SKent Overstreet const char *err = "cannot allocate memory";
1453cafe5635SKent Overstreet struct cache_set *c;
14540b13efecSColy Li int ret = -ENOMEM;
1455cafe5635SKent Overstreet
1456cafe5635SKent Overstreet memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1457cafe5635SKent Overstreet dc->bdev = bdev;
1458475389aeSChristoph Hellwig dc->sb_disk = sb_disk;
14596e916a7eSColy Li
1460f59fce84SKent Overstreet if (cached_dev_init(dc, sb->block_size << 9))
1461f59fce84SKent Overstreet goto err;
1462cafe5635SKent Overstreet
1463cafe5635SKent Overstreet err = "error creating kobject";
14648d65269fSChristoph Hellwig if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
1465cafe5635SKent Overstreet goto err;
1466cafe5635SKent Overstreet if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1467cafe5635SKent Overstreet goto err;
1468cafe5635SKent Overstreet
14690f5cd781SChristoph Hellwig pr_info("registered backing device %pg\n", dc->bdev);
1470f59fce84SKent Overstreet
1471cafe5635SKent Overstreet list_add(&dc->list, &uncached_devices);
1472e57fd746SColy Li /* attach to a matched cache set if it exists */
1473cafe5635SKent Overstreet list_for_each_entry(c, &bch_cache_sets, list)
147473ac105bSTang Junhui bch_cached_dev_attach(dc, c, NULL);
1475cafe5635SKent Overstreet
1476cafe5635SKent Overstreet if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
14770b13efecSColy Li BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
14780b13efecSColy Li err = "failed to run cached device";
14790b13efecSColy Li ret = bch_cached_dev_run(dc);
14800b13efecSColy Li if (ret)
14810b13efecSColy Li goto err;
14820b13efecSColy Li }
1483cafe5635SKent Overstreet
148488c12d42SColy Li return 0;
1485cafe5635SKent Overstreet err:
14860f5cd781SChristoph Hellwig pr_notice("error %pg: %s\n", dc->bdev, err);
1487f59fce84SKent Overstreet bcache_device_stop(&dc->disk);
14880b13efecSColy Li return ret;
1489cafe5635SKent Overstreet }
1490cafe5635SKent Overstreet
1491cafe5635SKent Overstreet /* Flash only volumes */
1492cafe5635SKent Overstreet
14932d17456eSColy Li /* When d->kobj released */
1494cafe5635SKent Overstreet void bch_flash_dev_release(struct kobject *kobj)
1495cafe5635SKent Overstreet {
1496cafe5635SKent Overstreet struct bcache_device *d = container_of(kobj, struct bcache_device,
1497cafe5635SKent Overstreet kobj);
1498cafe5635SKent Overstreet kfree(d);
1499cafe5635SKent Overstreet }
1500cafe5635SKent Overstreet
1501cafe5635SKent Overstreet static void flash_dev_free(struct closure *cl)
1502cafe5635SKent Overstreet {
1503cafe5635SKent Overstreet struct bcache_device *d = container_of(cl, struct bcache_device, cl);
15041fae7cf0SColy Li
1505e5112201SSlava Pestov mutex_lock(&bch_register_lock);
150699a27d59STang Junhui atomic_long_sub(bcache_dev_sectors_dirty(d),
150799a27d59STang Junhui &d->c->flash_dev_dirty_sectors);
1508b75f4aedSChristoph Hellwig del_gendisk(d->disk);
1509cafe5635SKent Overstreet bcache_device_free(d);
1510e5112201SSlava Pestov mutex_unlock(&bch_register_lock);
1511cafe5635SKent Overstreet kobject_put(&d->kobj);
1512cafe5635SKent Overstreet }
1513cafe5635SKent Overstreet
1514cafe5635SKent Overstreet static void flash_dev_flush(struct closure *cl)
1515cafe5635SKent Overstreet {
1516cafe5635SKent Overstreet struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1517cafe5635SKent Overstreet
1518e5112201SSlava Pestov mutex_lock(&bch_register_lock);
1519ee668506SKent Overstreet bcache_device_unlink(d);
1520e5112201SSlava Pestov mutex_unlock(&bch_register_lock);
1521cafe5635SKent Overstreet kobject_del(&d->kobj);
1522cafe5635SKent Overstreet continue_at(cl, flash_dev_free, system_wq);
1523cafe5635SKent Overstreet }
1524cafe5635SKent Overstreet
1525cafe5635SKent Overstreet static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1526cafe5635SKent Overstreet {
15272961c3bbSLuis Chamberlain int err = -ENOMEM;
1528cafe5635SKent Overstreet struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
1529cafe5635SKent Overstreet GFP_KERNEL);
1530cafe5635SKent Overstreet if (!d)
15312961c3bbSLuis Chamberlain goto err_ret;
1532cafe5635SKent Overstreet
1533cafe5635SKent Overstreet closure_init(&d->cl, NULL);
1534cafe5635SKent Overstreet set_closure_fn(&d->cl, flash_dev_flush, system_wq);
1535cafe5635SKent Overstreet
1536cafe5635SKent Overstreet kobject_init(&d->kobj, &bch_flash_dev_ktype);
1537cafe5635SKent Overstreet
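/*
 * Flash-only volumes live entirely in the cache set, so no backing
 * block_device is passed to bcache_device_init().
 */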
15384e1ebae3SColy Li if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
1539c62b37d9SChristoph Hellwig NULL, &bcache_flash_ops))
1540cafe5635SKent Overstreet goto err;
1541cafe5635SKent Overstreet
1542cafe5635SKent Overstreet bcache_device_attach(d, c, u - c->uuids);
1543175206cfSTang Junhui bch_sectors_dirty_init(d);
1544cafe5635SKent Overstreet bch_flash_dev_request_init(d);
15452961c3bbSLuis Chamberlain err = add_disk(d->disk);
15462961c3bbSLuis Chamberlain if (err)
15472961c3bbSLuis Chamberlain goto err;
1548cafe5635SKent Overstreet
15492961c3bbSLuis Chamberlain err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
15502961c3bbSLuis Chamberlain if (err)
1551cafe5635SKent Overstreet goto err;
1552cafe5635SKent Overstreet
1553cafe5635SKent Overstreet bcache_device_link(d, c, "volume");
1554cafe5635SKent Overstreet
15555342fd42SColy Li if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
15565342fd42SColy Li pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
15575342fd42SColy Li pr_err("Please update to the latest bcache-tools to create the cache device\n");
15585342fd42SColy Li set_disk_ro(d->disk, 1);
15595342fd42SColy Li }
15605342fd42SColy Li
1561cafe5635SKent Overstreet return 0;
1562cafe5635SKent Overstreet err:
1563cafe5635SKent Overstreet kobject_put(&d->kobj);
15642961c3bbSLuis Chamberlain err_ret:
15652961c3bbSLuis Chamberlain return err;
1566cafe5635SKent Overstreet }
1567cafe5635SKent Overstreet
1568cafe5635SKent Overstreet static int flash_devs_run(struct cache_set *c)
1569cafe5635SKent Overstreet {
1570cafe5635SKent Overstreet int ret = 0;
1571cafe5635SKent Overstreet struct uuid_entry *u;
1572cafe5635SKent Overstreet
1573cafe5635SKent Overstreet for (u = c->uuids;
157402aa8a8bSColy Li u < c->uuids + c->nr_uuids && !ret;
1575cafe5635SKent Overstreet u++)
1576cafe5635SKent Overstreet if (UUID_FLASH_ONLY(u))
1577cafe5635SKent Overstreet ret = flash_dev_run(c, u);
1578cafe5635SKent Overstreet
1579cafe5635SKent Overstreet return ret;
1580cafe5635SKent Overstreet }
1581cafe5635SKent Overstreet
1582cafe5635SKent Overstreet int bch_flash_dev_create(struct cache_set *c, uint64_t size)
1583cafe5635SKent Overstreet {
1584cafe5635SKent Overstreet struct uuid_entry *u;
1585cafe5635SKent Overstreet
1586cafe5635SKent Overstreet if (test_bit(CACHE_SET_STOPPING, &c->flags))
1587cafe5635SKent Overstreet return -EINTR;
1588cafe5635SKent Overstreet
1589bf0c55c9SSlava Pestov if (!test_bit(CACHE_SET_RUNNING, &c->flags))
1590bf0c55c9SSlava Pestov return -EPERM;
1591bf0c55c9SSlava Pestov
1592cafe5635SKent Overstreet u = uuid_find_empty(c);
1593cafe5635SKent Overstreet if (!u) {
159446f5aa88SJoe Perches pr_err("Can't create volume, no room for UUID\n");
1595cafe5635SKent Overstreet return -EINVAL;
1596cafe5635SKent Overstreet }
1597cafe5635SKent Overstreet
1598cafe5635SKent Overstreet get_random_bytes(u->uuid, 16);
1599cafe5635SKent Overstreet memset(u->label, 0, 32);
160075cbb3f1SArnd Bergmann u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
1601cafe5635SKent Overstreet
1602cafe5635SKent Overstreet SET_UUID_FLASH_ONLY(u, 1);
1603cafe5635SKent Overstreet u->sectors = size >> 9;
1604cafe5635SKent Overstreet
1605cafe5635SKent Overstreet bch_uuid_write(c);
1606cafe5635SKent Overstreet
1607cafe5635SKent Overstreet return flash_dev_run(c, u);
1608cafe5635SKent Overstreet }
1609cafe5635SKent Overstreet
1610c7b7bd07SColy Li bool bch_cached_dev_error(struct cached_dev *dc)
1611c7b7bd07SColy Li {
1612c7b7bd07SColy Li if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1613c7b7bd07SColy Li return false;
1614c7b7bd07SColy Li
1615c7b7bd07SColy Li dc->io_disable = true;
1616c7b7bd07SColy Li /* make others know io_disable is true earlier */
1617c7b7bd07SColy Li smp_mb();
1618c7b7bd07SColy Li
16190f5cd781SChristoph Hellwig pr_err("stop %s: too many IO errors on backing device %pg\n",
16200f5cd781SChristoph Hellwig dc->disk.disk->disk_name, dc->bdev);
1621c7b7bd07SColy Li
1622c7b7bd07SColy Li bcache_device_stop(&dc->disk);
1623c7b7bd07SColy Li return true;
1624c7b7bd07SColy Li }
1625c7b7bd07SColy Li
1626cafe5635SKent Overstreet /* Cache set */
1627cafe5635SKent Overstreet
1628cafe5635SKent Overstreet __printf(2, 3)
1629cafe5635SKent Overstreet bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1630cafe5635SKent Overstreet {
163146f5aa88SJoe Perches struct va_format vaf;
1632cafe5635SKent Overstreet va_list args;
1633cafe5635SKent Overstreet
163477c320ebSKent Overstreet if (c->on_error != ON_ERROR_PANIC &&
163577c320ebSKent Overstreet test_bit(CACHE_SET_STOPPING, &c->flags))
1636cafe5635SKent Overstreet return false;
1637cafe5635SKent Overstreet
1638771f393eSColy Li if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
163946f5aa88SJoe Perches pr_info("CACHE_SET_IO_DISABLE already set\n");
1640771f393eSColy Li
16413be11dbaSColy Li /*
16423be11dbaSColy Li * XXX: we can be called from atomic context
16433be11dbaSColy Li * acquire_console_sem();
1644cafe5635SKent Overstreet */
1645cafe5635SKent Overstreet
1646cafe5635SKent Overstreet va_start(args, fmt);
1647cafe5635SKent Overstreet
164846f5aa88SJoe Perches vaf.fmt = fmt;
164946f5aa88SJoe Perches vaf.va = &args;
165046f5aa88SJoe Perches
165146f5aa88SJoe Perches pr_err("error on %pU: %pV, disabling caching\n",
16521132e56eSColy Li c->set_uuid, &vaf);
165346f5aa88SJoe Perches
165446f5aa88SJoe Perches va_end(args);
1655cafe5635SKent Overstreet
165677c320ebSKent Overstreet if (c->on_error == ON_ERROR_PANIC)
165777c320ebSKent Overstreet panic("panic forced after error\n");
165877c320ebSKent Overstreet
1659cafe5635SKent Overstreet bch_cache_set_unregister(c);
1660cafe5635SKent Overstreet return true;
1661cafe5635SKent Overstreet }
1662cafe5635SKent Overstreet
16632d17456eSColy Li /* When c->kobj released */
1664cafe5635SKent Overstreet void bch_cache_set_release(struct kobject *kobj)
1665cafe5635SKent Overstreet {
1666cafe5635SKent Overstreet struct cache_set *c = container_of(kobj, struct cache_set, kobj);
16671fae7cf0SColy Li
1668cafe5635SKent Overstreet kfree(c);
1669cafe5635SKent Overstreet module_put(THIS_MODULE);
1670cafe5635SKent Overstreet }
1671cafe5635SKent Overstreet
1672cafe5635SKent Overstreet static void cache_set_free(struct closure *cl)
1673cafe5635SKent Overstreet {
1674cafe5635SKent Overstreet struct cache_set *c = container_of(cl, struct cache_set, cl);
1675cafe5635SKent Overstreet struct cache *ca;
1676cafe5635SKent Overstreet
1677cafe5635SKent Overstreet debugfs_remove(c->debug);
1678cafe5635SKent Overstreet
1679cafe5635SKent Overstreet bch_open_buckets_free(c);
1680cafe5635SKent Overstreet bch_btree_cache_free(c);
1681cafe5635SKent Overstreet bch_journal_free(c);
1682cafe5635SKent Overstreet
1683a4b732a2SLiang Chen mutex_lock(&bch_register_lock);
16844a784266SColy Li bch_bset_sort_state_free(&c->sort);
16854a784266SColy Li free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
16864a784266SColy Li
168708fdb2cdSColy Li ca = c->cache;
1688c9a78332SSlava Pestov if (ca) {
1689c9a78332SSlava Pestov ca->set = NULL;
1690697e2349SColy Li c->cache = NULL;
1691cafe5635SKent Overstreet kobject_put(&ca->kobj);
1692c9a78332SSlava Pestov }
1693cafe5635SKent Overstreet
1694cafe5635SKent Overstreet
1695da415a09SNicholas Swenson if (c->moving_gc_wq)
1696da415a09SNicholas Swenson destroy_workqueue(c->moving_gc_wq);
1697d19936a2SKent Overstreet bioset_exit(&c->bio_split);
1698d19936a2SKent Overstreet mempool_exit(&c->fill_iter);
1699d19936a2SKent Overstreet mempool_exit(&c->bio_meta);
1700d19936a2SKent Overstreet mempool_exit(&c->search);
1701cafe5635SKent Overstreet kfree(c->devices);
1702cafe5635SKent Overstreet
1703cafe5635SKent Overstreet list_del(&c->list);
1704cafe5635SKent Overstreet mutex_unlock(&bch_register_lock);
1705cafe5635SKent Overstreet
17061132e56eSColy Li pr_info("Cache set %pU unregistered\n", c->set_uuid);
1707cafe5635SKent Overstreet wake_up(&unregister_wait);
1708cafe5635SKent Overstreet
1709cafe5635SKent Overstreet closure_debug_destroy(&c->cl);
1710cafe5635SKent Overstreet kobject_put(&c->kobj);
1711cafe5635SKent Overstreet }
1712cafe5635SKent Overstreet
1713cafe5635SKent Overstreet static void cache_set_flush(struct closure *cl)
1714cafe5635SKent Overstreet {
1715cafe5635SKent Overstreet struct cache_set *c = container_of(cl, struct cache_set, caching);
171608fdb2cdSColy Li struct cache *ca = c->cache;
1717cafe5635SKent Overstreet struct btree *b;
1718cafe5635SKent Overstreet
1719cafe5635SKent Overstreet bch_cache_accounting_destroy(&c->accounting);
1720cafe5635SKent Overstreet
1721cafe5635SKent Overstreet kobject_put(&c->internal);
1722cafe5635SKent Overstreet kobject_del(&c->kobj);
1723cafe5635SKent Overstreet
1724b387e9b5SColy Li if (!IS_ERR_OR_NULL(c->gc_thread))
172572a44517SKent Overstreet kthread_stop(c->gc_thread);
172672a44517SKent Overstreet
1727*cc05aa2cSLiequan Che if (!IS_ERR_OR_NULL(c->root))
1728cafe5635SKent Overstreet list_add(&c->root->list, &c->btree_cache);
1729cafe5635SKent Overstreet
1730e6dcbd3eSColy Li /*
1731e6dcbd3eSColy Li * Avoid flushing cached nodes if cache set is retiring
1732e6dcbd3eSColy Li * due to too many I/O errors detected.
1733e6dcbd3eSColy Li */
1734e6dcbd3eSColy Li if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
17352a285686SKent Overstreet list_for_each_entry(b, &c->btree_cache, list) {
17362a285686SKent Overstreet mutex_lock(&b->write_lock);
1737cafe5635SKent Overstreet if (btree_node_dirty(b))
17382a285686SKent Overstreet __bch_btree_node_write(b, NULL);
17392a285686SKent Overstreet mutex_unlock(&b->write_lock);
17402a285686SKent Overstreet }
1741cafe5635SKent Overstreet
174279826c35SKent Overstreet if (ca->alloc_thread)
174379826c35SKent Overstreet kthread_stop(ca->alloc_thread);
174479826c35SKent Overstreet
17455b1016e6SKent Overstreet if (c->journal.cur) {
1746dabb4433SKent Overstreet cancel_delayed_work_sync(&c->journal.work);
1747dabb4433SKent Overstreet /* flush last journal entry if needed */
1748dabb4433SKent Overstreet c->journal.work.work.func(&c->journal.work.work);
17495b1016e6SKent Overstreet }
1750dabb4433SKent Overstreet
1751cafe5635SKent Overstreet closure_return(cl);
1752cafe5635SKent Overstreet }
1753cafe5635SKent Overstreet
17547e027ca4SColy Li /*
17557e027ca4SColy Li * This function is only called when CACHE_SET_IO_DISABLE is set, which means
17567e027ca4SColy Li * cache set is unregistering due to too many I/O errors. In this condition,
17577e027ca4SColy Li * the bcache device might be stopped, it depends on stop_when_cache_set_failed
17587e027ca4SColy Li * value and whether the broken cache has dirty data:
17597e027ca4SColy Li *
17607e027ca4SColy Li * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device
17617e027ca4SColy Li * BCH_CACHED_DEV_STOP_AUTO 0 NO
17627e027ca4SColy Li * BCH_CACHED_DEV_STOP_AUTO 1 YES
17637e027ca4SColy Li * BCH_CACHED_DEV_STOP_ALWAYS 0 YES
17647e027ca4SColy Li * BCH_CACHED_DEV_STOP_ALWAYS 1 YES
17657e027ca4SColy Li *
17667e027ca4SColy Li * The expected behavior is, if stop_when_cache_set_failed is configured to
17677e027ca4SColy Li * "auto" via sysfs interface, the bcache device will not be stopped if the
17687e027ca4SColy Li * backing device is clean on the broken cache device.
17697e027ca4SColy Li */
17707e027ca4SColy Li static void conditional_stop_bcache_device(struct cache_set *c,
17717e027ca4SColy Li struct bcache_device *d,
17727e027ca4SColy Li struct cached_dev *dc)
17737e027ca4SColy Li {
17747e027ca4SColy Li if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
177546f5aa88SJoe Perches pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
17761132e56eSColy Li d->disk->disk_name, c->set_uuid);
17777e027ca4SColy Li bcache_device_stop(d);
17787e027ca4SColy Li } else if (atomic_read(&dc->has_dirty)) {
17797e027ca4SColy Li /*
17807e027ca4SColy Li * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
17817e027ca4SColy Li * and dc->has_dirty == 1
17827e027ca4SColy Li */
178346f5aa88SJoe Perches pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
17847e027ca4SColy Li d->disk->disk_name);
17854fd8e138SColy Li /*
17864fd8e138SColy Li * There might be a small time gap in which the cache set is
17874fd8e138SColy Li * released but the bcache device is not. Inside this time
17884fd8e138SColy Li * gap, regular I/O requests will go directly to the
17894fd8e138SColy Li * backing device since no cache set is attached. This
17904fd8e138SColy Li * behavior may also introduce potentially inconsistent
17914fd8e138SColy Li * data in writeback mode while the cache is dirty.
17924fd8e138SColy Li * Therefore before calling bcache_device_stop() due
17934fd8e138SColy Li * to a broken cache device, dc->io_disable should be
17944fd8e138SColy Li * explicitly set to true.
17954fd8e138SColy Li */
17964fd8e138SColy Li dc->io_disable = true;
17974fd8e138SColy Li /* make others know io_disable is true earlier */
17984fd8e138SColy Li smp_mb();
17997e027ca4SColy Li bcache_device_stop(d);
18007e027ca4SColy Li } else {
18017e027ca4SColy Li /*
18027e027ca4SColy Li * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
18037e027ca4SColy Li * and dc->has_dirty == 0
18047e027ca4SColy Li */
180546f5aa88SJoe Perches pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
18067e027ca4SColy Li d->disk->disk_name);
18077e027ca4SColy Li }
18087e027ca4SColy Li }
18097e027ca4SColy Li
1810cafe5635SKent Overstreet static void __cache_set_unregister(struct closure *cl)
1811cafe5635SKent Overstreet {
1812cafe5635SKent Overstreet struct cache_set *c = container_of(cl, struct cache_set, caching);
18135caa52afSKent Overstreet struct cached_dev *dc;
18147e027ca4SColy Li struct bcache_device *d;
1815cafe5635SKent Overstreet size_t i;
1816cafe5635SKent Overstreet
1817cafe5635SKent Overstreet mutex_lock(&bch_register_lock);
1818cafe5635SKent Overstreet
18197e027ca4SColy Li for (i = 0; i < c->devices_max_used; i++) {
18207e027ca4SColy Li d = c->devices[i];
18217e027ca4SColy Li if (!d)
18227e027ca4SColy Li continue;
18237e027ca4SColy Li
18245caa52afSKent Overstreet if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
18255caa52afSKent Overstreet test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
18267e027ca4SColy Li dc = container_of(d, struct cached_dev, disk);
18275caa52afSKent Overstreet bch_cached_dev_detach(dc);
18287e027ca4SColy Li if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
18297e027ca4SColy Li conditional_stop_bcache_device(c, d, dc);
18305caa52afSKent Overstreet } else {
18317e027ca4SColy Li bcache_device_stop(d);
18325caa52afSKent Overstreet }
18335caa52afSKent Overstreet }
1834cafe5635SKent Overstreet
1835cafe5635SKent Overstreet mutex_unlock(&bch_register_lock);
1836cafe5635SKent Overstreet
1837cafe5635SKent Overstreet continue_at(cl, cache_set_flush, system_wq);
1838cafe5635SKent Overstreet }
1839cafe5635SKent Overstreet
1840cafe5635SKent Overstreet void bch_cache_set_stop(struct cache_set *c)
1841cafe5635SKent Overstreet {
1842cafe5635SKent Overstreet if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
184363d63b51SColy Li /* closure_fn set to __cache_set_unregister() */
1844cafe5635SKent Overstreet closure_queue(&c->caching);
1845cafe5635SKent Overstreet }
1846cafe5635SKent Overstreet
1847cafe5635SKent Overstreet void bch_cache_set_unregister(struct cache_set *c)
1848cafe5635SKent Overstreet {
1849cafe5635SKent Overstreet set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1850cafe5635SKent Overstreet bch_cache_set_stop(c);
1851cafe5635SKent Overstreet }
1852cafe5635SKent Overstreet
1853de1fafabSColy Li #define alloc_meta_bucket_pages(gfp, sb) \
1854de1fafabSColy Li ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
1855cafe5635SKent Overstreet
1856cafe5635SKent Overstreet struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1857cafe5635SKent Overstreet {
1858cafe5635SKent Overstreet int iter_size;
18594a784266SColy Li struct cache *ca = container_of(sb, struct cache, sb);
1860cafe5635SKent Overstreet struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
18611fae7cf0SColy Li
1862cafe5635SKent Overstreet if (!c)
1863cafe5635SKent Overstreet return NULL;
1864cafe5635SKent Overstreet
1865cafe5635SKent Overstreet __module_get(THIS_MODULE);
1866cafe5635SKent Overstreet closure_init(&c->cl, NULL);
1867cafe5635SKent Overstreet set_closure_fn(&c->cl, cache_set_free, system_wq);
1868cafe5635SKent Overstreet
1869cafe5635SKent Overstreet closure_init(&c->caching, &c->cl);
1870cafe5635SKent Overstreet set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
1871cafe5635SKent Overstreet
1872cafe5635SKent Overstreet /* Maybe create continue_at_noreturn() and use it here? */
1873cafe5635SKent Overstreet closure_set_stopped(&c->cl);
1874cafe5635SKent Overstreet closure_put(&c->cl);
1875cafe5635SKent Overstreet
1876cafe5635SKent Overstreet kobject_init(&c->kobj, &bch_cache_set_ktype);
1877cafe5635SKent Overstreet kobject_init(&c->internal, &bch_cache_set_internal_ktype);
1878cafe5635SKent Overstreet
1879cafe5635SKent Overstreet bch_cache_accounting_init(&c->accounting, &c->cl);
1880cafe5635SKent Overstreet
18811132e56eSColy Li memcpy(c->set_uuid, sb->set_uuid, 16);
1882d721a43fSColy Li
18834a784266SColy Li c->cache = ca;
18844a784266SColy Li c->cache->set = c;
1885cafe5635SKent Overstreet c->bucket_bits = ilog2(sb->bucket_size);
1886cafe5635SKent Overstreet c->block_bits = ilog2(sb->block_size);
18874a784266SColy Li c->nr_uuids = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
18882831231dSColy Li c->devices_max_used = 0;
1889ea8c5356SColy Li atomic_set(&c->attached_dev_nr, 0);
18904a784266SColy Li c->btree_pages = meta_bucket_pages(sb);
1891cafe5635SKent Overstreet if (c->btree_pages > BTREE_MAX_PAGES)
1892cafe5635SKent Overstreet c->btree_pages = max_t(int, c->btree_pages / 4,
1893cafe5635SKent Overstreet BTREE_MAX_PAGES);
1894cafe5635SKent Overstreet
1895cb7a583eSKent Overstreet sema_init(&c->sb_write_mutex, 1);
1896e8e1d468SKent Overstreet mutex_init(&c->bucket_lock);
18970a63b66dSKent Overstreet init_waitqueue_head(&c->btree_cache_wait);
189834cf78bfSGuoju Fang spin_lock_init(&c->btree_cannibalize_lock);
189935fcd848SKent Overstreet init_waitqueue_head(&c->bucket_wait);
1900be628be0SKent Overstreet init_waitqueue_head(&c->gc_wait);
1901cb7a583eSKent Overstreet sema_init(&c->uuid_write_mutex, 1);
190265d22e91SKent Overstreet
190365d22e91SKent Overstreet spin_lock_init(&c->btree_gc_time.lock);
190465d22e91SKent Overstreet spin_lock_init(&c->btree_split_time.lock);
190565d22e91SKent Overstreet spin_lock_init(&c->btree_read_time.lock);
1906e8e1d468SKent Overstreet
1907cafe5635SKent Overstreet bch_moving_init_cache_set(c);
1908cafe5635SKent Overstreet
1909cafe5635SKent Overstreet INIT_LIST_HEAD(&c->list);
1910cafe5635SKent Overstreet INIT_LIST_HEAD(&c->cached_devs);
1911cafe5635SKent Overstreet INIT_LIST_HEAD(&c->btree_cache);
1912cafe5635SKent Overstreet INIT_LIST_HEAD(&c->btree_cache_freeable);
1913cafe5635SKent Overstreet INIT_LIST_HEAD(&c->btree_cache_freed);
1914cafe5635SKent Overstreet INIT_LIST_HEAD(&c->data_buckets);
1915cafe5635SKent Overstreet
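/*
 * Size the fill_iter iterators for the worst case: one btree_iter_set
 * per block in a btree-node-sized (meta) bucket.
 */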
19166479b9f4SMatthew Mirvish iter_size = sizeof(struct btree_iter) +
19176479b9f4SMatthew Mirvish ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
1918cafe5635SKent Overstreet sizeof(struct btree_iter_set);
1919cafe5635SKent Overstreet
1920a42d3c64SColy Li c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
1921a42d3c64SColy Li if (!c->devices)
1922a42d3c64SColy Li goto err;
1923a42d3c64SColy Li
1924a42d3c64SColy Li if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
1925a42d3c64SColy Li goto err;
1926a42d3c64SColy Li
1927a42d3c64SColy Li if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
1928a42d3c64SColy Li sizeof(struct bbio) +
19294a784266SColy Li sizeof(struct bio_vec) * meta_bucket_pages(sb)))
1930a42d3c64SColy Li goto err;
1931a42d3c64SColy Li
1932a42d3c64SColy Li if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
1933a42d3c64SColy Li goto err;
1934a42d3c64SColy Li
1935a42d3c64SColy Li if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
1936faa8e2c4SMing Lei BIOSET_NEED_RESCUER))
1937a42d3c64SColy Li goto err;
1938a42d3c64SColy Li
19394a784266SColy Li c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
1940a42d3c64SColy Li if (!c->uuids)
1941a42d3c64SColy Li goto err;
1942a42d3c64SColy Li
1943a42d3c64SColy Li c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
1944a42d3c64SColy Li if (!c->moving_gc_wq)
1945a42d3c64SColy Li goto err;
1946a42d3c64SColy Li
1947a42d3c64SColy Li if (bch_journal_alloc(c))
1948a42d3c64SColy Li goto err;
1949a42d3c64SColy Li
1950a42d3c64SColy Li if (bch_btree_cache_alloc(c))
1951a42d3c64SColy Li goto err;
1952a42d3c64SColy Li
1953a42d3c64SColy Li if (bch_open_buckets_alloc(c))
1954a42d3c64SColy Li goto err;
1955a42d3c64SColy Li
1956a42d3c64SColy Li if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1957cafe5635SKent Overstreet goto err;
1958cafe5635SKent Overstreet
1959cafe5635SKent Overstreet c->congested_read_threshold_us = 2000;
1960cafe5635SKent Overstreet c->congested_write_threshold_us = 20000;
19617ba0d830SColy Li c->error_limit = DEFAULT_IO_ERROR_LIMIT;
1962c5fcdedcSColy Li c->idle_max_writeback_rate_enabled = 1;
1963771f393eSColy Li WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
1964cafe5635SKent Overstreet
1965cafe5635SKent Overstreet return c;
1966cafe5635SKent Overstreet err:
1967cafe5635SKent Overstreet bch_cache_set_unregister(c);
1968cafe5635SKent Overstreet return NULL;
1969cafe5635SKent Overstreet }
1970cafe5635SKent Overstreet
1971ce3e4cfbSColy Li static int run_cache_set(struct cache_set *c)
1972cafe5635SKent Overstreet {
1973cafe5635SKent Overstreet const char *err = "cannot allocate memory";
1974cafe5635SKent Overstreet struct cached_dev *dc, *t;
197508fdb2cdSColy Li struct cache *ca = c->cache;
1976c18536a7SKent Overstreet struct closure cl;
197795f18c9dSShenghui Wang LIST_HEAD(journal);
197895f18c9dSShenghui Wang struct journal_replay *l;
1979cafe5635SKent Overstreet
1980c18536a7SKent Overstreet closure_init_stack(&cl);
1981cafe5635SKent Overstreet
198208fdb2cdSColy Li c->nbuckets = ca->sb.nbuckets;
1983be628be0SKent Overstreet set_gc_sectors(c);
1984cafe5635SKent Overstreet
19856f9414e0SColy Li if (CACHE_SYNC(&c->cache->sb)) {
1986cafe5635SKent Overstreet struct bkey *k;
1987cafe5635SKent Overstreet struct jset *j;
1988cafe5635SKent Overstreet
1989cafe5635SKent Overstreet err = "cannot allocate memory for journal";
1990c18536a7SKent Overstreet if (bch_journal_read(c, &journal))
1991cafe5635SKent Overstreet goto err;
1992cafe5635SKent Overstreet
199346f5aa88SJoe Perches pr_debug("btree_journal_read() done\n");
1994cafe5635SKent Overstreet
1995cafe5635SKent Overstreet err = "no journal entries found";
1996cafe5635SKent Overstreet if (list_empty(&journal))
1997cafe5635SKent Overstreet goto err;
1998cafe5635SKent Overstreet
1999cafe5635SKent Overstreet j = &list_entry(journal.prev, struct journal_replay, list)->j;
2000cafe5635SKent Overstreet
2001cafe5635SKent Overstreet err = "IO error reading priorities";
200249d08d59SColy Li if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
200349d08d59SColy Li goto err;
2004cafe5635SKent Overstreet
2005cafe5635SKent Overstreet /*
2006cafe5635SKent Overstreet * If prio_read() fails it'll call cache_set_error and we'll
2007cafe5635SKent Overstreet * tear everything down right away, but if we perhaps checked
2008cafe5635SKent Overstreet * sooner we could avoid journal replay.
2009cafe5635SKent Overstreet */
2010cafe5635SKent Overstreet
2011cafe5635SKent Overstreet k = &j->btree_root;
2012cafe5635SKent Overstreet
2013cafe5635SKent Overstreet err = "bad btree root";
201465d45231SKent Overstreet if (__bch_btree_ptr_invalid(c, k))
2015cafe5635SKent Overstreet goto err;
2016cafe5635SKent Overstreet
2017cafe5635SKent Overstreet err = "error reading btree root";
2018b0d30981SColy Li c->root = bch_btree_node_get(c, NULL, k,
2019b0d30981SColy Li j->btree_level,
2020b0d30981SColy Li true, NULL);
20214a4bba9fSColy Li if (IS_ERR(c->root))
2022cafe5635SKent Overstreet goto err;
2023cafe5635SKent Overstreet
2024cafe5635SKent Overstreet list_del_init(&c->root->list);
2025cafe5635SKent Overstreet rw_unlock(true, c->root);
2026cafe5635SKent Overstreet
2027c18536a7SKent Overstreet err = uuid_read(c, j, &cl);
2028cafe5635SKent Overstreet if (err)
2029cafe5635SKent Overstreet goto err;
2030cafe5635SKent Overstreet
2031cafe5635SKent Overstreet err = "error in recovery";
2032c18536a7SKent Overstreet if (bch_btree_check(c))
2033cafe5635SKent Overstreet goto err;
2034cafe5635SKent Overstreet
2035cafe5635SKent Overstreet bch_journal_mark(c, &journal);
20362531d9eeSKent Overstreet bch_initial_gc_finish(c);
203746f5aa88SJoe Perches pr_debug("btree_check() done\n");
2038cafe5635SKent Overstreet
2039cafe5635SKent Overstreet /*
2040cafe5635SKent Overstreet * bcache_journal_next() can't happen sooner, or
2041cafe5635SKent Overstreet * btree_gc_finish() will give spurious errors about last_gc >
2042cafe5635SKent Overstreet * gc_gen - this is a hack but oh well.
2043cafe5635SKent Overstreet */
2044cafe5635SKent Overstreet bch_journal_next(&c->journal);
2045cafe5635SKent Overstreet
2046119ba0f8SKent Overstreet err = "error starting allocator thread";
2047119ba0f8SKent Overstreet if (bch_cache_allocator_start(ca))
2048119ba0f8SKent Overstreet goto err;
2049cafe5635SKent Overstreet
2050cafe5635SKent Overstreet /*
2051cafe5635SKent Overstreet * First place it's safe to allocate: btree_check() and
2052cafe5635SKent Overstreet * btree_gc_finish() have to run before we have buckets to
2053cafe5635SKent Overstreet * allocate, and bch_bucket_alloc_set() might cause a journal
2054cafe5635SKent Overstreet * entry to be written so bcache_journal_next() has to be called
2055cafe5635SKent Overstreet * first.
2056cafe5635SKent Overstreet *
2057cafe5635SKent Overstreet * If the uuids were in the old format we have to rewrite them
2058cafe5635SKent Overstreet * before the next journal entry is written:
2059cafe5635SKent Overstreet */
2060cafe5635SKent Overstreet if (j->version < BCACHE_JSET_VERSION_UUID)
2061cafe5635SKent Overstreet __uuid_write(c);
2062cafe5635SKent Overstreet
2063ce3e4cfbSColy Li err = "bcache: replay journal failed";
2064ce3e4cfbSColy Li if (bch_journal_replay(c, &journal))
2065ce3e4cfbSColy Li goto err;
2066cafe5635SKent Overstreet } else {
20676f10f7d1SColy Li unsigned int j;
2068cafe5635SKent Overstreet
206908fdb2cdSColy Li pr_notice("invalidating existing data\n");
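/*
 * No synced cache state to recover: lay out a fresh journal using
 * roughly nbuckets/128 buckets, clamped to [2, SB_JOURNAL_BUCKETS],
 * starting at first_bucket.
 */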
2070cafe5635SKent Overstreet ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2071cafe5635SKent Overstreet 2, SB_JOURNAL_BUCKETS);
2072cafe5635SKent Overstreet
2073cafe5635SKent Overstreet for (j = 0; j < ca->sb.keys; j++)
2074cafe5635SKent Overstreet ca->sb.d[j] = ca->sb.first_bucket + j;
2075cafe5635SKent Overstreet
20762531d9eeSKent Overstreet bch_initial_gc_finish(c);
2077cafe5635SKent Overstreet
2078119ba0f8SKent Overstreet err = "error starting allocator thread";
2079119ba0f8SKent Overstreet if (bch_cache_allocator_start(ca))
2080119ba0f8SKent Overstreet goto err;
2081cafe5635SKent Overstreet
2082cafe5635SKent Overstreet mutex_lock(&c->bucket_lock);
208384c529aeSAndrea Righi bch_prio_write(ca, true);
2084cafe5635SKent Overstreet mutex_unlock(&c->bucket_lock);
2085cafe5635SKent Overstreet
2086cafe5635SKent Overstreet err = "cannot allocate new UUID bucket";
2087cafe5635SKent Overstreet if (__uuid_write(c))
208872a44517SKent Overstreet goto err;
2089cafe5635SKent Overstreet
2090cafe5635SKent Overstreet err = "cannot allocate new btree root";
20912452cc89SSlava Pestov c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
2092028ddcacSZheng Wang if (IS_ERR(c->root))
209372a44517SKent Overstreet goto err;
2094cafe5635SKent Overstreet
20952a285686SKent Overstreet mutex_lock(&c->root->write_lock);
2096cafe5635SKent Overstreet bkey_copy_key(&c->root->key, &MAX_KEY);
2097c18536a7SKent Overstreet bch_btree_node_write(c->root, &cl);
20982a285686SKent Overstreet mutex_unlock(&c->root->write_lock);
2099cafe5635SKent Overstreet
2100cafe5635SKent Overstreet bch_btree_set_root(c->root);
2101cafe5635SKent Overstreet rw_unlock(true, c->root);
2102cafe5635SKent Overstreet
2103cafe5635SKent Overstreet /*
2104cafe5635SKent Overstreet * We don't want to write the first journal entry until
2105cafe5635SKent Overstreet * everything is set up - fortunately journal entries won't be
2106cafe5635SKent Overstreet * written until the SET_CACHE_SYNC() here:
2107cafe5635SKent Overstreet */
21086f9414e0SColy Li SET_CACHE_SYNC(&c->cache->sb, true);
2109cafe5635SKent Overstreet
2110cafe5635SKent Overstreet bch_journal_next(&c->journal);
2111c18536a7SKent Overstreet bch_journal_meta(c, &cl);
2112cafe5635SKent Overstreet }
2113cafe5635SKent Overstreet
211472a44517SKent Overstreet err = "error starting gc thread";
211572a44517SKent Overstreet if (bch_gc_thread_start(c))
211672a44517SKent Overstreet goto err;
211772a44517SKent Overstreet
2118c18536a7SKent Overstreet closure_sync(&cl);
21194a784266SColy Li c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
2120cafe5635SKent Overstreet bcache_write_super(c);
2121cafe5635SKent Overstreet
21225342fd42SColy Li if (bch_has_feature_obso_large_bucket(&c->cache->sb))
21235342fd42SColy Li pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
21245342fd42SColy Li
2125cafe5635SKent Overstreet list_for_each_entry_safe(dc, t, &uncached_devices, list)
212673ac105bSTang Junhui bch_cached_dev_attach(dc, c, NULL);
2127cafe5635SKent Overstreet
2128cafe5635SKent Overstreet flash_devs_run(c);
2129cafe5635SKent Overstreet
213032feee36SColy Li bch_journal_space_reserve(&c->journal);
2131bf0c55c9SSlava Pestov set_bit(CACHE_SET_RUNNING, &c->flags);
2132ce3e4cfbSColy Li return 0;
2133cafe5635SKent Overstreet err:
213495f18c9dSShenghui Wang while (!list_empty(&journal)) {
213595f18c9dSShenghui Wang l = list_first_entry(&journal, struct journal_replay, list);
213695f18c9dSShenghui Wang list_del(&l->list);
213795f18c9dSShenghui Wang kfree(l);
213895f18c9dSShenghui Wang }
213995f18c9dSShenghui Wang
2140c18536a7SKent Overstreet closure_sync(&cl);
214168a53c95SColy Li
2142c8694948SKees Cook bch_cache_set_error(c, "%s", err);
2143ce3e4cfbSColy Li
2144ce3e4cfbSColy Li return -EIO;
2145cafe5635SKent Overstreet }
2146cafe5635SKent Overstreet
2147cafe5635SKent Overstreet static const char *register_cache_set(struct cache *ca)
2148cafe5635SKent Overstreet {
2149cafe5635SKent Overstreet char buf[12];
2150cafe5635SKent Overstreet const char *err = "cannot allocate memory";
2151cafe5635SKent Overstreet struct cache_set *c;
2152cafe5635SKent Overstreet
2153cafe5635SKent Overstreet list_for_each_entry(c, &bch_cache_sets, list)
21541132e56eSColy Li if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
2155697e2349SColy Li if (c->cache)
2156cafe5635SKent Overstreet return "duplicate cache set member";
2157cafe5635SKent Overstreet
2158cafe5635SKent Overstreet goto found;
2159cafe5635SKent Overstreet }
2160cafe5635SKent Overstreet
2161cafe5635SKent Overstreet c = bch_cache_set_alloc(&ca->sb);
2162cafe5635SKent Overstreet if (!c)
2163cafe5635SKent Overstreet return err;
2164cafe5635SKent Overstreet
2165cafe5635SKent Overstreet err = "error creating kobject";
21661132e56eSColy Li if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
2167cafe5635SKent Overstreet kobject_add(&c->internal, &c->kobj, "internal"))
2168cafe5635SKent Overstreet goto err;
2169cafe5635SKent Overstreet
2170cafe5635SKent Overstreet if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
2171cafe5635SKent Overstreet goto err;
2172cafe5635SKent Overstreet
2173cafe5635SKent Overstreet bch_debug_init_cache_set(c);
2174cafe5635SKent Overstreet
2175cafe5635SKent Overstreet list_add(&c->list, &bch_cache_sets);
2176cafe5635SKent Overstreet found:
2177cafe5635SKent Overstreet sprintf(buf, "cache%i", ca->sb.nr_this_dev);
2178cafe5635SKent Overstreet if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
2179cafe5635SKent Overstreet sysfs_create_link(&c->kobj, &ca->kobj, buf))
2180cafe5635SKent Overstreet goto err;
2181cafe5635SKent Overstreet
2182d83353b3SKent Overstreet kobject_get(&ca->kobj);
2183cafe5635SKent Overstreet ca->set = c;
2184697e2349SColy Li ca->set->cache = ca;
2185cafe5635SKent Overstreet
2186ce3e4cfbSColy Li err = "failed to run cache set";
2187ce3e4cfbSColy Li if (run_cache_set(c) < 0)
2188ce3e4cfbSColy Li goto err;
2189cafe5635SKent Overstreet
2190cafe5635SKent Overstreet return NULL;
2191cafe5635SKent Overstreet err:
2192cafe5635SKent Overstreet bch_cache_set_unregister(c);
2193cafe5635SKent Overstreet return err;
2194cafe5635SKent Overstreet }
2195cafe5635SKent Overstreet
2196cafe5635SKent Overstreet /* Cache device */
2197cafe5635SKent Overstreet
21982d17456eSColy Li /* When ca->kobj released */
2199cafe5635SKent Overstreet void bch_cache_release(struct kobject *kobj)
2200cafe5635SKent Overstreet {
2201cafe5635SKent Overstreet struct cache *ca = container_of(kobj, struct cache, kobj);
22026f10f7d1SColy Li unsigned int i;
2203cafe5635SKent Overstreet
2204c9a78332SSlava Pestov if (ca->set) {
2205697e2349SColy Li BUG_ON(ca->set->cache != ca);
2206697e2349SColy Li ca->set->cache = NULL;
2207c9a78332SSlava Pestov }
2208cafe5635SKent Overstreet
2209c954ac8dSColy Li free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
2210cafe5635SKent Overstreet kfree(ca->prio_buckets);
2211cafe5635SKent Overstreet vfree(ca->buckets);
2212cafe5635SKent Overstreet
2213cafe5635SKent Overstreet free_heap(&ca->heap);
2214cafe5635SKent Overstreet free_fifo(&ca->free_inc);
221578365411SKent Overstreet
221678365411SKent Overstreet for (i = 0; i < RESERVE_NR; i++)
221778365411SKent Overstreet free_fifo(&ca->free[i]);
2218cafe5635SKent Overstreet
2219475389aeSChristoph Hellwig if (ca->sb_disk)
2220475389aeSChristoph Hellwig put_page(virt_to_page(ca->sb_disk));
2221cafe5635SKent Overstreet
22220781c874SKent Overstreet if (!IS_ERR_OR_NULL(ca->bdev))
22232c555598SJan Kara blkdev_put(ca->bdev, ca);
2224cafe5635SKent Overstreet
2225cafe5635SKent Overstreet kfree(ca);
2226cafe5635SKent Overstreet module_put(THIS_MODULE);
2227cafe5635SKent Overstreet }
2228cafe5635SKent Overstreet
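/*
 * Allocate the in-memory state for a cache device: the per-reserve
 * free fifos, the free_inc fifo, the heap, the bucket array and the
 * prio/disk bucket buffers, all sized from the on-disk superblock.
 * Returns 0 on success or a negative errno, unwinding any partial
 * allocations before returning an error.
 */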
2229c50d4d5dSYijing Wang static int cache_alloc(struct cache *ca)
2230cafe5635SKent Overstreet {
2231cafe5635SKent Overstreet size_t free;
2232682811b3STang Junhui size_t btree_buckets;
2233cafe5635SKent Overstreet struct bucket *b;
2234f6027bcaSDongbo Cao int ret = -ENOMEM;
2235f6027bcaSDongbo Cao const char *err = NULL;
2236cafe5635SKent Overstreet
2237cafe5635SKent Overstreet __module_get(THIS_MODULE);
2238cafe5635SKent Overstreet kobject_init(&ca->kobj, &bch_cache_ktype);
2239cafe5635SKent Overstreet
224049add496SChristoph Hellwig bio_init(&ca->journal.bio, NULL, ca->journal.bio.bi_inline_vecs, 8, 0);
2241cafe5635SKent Overstreet
2242682811b3STang Junhui /*
2243682811b3STang Junhui * When ca->sb.njournal_buckets is not zero, a journal exists and
2244682811b3STang Junhui * btree nodes may split during bch_journal_replay(), so buckets
2245682811b3STang Junhui * of RESERVE_BTREE type are needed.  In the worst case every
2246682811b3STang Junhui * journal bucket holds valid journal entries and all of the keys
2247682811b3STang Junhui * have to be replayed, so the number of RESERVE_BTREE buckets
2248682811b3STang Junhui * reserved here should be at least as large as the number of
2249682811b3STang Junhui * journal buckets.
2250682811b3STang Junhui */
2251682811b3STang Junhui btree_buckets = ca->sb.njournal_buckets ?: 8;
225278365411SKent Overstreet free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
22533a646fd7SDongbo Cao if (!free) {
22543a646fd7SDongbo Cao ret = -EPERM;
22553a646fd7SDongbo Cao err = "ca->sb.nbuckets is too small";
22563a646fd7SDongbo Cao goto err_free;
22573a646fd7SDongbo Cao }
2258cafe5635SKent Overstreet
2259f6027bcaSDongbo Cao if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
2260f6027bcaSDongbo Cao GFP_KERNEL)) {
2261f6027bcaSDongbo Cao err = "ca->free[RESERVE_BTREE] alloc failed";
2262f6027bcaSDongbo Cao goto err_btree_alloc;
2263f6027bcaSDongbo Cao }
2264f6027bcaSDongbo Cao
2265f6027bcaSDongbo Cao if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
2266f6027bcaSDongbo Cao GFP_KERNEL)) {
2267f6027bcaSDongbo Cao err = "ca->free[RESERVE_PRIO] alloc failed";
2268f6027bcaSDongbo Cao goto err_prio_alloc;
2269f6027bcaSDongbo Cao }
2270f6027bcaSDongbo Cao
2271f6027bcaSDongbo Cao if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
2272f6027bcaSDongbo Cao err = "ca->free[RESERVE_MOVINGGC] alloc failed";
2273f6027bcaSDongbo Cao goto err_movinggc_alloc;
2274f6027bcaSDongbo Cao }
2275f6027bcaSDongbo Cao
2276f6027bcaSDongbo Cao if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
2277f6027bcaSDongbo Cao err = "ca->free[RESERVE_NONE] alloc failed";
2278f6027bcaSDongbo Cao goto err_none_alloc;
2279f6027bcaSDongbo Cao }
2280f6027bcaSDongbo Cao
2281f6027bcaSDongbo Cao if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
2282f6027bcaSDongbo Cao err = "ca->free_inc alloc failed";
2283f6027bcaSDongbo Cao goto err_free_inc_alloc;
2284f6027bcaSDongbo Cao }
2285f6027bcaSDongbo Cao
2286f6027bcaSDongbo Cao if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
2287f6027bcaSDongbo Cao err = "ca->heap alloc failed";
2288f6027bcaSDongbo Cao goto err_heap_alloc;
2289f6027bcaSDongbo Cao }
2290f6027bcaSDongbo Cao
2291f6027bcaSDongbo Cao ca->buckets = vzalloc(array_size(sizeof(struct bucket),
2292f6027bcaSDongbo Cao ca->sb.nbuckets));
2293f6027bcaSDongbo Cao if (!ca->buckets) {
2294f6027bcaSDongbo Cao err = "ca->buckets alloc failed";
2295f6027bcaSDongbo Cao goto err_buckets_alloc;
2296f6027bcaSDongbo Cao }
2297f6027bcaSDongbo Cao
2298f6027bcaSDongbo Cao ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
22996396bb22SKees Cook prio_buckets(ca), 2),
2300f6027bcaSDongbo Cao GFP_KERNEL);
2301f6027bcaSDongbo Cao if (!ca->prio_buckets) {
2302f6027bcaSDongbo Cao err = "ca->prio_buckets alloc failed";
2303f6027bcaSDongbo Cao goto err_prio_buckets_alloc;
2304f6027bcaSDongbo Cao }
2305f6027bcaSDongbo Cao
2306c954ac8dSColy Li ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
2307f6027bcaSDongbo Cao if (!ca->disk_buckets) {
2308f6027bcaSDongbo Cao err = "ca->disk_buckets alloc failed";
2309f6027bcaSDongbo Cao goto err_disk_buckets_alloc;
2310f6027bcaSDongbo Cao }
2311cafe5635SKent Overstreet
2312cafe5635SKent Overstreet ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
2313cafe5635SKent Overstreet
2314cafe5635SKent Overstreet for_each_bucket(b, ca)
2315cafe5635SKent Overstreet atomic_set(&b->pin, 0);
2316cafe5635SKent Overstreet return 0;
2317f6027bcaSDongbo Cao
2318f6027bcaSDongbo Cao err_disk_buckets_alloc:
2319f6027bcaSDongbo Cao kfree(ca->prio_buckets);
2320f6027bcaSDongbo Cao err_prio_buckets_alloc:
2321f6027bcaSDongbo Cao vfree(ca->buckets);
2322f6027bcaSDongbo Cao err_buckets_alloc:
2323f6027bcaSDongbo Cao free_heap(&ca->heap);
2324f6027bcaSDongbo Cao err_heap_alloc:
2325f6027bcaSDongbo Cao free_fifo(&ca->free_inc);
2326f6027bcaSDongbo Cao err_free_inc_alloc:
2327f6027bcaSDongbo Cao free_fifo(&ca->free[RESERVE_NONE]);
2328f6027bcaSDongbo Cao err_none_alloc:
2329f6027bcaSDongbo Cao free_fifo(&ca->free[RESERVE_MOVINGGC]);
2330f6027bcaSDongbo Cao err_movinggc_alloc:
2331f6027bcaSDongbo Cao free_fifo(&ca->free[RESERVE_PRIO]);
2332f6027bcaSDongbo Cao err_prio_alloc:
2333f6027bcaSDongbo Cao free_fifo(&ca->free[RESERVE_BTREE]);
2334f6027bcaSDongbo Cao err_btree_alloc:
23353a646fd7SDongbo Cao err_free:
2336f6027bcaSDongbo Cao module_put(THIS_MODULE);
2337f6027bcaSDongbo Cao if (err)
23387e84c215SChristoph Hellwig pr_notice("error %pg: %s\n", ca->bdev, err);
2339f6027bcaSDongbo Cao return ret;
2340cafe5635SKent Overstreet }
2341cafe5635SKent Overstreet
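/*
 * Register a cache device: take over the superblock and block device,
 * allocate the in-memory structures via cache_alloc(), add the kobject
 * under the block device and attach the cache to its cache set.
 */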
2342cfa0c56dSChristoph Hellwig static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
2343cafe5635SKent Overstreet struct block_device *bdev, struct cache *ca)
2344cafe5635SKent Overstreet {
2345d9dc1702SEric Wheeler const char *err = NULL; /* must be set for any error case */
23469b299728SEric Wheeler int ret = 0;
2347cafe5635SKent Overstreet
2348f59fce84SKent Overstreet memcpy(&ca->sb, sb, sizeof(struct cache_sb));
2349cafe5635SKent Overstreet ca->bdev = bdev;
2350475389aeSChristoph Hellwig ca->sb_disk = sb_disk;
2351f59fce84SKent Overstreet
235270200574SChristoph Hellwig if (bdev_max_discard_sectors(bdev))
2353cafe5635SKent Overstreet ca->discard = CACHE_DISCARD(&ca->sb);
2354cafe5635SKent Overstreet
2355c50d4d5dSYijing Wang ret = cache_alloc(ca);
2356d9dc1702SEric Wheeler if (ret != 0) {
2357bb6d355cSColy Li /*
2358bb6d355cSColy Li * If we failed here, it means ca->kobj is not initialized yet,
2359bb6d355cSColy Li * so kobject_put() won't be called and there is no chance to
2360bb6d355cSColy Li * call blkdev_put() on bdev in bch_cache_release(). So we
2361bb6d355cSColy Li * explicitly call blkdev_put() here.
2362bb6d355cSColy Li */
23632c555598SJan Kara blkdev_put(bdev, ca);
2364d9dc1702SEric Wheeler if (ret == -ENOMEM)
2365d9dc1702SEric Wheeler err = "cache_alloc(): -ENOMEM";
23663a646fd7SDongbo Cao else if (ret == -EPERM)
23673a646fd7SDongbo Cao err = "cache_alloc(): cache device is too small";
2368d9dc1702SEric Wheeler else
2369d9dc1702SEric Wheeler err = "cache_alloc(): unknown error";
2370f59fce84SKent Overstreet goto err;
2371d9dc1702SEric Wheeler }
2372f59fce84SKent Overstreet
23738d65269fSChristoph Hellwig if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
23749b299728SEric Wheeler err = "error calling kobject_add";
23759b299728SEric Wheeler ret = -ENOMEM;
23769b299728SEric Wheeler goto out;
23779b299728SEric Wheeler }
2378cafe5635SKent Overstreet
23794fa03402SKent Overstreet mutex_lock(&bch_register_lock);
2380cafe5635SKent Overstreet err = register_cache_set(ca);
23814fa03402SKent Overstreet mutex_unlock(&bch_register_lock);
23824fa03402SKent Overstreet
23839b299728SEric Wheeler if (err) {
23849b299728SEric Wheeler ret = -ENODEV;
23859b299728SEric Wheeler goto out;
23869b299728SEric Wheeler }
2387cafe5635SKent Overstreet
23887e84c215SChristoph Hellwig pr_info("registered cache device %pg\n", ca->bdev);
23899b299728SEric Wheeler
2390d83353b3SKent Overstreet out:
2391d83353b3SKent Overstreet kobject_put(&ca->kobj);
23929b299728SEric Wheeler
2393cafe5635SKent Overstreet err:
23949b299728SEric Wheeler if (err)
23957e84c215SChristoph Hellwig pr_notice("error %pg: %s\n", ca->bdev, err);
23969b299728SEric Wheeler
23979b299728SEric Wheeler return ret;
2398cafe5635SKent Overstreet }
2399cafe5635SKent Overstreet
2400cafe5635SKent Overstreet /* Global interfaces/init */
2401cafe5635SKent Overstreet
2402fc2d5988SColy Li static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2403fc2d5988SColy Li const char *buffer, size_t size);
24040c277e21SColy Li static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
24050c277e21SColy Li struct kobj_attribute *attr,
24060c277e21SColy Li const char *buffer, size_t size);
2407cafe5635SKent Overstreet
2408cafe5635SKent Overstreet kobj_attribute_write(register, register_bcache);
2409cafe5635SKent Overstreet kobj_attribute_write(register_quiet, register_bcache);
24100c277e21SColy Li kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);
2411cafe5635SKent Overstreet
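/*
 * Helpers for register_bcache(): on -EBUSY, tell whether the device is
 * busy because bcache itself already has it open as a backing or cache
 * device, or because somebody else holds it.
 */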
24124e7b5671SChristoph Hellwig static bool bch_is_open_backing(dev_t dev)
2413b3cf37bfSColy Li {
2414a9dd53adSGabriel de Perthuis struct cache_set *c, *tc;
2415a9dd53adSGabriel de Perthuis struct cached_dev *dc, *t;
2416a9dd53adSGabriel de Perthuis
2417a9dd53adSGabriel de Perthuis list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2418a9dd53adSGabriel de Perthuis list_for_each_entry_safe(dc, t, &c->cached_devs, list)
24194e7b5671SChristoph Hellwig if (dc->bdev->bd_dev == dev)
2420a9dd53adSGabriel de Perthuis return true;
2421a9dd53adSGabriel de Perthuis list_for_each_entry_safe(dc, t, &uncached_devices, list)
24224e7b5671SChristoph Hellwig if (dc->bdev->bd_dev == dev)
2423a9dd53adSGabriel de Perthuis return true;
2424a9dd53adSGabriel de Perthuis return false;
2425a9dd53adSGabriel de Perthuis }
2426a9dd53adSGabriel de Perthuis
24274e7b5671SChristoph Hellwig static bool bch_is_open_cache(dev_t dev)
2428b3cf37bfSColy Li {
2429a9dd53adSGabriel de Perthuis struct cache_set *c, *tc;
2430a9dd53adSGabriel de Perthuis
243108fdb2cdSColy Li list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
243208fdb2cdSColy Li struct cache *ca = c->cache;
243308fdb2cdSColy Li
24344e7b5671SChristoph Hellwig if (ca->bdev->bd_dev == dev)
2435a9dd53adSGabriel de Perthuis return true;
243608fdb2cdSColy Li }
243708fdb2cdSColy Li
2438a9dd53adSGabriel de Perthuis return false;
2439a9dd53adSGabriel de Perthuis }
2440a9dd53adSGabriel de Perthuis
24414e7b5671SChristoph Hellwig static bool bch_is_open(dev_t dev)
2442b3cf37bfSColy Li {
24434e7b5671SChristoph Hellwig return bch_is_open_cache(dev) || bch_is_open_backing(dev);
2444a9dd53adSGabriel de Perthuis }
2445a9dd53adSGabriel de Perthuis
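/*
 * Context handed to the delayed-work based asynchronous registration
 * path; the worker frees path, sb and the args once registration has
 * been attempted.
 */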
24469e23ccf8SColy Li struct async_reg_args {
2447ee4a36f4SColy Li struct delayed_work reg_work;
24489e23ccf8SColy Li char *path;
24499e23ccf8SColy Li struct cache_sb *sb;
24509e23ccf8SColy Li struct cache_sb_disk *sb_disk;
24519e23ccf8SColy Li struct block_device *bdev;
2452abcc0cbdSJan Kara void *holder;
24539e23ccf8SColy Li };
24549e23ccf8SColy Li
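/*
 * Delayed-work handler for asynchronous backing device registration;
 * calls register_bdev() under bch_register_lock and then releases the
 * async_reg_args and the module reference.
 */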
24559e23ccf8SColy Li static void register_bdev_worker(struct work_struct *work)
24569e23ccf8SColy Li {
24579e23ccf8SColy Li int fail = false;
24589e23ccf8SColy Li struct async_reg_args *args =
2459ee4a36f4SColy Li container_of(work, struct async_reg_args, reg_work.work);
24609e23ccf8SColy Li
24619e23ccf8SColy Li mutex_lock(&bch_register_lock);
2462abcc0cbdSJan Kara if (register_bdev(args->sb, args->sb_disk, args->bdev, args->holder)
2463abcc0cbdSJan Kara < 0)
24649e23ccf8SColy Li fail = true;
24659e23ccf8SColy Li mutex_unlock(&bch_register_lock);
24669e23ccf8SColy Li
24679e23ccf8SColy Li if (fail)
24689e23ccf8SColy Li pr_info("error %s: fail to register backing device\n",
24699e23ccf8SColy Li args->path);
24709e23ccf8SColy Li kfree(args->sb);
24719e23ccf8SColy Li kfree(args->path);
24729e23ccf8SColy Li kfree(args);
24739e23ccf8SColy Li module_put(THIS_MODULE);
24749e23ccf8SColy Li }
24759e23ccf8SColy Li
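/*
 * Delayed-work handler for asynchronous cache device registration;
 * calls register_cache() and then releases the async_reg_args and the
 * module reference.
 */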
24769e23ccf8SColy Li static void register_cache_worker(struct work_struct *work)
24779e23ccf8SColy Li {
24789e23ccf8SColy Li int fail = false;
24799e23ccf8SColy Li struct async_reg_args *args =
2480ee4a36f4SColy Li container_of(work, struct async_reg_args, reg_work.work);
24819e23ccf8SColy Li
24829e23ccf8SColy Li /* blkdev_put() will be called in bch_cache_release() */
2483abcc0cbdSJan Kara if (register_cache(args->sb, args->sb_disk, args->bdev, args->holder))
24849e23ccf8SColy Li fail = true;
24859e23ccf8SColy Li
24869e23ccf8SColy Li if (fail)
24879e23ccf8SColy Li pr_info("error %s: fail to register cache device\n",
24889e23ccf8SColy Li args->path);
24899e23ccf8SColy Li kfree(args->sb);
24909e23ccf8SColy Li kfree(args->path);
24919e23ccf8SColy Li kfree(args);
24929e23ccf8SColy Li module_put(THIS_MODULE);
24939e23ccf8SColy Li }
24949e23ccf8SColy Li
2495d7fae7b4SKai Krakow static void register_device_async(struct async_reg_args *args)
24969e23ccf8SColy Li {
24979e23ccf8SColy Li if (SB_IS_BDEV(args->sb))
2498ee4a36f4SColy Li INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
24999e23ccf8SColy Li else
2500ee4a36f4SColy Li INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
25019e23ccf8SColy Li
2502ee4a36f4SColy Li /* A delay of 10 jiffies is enough */
2503ee4a36f4SColy Li queue_delayed_work(system_wq, &args->reg_work, 10);
25049e23ccf8SColy Li }
25059e23ccf8SColy Li
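/*
 * The holder passed to blkdev_get_by_dev() is also the bcache object
 * for the device: a struct cached_dev for backing devices and a
 * struct cache for cache devices.
 */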
2506abcc0cbdSJan Kara static void *alloc_holder_object(struct cache_sb *sb)
2507abcc0cbdSJan Kara {
2508abcc0cbdSJan Kara if (SB_IS_BDEV(sb))
2509abcc0cbdSJan Kara return kzalloc(sizeof(struct cached_dev), GFP_KERNEL);
2510abcc0cbdSJan Kara return kzalloc(sizeof(struct cache), GFP_KERNEL);
2511abcc0cbdSJan Kara }
2512abcc0cbdSJan Kara
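/*
 * sysfs write handler for the register and register_quiet files: parse
 * the device path, read the bcache superblock, reopen the device
 * exclusively with the right holder and register it either as a
 * backing device or as a cache device, synchronously or through the
 * asynchronous workers above.
 */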
2513cafe5635SKent Overstreet static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2514cafe5635SKent Overstreet const char *buffer, size_t size)
2515cafe5635SKent Overstreet {
251650246693SChristoph Hellwig const char *err;
251729cda393SColy Li char *path = NULL;
251850246693SChristoph Hellwig struct cache_sb *sb;
2519cfa0c56dSChristoph Hellwig struct cache_sb_disk *sb_disk;
25202c555598SJan Kara struct block_device *bdev, *bdev2;
25212c555598SJan Kara void *holder = NULL;
252250246693SChristoph Hellwig ssize_t ret;
2523a58e88bfSColy Li bool async_registration = false;
25242c555598SJan Kara bool quiet = false;
2525a58e88bfSColy Li
2526a58e88bfSColy Li #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
2527a58e88bfSColy Li async_registration = true;
2528a58e88bfSColy Li #endif
2529cafe5635SKent Overstreet
253050246693SChristoph Hellwig ret = -EBUSY;
253129cda393SColy Li err = "failed to reference bcache module";
2532cafe5635SKent Overstreet if (!try_module_get(THIS_MODULE))
253350246693SChristoph Hellwig goto out;
2534cafe5635SKent Overstreet
2535a59ff6ccSColy Li /* For latest state of bcache_is_reboot */
2536a59ff6ccSColy Li smp_mb();
253729cda393SColy Li err = "bcache is in reboot";
2538a59ff6ccSColy Li if (bcache_is_reboot)
253950246693SChristoph Hellwig goto out_module_put;
2540a59ff6ccSColy Li
254150246693SChristoph Hellwig ret = -ENOMEM;
254250246693SChristoph Hellwig err = "cannot allocate memory";
2543a56489d4SFlorian Schmaus path = kstrndup(buffer, size, GFP_KERNEL);
2544a56489d4SFlorian Schmaus if (!path)
254550246693SChristoph Hellwig goto out_module_put;
2546a56489d4SFlorian Schmaus
2547a56489d4SFlorian Schmaus sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
2548a56489d4SFlorian Schmaus if (!sb)
254950246693SChristoph Hellwig goto out_free_path;
2550cafe5635SKent Overstreet
255150246693SChristoph Hellwig ret = -EINVAL;
2552cafe5635SKent Overstreet err = "failed to open device";
25532c555598SJan Kara bdev = blkdev_get_by_path(strim(path), BLK_OPEN_READ, NULL, NULL);
25542c555598SJan Kara if (IS_ERR(bdev))
255550246693SChristoph Hellwig goto out_free_sb;
2556f59fce84SKent Overstreet
2557f59fce84SKent Overstreet err = "failed to set blocksize";
2558f59fce84SKent Overstreet if (set_blocksize(bdev, 4096))
255950246693SChristoph Hellwig goto out_blkdev_put;
2560cafe5635SKent Overstreet
2561cfa0c56dSChristoph Hellwig err = read_super(sb, bdev, &sb_disk);
2562cafe5635SKent Overstreet if (err)
256350246693SChristoph Hellwig goto out_blkdev_put;
2564cafe5635SKent Overstreet
2565abcc0cbdSJan Kara holder = alloc_holder_object(sb);
2566abcc0cbdSJan Kara if (!holder) {
2567abcc0cbdSJan Kara ret = -ENOMEM;
2568abcc0cbdSJan Kara err = "cannot allocate memory";
2569abcc0cbdSJan Kara goto out_put_sb_page;
2570abcc0cbdSJan Kara }
2571abcc0cbdSJan Kara
25722c555598SJan Kara /* Now reopen in exclusive mode with proper holder */
25732c555598SJan Kara bdev2 = blkdev_get_by_dev(bdev->bd_dev, BLK_OPEN_READ | BLK_OPEN_WRITE,
25742c555598SJan Kara holder, NULL);
25752c555598SJan Kara blkdev_put(bdev, NULL);
25762c555598SJan Kara bdev = bdev2;
25772c555598SJan Kara if (IS_ERR(bdev)) {
25782c555598SJan Kara ret = PTR_ERR(bdev);
25792c555598SJan Kara bdev = NULL;
25802c555598SJan Kara if (ret == -EBUSY) {
25812c555598SJan Kara dev_t dev;
25822c555598SJan Kara
25832c555598SJan Kara mutex_lock(&bch_register_lock);
25842c555598SJan Kara if (lookup_bdev(strim(path), &dev) == 0 &&
25852c555598SJan Kara bch_is_open(dev))
25862c555598SJan Kara err = "device already registered";
25872c555598SJan Kara else
25882c555598SJan Kara err = "device busy";
25892c555598SJan Kara mutex_unlock(&bch_register_lock);
25902c555598SJan Kara if (attr == &ksysfs_register_quiet) {
25912c555598SJan Kara quiet = true;
25922c555598SJan Kara ret = size;
25932c555598SJan Kara }
25942c555598SJan Kara }
25952c555598SJan Kara goto out_free_holder;
25962c555598SJan Kara }
25972c555598SJan Kara
2598cc40daf9STang Junhui err = "failed to register device";
2599a58e88bfSColy Li
2600a58e88bfSColy Li if (async_registration) {
26019e23ccf8SColy Li /* register in asynchronous way */
26029e23ccf8SColy Li struct async_reg_args *args =
26039e23ccf8SColy Li kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
26049e23ccf8SColy Li
26059e23ccf8SColy Li if (!args) {
26069e23ccf8SColy Li ret = -ENOMEM;
26079e23ccf8SColy Li err = "cannot allocate memory";
2608abcc0cbdSJan Kara goto out_free_holder;
26099e23ccf8SColy Li }
26109e23ccf8SColy Li
26119e23ccf8SColy Li args->path = path;
26129e23ccf8SColy Li args->sb = sb;
26139e23ccf8SColy Li args->sb_disk = sb_disk;
26149e23ccf8SColy Li args->bdev = bdev;
2615abcc0cbdSJan Kara args->holder = holder;
2616d7fae7b4SKai Krakow register_device_async(args);
26179e23ccf8SColy Li /* Do not wait; return to user space immediately */
26189e23ccf8SColy Li goto async_done;
26199e23ccf8SColy Li }
26209e23ccf8SColy Li
26212903381fSKent Overstreet if (SB_IS_BDEV(sb)) {
26224fa03402SKent Overstreet mutex_lock(&bch_register_lock);
2623abcc0cbdSJan Kara ret = register_bdev(sb, sb_disk, bdev, holder);
26244fa03402SKent Overstreet mutex_unlock(&bch_register_lock);
2625bb6d355cSColy Li /* blkdev_put() will be called in cached_dev_free() */
2626fc8f19ccSChristoph Hellwig if (ret < 0)
2627fc8f19ccSChristoph Hellwig goto out_free_sb;
2628cafe5635SKent Overstreet } else {
2629bb6d355cSColy Li /* blkdev_put() will be called in bch_cache_release() */
2630abcc0cbdSJan Kara ret = register_cache(sb, sb_disk, bdev, holder);
2631d55f7cb2SChao Yu if (ret)
2632fc8f19ccSChristoph Hellwig goto out_free_sb;
263350246693SChristoph Hellwig }
263450246693SChristoph Hellwig
2635f59fce84SKent Overstreet kfree(sb);
2636f59fce84SKent Overstreet kfree(path);
2637f59fce84SKent Overstreet module_put(THIS_MODULE);
26389e23ccf8SColy Li async_done:
263950246693SChristoph Hellwig return size;
2640f59fce84SKent Overstreet
2641abcc0cbdSJan Kara out_free_holder:
2642abcc0cbdSJan Kara kfree(holder);
264350246693SChristoph Hellwig out_put_sb_page:
2644cfa0c56dSChristoph Hellwig put_page(virt_to_page(sb_disk));
264550246693SChristoph Hellwig out_blkdev_put:
26462c555598SJan Kara if (bdev)
26472c555598SJan Kara blkdev_put(bdev, holder);
264850246693SChristoph Hellwig out_free_sb:
264950246693SChristoph Hellwig kfree(sb);
265050246693SChristoph Hellwig out_free_path:
265150246693SChristoph Hellwig kfree(path);
2652ae3cd299SColy Li path = NULL;
265350246693SChristoph Hellwig out_module_put:
265450246693SChristoph Hellwig module_put(THIS_MODULE);
265550246693SChristoph Hellwig out:
26562c555598SJan Kara if (!quiet)
265746f5aa88SJoe Perches pr_info("error %s: %s\n", path ? path : "", err);
265850246693SChristoph Hellwig return ret;
2659cafe5635SKent Overstreet }
2660cafe5635SKent Overstreet
26610c277e21SColy Li
26620c277e21SColy Li struct pdev {
26630c277e21SColy Li struct list_head list;
26640c277e21SColy Li struct cached_dev *dc;
26650c277e21SColy Li };
26660c277e21SColy Li
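/*
 * sysfs write handler for the pendings_cleanup file: stop backing
 * devices that are still sitting on the uncached list because the
 * cache set they belong to never showed up.
 */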
26670c277e21SColy Li static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
26680c277e21SColy Li struct kobj_attribute *attr,
26690c277e21SColy Li const char *buffer,
26700c277e21SColy Li size_t size)
26710c277e21SColy Li {
26720c277e21SColy Li LIST_HEAD(pending_devs);
26730c277e21SColy Li ssize_t ret = size;
26740c277e21SColy Li struct cached_dev *dc, *tdc;
26750c277e21SColy Li struct pdev *pdev, *tpdev;
26760c277e21SColy Li struct cache_set *c, *tc;
26770c277e21SColy Li
26780c277e21SColy Li mutex_lock(&bch_register_lock);
26790c277e21SColy Li list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
26800c277e21SColy Li pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
26810c277e21SColy Li if (!pdev)
26820c277e21SColy Li break;
26830c277e21SColy Li pdev->dc = dc;
26840c277e21SColy Li list_add(&pdev->list, &pending_devs);
26850c277e21SColy Li }
26860c277e21SColy Li
26870c277e21SColy Li list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
26880c277e21SColy Li char *pdev_set_uuid = pdev->dc->sb.set_uuid;
2689e8092707SYi Li list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
26901132e56eSColy Li char *set_uuid = c->set_uuid;
26910c277e21SColy Li
26920c277e21SColy Li if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
26930c277e21SColy Li list_del(&pdev->list);
26940c277e21SColy Li kfree(pdev);
26950c277e21SColy Li break;
26960c277e21SColy Li }
26970c277e21SColy Li }
26980c277e21SColy Li }
26990c277e21SColy Li mutex_unlock(&bch_register_lock);
27000c277e21SColy Li
27010c277e21SColy Li list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
270246f5aa88SJoe Perches pr_info("delete pdev %p\n", pdev);
27030c277e21SColy Li list_del(&pdev->list);
27040c277e21SColy Li bcache_device_stop(&pdev->dc->disk);
27050c277e21SColy Li kfree(pdev);
27060c277e21SColy Li }
27070c277e21SColy Li
27080c277e21SColy Li return ret;
27090c277e21SColy Li }
27100c277e21SColy Li
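/*
 * Reboot notifier: on shutdown, halt or power off, reject further
 * registrations, stop every cache set and backing device, and wait up
 * to ten seconds for them to finish closing.
 */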
2711cafe5635SKent Overstreet static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
2712cafe5635SKent Overstreet {
2713a59ff6ccSColy Li if (bcache_is_reboot)
2714a59ff6ccSColy Li return NOTIFY_DONE;
2715a59ff6ccSColy Li
2716cafe5635SKent Overstreet if (code == SYS_DOWN ||
2717cafe5635SKent Overstreet code == SYS_HALT ||
2718cafe5635SKent Overstreet code == SYS_POWER_OFF) {
2719cafe5635SKent Overstreet DEFINE_WAIT(wait);
2720cafe5635SKent Overstreet unsigned long start = jiffies;
2721cafe5635SKent Overstreet bool stopped = false;
2722cafe5635SKent Overstreet
2723cafe5635SKent Overstreet struct cache_set *c, *tc;
2724cafe5635SKent Overstreet struct cached_dev *dc, *tdc;
2725cafe5635SKent Overstreet
2726cafe5635SKent Overstreet mutex_lock(&bch_register_lock);
2727cafe5635SKent Overstreet
2728a59ff6ccSColy Li if (bcache_is_reboot)
2729a59ff6ccSColy Li goto out;
2730a59ff6ccSColy Li
2731a59ff6ccSColy Li /* New registrations are rejected from now on */
2732a59ff6ccSColy Li bcache_is_reboot = true;
2733a59ff6ccSColy Li /*
2734a59ff6ccSColy Li * Make a registering caller (if there is one) on another CPU
2735a59ff6ccSColy Li * core see that bcache_is_reboot was set to true above
2736a59ff6ccSColy Li */
2737a59ff6ccSColy Li smp_mb();
2738a59ff6ccSColy Li
2739cafe5635SKent Overstreet if (list_empty(&bch_cache_sets) &&
2740cafe5635SKent Overstreet list_empty(&uncached_devices))
2741cafe5635SKent Overstreet goto out;
2742cafe5635SKent Overstreet
2743a59ff6ccSColy Li mutex_unlock(&bch_register_lock);
2744a59ff6ccSColy Li
274546f5aa88SJoe Perches pr_info("Stopping all devices:\n");
2746cafe5635SKent Overstreet
2747a59ff6ccSColy Li /*
2748a59ff6ccSColy Li * bch_register_lock is not held while calling
2749a59ff6ccSColy Li * bch_cache_set_stop() and bcache_device_stop(), in order to
2750a59ff6ccSColy Li * avoid a potential deadlock during reboot: the cache
2751a307e2abSDing Senjie * set and bcache device stopping paths acquire
2752a59ff6ccSColy Li * bch_register_lock too.
2753a59ff6ccSColy Li *
2754a59ff6ccSColy Li * We are safe here because bcache_is_reboot is already set
2755a59ff6ccSColy Li * to true, so register_bcache() will reject any new
2756a59ff6ccSColy Li * registration from now on. bcache_is_reboot also makes sure
2757a59ff6ccSColy Li * bcache_reboot() won't be re-entered by another thread,
2758a59ff6ccSColy Li * so there is no race in the following list iteration with
2759a59ff6ccSColy Li * list_for_each_entry_safe().
2760a59ff6ccSColy Li */
2761cafe5635SKent Overstreet list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2762cafe5635SKent Overstreet bch_cache_set_stop(c);
2763cafe5635SKent Overstreet
2764cafe5635SKent Overstreet list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
2765cafe5635SKent Overstreet bcache_device_stop(&dc->disk);
2766cafe5635SKent Overstreet
2767eb8cbb6dSColy Li
2768eb8cbb6dSColy Li /*
2769eb8cbb6dSColy Li * Give an early chance for other kthreads and
2770eb8cbb6dSColy Li * kworkers to stop themselves
2771eb8cbb6dSColy Li */
2772eb8cbb6dSColy Li schedule();
2773eb8cbb6dSColy Li
2774cafe5635SKent Overstreet /* What's a condition variable? */
2775cafe5635SKent Overstreet while (1) {
2776eb8cbb6dSColy Li long timeout = start + 10 * HZ - jiffies;
2777cafe5635SKent Overstreet
2778eb8cbb6dSColy Li mutex_lock(&bch_register_lock);
2779cafe5635SKent Overstreet stopped = list_empty(&bch_cache_sets) &&
2780cafe5635SKent Overstreet list_empty(&uncached_devices);
2781cafe5635SKent Overstreet
2782cafe5635SKent Overstreet if (timeout < 0 || stopped)
2783cafe5635SKent Overstreet break;
2784cafe5635SKent Overstreet
2785cafe5635SKent Overstreet prepare_to_wait(&unregister_wait, &wait,
2786cafe5635SKent Overstreet TASK_UNINTERRUPTIBLE);
2787cafe5635SKent Overstreet
2788cafe5635SKent Overstreet mutex_unlock(&bch_register_lock);
2789cafe5635SKent Overstreet schedule_timeout(timeout);
2790cafe5635SKent Overstreet }
2791cafe5635SKent Overstreet
2792cafe5635SKent Overstreet finish_wait(&unregister_wait, &wait);
2793cafe5635SKent Overstreet
2794cafe5635SKent Overstreet if (stopped)
279546f5aa88SJoe Perches pr_info("All devices stopped\n");
2796cafe5635SKent Overstreet else
279746f5aa88SJoe Perches pr_notice("Timeout waiting for devices to be closed\n");
2798cafe5635SKent Overstreet out:
2799cafe5635SKent Overstreet mutex_unlock(&bch_register_lock);
2800cafe5635SKent Overstreet }
2801cafe5635SKent Overstreet
2802cafe5635SKent Overstreet return NOTIFY_DONE;
2803cafe5635SKent Overstreet }
2804cafe5635SKent Overstreet
2805cafe5635SKent Overstreet static struct notifier_block reboot = {
2806cafe5635SKent Overstreet .notifier_call = bcache_reboot,
2807cafe5635SKent Overstreet .priority = INT_MAX, /* before any real devices */
2808cafe5635SKent Overstreet };
2809cafe5635SKent Overstreet
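/*
 * Module teardown: tear down debugfs and the request code, drop the
 * sysfs kobject, destroy the workqueues, and unregister the block
 * major and the reboot notifier.
 */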
2810cafe5635SKent Overstreet static void bcache_exit(void)
2811cafe5635SKent Overstreet {
2812cafe5635SKent Overstreet bch_debug_exit();
2813cafe5635SKent Overstreet bch_request_exit();
2814cafe5635SKent Overstreet if (bcache_kobj)
2815cafe5635SKent Overstreet kobject_put(bcache_kobj);
2816cafe5635SKent Overstreet if (bcache_wq)
2817cafe5635SKent Overstreet destroy_workqueue(bcache_wq);
28180f843e65SGuoju Fang if (bch_journal_wq)
28190f843e65SGuoju Fang destroy_workqueue(bch_journal_wq);
2820afe78ab4SKai Krakow if (bch_flush_wq)
2821afe78ab4SKai Krakow destroy_workqueue(bch_flush_wq);
28229f233ffeSKai Krakow bch_btree_exit();
28230f843e65SGuoju Fang
28245c41c8a7SKent Overstreet if (bcache_major)
2825cafe5635SKent Overstreet unregister_blkdev(bcache_major, "bcache");
2826cafe5635SKent Overstreet unregister_reboot_notifier(&reboot);
2827330a4db8SLiang Chen mutex_destroy(&bch_register_lock);
2828cafe5635SKent Overstreet }
2829cafe5635SKent Overstreet
28309aaf5165SColy Li /* Check and fixup module parameters */
28319aaf5165SColy Li static void check_module_parameters(void)
28329aaf5165SColy Li {
28339aaf5165SColy Li if (bch_cutoff_writeback_sync == 0)
28349aaf5165SColy Li bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
28359aaf5165SColy Li else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
283646f5aa88SJoe Perches pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
28379aaf5165SColy Li bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
28389aaf5165SColy Li bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
28399aaf5165SColy Li }
28409aaf5165SColy Li
28419aaf5165SColy Li if (bch_cutoff_writeback == 0)
28429aaf5165SColy Li bch_cutoff_writeback = CUTOFF_WRITEBACK;
28439aaf5165SColy Li else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
284446f5aa88SJoe Perches pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
28459aaf5165SColy Li bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
28469aaf5165SColy Li bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
28479aaf5165SColy Li }
28489aaf5165SColy Li
28499aaf5165SColy Li if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
285046f5aa88SJoe Perches pr_warn("set bch_cutoff_writeback (%u) to %u\n",
28519aaf5165SColy Li bch_cutoff_writeback, bch_cutoff_writeback_sync);
28529aaf5165SColy Li bch_cutoff_writeback = bch_cutoff_writeback_sync;
28539aaf5165SColy Li }
28549aaf5165SColy Li }
28559aaf5165SColy Li
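/*
 * Module init: register the reboot notifier and the block major,
 * create the workqueues and the /sys/fs/bcache kobject with its
 * register, register_quiet and pendings_cleanup files.
 */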
2856cafe5635SKent Overstreet static int __init bcache_init(void)
2857cafe5635SKent Overstreet {
2858cafe5635SKent Overstreet static const struct attribute *files[] = {
2859cafe5635SKent Overstreet &ksysfs_register.attr,
2860cafe5635SKent Overstreet &ksysfs_register_quiet.attr,
28610c277e21SColy Li &ksysfs_pendings_cleanup.attr,
2862cafe5635SKent Overstreet NULL
2863cafe5635SKent Overstreet };
2864cafe5635SKent Overstreet
28659aaf5165SColy Li check_module_parameters();
28669aaf5165SColy Li
2867cafe5635SKent Overstreet mutex_init(&bch_register_lock);
2868cafe5635SKent Overstreet init_waitqueue_head(&unregister_wait);
2869cafe5635SKent Overstreet register_reboot_notifier(&reboot);
2870cafe5635SKent Overstreet
2871cafe5635SKent Overstreet bcache_major = register_blkdev(0, "bcache");
28722ecf0cdbSZheng Liu if (bcache_major < 0) {
28732ecf0cdbSZheng Liu unregister_reboot_notifier(&reboot);
2874330a4db8SLiang Chen mutex_destroy(&bch_register_lock);
2875cafe5635SKent Overstreet return bcache_major;
28762ecf0cdbSZheng Liu }
2877cafe5635SKent Overstreet
28789f233ffeSKai Krakow if (bch_btree_init())
28799f233ffeSKai Krakow goto err;
28809f233ffeSKai Krakow
288116c1fdf4SFlorian Schmaus bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
288216c1fdf4SFlorian Schmaus if (!bcache_wq)
288316c1fdf4SFlorian Schmaus goto err;
288416c1fdf4SFlorian Schmaus
2885afe78ab4SKai Krakow /*
2886afe78ab4SKai Krakow * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
2887afe78ab4SKai Krakow *
2888afe78ab4SKai Krakow * 1. It used `system_wq` before which also does no memory reclaim.
2889afe78ab4SKai Krakow * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and
2890afe78ab4SKai Krakow * reduced throughput can be observed.
2891afe78ab4SKai Krakow *
2892afe78ab4SKai Krakow * We still want to use our own queue so as not to congest the `system_wq`.
2893afe78ab4SKai Krakow */
2894afe78ab4SKai Krakow bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
2895afe78ab4SKai Krakow if (!bch_flush_wq)
2896afe78ab4SKai Krakow goto err;
2897afe78ab4SKai Krakow
28980f843e65SGuoju Fang bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
28990f843e65SGuoju Fang if (!bch_journal_wq)
29000f843e65SGuoju Fang goto err;
29010f843e65SGuoju Fang
290216c1fdf4SFlorian Schmaus bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
290316c1fdf4SFlorian Schmaus if (!bcache_kobj)
290416c1fdf4SFlorian Schmaus goto err;
290516c1fdf4SFlorian Schmaus
290616c1fdf4SFlorian Schmaus if (bch_request_init() ||
2907330a4db8SLiang Chen sysfs_create_files(bcache_kobj, files))
2908cafe5635SKent Overstreet goto err;
2909cafe5635SKent Overstreet
291091bafdf0SDongbo Cao bch_debug_init();
291178ac2107SColy Li closure_debug_init();
291278ac2107SColy Li
2913a59ff6ccSColy Li bcache_is_reboot = false;
2914a59ff6ccSColy Li
2915cafe5635SKent Overstreet return 0;
2916cafe5635SKent Overstreet err:
2917cafe5635SKent Overstreet bcache_exit();
2918cafe5635SKent Overstreet return -ENOMEM;
2919cafe5635SKent Overstreet }
2920cafe5635SKent Overstreet
29219aaf5165SColy Li /*
29229aaf5165SColy Li * Module hooks
29239aaf5165SColy Li */
2924cafe5635SKent Overstreet module_exit(bcache_exit);
2925cafe5635SKent Overstreet module_init(bcache_init);
2926009673d0SColy Li
29279aaf5165SColy Li module_param(bch_cutoff_writeback, uint, 0);
29289aaf5165SColy Li MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
29299aaf5165SColy Li
29309aaf5165SColy Li module_param(bch_cutoff_writeback_sync, uint, 0);
29319aaf5165SColy Li MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
29329aaf5165SColy Li
2933009673d0SColy Li MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
2934009673d0SColy Li MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
2935009673d0SColy Li MODULE_LICENSE("GPL");
2936