--- super.c (033d777f548645c3a906b73eef5d665aeee55092)
+++ super.c (54d12f2b4fd0f218590d1490b41a18d0e2328a9a)
 /*
  * bcache setup/teardown code, and some metadata io - read a superblock and
  * figure out what to do with it.
  *
  * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
  * Copyright 2012 Google, Inc.
  */
 
 #include "bcache.h"
 #include "btree.h"
 #include "debug.h"
 #include "request.h"
+#include "writeback.h"
 
+#include <linux/blkdev.h>
 #include <linux/buffer_head.h>
 #include <linux/debugfs.h>
 #include <linux/genhd.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
 #include <linux/sysfs.h>
 
--- 315 unchanged lines hidden ---
 }
 
 static void uuid_io(struct cache_set *c, unsigned long rw,
 		    struct bkey *k, struct closure *parent)
 {
 	struct closure *cl = &c->uuid_write.cl;
 	struct uuid_entry *u;
 	unsigned i;
+	char buf[80];
 
 	BUG_ON(!parent);
 	closure_lock(&c->uuid_write, parent);
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
 		struct bio *bio = bch_bbio_alloc(c);
 
 		bio->bi_rw = REQ_SYNC|REQ_META|rw;
--- 4 unchanged lines hidden ---
 		bch_bio_map(bio, c->uuids);
 
 		bch_submit_bbio(bio, c, k, i);
 
 		if (!(rw & WRITE))
 			break;
 	}
 
-	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read",
-		 pkey(&c->uuid_bucket));
+	bch_bkey_to_text(buf, sizeof(buf), k);
+	pr_debug("%s UUIDs at %s", rw & REQ_WRITE ? "wrote" : "read", buf);
 
 	for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
 		if (!bch_is_zero(u->uuid, 16))
 			pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
 				 u - c->uuids, u->uuid, u->label,
 				 u->first_reg, u->last_reg, u->invalidated);
 
 	closure_return(cl);
--- 163 unchanged lines hidden ---
 
 	ca->disk_buckets->seq++;
 
 	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
 			&ca->meta_sectors_written);
 
 	pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
 		 fifo_used(&ca->free_inc), fifo_used(&ca->unused));
-	blktrace_msg(ca, "Starting priorities: " buckets_free(ca));
 
 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
 		long bucket;
 		struct prio_set *p = ca->disk_buckets;
 		struct bucket_disk *d = p->data;
 		struct bucket_disk *end = d + prios_per_bucket(ca);
 
 		for (b = ca->buckets + i * prios_per_bucket(ca);
--- 183 unchanged lines hidden ---
 	if (d->disk)
 		put_disk(d->disk);
 
 	bio_split_pool_free(&d->bio_split_hook);
 	if (d->unaligned_bvec)
 		mempool_destroy(d->unaligned_bvec);
 	if (d->bio_split)
 		bioset_free(d->bio_split);
+	if (is_vmalloc_addr(d->stripe_sectors_dirty))
+		vfree(d->stripe_sectors_dirty);
+	else
+		kfree(d->stripe_sectors_dirty);
 
 	closure_debug_destroy(&d->cl);
 }
 
-static int bcache_device_init(struct bcache_device *d, unsigned block_size)
+static int bcache_device_init(struct bcache_device *d, unsigned block_size,
+			      sector_t sectors)
 {
 	struct request_queue *q;
+	size_t n;
 
+	if (!d->stripe_size_bits)
+		d->stripe_size_bits = 31;
+
+	d->nr_stripes = round_up(sectors, 1 << d->stripe_size_bits) >>
+		d->stripe_size_bits;
+
+	if (!d->nr_stripes || d->nr_stripes > SIZE_MAX / sizeof(atomic_t))
+		return -ENOMEM;
+
+	n = d->nr_stripes * sizeof(atomic_t);
+	d->stripe_sectors_dirty = n < PAGE_SIZE << 6
+		? kzalloc(n, GFP_KERNEL)
+		: vzalloc(n);
+	if (!d->stripe_sectors_dirty)
+		return -ENOMEM;
+
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
 	    !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
 				sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
 	    bio_split_pool_init(&d->bio_split_hook) ||
 	    !(d->disk = alloc_disk(1)) ||
 	    !(q = blk_alloc_queue(GFP_KERNEL)))
 		return -ENOMEM;
 
+	set_capacity(d->disk, sectors);
 	snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", bcache_minor);
 
 	d->disk->major = bcache_major;
 	d->disk->first_minor = bcache_minor++;
 	d->disk->fops = &bcache_ops;
 	d->disk->private_data = d;
 
 	blk_queue_make_request(q, NULL);
--- 6 unchanged lines hidden ---
 	q->limits.max_segments = BIO_MAX_PAGES;
 	q->limits.max_discard_sectors = UINT_MAX;
 	q->limits.io_min = block_size;
 	q->limits.logical_block_size = block_size;
 	q->limits.physical_block_size = block_size;
 	set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags);
 	set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags);
 
+	blk_queue_flush(q, REQ_FLUSH|REQ_FUA);
+
 	return 0;
 }
 
 /* Cached device */
 
 static void calc_cached_dev_sectors(struct cache_set *c)
 {
 	uint64_t sectors = 0;
 	struct cached_dev *dc;
 
 	list_for_each_entry(dc, &c->cached_devs, list)
 		sectors += bdev_sectors(dc->bdev);
 
 	c->cached_dev_sectors = sectors;
 }
 
 void bch_cached_dev_run(struct cached_dev *dc)
 {
 	struct bcache_device *d = &dc->disk;
+	char buf[SB_LABEL_SIZE + 1];
+	char *env[] = {
+		"DRIVER=bcache",
+		kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
+		NULL,
+		NULL,
+	};
 
+	memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
+	buf[SB_LABEL_SIZE] = '\0';
+	env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
+
 	if (atomic_xchg(&dc->running, 1))
 		return;
 
 	if (!d->c &&
 	    BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
 		struct closure cl;
 		closure_init_stack(&cl);
 
 		SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
 		bch_write_bdev_super(dc, &cl);
 		closure_sync(&cl);
 	}
 
 	add_disk(d->disk);
 	bd_link_disk_holder(dc->bdev, dc->disk.disk);
-#if 0
-	char *env[] = { "SYMLINK=label" , NULL };
+	/* won't show up in the uevent file, use udevadm monitor -e instead
+	 * only class / kset properties are persistent */
 	kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
-#endif
+	kfree(env[1]);
+	kfree(env[2]);
+
 	if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
 	    sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
 		pr_debug("error creating sysfs link");
 }
 
 static void cached_dev_detach_finish(struct work_struct *w)
 {
 	struct cached_dev *dc = container_of(w, struct cached_dev, detach);
--- 124 unchanged lines hidden ---
 	smp_wmb();
 	/*
 	 * dc->c must be set before dc->count != 0 - paired with the mb in
 	 * cached_dev_get()
 	 */
 	atomic_set(&dc->count, 1);
 
 	if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+		bch_sectors_dirty_init(dc);
 		atomic_set(&dc->has_dirty, 1);
 		atomic_inc(&dc->count);
 		bch_writeback_queue(dc);
 	}
 
 	bch_cached_dev_run(dc);
 	bcache_device_link(&dc->disk, c, "bdev");
 
--- 69 unchanged lines hidden ---
 	dc->sequential_merge = true;
 	dc->sequential_cutoff = 4 << 20;
 
 	for (io = dc->io; io < dc->io + RECENT_IO; io++) {
 		list_add(&io->lru, &dc->io_lru);
 		hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
 	}
 
-	ret = bcache_device_init(&dc->disk, block_size);
+	ret = bcache_device_init(&dc->disk, block_size,
+				 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 	if (ret)
 		return ret;
 
 	set_capacity(dc->disk.disk,
 		     dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
 
 	dc->disk.disk->queue->backing_dev_info.ra_pages =
 		max(dc->disk.disk->queue->backing_dev_info.ra_pages,
--- 82 unchanged lines hidden ---
 	if (!d)
 		return -ENOMEM;
 
 	closure_init(&d->cl, NULL);
 	set_closure_fn(&d->cl, flash_dev_flush, system_wq);
 
 	kobject_init(&d->kobj, &bch_flash_dev_ktype);
 
-	if (bcache_device_init(d, block_bytes(c)))
+	if (bcache_device_init(d, block_bytes(c), u->sectors))
 		goto err;
 
 	bcache_device_attach(d, c, u - c->uuids);
-	set_capacity(d->disk, u->sectors);
 	bch_flash_dev_request_init(d);
 	add_disk(d->disk);
 
 	if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
 		goto err;
 
 	bcache_device_link(d, c, "volume");
 
--- 90 unchanged lines hidden ---
 
 	for_each_cache(ca, c, i)
 		if (ca)
 			kobject_put(&ca->kobj);
 
 	free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
 	free_pages((unsigned long) c->sort, ilog2(bucket_pages(c)));
 
-	kfree(c->fill_iter);
 	if (c->bio_split)
 		bioset_free(c->bio_split);
+	if (c->fill_iter)
+		mempool_destroy(c->fill_iter);
 	if (c->bio_meta)
 		mempool_destroy(c->bio_meta);
 	if (c->search)
 		mempool_destroy(c->search);
 	kfree(c->devices);
 
 	mutex_lock(&bch_register_lock);
 	list_del(&c->list);
--- 8 unchanged lines hidden ---
 
 static void cache_set_flush(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
 	struct btree *b;
 
 	/* Shut down allocator threads */
 	set_bit(CACHE_SET_STOPPING_2, &c->flags);
-	wake_up(&c->alloc_wait);
+	wake_up_allocators(c);
 
 	bch_cache_accounting_destroy(&c->accounting);
 
 	kobject_put(&c->internal);
 	kobject_del(&c->kobj);
 
 	if (!IS_ERR_OR_NULL(c->root))
 		list_add(&c->root->list, &c->btree_cache);
 
 	/* Should skip this if we're unregistering because of an error */
 	list_for_each_entry(b, &c->btree_cache, list)
 		if (btree_node_dirty(b))
-			bch_btree_write(b, true, NULL);
+			bch_btree_node_write(b, NULL);
 
 	closure_return(cl);
 }
 
 static void __cache_set_unregister(struct closure *cl)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, caching);
 	struct cached_dev *dc, *t;
--- 61 unchanged lines hidden ---
 	c->block_bits = ilog2(sb->block_size);
 	c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
 
 	c->btree_pages = c->sb.bucket_size / PAGE_SECTORS;
 	if (c->btree_pages > BTREE_MAX_PAGES)
 		c->btree_pages = max_t(int, c->btree_pages / 4,
 				       BTREE_MAX_PAGES);
 
-	init_waitqueue_head(&c->alloc_wait);
+	c->sort_crit_factor = int_sqrt(c->btree_pages);
+
 	mutex_init(&c->bucket_lock);
-	mutex_init(&c->fill_lock);
 	mutex_init(&c->sort_lock);
 	spin_lock_init(&c->sort_time_lock);
 	closure_init_unlocked(&c->sb_write);
 	closure_init_unlocked(&c->uuid_write);
 	spin_lock_init(&c->btree_read_time_lock);
 	bch_moving_init_cache_set(c);
 
 	INIT_LIST_HEAD(&c->list);
--- 9 unchanged lines hidden ---
 
 	iter_size = (sb->bucket_size / sb->block_size + 1) *
 		sizeof(struct btree_iter_set);
 
 	if (!(c->devices = kzalloc(c->nr_uuids * sizeof(void *), GFP_KERNEL)) ||
 	    !(c->bio_meta = mempool_create_kmalloc_pool(2,
 				sizeof(struct bbio) + sizeof(struct bio_vec) *
 				bucket_pages(c))) ||
+	    !(c->fill_iter = mempool_create_kmalloc_pool(1, iter_size)) ||
 	    !(c->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    !(c->fill_iter = kmalloc(iter_size, GFP_KERNEL)) ||
 	    !(c->sort = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
 	    bch_journal_alloc(c) ||
 	    bch_btree_cache_alloc(c) ||
 	    bch_open_buckets_alloc(c))
 		goto err;
 
-	c->fill_iter->size = sb->bucket_size / sb->block_size;
-
 	c->congested_read_threshold_us = 2000;
 	c->congested_write_threshold_us = 20000;
 	c->error_limit = 8 << IO_ERROR_SHIFT;
 
 	return c;
 err:
 	bch_cache_set_unregister(c);
 	return NULL;
--- 68 unchanged lines hidden ---
 
 		/*
 		 * bcache_journal_next() can't happen sooner, or
 		 * btree_gc_finish() will give spurious errors about last_gc >
 		 * gc_gen - this is a hack but oh well.
 		 */
 		bch_journal_next(&c->journal);
 
1539 err = "error starting allocator thread"; |
|
1499 for_each_cache(ca, c, i) | 1540 for_each_cache(ca, c, i) |
1500 closure_call(&ca->alloc, bch_allocator_thread, 1501 system_wq, &c->cl); | 1541 if (bch_cache_allocator_start(ca)) 1542 goto err; |
1502 1503 /* 1504 * First place it's safe to allocate: btree_check() and 1505 * btree_gc_finish() have to run before we have buckets to 1506 * allocate, and bch_bucket_alloc_set() might cause a journal 1507 * entry to be written so bcache_journal_next() has to be called 1508 * first. 1509 * --- 16 unchanged lines hidden (view full) --- 1526 2, SB_JOURNAL_BUCKETS); 1527 1528 for (j = 0; j < ca->sb.keys; j++) 1529 ca->sb.d[j] = ca->sb.first_bucket + j; 1530 } 1531 1532 bch_btree_gc_finish(c); 1533 | 1543 1544 /* 1545 * First place it's safe to allocate: btree_check() and 1546 * btree_gc_finish() have to run before we have buckets to 1547 * allocate, and bch_bucket_alloc_set() might cause a journal 1548 * entry to be written so bcache_journal_next() has to be called 1549 * first. 1550 * --- 16 unchanged lines hidden (view full) --- 1567 2, SB_JOURNAL_BUCKETS); 1568 1569 for (j = 0; j < ca->sb.keys; j++) 1570 ca->sb.d[j] = ca->sb.first_bucket + j; 1571 } 1572 1573 bch_btree_gc_finish(c); 1574 |
1575 err = "error starting allocator thread"; |
|
1534 for_each_cache(ca, c, i) | 1576 for_each_cache(ca, c, i) |
1535 closure_call(&ca->alloc, bch_allocator_thread, 1536 ca->alloc_workqueue, &c->cl); | 1577 if (bch_cache_allocator_start(ca)) 1578 goto err; |
1537 1538 mutex_lock(&c->bucket_lock); 1539 for_each_cache(ca, c, i) 1540 bch_prio_write(ca); 1541 mutex_unlock(&c->bucket_lock); 1542 | 1579 1580 mutex_lock(&c->bucket_lock); 1581 for_each_cache(ca, c, i) 1582 bch_prio_write(ca); 1583 mutex_unlock(&c->bucket_lock); 1584 |
1543 wake_up(&c->alloc_wait); 1544 | |
1545 err = "cannot allocate new UUID bucket"; 1546 if (__uuid_write(c)) 1547 goto err_unlock_gc; 1548 1549 err = "cannot allocate new btree root"; 1550 c->root = bch_btree_node_alloc(c, 0, &op.cl); 1551 if (IS_ERR_OR_NULL(c->root)) 1552 goto err_unlock_gc; 1553 1554 bkey_copy_key(&c->root->key, &MAX_KEY); | 1585 err = "cannot allocate new UUID bucket"; 1586 if (__uuid_write(c)) 1587 goto err_unlock_gc; 1588 1589 err = "cannot allocate new btree root"; 1590 c->root = bch_btree_node_alloc(c, 0, &op.cl); 1591 if (IS_ERR_OR_NULL(c->root)) 1592 goto err_unlock_gc; 1593 1594 bkey_copy_key(&c->root->key, &MAX_KEY); |
1555 bch_btree_write(c->root, true, &op); | 1595 bch_btree_node_write(c->root, &op.cl); |
1556 1557 bch_btree_set_root(c->root); 1558 rw_unlock(true, c->root); 1559 1560 /* 1561 * We don't want to write the first journal entry until 1562 * everything is set up - fortunately journal entries won't be 1563 * written until the SET_CACHE_SYNC() here: --- 104 unchanged lines hidden (view full) --- 1668 1669 if (ca->set) 1670 ca->set->cache[ca->sb.nr_this_dev] = NULL; 1671 1672 bch_cache_allocator_exit(ca); 1673 1674 bio_split_pool_free(&ca->bio_split_hook); 1675 | 1596 1597 bch_btree_set_root(c->root); 1598 rw_unlock(true, c->root); 1599 1600 /* 1601 * We don't want to write the first journal entry until 1602 * everything is set up - fortunately journal entries won't be 1603 * written until the SET_CACHE_SYNC() here: --- 104 unchanged lines hidden (view full) --- 1708 1709 if (ca->set) 1710 ca->set->cache[ca->sb.nr_this_dev] = NULL; 1711 1712 bch_cache_allocator_exit(ca); 1713 1714 bio_split_pool_free(&ca->bio_split_hook); 1715 |
-	if (ca->alloc_workqueue)
-		destroy_workqueue(ca->alloc_workqueue);
-
 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
 	kfree(ca->prio_buckets);
 	vfree(ca->buckets);
 
 	free_heap(&ca->heap);
 	free_fifo(&ca->unused);
 	free_fifo(&ca->free_inc);
 	free_fifo(&ca->free);
--- 31 unchanged lines hidden ---
 	    !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
 	    !init_fifo(&ca->unused, free << 2, GFP_KERNEL) ||
 	    !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
 	    !(ca->buckets = vzalloc(sizeof(struct bucket) *
 				    ca->sb.nbuckets)) ||
 	    !(ca->prio_buckets = kzalloc(sizeof(uint64_t) * prio_buckets(ca) *
 					 2, GFP_KERNEL)) ||
 	    !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)) ||
-	    !(ca->alloc_workqueue = alloc_workqueue("bch_allocator", 0, 1)) ||
 	    bio_split_pool_init(&ca->bio_split_hook))
 		return -ENOMEM;
 
 	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
 
 	for_each_bucket(b, ca)
 		atomic_set(&b->pin, 0);
 
--- 46 unchanged lines hidden ---
 /* Global interfaces/init */
 
 static ssize_t register_bcache(struct kobject *, struct kobj_attribute *,
 			       const char *, size_t);
 
 kobj_attribute_write(register, register_bcache);
 kobj_attribute_write(register_quiet, register_bcache);
 
+static bool bch_is_open_backing(struct block_device *bdev) {
+	struct cache_set *c, *tc;
+	struct cached_dev *dc, *t;
+
+	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+		list_for_each_entry_safe(dc, t, &c->cached_devs, list)
+			if (dc->bdev == bdev)
+				return true;
+	list_for_each_entry_safe(dc, t, &uncached_devices, list)
+		if (dc->bdev == bdev)
+			return true;
+	return false;
+}
+
+static bool bch_is_open_cache(struct block_device *bdev) {
+	struct cache_set *c, *tc;
+	struct cache *ca;
+	unsigned i;
+
+	list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
+		for_each_cache(ca, c, i)
+			if (ca->bdev == bdev)
+				return true;
+	return false;
+}
+
+static bool bch_is_open(struct block_device *bdev) {
+	return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
+}
+
 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
 			       const char *buffer, size_t size)
 {
 	ssize_t ret = size;
 	const char *err = "cannot allocate memory";
 	char *path = NULL;
 	struct cache_sb *sb = NULL;
 	struct block_device *bdev = NULL;
--- 8 unchanged lines hidden ---
 	    !(sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL)))
 		goto err;
 
 	err = "failed to open device";
 	bdev = blkdev_get_by_path(strim(path),
 				  FMODE_READ|FMODE_WRITE|FMODE_EXCL,
 				  sb);
 	if (IS_ERR(bdev)) {
-		if (bdev == ERR_PTR(-EBUSY))
-			err = "device busy";
+		if (bdev == ERR_PTR(-EBUSY)) {
+			bdev = lookup_bdev(strim(path));
+			if (!IS_ERR(bdev) && bch_is_open(bdev))
+				err = "device already registered";
+			else
+				err = "device busy";
+		}
 		goto err;
 	}
 
 	err = "failed to set blocksize";
 	if (set_blocksize(bdev, 4096))
 		goto err_close;
 
 	err = read_super(sb, bdev, &sb_page);
--- 144 unchanged lines hidden ---