/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include "blk-cgroup.h"

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

/* Cgroup interface */

#ifdef CONFIG_CGROUP_BCACHE
static struct bch_cgroup bcache_default_cgroup = { .cache_mode = -1 };

static struct bch_cgroup *cgroup_to_bcache(struct cgroup *cgroup)
{
	struct cgroup_subsys_state *css;
	return cgroup &&
		(css = cgroup_subsys_state(cgroup, bcache_subsys_id))
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio)
{
	struct cgroup_subsys_state *css = bio->bi_css
		? cgroup_subsys_state(bio->bi_css->cgroup, bcache_subsys_id)
		: task_subsys_state(current, bcache_subsys_id);

	return css
		? container_of(css, struct bch_cgroup, css)
		: &bcache_default_cgroup;
}

static ssize_t cache_mode_read(struct cgroup *cgrp, struct cftype *cft,
			       struct file *file,
			       char __user *buf, size_t nbytes, loff_t *ppos)
{
	char tmp[1024];
	int len = bch_snprint_string_list(tmp, sizeof(tmp), bch_cache_modes,
					  cgroup_to_bcache(cgrp)->cache_mode + 1);

	if (len < 0)
		return len;

	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}

static int cache_mode_write(struct cgroup *cgrp, struct cftype *cft,
			    const char *buf)
{
	int v = bch_read_string_list(buf, bch_cache_modes);
	if (v < 0)
		return v;

	cgroup_to_bcache(cgrp)->cache_mode = v - 1;
	return 0;
}

static u64 bch_verify_read(struct cgroup *cgrp, struct cftype *cft)
{
	return cgroup_to_bcache(cgrp)->verify;
}

static int bch_verify_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	cgroup_to_bcache(cgrp)->verify = val;
	return 0;
}

static u64 bch_cache_hits_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_hits);
}

static u64 bch_cache_misses_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_misses);
}

static u64 bch_cache_bypass_hits_read(struct cgroup *cgrp,
				      struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_hits);
}

static u64 bch_cache_bypass_misses_read(struct cgroup *cgrp,
					struct cftype *cft)
{
	struct bch_cgroup *bcachecg = cgroup_to_bcache(cgrp);
	return atomic_read(&bcachecg->stats.cache_bypass_misses);
}

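/*
 * Per-cgroup control files: cache_mode and verify override the per-device
 * settings for IO issued from that cgroup (an unset cache_mode of -1 falls
 * back to the device default), and the remaining files export per-cgroup
 * hit/miss statistics. Under a typical cgroup mount these show up with the
 * subsystem prefix, e.g. "bcache.cache_mode".
 */
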
static struct cftype bch_files[] = {
	{
		.name		= "cache_mode",
		.read		= cache_mode_read,
		.write_string	= cache_mode_write,
	},
	{
		.name		= "verify",
		.read_u64	= bch_verify_read,
		.write_u64	= bch_verify_write,
	},
	{
		.name		= "cache_hits",
		.read_u64	= bch_cache_hits_read,
	},
	{
		.name		= "cache_misses",
		.read_u64	= bch_cache_misses_read,
	},
	{
		.name		= "cache_bypass_hits",
		.read_u64	= bch_cache_bypass_hits_read,
	},
	{
		.name		= "cache_bypass_misses",
		.read_u64	= bch_cache_bypass_misses_read,
	},
	{ }	/* terminate */
};

static void init_bch_cgroup(struct bch_cgroup *cg)
{
	cg->cache_mode = -1;
}

static struct cgroup_subsys_state *bcachecg_create(struct cgroup *cgroup)
{
	struct bch_cgroup *cg;

	cg = kzalloc(sizeof(*cg), GFP_KERNEL);
	if (!cg)
		return ERR_PTR(-ENOMEM);
	init_bch_cgroup(cg);
	return &cg->css;
}

static void bcachecg_destroy(struct cgroup *cgroup)
{
	struct bch_cgroup *cg = cgroup_to_bcache(cgroup);
	free_css_id(&bcache_subsys, &cg->css);
	kfree(cg);
}

struct cgroup_subsys bcache_subsys = {
	.create		= bcachecg_create,
	.destroy	= bcachecg_destroy,
	.subsys_id	= bcache_subsys_id,
	.name		= "bcache",
	.module		= THIS_MODULE,
};
EXPORT_SYMBOL_GPL(bcache_subsys);
#endif

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	int r = bch_bio_to_cgroup(bio)->cache_mode;
	if (r >= 0)
		return r;
#endif
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
#ifdef CONFIG_CGROUP_BCACHE
	if (bch_bio_to_cgroup(bio)->verify)
		return true;
#endif
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec *bv;
	uint64_t csum = 0;
	int i;

	bio_for_each_segment(bv, bio, i) {
		void *d = kmap(bv->bv_page) + bv->bv_offset;
		csum = bch_crc64_update(csum, d, bv->bv_len);
		kunmap(bv->bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

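/*
 * bch_data_insert_keys() runs after the data itself has been written: the
 * keys in op->insert_keys are (for non-replace operations) added to the next
 * journal write and then inserted into the btree. A -ESRCH return from the
 * btree insert means a replace operation (cache miss promotion) raced with
 * something else; that's recorded as a collision rather than an error.
 */
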
static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->error = -ENOMEM;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done)
		continue_at(cl, bch_data_insert_start, bcache_wq);

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
			goto out;

		bio->bi_sector += sectors;
		bio->bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, bcache_wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys to
	 * insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (error) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->error = error;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, bcache_wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, error, "writing data to cache");
}

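/*
 * bch_data_insert_start() performs the actual data write: it allocates space
 * in the cache with bch_alloc_sectors(), splits the bio to fit whatever was
 * allocated, and builds one key per fragment. When the keylist fills up it
 * bounces through bch_data_insert_keys() and then resumes here.
 */
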
static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
		set_gc_sectors(op->c);
		wake_up_gc(op->c);
	}

	/*
	 * Journal writes are marked REQ_FLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_rw &= ~(REQ_FLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					1 + (op->csum ? 1 : 0),
					op->c))
			continue_at(cl, bch_data_insert_keys, bcache_wq);

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		n->bi_rw |= REQ_WRITE;
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, bcache_wq);
err:
	/* bch_alloc_sectors() blocks if op->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write while
		 * we wait for buckets to be freed up, so just invalidate the
		 * rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, bcache_wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->bio, op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

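/*
 * bch_get_congested() converts the cache set's recent congestion state
 * (c->congested, aged by the time since congested_last_us) into a sector
 * count that check_should_bypass() compares against a request's sequential
 * IO size; 0 means "not congested". A little randomness is mixed in so that
 * IO near the threshold isn't bypassed completely deterministically.
 */
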
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio->bi_rw & REQ_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     (bio->bi_rw & REQ_WRITE)))
		goto skip;

	if (bio->bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	if (!congested &&
	    mode == CACHE_MODE_WRITEBACK &&
	    (bio->bi_rw & REQ_WRITE) &&
	    (bio->bi_rw & REQ_SYNC))
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
		if (i->last == bio->bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_size > i->sequential)
		i->sequential += bio->bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

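/*
 * struct search is the per-request state for the make_request paths: it is
 * allocated from a mempool and embeds the closure the request's lifetime
 * hangs off of, a copy of the original bio, and the btree/data-insert
 * operations used for the lookup and for any insert done on its behalf.
 */
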
struct search {
	/* Stack frame for bio_complete */
	struct closure cl;

	struct bcache_device *d;

	struct bbio bio;
	struct bio *orig_bio;
	struct bio *cache_miss;

	unsigned insert_bio_sectors;

	unsigned recoverable:1;
	unsigned unaligned_bvec:1;
	unsigned write:1;
	unsigned read_dirty_data:1;

	unsigned long start_time;

	struct btree_op op;
	struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio, int error)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.error but not error so it doesn't
	 * get counted against the cache device, but we'll still reread the
	 * data from the backing device.
	 */

	if (error)
		s->iop.error = error;
	else if (ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.error = -EINTR;
	}

	bch_bbio_endio(s->iop.c, bio, error, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
				     KEY_OFFSET(k) - bio->bi_sector),
			  GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

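/*
 * cache_lookup() walks the btree over the range covered by the request,
 * calling cache_lookup_fn() for each key found; a -EAGAIN return just
 * requeues the lookup to run again out of bcache_wq.
 */
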
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;

	int ret = bch_btree_map_keys(&s->op, s->iop.c,
				     &KEY(s->iop.inode, bio->bi_sector, 0),
				     cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN)
		continue_at(cl, cache_lookup, bcache_wq);

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio, int error)
{
	struct closure *cl = bio->bi_private;

	if (error) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.error = error;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		int cpu, rw = bio_data_dir(s->orig_bio);
		unsigned long duration = jiffies - s->start_time;

		cpu = part_stat_lock();
		part_round_stats(cpu, &s->d->disk->part0);
		part_stat_add(cpu, &s->d->disk->part0, ticks[rw], duration);
		part_stat_unlock();

		trace_bcache_request_end(s->d, s->orig_bio);
		bio_endio(s->orig_bio, s->iop.error);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s)
{
	struct bio *bio = &s->bio.bio;
	memcpy(bio, s->orig_bio, sizeof(struct bio));

	bio->bi_end_io = request_endio;
	bio->bi_private = &s->cl;
	atomic_set(&bio->bi_cnt, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	if (s->unaligned_bvec)
		mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
{
	struct search *s;
	struct bio_vec *bv;

	s = mempool_alloc(d->c->search, GFP_NOIO);
	memset(s, 0, offsetof(struct search, iop.insert_keys));

	__closure_init(&s->cl, NULL);

	s->iop.inode = d->id;
	s->iop.c = d->c;
	s->d = d;
	s->op.lock = -1;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->orig_bio = bio;
	s->write = (bio->bi_rw & REQ_WRITE) != 0;
	s->iop.flush_journal = (bio->bi_rw & (REQ_FLUSH|REQ_FUA)) != 0;
	s->recoverable = 1;
	s->start_time = jiffies;
	do_bio_hook(s);

	if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
		bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
		memcpy(bv, bio_iovec(bio),
		       sizeof(struct bio_vec) * bio_segments(bio));

		s->bio.bio.bi_io_vec = bv;
		s->unaligned_bvec = 1;
	}

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

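/*
 * Read completion is staged: cached_dev_read_done_bh() decides whether to
 * retry from the backing device (cached_dev_read_error()), finish a cache
 * miss by copying the bounce buffer and inserting it into the cache
 * (cached_dev_read_done()), or complete immediately via
 * cached_dev_bio_complete(); the first two paths finish in
 * cached_dev_cache_miss_done().
 */
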
static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio) {
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, s->iop.bio, i)
			__free_page(bv->bv_page);
	}

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;
	struct bio_vec *bv;
	int i;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.error = 0;
		bv = s->bio.bio.bi_io_vec;
		do_bio_hook(s);
		s->bio.bio.bi_io_vec = bv;

		if (!s->unaligned_bvec)
			bio_for_each_segment(bv, s->orig_bio, i)
				bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
		else
			memcpy(s->bio.bio.bi_io_vec,
			       bio_iovec(s->orig_bio),
			       sizeof(struct bio_vec) *
			       bio_segments(s->orig_bio));

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl, s->d);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be inserted
	 * into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce buffers
	 * to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable &&
	    !s->unaligned_bvec && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.error)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

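/*
 * cached_dev_cache_miss() handles the part of a read the cache couldn't
 * satisfy: it may extend the read with readahead, reserves the range with a
 * check key (so the later insert fails cleanly if something else writes to
 * the range first), reads into a bounce bio that can then be inserted into
 * the cache, and falls back to reading just the missing part from the
 * backing device if the bounce allocation fails.
 */
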
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_rw & REQ_RAHEAD) &&
	    !(bio->bi_rw & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      bdev_sectors(bio->bi_bdev) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_sector = miss->bi_sector;
	cache_bio->bi_bdev = miss->bi_bdev;
	cache_bio->bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl, s->d);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = request_endio;
	miss->bi_private = &s->cl;
	closure_bio_submit(miss, &s->cl, s->d);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

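/*
 * Writes take one of three paths: bypass (the data goes only to the backing
 * device and the cached range is invalidated), writeback (the data is
 * written only to the cache and marked dirty), or writethrough (the write is
 * cloned and sent to both the cache and the backing device).
 */
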
static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (!(bio->bi_rw & REQ_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl, s->d);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_rw & REQ_FLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			flush->bi_rw = WRITE_FLUSH;
			flush->bi_bdev = bio->bi_bdev;
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;

			closure_bio_submit(flush, cl, s->d);
		}
	} else {
		s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
					      dc->disk.bio_split);

		closure_bio_submit(bio, cl, s->d);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl, s->d);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

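/*
 * cached_dev_make_request() is the make_request_fn for a bcache device that
 * has a backing device: it accounts the IO, remaps the bio to the backing
 * device (offset by the superblock's data_offset), and then either runs it
 * through the read/write paths above or, if the cached_dev's refcount can't
 * be taken (it is being shut down), passes it straight through.
 */
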
static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	bio->bi_bdev = dc->bdev;
	bio->bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio, 0);
		else
			bch_generic_make_request(bio, &d->bio_split_hook);
	}
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(&q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(&q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info.congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

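/*
 * Flash-only volumes have no backing device: reads zero-fill whatever isn't
 * found in the btree (flash_dev_cache_miss()), and writes are inserted
 * directly as dirty data (or, for discards, just invalidate the range).
 */
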
static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	struct bio_vec *bv;
	int i;

	/* Zero fill bio */

	bio_for_each_segment(bv, bio, i) {
		unsigned j = min(bv->bv_len >> 9, sectors);

		void *p = kmap(bv->bv_page);
		memset(p + bv->bv_offset, 0, j << 9);
		kunmap(bv->bv_page);

		sectors -= j;
	}

	bio_advance(bio, min(sectors << 9, bio->bi_size));

	if (!bio->bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_bdev->bd_disk->private_data;
	int cpu, rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &d->disk->part0, ios[rw]);
	part_stat_add(cpu, &d->disk->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(&q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info.congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
#ifdef CONFIG_CGROUP_BCACHE
	cgroup_unload_subsys(&bcache_subsys);
#endif
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

#ifdef CONFIG_CGROUP_BCACHE
	cgroup_load_subsys(&bcache_subsys);
	init_bch_cgroup(&bcache_default_cgroup);

	cgroup_add_cftypes(&bcache_subsys, bch_files);
#endif
	return 0;
}