/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc, struct bio *bio)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status		= BLK_STS_RESOURCE;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys
	 * created so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys
	 * to insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the chksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take awhile
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just
		 * invalidate the rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/* Congested? */

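/*
 * Estimate how congested the cache is. Returns 0 when the congestion
 * thresholds are unset or when the cache hasn't been congested recently;
 * otherwise returns a small nonzero cutoff (with a little random jitter)
 * that check_should_bypass() compares against the size of the current
 * sequential stream: the heavier the congestion, the smaller the cutoff,
 * and the more I/O gets sent straight to the backing device.
 */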
unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc, bio);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status (so the read will be retried
	 * from the backing device) but don't pass an error to
	 * bch_bbio_endio(), so it doesn't get counted against the cache
	 * device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

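/*
 * Walk the btree over the range covered by the bio, calling
 * cache_lookup_fn() for each key; if the walk wants to block (-EAGAIN),
 * reschedule ourselves on the bcache workqueue and retry.
 */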
static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		struct request_queue *q = s->orig_bio->bi_disk->queue;
		generic_end_io_acct(q, bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.status		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->recoverable) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce
	 * buffers to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_miss, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc, &s->bio.bio))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc, bio),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if ((bio_op(bio) != REQ_OP_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			bio_copy_dev(flush, bio);
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

			closure_bio_submit(flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio_op(bio) == REQ_OP_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			generic_make_request(bio);
	}

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

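/*
 * Wire a cached device's request queue into the handlers above: bios go
 * through cached_dev_make_request(), congestion queries through
 * cached_dev_congested(), and cache misses and ioctls through the
 * cached_dev_* hooks on the bcache device.
 */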
void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn		= cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss			= cached_dev_cache_miss;
	dc->disk.ioctl				= cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn		= flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss				= flash_dev_cache_miss;
	d->ioctl				= flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}