// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;
		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	/*
	 * If we're looping, might already be waiting on
	 * another journal write - can't wait on more than one journal write at
	 * a time
	 *
	 * XXX: this looks wrong
	 */
#if 0
	while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
		closure_sync(&s->cl);
#endif

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status		= BLK_STS_RESOURCE;
		op->insert_data_done	= true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: If we just return -ENOMEM
	 * here, bio_insert() and bio_invalidate() will insert the keys
	 * created so far and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned sectors = min(bio_sectors(bio),
				       1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector	+= sectors;
		bio->bi_iter.bi_size	-= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
	}

	op->insert_data_done = true;
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys
	 * to insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the
	 * pointers from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}
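
/*
 * bch_data_insert_start() below writes the data for one request into the
 * cache in (possibly) several chunks: each pass through its loop allocates
 * some sectors with bch_alloc_sectors(), splits that much off the front of
 * the bio, submits the split, and appends a matching key to op->insert_keys.
 * (Descriptive summary of the code below, not an authoritative design note.)
 */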

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned i;
		struct bkey *k;
		struct bio_set *split = op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io	= bch_data_insert_endio;
		n->bi_private	= cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just
		 * invalidate the rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in s->cache_bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If s->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by s->cache_bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}
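
/*
 * A typical caller hands bch_data_insert() off as a closure, with op->c,
 * op->bio, op->inode etc. already filled in, e.g.:
 *
 *	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
 *
 * as done by cached_dev_read_done(), cached_dev_write() and
 * flash_dev_make_request() below. (Comment added for orientation.)
 */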

/* Congested? */

unsigned bch_get_congested(struct cache_set *c)
{
	int i;
	long rand;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	rand = get_random_int();
	i -= bitmap_weight(&rand, BITS_PER_LONG);

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned mode = cache_mode(dc);
	unsigned sectors, congested = bch_get_congested(c);
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Flag for bypass if the IO is for read-ahead or background,
	 * unless the read-ahead request is for metadata (eg, for gfs2).
	 */
	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
	    !(bio->bi_opf & REQ_META))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential	+= bio->bi_iter.bi_size;

	i->last			 = bio_end_sector(bio);
	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
	task->sequential_io	 = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */
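
/*
 * struct search below is the per-request state for IO to cached and
 * flash-only devices: it embeds the cloned bio (s->bio.bio), the btree op
 * and the data insert op. It is allocated from the cache set's search
 * mempool (d->c->search) in search_alloc() and torn down in search_free()
 * once bio_complete() has finished the original bio. (Descriptive note added
 * for orientation; see the functions further down.)
 */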

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	struct bcache_device	*d;

	unsigned		insert_bio_sectors;
	unsigned		recoverable:1;
	unsigned		write:1;
	unsigned		read_dirty_data:1;
	unsigned		cache_missed:1;

	unsigned long		start_time;

	struct btree_op		op;
	struct data_insert_op	iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but don't mark the bio
	 * itself as failed, so the error doesn't get counted against the
	 * cache device; we'll still reread the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts
 * in the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned bio_sectors = bio_sectors(bio);
		unsigned sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;

		int ret = s->d->cache_miss(b, s, bio, sectors);
		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io	= bch_cache_read_endio;
	n->bi_private	= &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}
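
/*
 * cache_lookup() walks the btree over the range covered by the request,
 * calling cache_lookup_fn() for each key it finds; when bch_btree_map_keys()
 * returns -EAGAIN the lookup simply re-queues itself on bcache_wq and starts
 * over. (Descriptive note summarising the function below.)
 */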

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might encounter an error while searching the btree; if that
	 * happens, ret is negative. In this scenario we should not recover
	 * data from the backing device (when the cache device is dirty)
	 * because we don't know whether the bkeys the read request covers
	 * are all clean.
	 *
	 * And when that happens, s->iop.status is still its initial value
	 * from before we submitted s->bio.bio.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue,
				    bio_data_dir(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	bio->bi_end_io		= request_endio;
	bio->bi_private		= &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	bio_complete(s);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	closure_debug_destroy(cl);
	mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio);

	s->orig_bio		= bio;
	s->cache_miss		= NULL;
	s->cache_missed		= 0;
	s->d			= d;
	s->recoverable		= 1;
	s->write		= op_is_write(bio_op(bio));
	s->read_dirty_data	= 0;
	s->start_time		= jiffies;

	s->iop.c		= d->c;
	s->iop.bio		= NULL;
	s->iop.inode		= d->id;
	s->iop.write_point	= hash_long((unsigned long) current, 16);
	s->iop.write_prio	= 0;
	s->iop.status		= 0;
	s->iop.flags		= 0;
	s->iop.flush_journal	= op_is_flush(bio->bi_opf);
	s->iop.wq		= bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	search_free(cl);
	cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read from the cached device may return
	 * stale data. So read failure recovery is only permitted when the
	 * read request hit clean data in the cache device, or when a cache
	 * read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio);

		/* XXX: invalidate cache */

		closure_bio_submit(bio, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce
	 * buffers to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}
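
/*
 * cached_dev_cache_miss() handles the part of a read that wasn't found in
 * the cache: unless we're bypassing, it allocates cache_bio to read the
 * missing range (plus optional readahead) from the backing device, so the
 * data can later be copied back to the original bio and inserted into the
 * cache, guarded by a replace key so that a racing write to the same range
 * wins. If cache_bio can't be set up, or we are bypassing, the miss is just
 * submitted to the backing device directly. (Descriptive summary of the
 * function below.)
 */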
static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned sectors)
{
	int ret = MAP_CONTINUE;
	unsigned reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & REQ_META) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector	= miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size	= s->insert_bio_sectors << 9;

	cache_bio->bi_end_io	= request_endio;
	cache_bio->bi_private	= &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss	= miss;
	s->iop.bio	= cache_bio;
	bio_get(cache_bio);
	closure_bio_submit(cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io		= request_endio;
	miss->bi_private	= &s->cl;
	closure_bio_submit(miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback, force this write to writeback
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if ((bio_op(bio) != REQ_OP_DISCARD) ||
		    blk_queue_discard(bdev_get_queue(dc->bdev)))
			closure_bio_submit(bio, cl);
	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/* Also need to send a flush to the backing device */
			struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
							     dc->disk.bio_split);

			bio_copy_dev(flush, bio);
			flush->bi_end_io = request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

			closure_bio_submit(flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

		closure_bio_submit(bio, cl);
	}

	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	closure_bio_submit(bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	atomic_set(&dc->backing_idle, 0);
	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else {
		if ((bio_op(bio) == REQ_OP_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			bio_endio(bio);
		else
			generic_make_request(bio);
	}

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned sectors)
{
	unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (rw) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass		= (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback	= true;
		s->iop.bio		= bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	if (bch_search_cache)
		kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}