// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what
 * to do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD	95
#define CUTOFF_CACHE_READA	90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *cl);

static unsigned int cache_mode(struct cached_dev *dc)
{
	return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
	return dc->verify;
}

static void bio_csum(struct bio *bio, struct bkey *k)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	uint64_t csum = 0;

	bio_for_each_segment(bv, bio, iter) {
		void *d = kmap(bv.bv_page) + bv.bv_offset;

		csum = bch_crc64_update(csum, d, bv.bv_len);
		kunmap(bv.bv_page);
	}

	k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

static void bch_data_insert_keys(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	atomic_t *journal_ref = NULL;
	struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
	int ret;

	if (!op->replace)
		journal_ref = bch_journal(op->c, &op->insert_keys,
					  op->flush_journal ? cl : NULL);

	ret = bch_btree_insert(op->c, &op->insert_keys,
			       journal_ref, replace_key);
	if (ret == -ESRCH) {
		op->replace_collision = true;
	} else if (ret) {
		op->status = BLK_STS_RESOURCE;
		op->insert_data_done = true;
	}

	if (journal_ref)
		atomic_dec_bug(journal_ref);

	if (!op->insert_data_done) {
		continue_at(cl, bch_data_insert_start, op->wq);
		return;
	}

	bch_keylist_free(&op->insert_keys);
	closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned int u64s,
			       struct cache_set *c)
{
	size_t oldsize = bch_keylist_nkeys(l);
	size_t newsize = oldsize + u64s;

	/*
	 * The journalling code doesn't handle the case where the keys to
	 * insert are bigger than an empty write: if we just return -ENOMEM
	 * here, bch_data_insert_keys() will insert the keys created so far
	 * and finish the rest when the keylist is empty.
	 */
	if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
		return -ENOMEM;

	return __bch_keylist_realloc(l, u64s);
}

static void bch_data_invalidate(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio;

	pr_debug("invalidating %i sectors from %llu",
		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

	while (bio_sectors(bio)) {
		unsigned int sectors = min(bio_sectors(bio),
					   1U << (KEY_SIZE_BITS - 1));

		if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
			goto out;

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;

		bch_keylist_add(&op->insert_keys,
				&KEY(op->inode,
				     bio->bi_iter.bi_sector,
				     sectors));
	}

	op->insert_data_done = true;
	/* release the reference taken by bio_get() in bch_data_insert() */
	bio_put(bio);
out:
	continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	/*
	 * Our data write just errored, which means we've got a bunch of keys
	 * to insert that point to data that wasn't successfully written.
	 *
	 * We don't have to insert those keys but we still have to invalidate
	 * that region of the cache - so, if we just strip off all the pointers
	 * from the keys we'll accomplish just that.
	 */

	struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

	while (src != op->insert_keys.top) {
		struct bkey *n = bkey_next(src);

		SET_KEY_PTRS(src, 0);
		memmove(dst, src, bkey_bytes(src));

		dst = bkey_next(dst);
		src = n;
	}

	op->insert_keys.top = dst;

	bch_data_insert_keys(cl);
}

static void bch_data_insert_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	if (bio->bi_status) {
		/* TODO: We could try to recover from this. */
		if (op->writeback)
			op->status = bio->bi_status;
		else if (!op->replace)
			set_closure_fn(cl, bch_data_insert_error, op->wq);
		else
			set_closure_fn(cl, NULL, NULL);
	}

	bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

static void bch_data_insert_start(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
	struct bio *bio = op->bio, *n;

	if (op->bypass)
		return bch_data_invalidate(cl);

	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
		wake_up_gc(op->c);

	/*
	 * Journal writes are marked REQ_PREFLUSH; if the original write was a
	 * flush, it'll wait on the journal write.
	 */
	bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

	do {
		unsigned int i;
		struct bkey *k;
		struct bio_set *split = &op->c->bio_split;

		/* 1 for the device pointer and 1 for the checksum */
		if (bch_keylist_realloc(&op->insert_keys,
					3 + (op->csum ? 1 : 0),
					op->c)) {
			continue_at(cl, bch_data_insert_keys, op->wq);
			return;
		}

		k = op->insert_keys.top;
		bkey_init(k);
		SET_KEY_INODE(k, op->inode);
		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
				       op->write_point, op->write_prio,
				       op->writeback))
			goto err;

		n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

		n->bi_end_io = bch_data_insert_endio;
		n->bi_private = cl;

		if (op->writeback) {
			SET_KEY_DIRTY(k, true);

			for (i = 0; i < KEY_PTRS(k); i++)
				SET_GC_MARK(PTR_BUCKET(op->c, k, i),
					    GC_MARK_DIRTY);
		}

		SET_KEY_CSUM(k, op->csum);
		if (KEY_CSUM(k))
			bio_csum(n, k);

		trace_bcache_cache_insert(k);
		bch_keylist_push(&op->insert_keys);

		bio_set_op_attrs(n, REQ_OP_WRITE, 0);
		bch_submit_bbio(n, op->c, k, 0);
	} while (n != bio);

	op->insert_data_done = true;
	continue_at(cl, bch_data_insert_keys, op->wq);
	return;
err:
	/* bch_alloc_sectors() blocks if s->writeback = true */
	BUG_ON(op->writeback);

	/*
	 * But if it's not a writeback write we'd rather just bail out if
	 * there aren't any buckets ready to write to - it might take a while
	 * and we might be starving btree writes for gc or something.
	 */

	if (!op->replace) {
		/*
		 * Writethrough write: We can't complete the write until we've
		 * updated the index. But we don't want to delay the write
		 * while we wait for buckets to be freed up, so just
		 * invalidate the rest of the write.
		 */
		op->bypass = true;
		return bch_data_invalidate(cl);
	} else {
		/*
		 * From a cache miss, we can just insert the keys for the data
		 * we have written or bail out if we didn't do anything.
		 */
		op->insert_data_done = true;
		bio_put(bio);

		if (!bch_keylist_empty(&op->insert_keys))
			continue_at(cl, bch_data_insert_keys, op->wq);
		else
			closure_return(cl);
	}
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it
 * could be from a normal write, or a writeback write, or a write to a flash
 * only volume - it's also used by the moving garbage collector to compact
 * data in mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be
 * inserted (if the data had to be fragmented there will be multiple keys);
 * after the data is written it calls bch_journal, and after the keys have
 * been added to the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

	trace_bcache_write(op->c, op->inode, op->bio,
			   op->writeback, op->bypass);

	bch_keylist_init(&op->insert_keys);
	bio_get(op->bio);
	bch_data_insert_start(cl);
}

/*
 * Congested?  Return 0 (not congested) or the limit (in sectors)
 * beyond which we should bypass the cache due to congestion.
 */
unsigned int bch_get_congested(const struct cache_set *c)
{
	int i;

	if (!c->congested_read_threshold_us &&
	    !c->congested_write_threshold_us)
		return 0;

	i = (local_clock_us() - c->congested_last_us) / 1024;
	if (i < 0)
		return 0;

	i += atomic_read(&c->congested);
	if (i >= 0)
		return 0;

	i += CONGESTED_MAX;

	if (i > 0)
		i = fract_exp_two(i, 6);

	i -= hweight32(get_random_u32());

	return i > 0 ? i : 1;
}

static void add_sequential(struct task_struct *t)
{
	ewma_add(t->sequential_io_avg,
		 t->sequential_io, 8, 0);

	t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
	return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
	struct cache_set *c = dc->disk.c;
	unsigned int mode = cache_mode(dc);
	unsigned int sectors, congested;
	struct task_struct *task = current;
	struct io *i;

	if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
	    (bio_op(bio) == REQ_OP_DISCARD))
		goto skip;

	if (mode == CACHE_MODE_NONE ||
	    (mode == CACHE_MODE_WRITEAROUND &&
	     op_is_write(bio_op(bio))))
		goto skip;

	/*
	 * Flag for bypass if the IO is for read-ahead or background,
	 * unless the read-ahead request is for metadata
	 * (e.g. for gfs2 or xfs).
	 */
	if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)))
		goto skip;

	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
	    bio_sectors(bio) & (c->sb.block_size - 1)) {
		pr_debug("skipping unaligned io");
		goto skip;
	}

	if (bypass_torture_test(dc)) {
		if ((get_random_int() & 3) == 3)
			goto skip;
		else
			goto rescale;
	}

	congested = bch_get_congested(c);
	if (!congested && !dc->sequential_cutoff)
		goto rescale;

	spin_lock(&dc->io_lock);

	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
		if (i->last == bio->bi_iter.bi_sector &&
		    time_before(jiffies, i->jiffies))
			goto found;

	i = list_first_entry(&dc->io_lru, struct io, lru);

	add_sequential(task);
	i->sequential = 0;
found:
	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
		i->sequential += bio->bi_iter.bi_size;

	i->last = bio_end_sector(bio);
	i->jiffies = jiffies + msecs_to_jiffies(5000);
	task->sequential_io = i->sequential;

	hlist_del(&i->hash);
	hlist_add_head(&i->hash, iohash(dc, i->last));
	list_move_tail(&i->lru, &dc->io_lru);

	spin_unlock(&dc->io_lock);

	sectors = max(task->sequential_io,
		      task->sequential_io_avg) >> 9;

	if (dc->sequential_cutoff &&
	    sectors >= dc->sequential_cutoff >> 9) {
		trace_bcache_bypass_sequential(bio);
		goto skip;
	}

	if (congested && sectors >= congested) {
		trace_bcache_bypass_congested(bio);
		goto skip;
	}

rescale:
	bch_rescale_priorities(c, bio_sectors(bio));
	return false;
skip:
	bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
	return true;
}

/* Cache lookup */

struct search {
	/* Stack frame for bio_complete */
	struct closure cl;

	struct bbio bio;
	struct bio *orig_bio;
	struct bio *cache_miss;
	struct bcache_device *d;

	unsigned int insert_bio_sectors;
	unsigned int recoverable:1;
	unsigned int write:1;
	unsigned int read_dirty_data:1;
	unsigned int cache_missed:1;

	unsigned long start_time;

	struct btree_op op;
	struct data_insert_op iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct closure *cl = bio->bi_private;
	struct search *s = container_of(cl, struct search, cl);

	/*
	 * If the bucket was reused while our bio was in flight, we might have
	 * read the wrong data. Set s->iop.status but not bio->bi_status, so
	 * the error doesn't get counted against the cache device; we'll still
	 * reread the data from the backing device.
	 */

	if (bio->bi_status)
		s->iop.status = bio->bi_status;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(s->iop.c, &b->key, 0)) {
		atomic_long_inc(&s->iop.c->cache_read_races);
		s->iop.status = BLK_STS_IOERR;
	}

	bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
	struct search *s = container_of(op, struct search, op);
	struct bio *n, *bio = &s->bio.bio;
	struct bkey *bio_key;
	unsigned int ptr;

	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
		return MAP_CONTINUE;

	if (KEY_INODE(k) != s->iop.inode ||
	    KEY_START(k) > bio->bi_iter.bi_sector) {
		unsigned int bio_sectors = bio_sectors(bio);
		unsigned int sectors = KEY_INODE(k) == s->iop.inode
			? min_t(uint64_t, INT_MAX,
				KEY_START(k) - bio->bi_iter.bi_sector)
			: INT_MAX;
		int ret = s->d->cache_miss(b, s, bio, sectors);

		if (ret != MAP_CONTINUE)
			return ret;

		/* if this was a complete miss we shouldn't get here */
		BUG_ON(bio_sectors <= sectors);
	}

	if (!KEY_SIZE(k))
		return MAP_CONTINUE;

	/* XXX: figure out best pointer - for multiple cache devices */
	ptr = 0;

	PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

	if (KEY_DIRTY(k))
		s->read_dirty_data = true;

	n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
				      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
			   GFP_NOIO, &s->d->bio_split);

	bio_key = &container_of(n, struct bbio, bio)->key;
	bch_bkey_copy_single_ptr(bio_key, k, ptr);

	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

	n->bi_end_io = bch_cache_read_endio;
	n->bi_private = &s->cl;

	/*
	 * The bucket we're reading from might be reused while our bio
	 * is in flight, and we could then end up reading the wrong
	 * data.
	 *
	 * We guard against this by checking (in bch_cache_read_endio()) if
	 * the pointer is stale again; if so, we treat it as an error
	 * and reread from the backing device (but we don't pass that
	 * error up anywhere).
	 */

	__bch_submit_bbio(n, b->c);
	return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, iop.cl);
	struct bio *bio = &s->bio.bio;
	struct cached_dev *dc;
	int ret;

	bch_btree_op_init(&s->op, -1);

	ret = bch_btree_map_keys(&s->op, s->iop.c,
				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
				 cache_lookup_fn, MAP_END_KEY);
	if (ret == -EAGAIN) {
		continue_at(cl, cache_lookup, bcache_wq);
		return;
	}

	/*
	 * We might encounter an error while searching the btree; if that
	 * happens, ret is negative. In this scenario we should not recover
	 * data from the backing device (when the cache device is dirty)
	 * because we don't know whether the bkeys the read request covers
	 * are all clean.
	 *
	 * When that happens, s->iop.status still holds its initial value
	 * from before s->bio.bio was submitted.
	 */
	if (ret < 0) {
		BUG_ON(ret == -EINTR);
		if (s->d && s->d->c &&
		    !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
			dc = container_of(s->d, struct cached_dev, disk);
			if (dc && atomic_read(&dc->has_dirty))
				s->recoverable = false;
		}
		if (!s->iop.status)
			s->iop.status = BLK_STS_IOERR;
	}

	closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);

		s->iop.status = bio->bi_status;
		/* Only cache read errors are recoverable */
		s->recoverable = false;
	}

	bio_put(bio);
	closure_put(cl);
}

static void backing_request_endio(struct bio *bio)
{
	struct closure *cl = bio->bi_private;

	if (bio->bi_status) {
		struct search *s = container_of(cl, struct search, cl);
		struct cached_dev *dc = container_of(s->d,
						     struct cached_dev, disk);
		/*
		 * If a bio has REQ_PREFLUSH for writeback mode, it was
		 * specially assembled in cached_dev_write() for a non-zero
		 * write request which has REQ_PREFLUSH. We don't set
		 * s->iop.status for this failure; the status will be decided
		 * by the result of the bch_data_insert() operation.
		 */
		if (unlikely(s->iop.writeback &&
			     bio->bi_opf & REQ_PREFLUSH)) {
			pr_err("Can't flush %s: returned bi_status %i",
				dc->backing_dev_name, bio->bi_status);
		} else {
			/* set to orig_bio->bi_status in bio_complete() */
			s->iop.status = bio->bi_status;
		}
		s->recoverable = false;
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	bio_put(bio);
	closure_put(cl);
}

static void bio_complete(struct search *s)
{
	if (s->orig_bio) {
		generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
				    &s->d->disk->part0, s->start_time);

		trace_bcache_request_end(s->d, s->orig_bio);
		s->orig_bio->bi_status = s->iop.status;
		bio_endio(s->orig_bio);
		s->orig_bio = NULL;
	}
}

static void do_bio_hook(struct search *s,
			struct bio *orig_bio,
			bio_end_io_t *end_io_fn)
{
	struct bio *bio = &s->bio.bio;

	bio_init(bio, NULL, 0);
	__bio_clone_fast(bio, orig_bio);
	/*
	 * bi_end_io can be set separately somewhere else, e.g. the
	 * variants in:
	 * - cache_bio->bi_end_io from cached_dev_cache_miss()
	 * - n->bi_end_io from cache_lookup_fn()
	 */
	bio->bi_end_io = end_io_fn;
	bio->bi_private = &s->cl;

	bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	atomic_dec(&s->iop.c->search_inflight);

	if (s->iop.bio)
		bio_put(s->iop.bio);

	bio_complete(s);
	closure_debug_destroy(cl);
	mempool_free(s, &s->iop.c->search);
}

static inline struct search *search_alloc(struct bio *bio,
					  struct bcache_device *d)
{
	struct search *s;

	s = mempool_alloc(&d->c->search, GFP_NOIO);

	closure_init(&s->cl, NULL);
	do_bio_hook(s, bio, request_endio);
	atomic_inc(&d->c->search_inflight);

	s->orig_bio = bio;
	s->cache_miss = NULL;
	s->cache_missed = 0;
	s->d = d;
	s->recoverable = 1;
	s->write = op_is_write(bio_op(bio));
	s->read_dirty_data = 0;
	s->start_time = jiffies;

	s->iop.c = d->c;
	s->iop.bio = NULL;
	s->iop.inode = d->id;
	s->iop.write_point = hash_long((unsigned long) current, 16);
	s->iop.write_prio = 0;
	s->iop.status = 0;
	s->iop.flags = 0;
	s->iop.flush_journal = op_is_flush(bio->bi_opf);
	s->iop.wq = bcache_wq;

	return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	cached_dev_put(dc);
	search_free(cl);
}

/* Process reads */

static void cached_dev_read_error_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	/*
	 * If the read request hit dirty data (s->read_dirty_data is true),
	 * then recovering a failed read request from the cache device may
	 * return stale data. So read failure recovery is only permitted when
	 * the read request hit clean data in the cache device, or when a
	 * cache read race happened.
	 */
	if (s->recoverable && !s->read_dirty_data) {
		/* Retry from the backing device: */
		trace_bcache_read_retry(s->orig_bio);

		s->iop.status = 0;
		do_bio_hook(s, s->orig_bio, backing_request_endio);

		/* XXX: invalidate cache */

		/* I/O request sent to backing device */
		closure_bio_submit(s->iop.c, bio, cl);
	}

	continue_at(cl, cached_dev_read_error_done, NULL);
}

static void cached_dev_cache_miss_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bcache_device *d = s->d;

	if (s->iop.replace_collision)
		bch_mark_cache_miss_collision(s->iop.c, s->d);

	if (s->iop.bio)
		bio_free_pages(s->iop.bio);

	cached_dev_bio_complete(cl);
	closure_put(&d->cl);
}

static void cached_dev_read_done(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	/*
	 * We had a cache miss; cache_bio now contains data ready to be
	 * inserted into the cache.
	 *
	 * First, we copy the data we just read from cache_bio's bounce
	 * buffers to the buffers the original bio pointed to:
	 */

	if (s->iop.bio) {
		bio_reset(s->iop.bio);
		s->iop.bio->bi_iter.bi_sector =
			s->cache_miss->bi_iter.bi_sector;
		bio_copy_dev(s->iop.bio, s->cache_miss);
		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
		bch_bio_map(s->iop.bio, NULL);

		bio_copy_data(s->cache_miss, s->iop.bio);

		bio_put(s->cache_miss);
		s->cache_miss = NULL;
	}

	if (verify(dc) && s->recoverable && !s->read_dirty_data)
		bch_data_verify(dc, s->orig_bio);

	closure_get(&dc->disk.cl);
	bio_complete(s);

	if (s->iop.bio &&
	    !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
		BUG_ON(!s->iop.replace);
		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	bch_mark_cache_accounting(s->iop.c, s->d,
				  !s->cache_missed, s->iop.bypass);
	trace_bcache_read(s->orig_bio, !s->cache_missed, s->iop.bypass);

	if (s->iop.status)
		continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
	else if (s->iop.bio || verify(dc))
		continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
	else
		continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

static int cached_dev_cache_miss(struct btree *b, struct search *s,
				 struct bio *bio, unsigned int sectors)
{
	int ret = MAP_CONTINUE;
	unsigned int reada = 0;
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	struct bio *miss, *cache_bio;

	s->cache_missed = 1;

	if (s->cache_miss || s->iop.bypass) {
		miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
		ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
		goto out_submit;
	}

	if (!(bio->bi_opf & REQ_RAHEAD) &&
	    !(bio->bi_opf & (REQ_META|REQ_PRIO)) &&
	    s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
		reada = min_t(sector_t, dc->readahead >> 9,
			      get_capacity(bio->bi_disk) - bio_end_sector(bio));

	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

	s->iop.replace_key = KEY(s->iop.inode,
				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
				 s->insert_bio_sectors);

	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
	if (ret)
		return ret;

	s->iop.replace = true;

	miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);

	/* btree_search_recurse()'s btree iterator is no good anymore */
	ret = miss == bio ? MAP_DONE : -EINTR;

	cache_bio = bio_alloc_bioset(GFP_NOWAIT,
			DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
			&dc->disk.bio_split);
	if (!cache_bio)
		goto out_submit;

	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
	bio_copy_dev(cache_bio, miss);
	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;

	cache_bio->bi_end_io = backing_request_endio;
	cache_bio->bi_private = &s->cl;

	bch_bio_map(cache_bio, NULL);
	if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
		goto out_put;

	if (reada)
		bch_mark_cache_readahead(s->iop.c, s->d);

	s->cache_miss = miss;
	s->iop.bio = cache_bio;
	bio_get(cache_bio);
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, cache_bio, &s->cl);

	return ret;
out_put:
	bio_put(cache_bio);
out_submit:
	miss->bi_end_io = backing_request_endio;
	miss->bi_private = &s->cl;
	/* I/O request sent to backing device */
	closure_bio_submit(s->iop.c, miss, &s->cl);
	return ret;
}

static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;

	closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

	up_read_non_owner(&dc->writeback_lock);
	cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
	struct closure *cl = &s->cl;
	struct bio *bio = &s->bio.bio;
	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

	down_read_non_owner(&dc->writeback_lock);
	if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
		/*
		 * We overlap with some dirty data undergoing background
		 * writeback; force this write to writeback.
		 */
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	/*
	 * Discards aren't _required_ to do anything, so skipping if
	 * check_overlapping returned true is ok.
	 *
	 * But check_overlapping drops dirty keys for which io hasn't started,
	 * so we still want to call it.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		s->iop.bypass = true;

	if (should_writeback(dc, s->orig_bio,
			     cache_mode(dc),
			     s->iop.bypass)) {
		s->iop.bypass = false;
		s->iop.writeback = true;
	}

	if (s->iop.bypass) {
		s->iop.bio = s->orig_bio;
		bio_get(s->iop.bio);

		if (bio_op(bio) == REQ_OP_DISCARD &&
		    !blk_queue_discard(bdev_get_queue(dc->bdev)))
			goto insert_data;

		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);

	} else if (s->iop.writeback) {
		bch_writeback_add(dc);
		s->iop.bio = bio;

		if (bio->bi_opf & REQ_PREFLUSH) {
			/*
			 * Also need to send a flush to the backing
			 * device.
			 */
			struct bio *flush;

			flush = bio_alloc_bioset(GFP_NOIO, 0,
						 &dc->disk.bio_split);
			if (!flush) {
				s->iop.status = BLK_STS_RESOURCE;
				goto insert_data;
			}
			bio_copy_dev(flush, bio);
			flush->bi_end_io = backing_request_endio;
			flush->bi_private = cl;
			flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
			/* I/O request sent to backing device */
			closure_bio_submit(s->iop.c, flush, cl);
		}
	} else {
		s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
		/* I/O request sent to backing device */
		bio->bi_end_io = backing_request_endio;
		closure_bio_submit(s->iop.c, bio, cl);
	}

insert_data:
	closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);
	struct bio *bio = &s->bio.bio;

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	/* If it's a flush, we send the flush to the backing device too */
	bio->bi_end_io = backing_request_endio;
	closure_bio_submit(s->iop.c, bio, cl);

	continue_at(cl, cached_dev_bio_complete, NULL);
}

struct detached_dev_io_private {
	struct bcache_device *d;
	unsigned long start_time;
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void detached_dev_end_io(struct bio *bio)
{
	struct detached_dev_io_private *ddip;

	ddip = bio->bi_private;
	bio->bi_end_io = ddip->bi_end_io;
	bio->bi_private = ddip->bi_private;

	generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
			    &ddip->d->disk->part0, ddip->start_time);

	if (bio->bi_status) {
		struct cached_dev *dc = container_of(ddip->d,
						     struct cached_dev, disk);
		/* should count I/O error for backing device here */
		bch_count_backing_io_errors(dc, bio);
	}

	kfree(ddip);
	bio->bi_end_io(bio);
}

static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
{
	struct detached_dev_io_private *ddip;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	/*
	 * No need to call closure_get(&dc->disk.cl) here, because the upper
	 * layer has already opened the bcache device, which called
	 * closure_get(&dc->disk.cl).
	 */
	ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
	ddip->d = d;
	ddip->start_time = jiffies;
	ddip->bi_end_io = bio->bi_end_io;
	ddip->bi_private = bio->bi_private;
	bio->bi_end_io = detached_dev_end_io;
	bio->bi_private = ddip;

	if ((bio_op(bio) == REQ_OP_DISCARD) &&
	    !blk_queue_discard(bdev_get_queue(dc->bdev)))
		bio->bi_end_io(bio);
	else
		generic_make_request(bio);
}

static void quit_max_writeback_rate(struct cache_set *c,
				    struct cached_dev *this_dc)
{
	int i;
	struct bcache_device *d;
	struct cached_dev *dc;

	/*
	 * mutex bch_register_lock may compete with other parallel requesters,
	 * or attach/detach operations on other backing devices. Waiting for
	 * the mutex lock may increase I/O request latency for seconds or
	 * more. To avoid such a situation, if mutex_trylock() fails, only the
	 * writeback rate of the current cached device is set to 1, and
	 * update_writeback_rate() will decide the writeback rate of the other
	 * cached devices (remember that c->idle_counter is already 0).
	 */
	if (mutex_trylock(&bch_register_lock)) {
		for (i = 0; i < c->devices_max_used; i++) {
			if (!c->devices[i])
				continue;

			if (UUID_FLASH_ONLY(&c->uuids[i]))
				continue;

			d = c->devices[i];
			dc = container_of(d, struct cached_dev, disk);
			/*
			 * Set the writeback rate to the default minimum
			 * value, then let update_writeback_rate() decide
			 * the upcoming rate.
			 */
			atomic_long_set(&dc->writeback_rate.rate, 1);
		}
		mutex_unlock(&bch_register_lock);
	} else
		atomic_long_set(&this_dc->writeback_rate.rate, 1);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
					struct bio *bio)
{
	struct search *s;
	struct bcache_device *d = bio->bi_disk->private_data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	int rw = bio_data_dir(bio);

	if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
		     dc->io_disable)) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	if (likely(d->c)) {
		if (atomic_read(&d->c->idle_counter))
			atomic_set(&d->c->idle_counter, 0);
		/*
		 * If at_max_writeback_rate of the cache set is true and new
		 * I/O comes in, quit the max writeback rate of all cached
		 * devices attached to this cache set, and set
		 * at_max_writeback_rate to false.
		 */
		if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
			atomic_set(&d->c->at_max_writeback_rate, 0);
			quit_max_writeback_rate(d->c, dc);
		}
	}

	generic_start_io_acct(q,
			      bio_op(bio),
			      bio_sectors(bio),
			      &d->disk->part0);

	bio_set_dev(bio, dc->bdev);
	bio->bi_iter.bi_sector += dc->sb.data_offset;

	if (cached_dev_get(dc)) {
		s = search_alloc(bio, d);
		trace_bcache_request_start(s->d, bio);

		if (!bio->bi_iter.bi_size) {
			/*
			 * can't call bch_journal_meta from under
			 * generic_make_request
			 */
			continue_at_nobarrier(&s->cl,
					      cached_dev_nodata,
					      bcache_wq);
		} else {
			s->iop.bypass = check_should_bypass(dc, bio);

			if (rw)
				cached_dev_write(dc, s);
			else
				cached_dev_read(dc, s);
		}
	} else
		/* I/O request sent to backing device */
		detached_dev_do_request(d, bio);

	return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
			    unsigned int cmd, unsigned long arg)
{
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);

	if (dc->io_disable)
		return -EIO;

	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
	struct request_queue *q = bdev_get_queue(dc->bdev);
	int ret = 0;

	if (bdi_congested(q->backing_dev_info, bits))
		return 1;

	if (cached_dev_get(dc)) {
		unsigned int i;
		struct cache *ca;

		for_each_cache(ca, d->c, i) {
			q = bdev_get_queue(ca->bdev);
			ret |= bdi_congested(q->backing_dev_info, bits);
		}

		cached_dev_put(dc);
	}

	return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
	struct gendisk *g = dc->disk.disk;

	g->queue->make_request_fn = cached_dev_make_request;
	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
	dc->disk.cache_miss = cached_dev_cache_miss;
	dc->disk.ioctl = cached_dev_ioctl;
}

/* Flash backed devices */

static int flash_dev_cache_miss(struct btree *b, struct search *s,
				struct bio *bio, unsigned int sectors)
{
	unsigned int bytes = min(sectors, bio_sectors(bio)) << 9;

	swap(bio->bi_iter.bi_size, bytes);
	zero_fill_bio(bio);
	swap(bio->bi_iter.bi_size, bytes);

	bio_advance(bio, bytes);

	if (!bio->bi_iter.bi_size)
		return MAP_DONE;

	return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
	struct search *s = container_of(cl, struct search, cl);

	if (s->iop.flush_journal)
		bch_journal_meta(s->iop.c, cl);

	continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
				       struct bio *bio)
{
	struct search *s;
	struct closure *cl;
	struct bcache_device *d = bio->bi_disk->private_data;

	if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
		return BLK_QC_T_NONE;
	}

	generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);

	s = search_alloc(bio, d);
	cl = &s->cl;
	bio = &s->bio.bio;

	trace_bcache_request_start(s->d, bio);

	if (!bio->bi_iter.bi_size) {
		/*
		 * can't call bch_journal_meta from under
		 * generic_make_request
		 */
		continue_at_nobarrier(&s->cl,
				      flash_dev_nodata,
				      bcache_wq);
		return BLK_QC_T_NONE;
	} else if (bio_data_dir(bio)) {
		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
					&KEY(d->id, bio->bi_iter.bi_sector, 0),
					&KEY(d->id, bio_end_sector(bio), 0));

		s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
		s->iop.writeback = true;
		s->iop.bio = bio;

		closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
	} else {
		closure_call(&s->iop.cl, cache_lookup, NULL, cl);
	}

	continue_at(cl, search_free, NULL);
	return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
	struct bcache_device *d = data;
	struct request_queue *q;
	struct cache *ca;
	unsigned int i;
	int ret = 0;

	for_each_cache(ca, d->c, i) {
		q = bdev_get_queue(ca->bdev);
		ret |= bdi_congested(q->backing_dev_info, bits);
	}

	return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
	struct gendisk *g = d->disk;

	g->queue->make_request_fn = flash_dev_make_request;
	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
	d->cache_miss = flash_dev_cache_miss;
	d->ioctl = flash_dev_ioctl;
}

void bch_request_exit(void)
{
	kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
	bch_search_cache = KMEM_CACHE(search, 0);
	if (!bch_search_cache)
		return -ENOMEM;

	return 0;
}