/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near the bvec_slabs define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. Only the
	 * largest pool (the BIO_MAX_PAGES one, see bvec_free() and
	 * biovec_create_pool()) is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the mempool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
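 *
 * A typical use follows the pattern in blk_queue_split() (a sketch; the
 * names are illustrative, not a definitive in-tree caller):
 *
 *	split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
 *	bio_chain(split, bio);
 *	generic_make_request(bio);
 *
 * Here @split holds the front @sectors, while @bio has been advanced to the
 * remainder and is submitted; its completion now also waits for @split,
 * which the caller goes on to process.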
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 * RETURNS:
 *   Pointer to new bio on success, NULL on failure.
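 *
 *   Example (a sketch; @bdev, @sector and @page belong to the caller, and a
 *   single page is assumed to fit within the queue's limits):
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, 1, fs_bio_set);
 *	if (!bio)
 *		return -ENOMEM;
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 *	submit_bio(bio);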
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
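 *
 *   The intended use of an extra reference is something like this sketch
 *   (echoing the bio_get() comment in <linux/bio.h>):
 *
 *	bio_get(bio);
 *	submit_bio(bio);
 *	if (bio->bi_error)
 *		...
 *	bio_put(bio);
 *
 *   Without the bio_get(), the bio could disappear as soon as its end_io
 *   has run.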
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. The caller owns @bio, but not the actual data it points
 * to. Reference count of the cloned bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);

/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	if (bio_op(bio) == REQ_OP_DISCARD)
		goto integrity_clone;

	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		goto integrity_clone;
	}

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

integrity_clone:
	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */

	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

 done:
	return len;

 failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_rw |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
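 *
 * For example (a sketch), after
 *
 *	bio_advance(bio, 512);
 *
 * bi_sector has moved forward by one sector, bi_size has shrunk by 512
 * bytes, and iteration resumes at the first uncompleted byte.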
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
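 *
 * Note that @iter is passed by value, so the caller's iov_iter is not
 * advanced; bio_copy_user_iov() below uses it as
 *
 *	ret = bio_copy_from_iter(bio, *iter);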
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

static void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
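 *
 * Example (a sketch; @q and @iter are set up by the caller):
 *
 *	bio = bio_map_user_iov(q, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);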
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  (iter->type & WRITE) != WRITE,
					  &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

 out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio:	the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called
 * from process context.
 *
 * bio_unmap_user() may sleep.
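 *
 * A typical pairing is (a sketch):
 *
 *	bio = bio_map_user_iov(q, iter, GFP_KERNEL);
 *	... submit @bio and wait for completion ...
 *	bio_unmap_user(bio);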
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
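 *
 * Example (a sketch; @q and @buf belong to the caller, @reading = 1
 * for a read):
 *
 *	bio = bio_copy_kern(q, buf, len, GFP_KERNEL, 1);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);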
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
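 *
 *   A driver's completion path is then typically just (a sketch):
 *
 *	bio->bi_error = error;
 *	bio_endio(bio);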
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/*
	 * Discards need a mutable bio_vec to accommodate the payload
	 * required by the DSM TRIM and UNMAP commands.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
		split = bio_clone_bioset(bio, gfp, bs);
	else
		split = bio_clone_fast(bio, gfp, bs);

	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);
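
/*
 * Example usage of bio_trim() (a sketch; @offset and @size, in sectors,
 * belong to the caller): restrict a freshly allocated clone to a sub-range
 * of its parent:
 *
 *	clone = bio_clone_fast(bio, GFP_NOIO, bs);
 *	bio_trim(clone, offset, size);
 */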

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);

/**
 * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
 * @pool_size:	Number of bio to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that no mempool is
 *    created for bio_vecs, saving some memory for bio_clone_fast() users.
 */
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
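 *
 * Example (a sketch; how @blkcg_css is obtained is up to the caller, e.g.
 * via task_get_css()):
 *
 *	if (bio_associate_blkcg(bio, blkcg_css))
 *		... @bio already had a css attached ...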
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released.  The caller must own @bio and is responsible for
 * synchronizing calls to this function.
 */
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);