/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}
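
/*
 * Note on the index convention shared by bvec_free() above and
 * bvec_alloc() below: bvec_alloc() hands the pool index back biased by
 * one (see the final "(*idx)++"), and bvec_free() undoes that bias, so
 * that idx == 0 can mean "no external biovec was allocated". An
 * illustrative (hypothetical) caller pairing the two might look like:
 *
 *	unsigned long idx = 0;
 *	struct bio_vec *bvl = bvec_alloc(GFP_NOIO, 16, &idx, pool);
 *
 *	if (bvl)
 *		bvec_free(pool, bvl, idx);	// idx is already biased
 */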
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_slabs define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * BIO_MAX_PAGES pool (BVEC_POOL_MAX) is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the mempool.
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio: bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
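
/*
 * Illustrative sketch (not part of this file): a driver that processes a
 * bio in pieces can chain each piece to the original so that the parent's
 * completion fires only once all pieces are done. Assuming "split" was
 * cloned from "bio" (e.g. via bio_split() below):
 *
 *	struct bio *split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
 *
 *	bio_chain(split, bio);
 *	generic_make_request(split);
 *	generic_make_request(bio);	// parent completes after both end
 */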
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask: the GFP_ mask given to the slab allocator
 * @nr_iovecs: number of iovecs to pre-allocate
 * @bs: the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 * RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
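
/*
 * Illustrative sketch (not part of this file): the safe allocation pattern
 * the comment above describes - at most one mempool-backed bio in flight
 * per allocator. A loop body that needs another bio must submit the
 * previous one first:
 *
 *	while (more_work) {
 *		struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *
 *		// fill in the bio...
 *		generic_make_request(bio);	// submit before allocating again
 *	}
 */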
void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio: bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
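
/*
 * Illustrative sketch (not part of this file): the typical stacking-driver
 * remap pattern built on bio_clone_fast() - clone, point the clone at a
 * new device/offset, give it a completion hook, submit. Names here are
 * made up:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bioset);
 *
 *	clone->bi_bdev = lower_bdev;
 *	clone->bi_iter.bi_sector = remapped_sector;
 *	clone->bi_end_io = my_clone_endio;
 *	clone->bi_private = bio;	// find the parent on completion
 *	generic_make_request(clone);
 */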
/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bio's up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */

	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

done:
	return len;

failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);
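
/*
 * Illustrative sketch (not part of this file): building a bio one page at
 * a time with bio_add_page(), stopping when the bio is full. Names are
 * made up:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	for (i = 0; i < nr_pages; i++)
 *		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
 *			break;	// bio full - submit and start a new one
 */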
struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
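
/*
 * Illustrative sketch (not part of this file): synchronously reading one
 * page with submit_bio_wait(). Must run in process context, since it
 * sleeps. Names are made up:
 *
 *	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	err = submit_bio_wait(bio);
 *	bio_put(bio);
 */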
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio: bio to advance
 * @bytes: number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
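
/*
 * Illustrative sketch (not part of this file): bouncing a bio's payload
 * into freshly allocated pages, as a cache or verify path might do. The
 * clone's biovec initially points at @bio's pages, so give it pages of
 * its own before copying (names are made up):
 *
 *	struct bio *copy = bio_clone_bioset(bio, GFP_NOIO, my_bioset);
 *
 *	if (copy && !bio_alloc_pages(copy, GFP_NOIO))
 *		bio_copy_data(copy, bio);	// dst first, src second
 */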
struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

static void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}
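
/*
 * Illustrative sketch (not part of this file): the copy/uncopy pairing
 * described above, roughly what blk_rq_map_user_iov() arranges on the
 * caller's behalf:
 *
 *	struct bio *bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	// ...submit the bio and wait for completion...
 *	ret = bio_uncopy_user(bio);	// copies back to user on READ, frees
 */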
/**
 * bio_copy_user_iov - copy user data to bio
 * @q: destination block queue
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}

/**
 * bio_map_user_iov - map user iovec into bio
 * @q: the struct request_queue for the bio
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  (iter->type & WRITE) != WRITE,
					  &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio: the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);
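
/*
 * Illustrative sketch (not part of this file): wrapping a kernel buffer in
 * a bio, roughly what blk_rq_map_kern() does internally. The buffer must
 * not be vmalloc()ed, since virt_to_page() is used above:
 *
 *	struct bio *bio = bio_map_kern(q, buf, buf_len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	// attach to a request / set the op and submit as usual
 */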
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}
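
/*
 * Illustrative sketch (not part of this file): how a direct-IO read path
 * uses the pair above. Pages are dirtied before submission, and the
 * completion handler re-checks them from (interrupt) completion context:
 *
 *	bio_set_pages_dirty(bio);	// before submit_bio()
 *	...
 *	static void dio_read_endio(struct bio *bio)
 *	{
 *		bio_check_pages_dirty(bio);	// owns and releases the bio
 *	}
 */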
void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
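
/*
 * Illustrative sketch (not part of this file): a bio-based driver
 * bracketing each bio with the accounting helpers above so the device
 * shows up correctly in /proc/diskstats (names are made up):
 *
 *	start = jiffies;
 *	generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
 *			      &disk->part0);
 *	// ...later, in the completion path:
 *	generic_end_io_acct(bio_data_dir(bio), &disk->part0, start);
 */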
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio: bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
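
/*
 * Illustrative sketch (not part of this file): a driver's completion path.
 * The owner records the result in bi_error and calls bio_endio(); the
 * ->bi_end_io hook only runs once all chained children have finished
 * (names are made up):
 *
 *	static void my_driver_complete(struct my_cmd *cmd)
 *	{
 *		cmd->bio->bi_error = cmd->status ? -EIO : 0;
 *		bio_endio(cmd->bio);
 *	}
 */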
/**
 * bio_split - split a bio
 * @bio: bio to split
 * @sectors: number of sectors to split from the front of @bio
 * @gfp: gfp mask
 * @bs: bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	/*
	 * Discards need a mutable bio_vec to accommodate the payload
	 * required by the DSM TRIM and UNMAP commands.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
		split = bio_clone_bioset(bio, gfp, bs);
	else
		split = bio_clone_fast(bio, gfp, bs);

	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_trim - trim a bio
 * @bio: bio to trim
 * @offset: number of sectors to trim from the front of @bio
 * @size: size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

/**
 * bioset_create - Create a bio_set
 * @pool_size: Number of bio and bio_vecs to cache in the mempool
 * @front_pad: Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);
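
/*
 * Illustrative sketch (not part of this file): using @front_pad to embed a
 * bio at the end of a private per-IO structure, so one mempool allocation
 * covers both. Names are made up:
 *
 *	struct my_io {
 *		void *private_data;
 *		struct bio bio;		// must be the last field
 *	};
 *
 *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	io = container_of(bio, struct my_io, bio);
 */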
/**
 * bioset_create_nobvec - Create a bio_set without bio_vec mempool
 * @pool_size: Number of bio to cache in the mempool
 * @front_pad: Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that no mempool is
 *    created for bio_vecs, saving some memory for bio_clone_fast() users.
 */
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released. The caller must own @bio and is responsible for
 * synchronizing calls to this function.
 */
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet. Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released. The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);
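
/*
 * Illustrative sketch (not part of this file): a writeback-style path
 * tagging a bio with the cgroup that dirtied the data, so the IO is
 * charged to that cgroup rather than to the submitting thread:
 *
 *	bio_associate_blkcg(bio, blkcg_css);	// takes its own css ref
 *	submit_bio(bio);
 *	// the reference is dropped by bio_disassociate_task() on free
 */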
/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);