/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near the bvec_slabs define above!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
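/*
 * Illustrative sketch (not part of this file): bio_init() also lets a
 * caller set up a short-lived bio on the stack, bypassing the mempools
 * entirely.  The helper below and its arguments are assumptions of the
 * example; only bio_init(), bio_add_page(), bio_set_op_attrs() and
 * submit_bio_wait() are real interfaces.
 */
static int example_read_one_page(struct block_device *bdev, sector_t sector,
				 struct page *page)
{
	struct bio bio;
	struct bio_vec bvec;

	bio_init(&bio, &bvec, 1);	/* one inline bio_vec, no bio_set */
	bio.bi_bdev = bdev;
	bio.bi_iter.bi_sector = sector;
	bio_add_page(&bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(&bio, REQ_OP_READ, 0);

	/* an on-stack bio must never reach bio_put()/bio_free() */
	return submit_bio_wait(&bio);
}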
static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
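/*
 * Illustrative sketch (not part of this file): a driver that carves a big
 * bio into pieces can chain each piece to the parent, so the parent's
 * bi_end_io runs only once every piece has completed.  The 8-sector chunk
 * size and the helper name are assumptions of the example.
 */
static void example_submit_in_chunks(struct bio *parent, struct bio_set *bs)
{
	while (bio_sectors(parent) > 8) {
		struct bio *split;

		split = bio_split(parent, 8, GFP_NOIO, bs);
		if (!split)
			break;

		bio_chain(split, parent);	/* parent completes last */
		generic_make_request(split);
	}

	generic_make_request(parent);
}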
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
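/*
 * Illustrative sketch (not part of this file): the usual pattern for the
 * mempool-backed path above - allocate with a reclaiming gfp mask, fill,
 * and never hold more than one unsubmitted bio from the same pool.  The
 * helper and its arguments are assumptions of the example, and it assumes
 * nr <= BIO_MAX_PAGES so the biovec allocation cannot fail either.
 */
static struct bio *example_alloc_and_fill(struct block_device *bdev,
					  sector_t sector,
					  struct page **pages, int nr)
{
	struct bio *bio;
	int i;

	/* GFP_NOIO reclaims, so the mempool guarantee applies here */
	bio = bio_alloc_bioset(GFP_NOIO, nr, fs_bio_set);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	for (i = 0; i < nr; i++)
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);

	return bio;
}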
void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
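/*
 * Illustrative sketch (not part of this file): a stacking driver
 * clone-and-redirect built on bio_clone_fast().  Because the clone shares
 * the parent's biovec, the parent must stay alive until the clone
 * completes; chaining the clone to the parent is one way to guarantee
 * that.  The target device, offset and helper name are assumptions of the
 * example.
 */
static int example_remap(struct bio *bio, struct block_device *target,
			 sector_t offset, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_clone_fast(bio, GFP_NOIO, bs);
	if (!clone)
		return -ENOMEM;

	clone->bi_bdev = target;
	clone->bi_iter.bi_sector += offset;

	bio_chain(clone, bio);		/* keeps @bio and its biovec alive */
	generic_make_request(clone);
	bio_endio(bio);			/* @bio now completes with the clone */
	return 0;
}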
static struct bio *__bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
				      struct bio_set *bs, int offset,
				      int size)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;
	struct bvec_iter iter_src = bio_src->bi_iter;

	/* for supporting partial clone */
	if (offset || size != bio_src->bi_iter.bi_size) {
		bio_advance_iter(bio_src, &iter_src, offset);
		iter_src.bi_size = size;
	}

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, __bio_segments(bio_src,
			       &iter_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		__bio_for_each_segment(bv, bio_src, iter, iter_src)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}

/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	return __bio_clone_bioset(bio_src, gfp_mask, bs, 0,
				  bio_src->bi_iter.bi_size);
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 * bio_clone_bioset_partial - clone a partial bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 * @offset: cloned starting from the offset
 * @size: size for the cloned bio
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset_partial(struct bio *bio_src, gfp_t gfp_mask,
				     struct bio_set *bs, int offset,
				     int size)
{
	return __bio_clone_bioset(bio_src, gfp_mask, bs, offset, size);
}
EXPORT_SYMBOL(bio_clone_bioset_partial);
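/*
 * Illustrative sketch (not part of this file): cloning only the tail of a
 * bio with bio_clone_bioset_partial().  Unlike bio_clone_fast(), the clone
 * gets its own biovec, so it stays valid even if @bio_src completes and is
 * freed first.  The 4096-byte head and the helper name are assumptions of
 * the example.
 */
static struct bio *example_clone_tail(struct bio *bio_src, struct bio_set *bs)
{
	int head = 4096;
	int tail = bio_src->bi_iter.bi_size - head;

	if (tail <= 0)
		return NULL;

	/* clone of the byte range [head, head + tail) of @bio_src */
	return bio_clone_bioset_partial(bio_src, GFP_NOIO, bs, head, tail);
}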
/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bios up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */

	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

done:
	return len;

failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset.  Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);
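/*
 * Illustrative sketch (not part of this file): since bio_add_page() can
 * fail once the bio is full, callers mapping a long run of pages loop,
 * submitting and reallocating whenever a page no longer fits.  Submitting
 * before reallocating also honors the one-bio-at-a-time mempool rule
 * documented at bio_alloc_bioset().  The helper and its arguments are
 * assumptions of the example.
 */
static void example_write_pages(struct block_device *bdev, sector_t sector,
				struct page **pages, int nr)
{
	struct bio *bio = NULL;
	int i;

	for (i = 0; i < nr; i++) {
		if (bio &&
		    bio_add_page(bio, pages[i], PAGE_SIZE, 0) == PAGE_SIZE)
			continue;

		if (bio)
			submit_bio(bio);	/* full: send it on its way */

		bio = bio_alloc(GFP_NOIO, min(nr - i, BIO_MAX_PAGES));
		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector + (i << (PAGE_SHIFT - 9));
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);
	}

	if (bio)
		submit_bio(bio);
}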
/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter as will fit and appends them to @bio's bvec
 * array. The pages will have to be released using put_page() when done.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	size_t offset, diff;
	ssize_t size;

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	/*
	 * Deep magic below:  We need to walk the pinned pages backwards
	 * because we are abusing the space allocated for the bio_vecs
	 * for the page array.  Because the bio_vecs are larger than the
	 * page pointers by definition this will always work.  But it also
	 * means we can't use bio_add_page, so any changes to its semantics
	 * need to be reflected here as well.
	 */
	bio->bi_iter.bi_size += size;
	bio->bi_vcnt += nr_pages;

	diff = (nr_pages * PAGE_SIZE - offset) - size;
	while (nr_pages--) {
		bv[nr_pages].bv_page = pages[nr_pages];
		bv[nr_pages].bv_len = PAGE_SIZE;
		bv[nr_pages].bv_offset = 0;
	}

	bv[0].bv_offset += offset;
	bv[0].bv_len -= offset;
	if (diff)
		bv[bio->bi_vcnt - 1].bv_len -= diff;

	iov_iter_advance(iter, size);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
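/*
 * Illustrative sketch (not part of this file): synchronously zeroing one
 * sector with submit_bio_wait().  The caller sleeps on the completion, so
 * this may only run from process context.  The helper and its arguments
 * are assumptions of the example.
 */
static int example_zero_sector(struct block_device *bdev, sector_t sector)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 1);
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, ZERO_PAGE(0), 512, 0);	/* read-only source */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}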
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages
 * are freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
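/*
 * Illustrative sketch (not part of this file): bio_alloc_pages() and
 * bio_copy_data() combine naturally to build a bounced copy of a write
 * bio, e.g. for a driver that must not touch the submitter's pages after
 * completion.  The helper name is an assumption of the example.
 */
static struct bio *example_bounce_write(struct bio *src, struct bio_set *bs)
{
	struct bio *bounce;

	bounce = bio_clone_bioset(src, GFP_NOIO, bs);	/* own biovec */
	if (!bounce)
		return NULL;

	if (bio_alloc_pages(bounce, GFP_NOIO)) {	/* fresh pages */
		bio_put(bounce);
		return NULL;
	}

	bio_copy_data(bounce, src);	/* copies min(src, bounce) bytes */
	return bounce;
}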
struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}
/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}
/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least logical block size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  (iter->type & WRITE) != WRITE,
					  &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}
/**
 * bio_unmap_user - unmap a bio
 * @bio:	the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);
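/*
 * Illustrative sketch (not part of this file): bio_map_kern() only works
 * for buffers whose pages can be handed to the device directly (e.g.
 * kmalloc memory); vmalloc (and on-stack) data must instead be bounced
 * through bio_copy_kern(), which copies it into freshly allocated pages.
 * The helper name is an assumption of the example.
 */
static struct bio *example_bio_for_buffer(struct request_queue *q, void *buf,
					  unsigned int len, int reading)
{
	/* vmalloc buffers are not physically contiguous: bounce them */
	if (is_vmalloc_addr(buf))
		return bio_copy_kern(q, buf, len, GFP_KERNEL, reading);

	return bio_map_kern(q, buf, len, GFP_KERNEL);
}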
static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);
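/*
 * Illustrative sketch (not part of this file): how a bio-based driver can
 * bracket a request with the generic accounting helpers so the device
 * shows up in /proc/diskstats.  Stashing the start time in ->bi_private
 * and the helper names are assumptions of the example.
 */
static void example_acct_start(struct gendisk *disk, struct bio *bio)
{
	generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio),
			      &disk->part0);
	bio->bi_private = (void *)jiffies;	/* stash the start time */
}

static void example_acct_end(struct gendisk *disk, struct bio *bio)
{
	generic_end_io_acct(bio_data_dir(bio), &disk->part0,
			    (unsigned long)bio->bi_private);
}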
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);
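/*
 * Illustrative sketch (not part of this file): trimming a fast clone down
 * to just the sector window a driver wants to handle.  Offsets and sizes
 * are in 512-byte sectors; the helper name is an assumption of the
 * example.
 */
static struct bio *example_clone_window(struct bio *bio, struct bio_set *bs,
					int start_sector, int nr_sectors)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, start_sector, nr_sectors);

	return clone;
}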
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);

/**
 * bioset_create_nobvec  - Create a bio_set without bio_vec mempool
 * @pool_size:	Number of bio to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that mempool is not
 *    created for bio_vecs. Saving some memory for bio_clone_fast() users.
 */
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);
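/*
 * Illustrative sketch (not part of this file): using front_pad to embed a
 * per-bio private structure, recovered with container_of().  Per the
 * bioset_create() note above, the bio must be the last member.  The
 * structure, pool size and helper names are assumptions of the example.
 */
struct example_io {
	void *private;		/* per-bio driver state */
	struct bio bio;		/* must be the last member */
};

static struct bio_set *example_bs;

static struct bio *example_alloc(void)
{
	struct bio *bio;
	struct example_io *io;

	bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, example_bs);
	if (!bio)
		return NULL;

	/* the front_pad bytes sit immediately before the bio */
	io = container_of(bio, struct example_io, bio);
	io->private = NULL;
	return bio;
}

static int __init example_init(void)
{
	example_bs = bioset_create(64, offsetof(struct example_io, bio));
	return example_bs ? 0 : -ENOMEM;
}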
#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css.  Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released.  The caller must own @bio and is responsible for
 * synchronizing calls to this function.
 */
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);