/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
55 */ 56 struct bio_set *fs_bio_set; 57 EXPORT_SYMBOL(fs_bio_set); 58 59 /* 60 * Our slab pool management 61 */ 62 struct bio_slab { 63 struct kmem_cache *slab; 64 unsigned int slab_ref; 65 unsigned int slab_size; 66 char name[8]; 67 }; 68 static DEFINE_MUTEX(bio_slab_lock); 69 static struct bio_slab *bio_slabs; 70 static unsigned int bio_slab_nr, bio_slab_max; 71 72 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) 73 { 74 unsigned int sz = sizeof(struct bio) + extra_size; 75 struct kmem_cache *slab = NULL; 76 struct bio_slab *bslab, *new_bio_slabs; 77 unsigned int new_bio_slab_max; 78 unsigned int i, entry = -1; 79 80 mutex_lock(&bio_slab_lock); 81 82 i = 0; 83 while (i < bio_slab_nr) { 84 bslab = &bio_slabs[i]; 85 86 if (!bslab->slab && entry == -1) 87 entry = i; 88 else if (bslab->slab_size == sz) { 89 slab = bslab->slab; 90 bslab->slab_ref++; 91 break; 92 } 93 i++; 94 } 95 96 if (slab) 97 goto out_unlock; 98 99 if (bio_slab_nr == bio_slab_max && entry == -1) { 100 new_bio_slab_max = bio_slab_max << 1; 101 new_bio_slabs = krealloc(bio_slabs, 102 new_bio_slab_max * sizeof(struct bio_slab), 103 GFP_KERNEL); 104 if (!new_bio_slabs) 105 goto out_unlock; 106 bio_slab_max = new_bio_slab_max; 107 bio_slabs = new_bio_slabs; 108 } 109 if (entry == -1) 110 entry = bio_slab_nr++; 111 112 bslab = &bio_slabs[entry]; 113 114 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry); 115 slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN, 116 SLAB_HWCACHE_ALIGN, NULL); 117 if (!slab) 118 goto out_unlock; 119 120 bslab->slab = slab; 121 bslab->slab_ref = 1; 122 bslab->slab_size = sz; 123 out_unlock: 124 mutex_unlock(&bio_slab_lock); 125 return slab; 126 } 127 128 static void bio_put_slab(struct bio_set *bs) 129 { 130 struct bio_slab *bslab = NULL; 131 unsigned int i; 132 133 mutex_lock(&bio_slab_lock); 134 135 for (i = 0; i < bio_slab_nr; i++) { 136 if (bs->bio_slab == bio_slabs[i].slab) { 137 bslab = &bio_slabs[i]; 138 break; 139 } 140 } 141 142 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) 143 goto out; 144 145 WARN_ON(!bslab->slab_ref); 146 147 if (--bslab->slab_ref) 148 goto out; 149 150 kmem_cache_destroy(bslab->slab); 151 bslab->slab = NULL; 152 153 out: 154 mutex_unlock(&bio_slab_lock); 155 } 156 157 unsigned int bvec_nr_vecs(unsigned short idx) 158 { 159 return bvec_slabs[idx].nr_vecs; 160 } 161 162 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) 163 { 164 BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); 165 166 if (idx == BIOVEC_MAX_IDX) 167 mempool_free(bv, pool); 168 else { 169 struct biovec_slab *bvs = bvec_slabs + idx; 170 171 kmem_cache_free(bvs->slab, bv); 172 } 173 } 174 175 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, 176 mempool_t *pool) 177 { 178 struct bio_vec *bvl; 179 180 /* 181 * see comment near bvec_array define! 182 */ 183 switch (nr) { 184 case 1: 185 *idx = 0; 186 break; 187 case 2 ... 4: 188 *idx = 1; 189 break; 190 case 5 ... 16: 191 *idx = 2; 192 break; 193 case 17 ... 64: 194 *idx = 3; 195 break; 196 case 65 ... 128: 197 *idx = 4; 198 break; 199 case 129 ... BIO_MAX_PAGES: 200 *idx = 5; 201 break; 202 default: 203 return NULL; 204 } 205 206 /* 207 * idx now points to the pool we want to allocate from. only the 208 * 1-vec entry pool is mempool backed. 
 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		if (bio_flagged(bio, BIO_OWNS_VEC))
			bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	atomic_set(&bio->bi_remaining, 1);
	atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags|(1 << BIO_UPTODATE);
	atomic_set(&bio->bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static void bio_chain_endio(struct bio *bio, int error)
{
	bio_endio(bio->bi_private, error);
	bio_put(bio);
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
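 *
 * Illustrative sketch (hypothetical caller code, not taken from this file;
 * "sectors" and "bs" are assumed to be supplied by the caller): a driver
 * splitting a bio typically chains the front piece to the remainder and
 * submits both, letting the parent complete only once the chained piece
 * has completed:
 *
 *	split = bio_split(bio, sectors, GFP_NOIO, bs);
 *	bio_chain(split, bio);
 *	generic_make_request(split);
 *	generic_make_request(bio);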
317 */ 318 void bio_chain(struct bio *bio, struct bio *parent) 319 { 320 BUG_ON(bio->bi_private || bio->bi_end_io); 321 322 bio->bi_private = parent; 323 bio->bi_end_io = bio_chain_endio; 324 atomic_inc(&parent->bi_remaining); 325 } 326 EXPORT_SYMBOL(bio_chain); 327 328 static void bio_alloc_rescue(struct work_struct *work) 329 { 330 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); 331 struct bio *bio; 332 333 while (1) { 334 spin_lock(&bs->rescue_lock); 335 bio = bio_list_pop(&bs->rescue_list); 336 spin_unlock(&bs->rescue_lock); 337 338 if (!bio) 339 break; 340 341 generic_make_request(bio); 342 } 343 } 344 345 static void punt_bios_to_rescuer(struct bio_set *bs) 346 { 347 struct bio_list punt, nopunt; 348 struct bio *bio; 349 350 /* 351 * In order to guarantee forward progress we must punt only bios that 352 * were allocated from this bio_set; otherwise, if there was a bio on 353 * there for a stacking driver higher up in the stack, processing it 354 * could require allocating bios from this bio_set, and doing that from 355 * our own rescuer would be bad. 356 * 357 * Since bio lists are singly linked, pop them all instead of trying to 358 * remove from the middle of the list: 359 */ 360 361 bio_list_init(&punt); 362 bio_list_init(&nopunt); 363 364 while ((bio = bio_list_pop(current->bio_list))) 365 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); 366 367 *current->bio_list = nopunt; 368 369 spin_lock(&bs->rescue_lock); 370 bio_list_merge(&bs->rescue_list, &punt); 371 spin_unlock(&bs->rescue_lock); 372 373 queue_work(bs->rescue_workqueue, &bs->rescue_work); 374 } 375 376 /** 377 * bio_alloc_bioset - allocate a bio for I/O 378 * @gfp_mask: the GFP_ mask given to the slab allocator 379 * @nr_iovecs: number of iovecs to pre-allocate 380 * @bs: the bio_set to allocate from. 381 * 382 * Description: 383 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is 384 * backed by the @bs's mempool. 385 * 386 * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be 387 * able to allocate a bio. This is due to the mempool guarantees. To make this 388 * work, callers must never allocate more than 1 bio at a time from this pool. 389 * Callers that need to allocate more than 1 bio must always submit the 390 * previously allocated bio for IO before attempting to allocate a new one. 391 * Failure to do so can cause deadlocks under memory pressure. 392 * 393 * Note that when running under generic_make_request() (i.e. any block 394 * driver), bios are not submitted until after you return - see the code in 395 * generic_make_request() that converts recursion into iteration, to prevent 396 * stack overflows. 397 * 398 * This would normally mean allocating multiple bios under 399 * generic_make_request() would be susceptible to deadlocks, but we have 400 * deadlock avoidance code that resubmits any blocked bios from a rescuer 401 * thread. 402 * 403 * However, we do not guarantee forward progress for allocations from other 404 * mempools. Doing multiple allocations from the same mempool under 405 * generic_make_request() should be avoided - instead, use bio_set's front_pad 406 * for per bio allocations. 407 * 408 * RETURNS: 409 * Pointer to new bio on success, NULL on failure. 
410 */ 411 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) 412 { 413 gfp_t saved_gfp = gfp_mask; 414 unsigned front_pad; 415 unsigned inline_vecs; 416 unsigned long idx = BIO_POOL_NONE; 417 struct bio_vec *bvl = NULL; 418 struct bio *bio; 419 void *p; 420 421 if (!bs) { 422 if (nr_iovecs > UIO_MAXIOV) 423 return NULL; 424 425 p = kmalloc(sizeof(struct bio) + 426 nr_iovecs * sizeof(struct bio_vec), 427 gfp_mask); 428 front_pad = 0; 429 inline_vecs = nr_iovecs; 430 } else { 431 /* should not use nobvec bioset for nr_iovecs > 0 */ 432 if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0)) 433 return NULL; 434 /* 435 * generic_make_request() converts recursion to iteration; this 436 * means if we're running beneath it, any bios we allocate and 437 * submit will not be submitted (and thus freed) until after we 438 * return. 439 * 440 * This exposes us to a potential deadlock if we allocate 441 * multiple bios from the same bio_set() while running 442 * underneath generic_make_request(). If we were to allocate 443 * multiple bios (say a stacking block driver that was splitting 444 * bios), we would deadlock if we exhausted the mempool's 445 * reserve. 446 * 447 * We solve this, and guarantee forward progress, with a rescuer 448 * workqueue per bio_set. If we go to allocate and there are 449 * bios on current->bio_list, we first try the allocation 450 * without __GFP_WAIT; if that fails, we punt those bios we 451 * would be blocking to the rescuer workqueue before we retry 452 * with the original gfp_flags. 453 */ 454 455 if (current->bio_list && !bio_list_empty(current->bio_list)) 456 gfp_mask &= ~__GFP_WAIT; 457 458 p = mempool_alloc(bs->bio_pool, gfp_mask); 459 if (!p && gfp_mask != saved_gfp) { 460 punt_bios_to_rescuer(bs); 461 gfp_mask = saved_gfp; 462 p = mempool_alloc(bs->bio_pool, gfp_mask); 463 } 464 465 front_pad = bs->front_pad; 466 inline_vecs = BIO_INLINE_VECS; 467 } 468 469 if (unlikely(!p)) 470 return NULL; 471 472 bio = p + front_pad; 473 bio_init(bio); 474 475 if (nr_iovecs > inline_vecs) { 476 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); 477 if (!bvl && gfp_mask != saved_gfp) { 478 punt_bios_to_rescuer(bs); 479 gfp_mask = saved_gfp; 480 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); 481 } 482 483 if (unlikely(!bvl)) 484 goto err_free; 485 486 bio->bi_flags |= 1 << BIO_OWNS_VEC; 487 } else if (nr_iovecs) { 488 bvl = bio->bi_inline_vecs; 489 } 490 491 bio->bi_pool = bs; 492 bio->bi_flags |= idx << BIO_POOL_OFFSET; 493 bio->bi_max_vecs = nr_iovecs; 494 bio->bi_io_vec = bvl; 495 return bio; 496 497 err_free: 498 mempool_free(p, bs->bio_pool); 499 return NULL; 500 } 501 EXPORT_SYMBOL(bio_alloc_bioset); 502 503 void zero_fill_bio(struct bio *bio) 504 { 505 unsigned long flags; 506 struct bio_vec bv; 507 struct bvec_iter iter; 508 509 bio_for_each_segment(bv, bio, iter) { 510 char *data = bvec_kmap_irq(&bv, &flags); 511 memset(data, 0, bv.bv_len); 512 flush_dcache_page(bv.bv_page); 513 bvec_kunmap_irq(data, &flags); 514 } 515 } 516 EXPORT_SYMBOL(zero_fill_bio); 517 518 /** 519 * bio_put - release a reference to a bio 520 * @bio: bio to release reference to 521 * 522 * Description: 523 * Put a reference to a &struct bio, either one you have gotten with 524 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it. 
525 **/ 526 void bio_put(struct bio *bio) 527 { 528 BIO_BUG_ON(!atomic_read(&bio->bi_cnt)); 529 530 /* 531 * last put frees it 532 */ 533 if (atomic_dec_and_test(&bio->bi_cnt)) 534 bio_free(bio); 535 } 536 EXPORT_SYMBOL(bio_put); 537 538 inline int bio_phys_segments(struct request_queue *q, struct bio *bio) 539 { 540 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 541 blk_recount_segments(q, bio); 542 543 return bio->bi_phys_segments; 544 } 545 EXPORT_SYMBOL(bio_phys_segments); 546 547 /** 548 * __bio_clone_fast - clone a bio that shares the original bio's biovec 549 * @bio: destination bio 550 * @bio_src: bio to clone 551 * 552 * Clone a &bio. Caller will own the returned bio, but not 553 * the actual data it points to. Reference count of returned 554 * bio will be one. 555 * 556 * Caller must ensure that @bio_src is not freed before @bio. 557 */ 558 void __bio_clone_fast(struct bio *bio, struct bio *bio_src) 559 { 560 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE); 561 562 /* 563 * most users will be overriding ->bi_bdev with a new target, 564 * so we don't set nor calculate new physical/hw segment counts here 565 */ 566 bio->bi_bdev = bio_src->bi_bdev; 567 bio->bi_flags |= 1 << BIO_CLONED; 568 bio->bi_rw = bio_src->bi_rw; 569 bio->bi_iter = bio_src->bi_iter; 570 bio->bi_io_vec = bio_src->bi_io_vec; 571 } 572 EXPORT_SYMBOL(__bio_clone_fast); 573 574 /** 575 * bio_clone_fast - clone a bio that shares the original bio's biovec 576 * @bio: bio to clone 577 * @gfp_mask: allocation priority 578 * @bs: bio_set to allocate from 579 * 580 * Like __bio_clone_fast, only also allocates the returned bio 581 */ 582 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) 583 { 584 struct bio *b; 585 586 b = bio_alloc_bioset(gfp_mask, 0, bs); 587 if (!b) 588 return NULL; 589 590 __bio_clone_fast(b, bio); 591 592 if (bio_integrity(bio)) { 593 int ret; 594 595 ret = bio_integrity_clone(b, bio, gfp_mask); 596 597 if (ret < 0) { 598 bio_put(b); 599 return NULL; 600 } 601 } 602 603 return b; 604 } 605 EXPORT_SYMBOL(bio_clone_fast); 606 607 /** 608 * bio_clone_bioset - clone a bio 609 * @bio_src: bio to clone 610 * @gfp_mask: allocation priority 611 * @bs: bio_set to allocate from 612 * 613 * Clone bio. Caller will own the returned bio, but not the actual data it 614 * points to. Reference count of returned bio will be one. 615 */ 616 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask, 617 struct bio_set *bs) 618 { 619 struct bvec_iter iter; 620 struct bio_vec bv; 621 struct bio *bio; 622 623 /* 624 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from 625 * bio_src->bi_io_vec to bio->bi_io_vec. 626 * 627 * We can't do that anymore, because: 628 * 629 * - The point of cloning the biovec is to produce a bio with a biovec 630 * the caller can modify: bi_idx and bi_bvec_done should be 0. 631 * 632 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if 633 * we tried to clone the whole thing bio_alloc_bioset() would fail. 634 * But the clone should succeed as long as the number of biovecs we 635 * actually need to allocate is fewer than BIO_MAX_PAGES. 636 * 637 * - Lastly, bi_vcnt should not be looked at or relied upon by code 638 * that does not own the bio - reason being drivers don't use it for 639 * iterating over the biovec anymore, so expecting it to be kept up 640 * to date (i.e. for clones that share the parent biovec) is just 641 * asking for trouble and would force extra work on 642 * __bio_clone_fast() anyways. 
643 */ 644 645 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); 646 if (!bio) 647 return NULL; 648 649 bio->bi_bdev = bio_src->bi_bdev; 650 bio->bi_rw = bio_src->bi_rw; 651 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; 652 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size; 653 654 if (bio->bi_rw & REQ_DISCARD) 655 goto integrity_clone; 656 657 if (bio->bi_rw & REQ_WRITE_SAME) { 658 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0]; 659 goto integrity_clone; 660 } 661 662 bio_for_each_segment(bv, bio_src, iter) 663 bio->bi_io_vec[bio->bi_vcnt++] = bv; 664 665 integrity_clone: 666 if (bio_integrity(bio_src)) { 667 int ret; 668 669 ret = bio_integrity_clone(bio, bio_src, gfp_mask); 670 if (ret < 0) { 671 bio_put(bio); 672 return NULL; 673 } 674 } 675 676 return bio; 677 } 678 EXPORT_SYMBOL(bio_clone_bioset); 679 680 /** 681 * bio_get_nr_vecs - return approx number of vecs 682 * @bdev: I/O target 683 * 684 * Return the approximate number of pages we can send to this target. 685 * There's no guarantee that you will be able to fit this number of pages 686 * into a bio, it does not account for dynamic restrictions that vary 687 * on offset. 688 */ 689 int bio_get_nr_vecs(struct block_device *bdev) 690 { 691 struct request_queue *q = bdev_get_queue(bdev); 692 int nr_pages; 693 694 nr_pages = min_t(unsigned, 695 queue_max_segments(q), 696 queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1); 697 698 return min_t(unsigned, nr_pages, BIO_MAX_PAGES); 699 700 } 701 EXPORT_SYMBOL(bio_get_nr_vecs); 702 703 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page 704 *page, unsigned int len, unsigned int offset, 705 unsigned int max_sectors) 706 { 707 int retried_segments = 0; 708 struct bio_vec *bvec; 709 710 /* 711 * cloned bio must not modify vec list 712 */ 713 if (unlikely(bio_flagged(bio, BIO_CLONED))) 714 return 0; 715 716 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) 717 return 0; 718 719 /* 720 * For filesystems with a blocksize smaller than the pagesize 721 * we will often be called with the same page as last time and 722 * a consecutive offset. Optimize this special case. 723 */ 724 if (bio->bi_vcnt > 0) { 725 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; 726 727 if (page == prev->bv_page && 728 offset == prev->bv_offset + prev->bv_len) { 729 unsigned int prev_bv_len = prev->bv_len; 730 prev->bv_len += len; 731 732 if (q->merge_bvec_fn) { 733 struct bvec_merge_data bvm = { 734 /* prev_bvec is already charged in 735 bi_size, discharge it in order to 736 simulate merging updated prev_bvec 737 as new bvec. */ 738 .bi_bdev = bio->bi_bdev, 739 .bi_sector = bio->bi_iter.bi_sector, 740 .bi_size = bio->bi_iter.bi_size - 741 prev_bv_len, 742 .bi_rw = bio->bi_rw, 743 }; 744 745 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) { 746 prev->bv_len -= len; 747 return 0; 748 } 749 } 750 751 bio->bi_iter.bi_size += len; 752 goto done; 753 } 754 755 /* 756 * If the queue doesn't support SG gaps and adding this 757 * offset would create a gap, disallow it. 
758 */ 759 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) && 760 bvec_gap_to_prev(prev, offset)) 761 return 0; 762 } 763 764 if (bio->bi_vcnt >= bio->bi_max_vecs) 765 return 0; 766 767 /* 768 * setup the new entry, we might clear it again later if we 769 * cannot add the page 770 */ 771 bvec = &bio->bi_io_vec[bio->bi_vcnt]; 772 bvec->bv_page = page; 773 bvec->bv_len = len; 774 bvec->bv_offset = offset; 775 bio->bi_vcnt++; 776 bio->bi_phys_segments++; 777 bio->bi_iter.bi_size += len; 778 779 /* 780 * Perform a recount if the number of segments is greater 781 * than queue_max_segments(q). 782 */ 783 784 while (bio->bi_phys_segments > queue_max_segments(q)) { 785 786 if (retried_segments) 787 goto failed; 788 789 retried_segments = 1; 790 blk_recount_segments(q, bio); 791 } 792 793 /* 794 * if queue has other restrictions (eg varying max sector size 795 * depending on offset), it can specify a merge_bvec_fn in the 796 * queue to get further control 797 */ 798 if (q->merge_bvec_fn) { 799 struct bvec_merge_data bvm = { 800 .bi_bdev = bio->bi_bdev, 801 .bi_sector = bio->bi_iter.bi_sector, 802 .bi_size = bio->bi_iter.bi_size - len, 803 .bi_rw = bio->bi_rw, 804 }; 805 806 /* 807 * merge_bvec_fn() returns number of bytes it can accept 808 * at this offset 809 */ 810 if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) 811 goto failed; 812 } 813 814 /* If we may be able to merge these biovecs, force a recount */ 815 if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec))) 816 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 817 818 done: 819 return len; 820 821 failed: 822 bvec->bv_page = NULL; 823 bvec->bv_len = 0; 824 bvec->bv_offset = 0; 825 bio->bi_vcnt--; 826 bio->bi_iter.bi_size -= len; 827 blk_recount_segments(q, bio); 828 return 0; 829 } 830 831 /** 832 * bio_add_pc_page - attempt to add page to bio 833 * @q: the target queue 834 * @bio: destination bio 835 * @page: page to add 836 * @len: vec entry length 837 * @offset: vec entry offset 838 * 839 * Attempt to add a page to the bio_vec maplist. This can fail for a 840 * number of reasons, such as the bio being full or target block device 841 * limitations. The target block device must allow bio's up to PAGE_SIZE, 842 * so it is always possible to add a single page to an empty bio. 843 * 844 * This should only be used by REQ_PC bios. 845 */ 846 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, 847 unsigned int len, unsigned int offset) 848 { 849 return __bio_add_page(q, bio, page, len, offset, 850 queue_max_hw_sectors(q)); 851 } 852 EXPORT_SYMBOL(bio_add_pc_page); 853 854 /** 855 * bio_add_page - attempt to add page to bio 856 * @bio: destination bio 857 * @page: page to add 858 * @len: vec entry length 859 * @offset: vec entry offset 860 * 861 * Attempt to add a page to the bio_vec maplist. This can fail for a 862 * number of reasons, such as the bio being full or target block device 863 * limitations. The target block device must allow bio's up to PAGE_SIZE, 864 * so it is always possible to add a single page to an empty bio. 
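 *
 * Illustrative sketch (hypothetical caller code, not taken from this file;
 * "page" and "len" are assumed to come from the caller, and the label is
 * made up for the example). The return value is the number of bytes
 * actually added, so a short return means the bio is full and should be
 * submitted before retrying with a fresh bio:
 *
 *	if (bio_add_page(bio, page, len, 0) < len)
 *		goto submit_and_retry;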
865 */ 866 int bio_add_page(struct bio *bio, struct page *page, unsigned int len, 867 unsigned int offset) 868 { 869 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 870 unsigned int max_sectors; 871 872 max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); 873 if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size) 874 max_sectors = len >> 9; 875 876 return __bio_add_page(q, bio, page, len, offset, max_sectors); 877 } 878 EXPORT_SYMBOL(bio_add_page); 879 880 struct submit_bio_ret { 881 struct completion event; 882 int error; 883 }; 884 885 static void submit_bio_wait_endio(struct bio *bio, int error) 886 { 887 struct submit_bio_ret *ret = bio->bi_private; 888 889 ret->error = error; 890 complete(&ret->event); 891 } 892 893 /** 894 * submit_bio_wait - submit a bio, and wait until it completes 895 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) 896 * @bio: The &struct bio which describes the I/O 897 * 898 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from 899 * bio_endio() on failure. 900 */ 901 int submit_bio_wait(int rw, struct bio *bio) 902 { 903 struct submit_bio_ret ret; 904 905 rw |= REQ_SYNC; 906 init_completion(&ret.event); 907 bio->bi_private = &ret; 908 bio->bi_end_io = submit_bio_wait_endio; 909 submit_bio(rw, bio); 910 wait_for_completion(&ret.event); 911 912 return ret.error; 913 } 914 EXPORT_SYMBOL(submit_bio_wait); 915 916 /** 917 * bio_advance - increment/complete a bio by some number of bytes 918 * @bio: bio to advance 919 * @bytes: number of bytes to complete 920 * 921 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to 922 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will 923 * be updated on the last bvec as well. 924 * 925 * @bio will then represent the remaining, uncompleted portion of the io. 926 */ 927 void bio_advance(struct bio *bio, unsigned bytes) 928 { 929 if (bio_integrity(bio)) 930 bio_integrity_advance(bio, bytes); 931 932 bio_advance_iter(bio, &bio->bi_iter, bytes); 933 } 934 EXPORT_SYMBOL(bio_advance); 935 936 /** 937 * bio_alloc_pages - allocates a single page for each bvec in a bio 938 * @bio: bio to allocate pages for 939 * @gfp_mask: flags for allocation 940 * 941 * Allocates pages up to @bio->bi_vcnt. 942 * 943 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are 944 * freed. 945 */ 946 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) 947 { 948 int i; 949 struct bio_vec *bv; 950 951 bio_for_each_segment_all(bv, bio, i) { 952 bv->bv_page = alloc_page(gfp_mask); 953 if (!bv->bv_page) { 954 while (--bv >= bio->bi_io_vec) 955 __free_page(bv->bv_page); 956 return -ENOMEM; 957 } 958 } 959 960 return 0; 961 } 962 EXPORT_SYMBOL(bio_alloc_pages); 963 964 /** 965 * bio_copy_data - copy contents of data buffers from one chain of bios to 966 * another 967 * @src: source bio list 968 * @dst: destination bio list 969 * 970 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats 971 * @src and @dst as linked lists of bios. 972 * 973 * Stops when it reaches the end of either @src or @dst - that is, copies 974 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). 
975 */ 976 void bio_copy_data(struct bio *dst, struct bio *src) 977 { 978 struct bvec_iter src_iter, dst_iter; 979 struct bio_vec src_bv, dst_bv; 980 void *src_p, *dst_p; 981 unsigned bytes; 982 983 src_iter = src->bi_iter; 984 dst_iter = dst->bi_iter; 985 986 while (1) { 987 if (!src_iter.bi_size) { 988 src = src->bi_next; 989 if (!src) 990 break; 991 992 src_iter = src->bi_iter; 993 } 994 995 if (!dst_iter.bi_size) { 996 dst = dst->bi_next; 997 if (!dst) 998 break; 999 1000 dst_iter = dst->bi_iter; 1001 } 1002 1003 src_bv = bio_iter_iovec(src, src_iter); 1004 dst_bv = bio_iter_iovec(dst, dst_iter); 1005 1006 bytes = min(src_bv.bv_len, dst_bv.bv_len); 1007 1008 src_p = kmap_atomic(src_bv.bv_page); 1009 dst_p = kmap_atomic(dst_bv.bv_page); 1010 1011 memcpy(dst_p + dst_bv.bv_offset, 1012 src_p + src_bv.bv_offset, 1013 bytes); 1014 1015 kunmap_atomic(dst_p); 1016 kunmap_atomic(src_p); 1017 1018 bio_advance_iter(src, &src_iter, bytes); 1019 bio_advance_iter(dst, &dst_iter, bytes); 1020 } 1021 } 1022 EXPORT_SYMBOL(bio_copy_data); 1023 1024 struct bio_map_data { 1025 int nr_sgvecs; 1026 int is_our_pages; 1027 struct sg_iovec sgvecs[]; 1028 }; 1029 1030 static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, 1031 const struct sg_iovec *iov, int iov_count, 1032 int is_our_pages) 1033 { 1034 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); 1035 bmd->nr_sgvecs = iov_count; 1036 bmd->is_our_pages = is_our_pages; 1037 bio->bi_private = bmd; 1038 } 1039 1040 static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count, 1041 gfp_t gfp_mask) 1042 { 1043 if (iov_count > UIO_MAXIOV) 1044 return NULL; 1045 1046 return kmalloc(sizeof(struct bio_map_data) + 1047 sizeof(struct sg_iovec) * iov_count, gfp_mask); 1048 } 1049 1050 static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count, 1051 int to_user, int from_user, int do_free_page) 1052 { 1053 int ret = 0, i; 1054 struct bio_vec *bvec; 1055 int iov_idx = 0; 1056 unsigned int iov_off = 0; 1057 1058 bio_for_each_segment_all(bvec, bio, i) { 1059 char *bv_addr = page_address(bvec->bv_page); 1060 unsigned int bv_len = bvec->bv_len; 1061 1062 while (bv_len && iov_idx < iov_count) { 1063 unsigned int bytes; 1064 char __user *iov_addr; 1065 1066 bytes = min_t(unsigned int, 1067 iov[iov_idx].iov_len - iov_off, bv_len); 1068 iov_addr = iov[iov_idx].iov_base + iov_off; 1069 1070 if (!ret) { 1071 if (to_user) 1072 ret = copy_to_user(iov_addr, bv_addr, 1073 bytes); 1074 1075 if (from_user) 1076 ret = copy_from_user(bv_addr, iov_addr, 1077 bytes); 1078 1079 if (ret) 1080 ret = -EFAULT; 1081 } 1082 1083 bv_len -= bytes; 1084 bv_addr += bytes; 1085 iov_addr += bytes; 1086 iov_off += bytes; 1087 1088 if (iov[iov_idx].iov_len == iov_off) { 1089 iov_idx++; 1090 iov_off = 0; 1091 } 1092 } 1093 1094 if (do_free_page) 1095 __free_page(bvec->bv_page); 1096 } 1097 1098 return ret; 1099 } 1100 1101 /** 1102 * bio_uncopy_user - finish previously mapped bio 1103 * @bio: bio being terminated 1104 * 1105 * Free pages allocated from bio_copy_user() and write back data 1106 * to user space in case of a read. 1107 */ 1108 int bio_uncopy_user(struct bio *bio) 1109 { 1110 struct bio_map_data *bmd = bio->bi_private; 1111 struct bio_vec *bvec; 1112 int ret = 0, i; 1113 1114 if (!bio_flagged(bio, BIO_NULL_MAPPED)) { 1115 /* 1116 * if we're in a workqueue, the request is orphaned, so 1117 * don't copy into a random user address space, just free. 
1118 */ 1119 if (current->mm) 1120 ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs, 1121 bio_data_dir(bio) == READ, 1122 0, bmd->is_our_pages); 1123 else if (bmd->is_our_pages) 1124 bio_for_each_segment_all(bvec, bio, i) 1125 __free_page(bvec->bv_page); 1126 } 1127 kfree(bmd); 1128 bio_put(bio); 1129 return ret; 1130 } 1131 EXPORT_SYMBOL(bio_uncopy_user); 1132 1133 /** 1134 * bio_copy_user_iov - copy user data to bio 1135 * @q: destination block queue 1136 * @map_data: pointer to the rq_map_data holding pages (if necessary) 1137 * @iov: the iovec. 1138 * @iov_count: number of elements in the iovec 1139 * @write_to_vm: bool indicating writing to pages or not 1140 * @gfp_mask: memory allocation flags 1141 * 1142 * Prepares and returns a bio for indirect user io, bouncing data 1143 * to/from kernel pages as necessary. Must be paired with 1144 * call bio_uncopy_user() on io completion. 1145 */ 1146 struct bio *bio_copy_user_iov(struct request_queue *q, 1147 struct rq_map_data *map_data, 1148 const struct sg_iovec *iov, int iov_count, 1149 int write_to_vm, gfp_t gfp_mask) 1150 { 1151 struct bio_map_data *bmd; 1152 struct bio_vec *bvec; 1153 struct page *page; 1154 struct bio *bio; 1155 int i, ret; 1156 int nr_pages = 0; 1157 unsigned int len = 0; 1158 unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0; 1159 1160 for (i = 0; i < iov_count; i++) { 1161 unsigned long uaddr; 1162 unsigned long end; 1163 unsigned long start; 1164 1165 uaddr = (unsigned long)iov[i].iov_base; 1166 end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1167 start = uaddr >> PAGE_SHIFT; 1168 1169 /* 1170 * Overflow, abort 1171 */ 1172 if (end < start) 1173 return ERR_PTR(-EINVAL); 1174 1175 nr_pages += end - start; 1176 len += iov[i].iov_len; 1177 } 1178 1179 if (offset) 1180 nr_pages++; 1181 1182 bmd = bio_alloc_map_data(iov_count, gfp_mask); 1183 if (!bmd) 1184 return ERR_PTR(-ENOMEM); 1185 1186 ret = -ENOMEM; 1187 bio = bio_kmalloc(gfp_mask, nr_pages); 1188 if (!bio) 1189 goto out_bmd; 1190 1191 if (!write_to_vm) 1192 bio->bi_rw |= REQ_WRITE; 1193 1194 ret = 0; 1195 1196 if (map_data) { 1197 nr_pages = 1 << map_data->page_order; 1198 i = map_data->offset / PAGE_SIZE; 1199 } 1200 while (len) { 1201 unsigned int bytes = PAGE_SIZE; 1202 1203 bytes -= offset; 1204 1205 if (bytes > len) 1206 bytes = len; 1207 1208 if (map_data) { 1209 if (i == map_data->nr_entries * nr_pages) { 1210 ret = -ENOMEM; 1211 break; 1212 } 1213 1214 page = map_data->pages[i / nr_pages]; 1215 page += (i % nr_pages); 1216 1217 i++; 1218 } else { 1219 page = alloc_page(q->bounce_gfp | gfp_mask); 1220 if (!page) { 1221 ret = -ENOMEM; 1222 break; 1223 } 1224 } 1225 1226 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) 1227 break; 1228 1229 len -= bytes; 1230 offset = 0; 1231 } 1232 1233 if (ret) 1234 goto cleanup; 1235 1236 /* 1237 * success 1238 */ 1239 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || 1240 (map_data && map_data->from_user)) { 1241 ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0); 1242 if (ret) 1243 goto cleanup; 1244 } 1245 1246 bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 
0 : 1); 1247 return bio; 1248 cleanup: 1249 if (!map_data) 1250 bio_for_each_segment_all(bvec, bio, i) 1251 __free_page(bvec->bv_page); 1252 1253 bio_put(bio); 1254 out_bmd: 1255 kfree(bmd); 1256 return ERR_PTR(ret); 1257 } 1258 1259 /** 1260 * bio_copy_user - copy user data to bio 1261 * @q: destination block queue 1262 * @map_data: pointer to the rq_map_data holding pages (if necessary) 1263 * @uaddr: start of user address 1264 * @len: length in bytes 1265 * @write_to_vm: bool indicating writing to pages or not 1266 * @gfp_mask: memory allocation flags 1267 * 1268 * Prepares and returns a bio for indirect user io, bouncing data 1269 * to/from kernel pages as necessary. Must be paired with 1270 * call bio_uncopy_user() on io completion. 1271 */ 1272 struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data, 1273 unsigned long uaddr, unsigned int len, 1274 int write_to_vm, gfp_t gfp_mask) 1275 { 1276 struct sg_iovec iov; 1277 1278 iov.iov_base = (void __user *)uaddr; 1279 iov.iov_len = len; 1280 1281 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); 1282 } 1283 EXPORT_SYMBOL(bio_copy_user); 1284 1285 static struct bio *__bio_map_user_iov(struct request_queue *q, 1286 struct block_device *bdev, 1287 const struct sg_iovec *iov, int iov_count, 1288 int write_to_vm, gfp_t gfp_mask) 1289 { 1290 int i, j; 1291 int nr_pages = 0; 1292 struct page **pages; 1293 struct bio *bio; 1294 int cur_page = 0; 1295 int ret, offset; 1296 1297 for (i = 0; i < iov_count; i++) { 1298 unsigned long uaddr = (unsigned long)iov[i].iov_base; 1299 unsigned long len = iov[i].iov_len; 1300 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1301 unsigned long start = uaddr >> PAGE_SHIFT; 1302 1303 /* 1304 * Overflow, abort 1305 */ 1306 if (end < start) 1307 return ERR_PTR(-EINVAL); 1308 1309 nr_pages += end - start; 1310 /* 1311 * buffer must be aligned to at least hardsector size for now 1312 */ 1313 if (uaddr & queue_dma_alignment(q)) 1314 return ERR_PTR(-EINVAL); 1315 } 1316 1317 if (!nr_pages) 1318 return ERR_PTR(-EINVAL); 1319 1320 bio = bio_kmalloc(gfp_mask, nr_pages); 1321 if (!bio) 1322 return ERR_PTR(-ENOMEM); 1323 1324 ret = -ENOMEM; 1325 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask); 1326 if (!pages) 1327 goto out; 1328 1329 for (i = 0; i < iov_count; i++) { 1330 unsigned long uaddr = (unsigned long)iov[i].iov_base; 1331 unsigned long len = iov[i].iov_len; 1332 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1333 unsigned long start = uaddr >> PAGE_SHIFT; 1334 const int local_nr_pages = end - start; 1335 const int page_limit = cur_page + local_nr_pages; 1336 1337 ret = get_user_pages_fast(uaddr, local_nr_pages, 1338 write_to_vm, &pages[cur_page]); 1339 if (ret < local_nr_pages) { 1340 ret = -EFAULT; 1341 goto out_unmap; 1342 } 1343 1344 offset = uaddr & ~PAGE_MASK; 1345 for (j = cur_page; j < page_limit; j++) { 1346 unsigned int bytes = PAGE_SIZE - offset; 1347 1348 if (len <= 0) 1349 break; 1350 1351 if (bytes > len) 1352 bytes = len; 1353 1354 /* 1355 * sorry... 
1356 */ 1357 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) < 1358 bytes) 1359 break; 1360 1361 len -= bytes; 1362 offset = 0; 1363 } 1364 1365 cur_page = j; 1366 /* 1367 * release the pages we didn't map into the bio, if any 1368 */ 1369 while (j < page_limit) 1370 page_cache_release(pages[j++]); 1371 } 1372 1373 kfree(pages); 1374 1375 /* 1376 * set data direction, and check if mapped pages need bouncing 1377 */ 1378 if (!write_to_vm) 1379 bio->bi_rw |= REQ_WRITE; 1380 1381 bio->bi_bdev = bdev; 1382 bio->bi_flags |= (1 << BIO_USER_MAPPED); 1383 return bio; 1384 1385 out_unmap: 1386 for (i = 0; i < nr_pages; i++) { 1387 if(!pages[i]) 1388 break; 1389 page_cache_release(pages[i]); 1390 } 1391 out: 1392 kfree(pages); 1393 bio_put(bio); 1394 return ERR_PTR(ret); 1395 } 1396 1397 /** 1398 * bio_map_user - map user address into bio 1399 * @q: the struct request_queue for the bio 1400 * @bdev: destination block device 1401 * @uaddr: start of user address 1402 * @len: length in bytes 1403 * @write_to_vm: bool indicating writing to pages or not 1404 * @gfp_mask: memory allocation flags 1405 * 1406 * Map the user space address into a bio suitable for io to a block 1407 * device. Returns an error pointer in case of error. 1408 */ 1409 struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, 1410 unsigned long uaddr, unsigned int len, int write_to_vm, 1411 gfp_t gfp_mask) 1412 { 1413 struct sg_iovec iov; 1414 1415 iov.iov_base = (void __user *)uaddr; 1416 iov.iov_len = len; 1417 1418 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); 1419 } 1420 EXPORT_SYMBOL(bio_map_user); 1421 1422 /** 1423 * bio_map_user_iov - map user sg_iovec table into bio 1424 * @q: the struct request_queue for the bio 1425 * @bdev: destination block device 1426 * @iov: the iovec. 1427 * @iov_count: number of elements in the iovec 1428 * @write_to_vm: bool indicating writing to pages or not 1429 * @gfp_mask: memory allocation flags 1430 * 1431 * Map the user space address into a bio suitable for io to a block 1432 * device. Returns an error pointer in case of error. 1433 */ 1434 struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev, 1435 const struct sg_iovec *iov, int iov_count, 1436 int write_to_vm, gfp_t gfp_mask) 1437 { 1438 struct bio *bio; 1439 1440 bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm, 1441 gfp_mask); 1442 if (IS_ERR(bio)) 1443 return bio; 1444 1445 /* 1446 * subtle -- if __bio_map_user() ended up bouncing a bio, 1447 * it would normally disappear when its bi_end_io is run. 1448 * however, we need it for the unmap, so grab an extra 1449 * reference to it 1450 */ 1451 bio_get(bio); 1452 1453 return bio; 1454 } 1455 1456 static void __bio_unmap_user(struct bio *bio) 1457 { 1458 struct bio_vec *bvec; 1459 int i; 1460 1461 /* 1462 * make sure we dirty pages we wrote to 1463 */ 1464 bio_for_each_segment_all(bvec, bio, i) { 1465 if (bio_data_dir(bio) == READ) 1466 set_page_dirty_lock(bvec->bv_page); 1467 1468 page_cache_release(bvec->bv_page); 1469 } 1470 1471 bio_put(bio); 1472 } 1473 1474 /** 1475 * bio_unmap_user - unmap a bio 1476 * @bio: the bio being unmapped 1477 * 1478 * Unmap a bio previously mapped by bio_map_user(). Must be called with 1479 * a process context. 1480 * 1481 * bio_unmap_user() may sleep. 
1482 */ 1483 void bio_unmap_user(struct bio *bio) 1484 { 1485 __bio_unmap_user(bio); 1486 bio_put(bio); 1487 } 1488 EXPORT_SYMBOL(bio_unmap_user); 1489 1490 static void bio_map_kern_endio(struct bio *bio, int err) 1491 { 1492 bio_put(bio); 1493 } 1494 1495 static struct bio *__bio_map_kern(struct request_queue *q, void *data, 1496 unsigned int len, gfp_t gfp_mask) 1497 { 1498 unsigned long kaddr = (unsigned long)data; 1499 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 1500 unsigned long start = kaddr >> PAGE_SHIFT; 1501 const int nr_pages = end - start; 1502 int offset, i; 1503 struct bio *bio; 1504 1505 bio = bio_kmalloc(gfp_mask, nr_pages); 1506 if (!bio) 1507 return ERR_PTR(-ENOMEM); 1508 1509 offset = offset_in_page(kaddr); 1510 for (i = 0; i < nr_pages; i++) { 1511 unsigned int bytes = PAGE_SIZE - offset; 1512 1513 if (len <= 0) 1514 break; 1515 1516 if (bytes > len) 1517 bytes = len; 1518 1519 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes, 1520 offset) < bytes) 1521 break; 1522 1523 data += bytes; 1524 len -= bytes; 1525 offset = 0; 1526 } 1527 1528 bio->bi_end_io = bio_map_kern_endio; 1529 return bio; 1530 } 1531 1532 /** 1533 * bio_map_kern - map kernel address into bio 1534 * @q: the struct request_queue for the bio 1535 * @data: pointer to buffer to map 1536 * @len: length in bytes 1537 * @gfp_mask: allocation flags for bio allocation 1538 * 1539 * Map the kernel address into a bio suitable for io to a block 1540 * device. Returns an error pointer in case of error. 1541 */ 1542 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, 1543 gfp_t gfp_mask) 1544 { 1545 struct bio *bio; 1546 1547 bio = __bio_map_kern(q, data, len, gfp_mask); 1548 if (IS_ERR(bio)) 1549 return bio; 1550 1551 if (bio->bi_iter.bi_size == len) 1552 return bio; 1553 1554 /* 1555 * Don't support partial mappings. 1556 */ 1557 bio_put(bio); 1558 return ERR_PTR(-EINVAL); 1559 } 1560 EXPORT_SYMBOL(bio_map_kern); 1561 1562 static void bio_copy_kern_endio(struct bio *bio, int err) 1563 { 1564 struct bio_vec *bvec; 1565 const int read = bio_data_dir(bio) == READ; 1566 struct bio_map_data *bmd = bio->bi_private; 1567 int i; 1568 char *p = bmd->sgvecs[0].iov_base; 1569 1570 bio_for_each_segment_all(bvec, bio, i) { 1571 char *addr = page_address(bvec->bv_page); 1572 1573 if (read) 1574 memcpy(p, addr, bvec->bv_len); 1575 1576 __free_page(bvec->bv_page); 1577 p += bvec->bv_len; 1578 } 1579 1580 kfree(bmd); 1581 bio_put(bio); 1582 } 1583 1584 /** 1585 * bio_copy_kern - copy kernel address into bio 1586 * @q: the struct request_queue for the bio 1587 * @data: pointer to buffer to copy 1588 * @len: length in bytes 1589 * @gfp_mask: allocation flags for bio and page allocation 1590 * @reading: data direction is READ 1591 * 1592 * copy the kernel address into a bio suitable for io to a block 1593 * device. Returns an error pointer in case of error. 
1594 */ 1595 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, 1596 gfp_t gfp_mask, int reading) 1597 { 1598 struct bio *bio; 1599 struct bio_vec *bvec; 1600 int i; 1601 1602 bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask); 1603 if (IS_ERR(bio)) 1604 return bio; 1605 1606 if (!reading) { 1607 void *p = data; 1608 1609 bio_for_each_segment_all(bvec, bio, i) { 1610 char *addr = page_address(bvec->bv_page); 1611 1612 memcpy(addr, p, bvec->bv_len); 1613 p += bvec->bv_len; 1614 } 1615 } 1616 1617 bio->bi_end_io = bio_copy_kern_endio; 1618 1619 return bio; 1620 } 1621 EXPORT_SYMBOL(bio_copy_kern); 1622 1623 /* 1624 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions 1625 * for performing direct-IO in BIOs. 1626 * 1627 * The problem is that we cannot run set_page_dirty() from interrupt context 1628 * because the required locks are not interrupt-safe. So what we can do is to 1629 * mark the pages dirty _before_ performing IO. And in interrupt context, 1630 * check that the pages are still dirty. If so, fine. If not, redirty them 1631 * in process context. 1632 * 1633 * We special-case compound pages here: normally this means reads into hugetlb 1634 * pages. The logic in here doesn't really work right for compound pages 1635 * because the VM does not uniformly chase down the head page in all cases. 1636 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't 1637 * handle them at all. So we skip compound pages here at an early stage. 1638 * 1639 * Note that this code is very hard to test under normal circumstances because 1640 * direct-io pins the pages with get_user_pages(). This makes 1641 * is_page_cache_freeable return false, and the VM will not clean the pages. 1642 * But other code (eg, flusher threads) could clean the pages if they are mapped 1643 * pagecache. 1644 * 1645 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the 1646 * deferred bio dirtying paths. 1647 */ 1648 1649 /* 1650 * bio_set_pages_dirty() will mark all the bio's pages as dirty. 1651 */ 1652 void bio_set_pages_dirty(struct bio *bio) 1653 { 1654 struct bio_vec *bvec; 1655 int i; 1656 1657 bio_for_each_segment_all(bvec, bio, i) { 1658 struct page *page = bvec->bv_page; 1659 1660 if (page && !PageCompound(page)) 1661 set_page_dirty_lock(page); 1662 } 1663 } 1664 1665 static void bio_release_pages(struct bio *bio) 1666 { 1667 struct bio_vec *bvec; 1668 int i; 1669 1670 bio_for_each_segment_all(bvec, bio, i) { 1671 struct page *page = bvec->bv_page; 1672 1673 if (page) 1674 put_page(page); 1675 } 1676 } 1677 1678 /* 1679 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. 1680 * If they are, then fine. If, however, some pages are clean then they must 1681 * have been written out during the direct-IO read. So we take another ref on 1682 * the BIO and the offending pages and re-dirty the pages in process context. 1683 * 1684 * It is expected that bio_check_pages_dirty() will wholly own the BIO from 1685 * here on. It will run one page_cache_release() against each page and will 1686 * run one bio_put() against the BIO. 
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @error:	error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	while (bio) {
		BUG_ON(atomic_read(&bio->bi_remaining) <= 0);

		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (!atomic_dec_and_test(&bio->bi_remaining))
			return;

		/*
		 * Need to have a real endio function for chained bios,
		 * otherwise various corner cases will break (like stacking
		 * block devices that save/restore bi_end_io) - however, we want
		 * to avoid unbounded recursion and blowing the stack. Tail call
		 * optimization would handle this, but compiling with frame
		 * pointers also disables gcc's sibling call optimization.
		 */
		if (bio->bi_end_io == bio_chain_endio) {
			struct bio *parent = bio->bi_private;
			bio_put(bio);
			bio = parent;
		} else {
			if (bio->bi_end_io)
				bio->bi_end_io(bio, error);
			bio = NULL;
		}
	}
}
EXPORT_SYMBOL(bio_endio);

/**
 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
 * @bio:	bio
 * @error:	error, if any
 *
 * For code that has saved and restored bi_end_io; think hard before using this
 * function, probably you should've cloned the entire bio.
 **/
void bio_endio_nodec(struct bio *bio, int error)
{
	atomic_inc(&bio->bi_remaining);
	bio_endio(bio, error);
}
EXPORT_SYMBOL(bio_endio_nodec);

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
 * responsibility to ensure that @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
1914 */ 1915 mempool_t *biovec_create_pool(int pool_entries) 1916 { 1917 struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; 1918 1919 return mempool_create_slab_pool(pool_entries, bp->slab); 1920 } 1921 1922 void bioset_free(struct bio_set *bs) 1923 { 1924 if (bs->rescue_workqueue) 1925 destroy_workqueue(bs->rescue_workqueue); 1926 1927 if (bs->bio_pool) 1928 mempool_destroy(bs->bio_pool); 1929 1930 if (bs->bvec_pool) 1931 mempool_destroy(bs->bvec_pool); 1932 1933 bioset_integrity_free(bs); 1934 bio_put_slab(bs); 1935 1936 kfree(bs); 1937 } 1938 EXPORT_SYMBOL(bioset_free); 1939 1940 static struct bio_set *__bioset_create(unsigned int pool_size, 1941 unsigned int front_pad, 1942 bool create_bvec_pool) 1943 { 1944 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); 1945 struct bio_set *bs; 1946 1947 bs = kzalloc(sizeof(*bs), GFP_KERNEL); 1948 if (!bs) 1949 return NULL; 1950 1951 bs->front_pad = front_pad; 1952 1953 spin_lock_init(&bs->rescue_lock); 1954 bio_list_init(&bs->rescue_list); 1955 INIT_WORK(&bs->rescue_work, bio_alloc_rescue); 1956 1957 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); 1958 if (!bs->bio_slab) { 1959 kfree(bs); 1960 return NULL; 1961 } 1962 1963 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab); 1964 if (!bs->bio_pool) 1965 goto bad; 1966 1967 if (create_bvec_pool) { 1968 bs->bvec_pool = biovec_create_pool(pool_size); 1969 if (!bs->bvec_pool) 1970 goto bad; 1971 } 1972 1973 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); 1974 if (!bs->rescue_workqueue) 1975 goto bad; 1976 1977 return bs; 1978 bad: 1979 bioset_free(bs); 1980 return NULL; 1981 } 1982 1983 /** 1984 * bioset_create - Create a bio_set 1985 * @pool_size: Number of bio and bio_vecs to cache in the mempool 1986 * @front_pad: Number of bytes to allocate in front of the returned bio 1987 * 1988 * Description: 1989 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller 1990 * to ask for a number of bytes to be allocated in front of the bio. 1991 * Front pad allocation is useful for embedding the bio inside 1992 * another structure, to avoid allocating extra data to go with the bio. 1993 * Note that the bio must be embedded at the END of that structure always, 1994 * or things will break badly. 1995 */ 1996 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) 1997 { 1998 return __bioset_create(pool_size, front_pad, true); 1999 } 2000 EXPORT_SYMBOL(bioset_create); 2001 2002 /** 2003 * bioset_create_nobvec - Create a bio_set without bio_vec mempool 2004 * @pool_size: Number of bio to cache in the mempool 2005 * @front_pad: Number of bytes to allocate in front of the returned bio 2006 * 2007 * Description: 2008 * Same functionality as bioset_create() except that mempool is not 2009 * created for bio_vecs. Saving some memory for bio_clone_fast() users. 2010 */ 2011 struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad) 2012 { 2013 return __bioset_create(pool_size, front_pad, false); 2014 } 2015 EXPORT_SYMBOL(bioset_create_nobvec); 2016 2017 #ifdef CONFIG_BLK_CGROUP 2018 /** 2019 * bio_associate_current - associate a bio with %current 2020 * @bio: target bio 2021 * 2022 * Associate @bio with %current if it hasn't been associated yet. Block 2023 * layer will treat @bio as if it were issued by %current no matter which 2024 * task actually issues it. 2025 * 2026 * This function takes an extra reference of @task's io_context and blkcg 2027 * which will be put when @bio is released. 
The caller must own @bio, 2028 * ensure %current->io_context exists, and is responsible for synchronizing 2029 * calls to this function. 2030 */ 2031 int bio_associate_current(struct bio *bio) 2032 { 2033 struct io_context *ioc; 2034 struct cgroup_subsys_state *css; 2035 2036 if (bio->bi_ioc) 2037 return -EBUSY; 2038 2039 ioc = current->io_context; 2040 if (!ioc) 2041 return -ENOENT; 2042 2043 /* acquire active ref on @ioc and associate */ 2044 get_io_context_active(ioc); 2045 bio->bi_ioc = ioc; 2046 2047 /* associate blkcg if exists */ 2048 rcu_read_lock(); 2049 css = task_css(current, blkio_cgrp_id); 2050 if (css && css_tryget_online(css)) 2051 bio->bi_css = css; 2052 rcu_read_unlock(); 2053 2054 return 0; 2055 } 2056 2057 /** 2058 * bio_disassociate_task - undo bio_associate_current() 2059 * @bio: target bio 2060 */ 2061 void bio_disassociate_task(struct bio *bio) 2062 { 2063 if (bio->bi_ioc) { 2064 put_io_context(bio->bi_ioc); 2065 bio->bi_ioc = NULL; 2066 } 2067 if (bio->bi_css) { 2068 css_put(bio->bi_css); 2069 bio->bi_css = NULL; 2070 } 2071 } 2072 2073 #endif /* CONFIG_BLK_CGROUP */ 2074 2075 static void __init biovec_init_slabs(void) 2076 { 2077 int i; 2078 2079 for (i = 0; i < BIOVEC_NR_POOLS; i++) { 2080 int size; 2081 struct biovec_slab *bvs = bvec_slabs + i; 2082 2083 if (bvs->nr_vecs <= BIO_INLINE_VECS) { 2084 bvs->slab = NULL; 2085 continue; 2086 } 2087 2088 size = bvs->nr_vecs * sizeof(struct bio_vec); 2089 bvs->slab = kmem_cache_create(bvs->name, size, 0, 2090 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 2091 } 2092 } 2093 2094 static int __init init_bio(void) 2095 { 2096 bio_slab_max = 2; 2097 bio_slab_nr = 0; 2098 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL); 2099 if (!bio_slabs) 2100 panic("bio: can't allocate bios\n"); 2101 2102 bio_integrity_init(); 2103 biovec_init_slabs(); 2104 2105 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0); 2106 if (!fs_bio_set) 2107 panic("bio: can't allocate bios\n"); 2108 2109 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE)) 2110 panic("bio: can't create integrity pool\n"); 2111 2112 return 0; 2113 } 2114 subsys_initcall(init_bio); 2115
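
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * the usual life cycle of a private bio_set built on the interfaces above.
 * The names my_bio_set, my_state, my_end_io, nr_pages, bdev and page are
 * assumptions made up for the example.
 *
 *	static struct bio_set *my_bio_set;
 *
 *	// init: a private pool; front_pad leaves room for per-bio driver
 *	// state embedded in front of each bio allocated from it
 *	my_bio_set = bioset_create(BIO_POOL_SIZE, sizeof(struct my_state));
 *	if (!my_bio_set)
 *		return -ENOMEM;
 *
 *	// per I/O: with __GFP_WAIT set (as in GFP_NOIO) this cannot fail,
 *	// provided the caller never holds more than one unsubmitted bio
 *	// allocated from this pool at a time
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_pages, my_bio_set);
 *	bio->bi_bdev = bdev;
 *	bio->bi_end_io = my_end_io;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	submit_bio(WRITE, bio);
 *
 *	// teardown
 *	bioset_free(my_bio_set);
 */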