/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 *
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>

#include <trace/events/block.h>

/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4

/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
#undef BV

/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);

/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;

static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}

static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}

unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}

void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	if (!idx)
		return;
	idx--;

	BIO_BUG_ON(idx >= BVEC_POOL_NR);

	if (idx == BVEC_POOL_MAX) {
		mempool_free(bv, pool);
	} else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}

struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near the bvec_slabs define!
	 */
	switch (nr) {
	case 1:
		*idx = 0;
		break;
	case 2 ... 4:
		*idx = 1;
		break;
	case 5 ... 16:
		*idx = 2;
		break;
	case 17 ... 64:
		*idx = 3;
		break;
	case 65 ... 128:
		*idx = 4;
		break;
	case 129 ... BIO_MAX_PAGES:
		*idx = 5;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BVEC_POOL_MAX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fall back to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
			*idx = BVEC_POOL_MAX;
			goto fallback;
		}
	}

	(*idx)++;
	return bvl;
}

static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}

static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}

void bio_init(struct bio *bio, struct bio_vec *table,
	      unsigned short max_vecs)
{
	memset(bio, 0, sizeof(*bio));
	atomic_set(&bio->__bi_remaining, 1);
	atomic_set(&bio->__bi_cnt, 1);

	bio->bi_io_vec = table;
	bio->bi_max_vecs = max_vecs;
}
EXPORT_SYMBOL(bio_init);
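
/*
 * Illustrative sketch (not part of this file): because bio_init() needs no
 * mempool or bio_set, it is what makes short-lived on-stack bios possible.
 * A caller wanting a single-segment synchronous read might do something
 * like the following; bdev, sector and page are hypothetical:
 *
 *	struct bio bio;
 *	struct bio_vec bvec;
 *
 *	bio_init(&bio, &bvec, 1);
 *	bio.bi_bdev = bdev;
 *	bio.bi_iter.bi_sector = sector;
 *	bio_add_page(&bio, page, PAGE_SIZE, 0);
 *	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 *	submit_bio_wait(&bio);
 *
 * Such a bio must never have its last reference dropped via bio_put(),
 * since bio_free() would then kfree() stack memory.
 */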

/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags;
	atomic_set(&bio->__bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);

static struct bio *__bio_chain_endio(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	if (!parent->bi_error)
		parent->bi_error = bio->bi_error;
	bio_put(bio);
	return parent;
}

static void bio_chain_endio(struct bio *bio)
{
	bio_endio(__bio_chain_endio(bio));
}

/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	bio_inc_remaining(parent);
}
EXPORT_SYMBOL(bio_chain);
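
/*
 * Illustrative sketch (not part of this file): a driver that must issue
 * extra I/O on behalf of a bio it received can chain the extra bio to the
 * parent, so the parent does not complete until both have finished.
 * Hypothetical usage, assuming @parent was handed to our make_request
 * function and @bs is a private bio_set:
 *
 *	struct bio *new = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *
 *	(fill in new->bi_bdev, new->bi_iter, add pages, set the op)
 *	bio_chain(new, parent);
 *	generic_make_request(new);
 *
 * The chained bio is freed by bio_chain_endio() when it completes, and
 * @parent's bi_end_io runs only after both bios are done.
 */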

static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}

static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}

/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:	the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
 *   always be able to allocate a bio. This is due to the mempool guarantees.
 *   To make this work, callers must never allocate more than 1 bio at a time
 *   from this pool. Callers that need to allocate more than 1 bio must always
 *   submit the previously allocated bio for IO before attempting to allocate
 *   a new one. Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 * RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/* should not use nobvec bioset for nr_iovecs > 0 */
		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
			return NULL;
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
		 * bios we would be blocking to the rescuer workqueue before
		 * we retry with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_DIRECT_RECLAIM;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio, NULL, 0);

	if (nr_iovecs > inline_vecs) {
		unsigned long idx = 0;

		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
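
/*
 * Illustrative sketch (not part of this file): typical allocation from
 * fs_bio_set for filesystem I/O; the plain bio_alloc() wrapper expands to
 * exactly this. With __GFP_DIRECT_RECLAIM set the mempool guarantees mean
 * the allocation cannot fail, subject to the one-bio-at-a-time rule above.
 * my_end_io, bdev and first_sector are hypothetical:
 *
 *	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_pages, fs_bio_set);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = first_sector;
 *	bio->bi_end_io = my_end_io;
 *	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
 *	(add pages, then:)
 *	submit_bio(bio);
 */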

void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);

/**
 * bio_put - release a reference to a bio
 * @bio:	bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	if (!bio_flagged(bio, BIO_REFFED))
		bio_free(bio);
	else {
		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));

		/*
		 * last put frees it
		 */
		if (atomic_dec_and_test(&bio->__bi_cnt))
			bio_free(bio);
	}
}
EXPORT_SYMBOL(bio_put);

inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);

/**
 * __bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: destination bio
 * @bio_src: bio to clone
 *
 * Clone a &bio. Caller will own the returned bio, but not
 * the actual data it points to. Reference count of returned
 * bio will be one.
 *
 * Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio_set_flag(bio, BIO_CLONED);
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;

	bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);

/**
 * bio_clone_fast - clone a bio that shares the original bio's biovec
 * @bio: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
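
/*
 * Illustrative sketch (not part of this file): bio_clone_fast() suits
 * stacking drivers that only redirect a bio, since the clone shares the
 * parent's biovec. A hypothetical remapping driver might do:
 *
 *	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, my_bio_set);
 *
 *	clone->bi_bdev = lower_bdev;
 *	clone->bi_iter.bi_sector += my_offset;
 *	clone->bi_end_io = my_clone_endio;
 *	clone->bi_private = bio;
 *	generic_make_request(clone);
 *
 * Because the biovec is shared, the original bio must stay alive until
 * the clone completes, as the comment on __bio_clone_fast() above notes.
 */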

/**
 * bio_clone_bioset - clone a bio
 * @bio_src: bio to clone
 * @gfp_mask: allocation priority
 * @bs: bio_set to allocate from
 *
 * Clone bio. Caller will own the returned bio, but not the actual data it
 * points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_opf = bio_src->bi_opf;
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	bio_clone_blkcg_association(bio, bio_src);

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);

/**
 * bio_add_pc_page - attempt to add page to bio
 * @q: the target queue
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This can fail for a
 * number of reasons, such as the bio being full or target block device
 * limitations. The target block device must allow bios up to PAGE_SIZE,
 * so it is always possible to add a single page to an empty bio.
 *
 * This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
		    *page, unsigned int len, unsigned int offset)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			prev->bv_len += len;
			bio->bi_iter.bi_size += len;
			goto done;
		}

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(q, prev, offset))
			return 0;
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;
	bio->bi_vcnt++;
	bio->bi_phys_segments++;
	bio->bi_iter.bi_size += len;

	/*
	 * Perform a recount if the number of segments is greater
	 * than queue_max_segments(q).
	 */

	while (bio->bi_phys_segments > queue_max_segments(q)) {

		if (retried_segments)
			goto failed;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio_clear_flag(bio, BIO_SEG_VALID);

done:
	return len;

failed:
	bvec->bv_page = NULL;
	bvec->bv_len = 0;
	bvec->bv_offset = 0;
	bio->bi_vcnt--;
	bio->bi_iter.bi_size -= len;
	blk_recount_segments(q, bio);
	return 0;
}
EXPORT_SYMBOL(bio_add_pc_page);

/**
 * bio_add_page - attempt to add page to bio
 * @bio: destination bio
 * @page: page to add
 * @len: vec entry length
 * @offset: vec entry offset
 *
 * Attempt to add a page to the bio_vec maplist. This will only fail
 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
 */
int bio_add_page(struct bio *bio, struct page *page,
		 unsigned int len, unsigned int offset)
{
	struct bio_vec *bv;

	/*
	 * cloned bio must not modify vec list
	 */
	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == bv->bv_page &&
		    offset == bv->bv_offset + bv->bv_len) {
			bv->bv_len += len;
			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	bv = &bio->bi_io_vec[bio->bi_vcnt];
	bv->bv_page = page;
	bv->bv_len = len;
	bv->bv_offset = offset;

	bio->bi_vcnt++;
done:
	bio->bi_iter.bi_size += len;
	return len;
}
EXPORT_SYMBOL(bio_add_page);
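
/*
 * Illustrative sketch (not part of this file): the usual filesystem
 * pattern of building a bio page by page and submitting when it fills
 * up. bio_add_page() returns 0 when the bio is full; pages, nr_pages and
 * the re-initialization step are hypothetical:
 *
 *	for (i = 0; i < nr_pages; i++) {
 *		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0)) {
 *			submit_bio(bio);
 *			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
 *			(reinitialize bi_bdev/bi_iter, then retry the page)
 *		}
 *	}
 */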

/**
 * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
 * @bio: bio to add pages to
 * @iter: iov iterator describing the region to be mapped
 *
 * Pins as many pages from *iter and appends them to @bio's bvec array. The
 * pages will have to be released using put_page() when done.
 */
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
	unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
	struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
	struct page **pages = (struct page **)bv;
	size_t offset, diff;
	ssize_t size;

	size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
	if (unlikely(size <= 0))
		return size ? size : -EFAULT;
	nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;

	/*
	 * Deep magic below: We need to walk the pinned pages backwards
	 * because we are abusing the space allocated for the bio_vecs
	 * for the page array. Because the bio_vecs are larger than the
	 * page pointers by definition this will always work. But it also
	 * means we can't use bio_add_page, so any changes to its semantics
	 * need to be reflected here as well.
	 */
	bio->bi_iter.bi_size += size;
	bio->bi_vcnt += nr_pages;

	diff = (nr_pages * PAGE_SIZE - offset) - size;
	while (nr_pages--) {
		bv[nr_pages].bv_page = pages[nr_pages];
		bv[nr_pages].bv_len = PAGE_SIZE;
		bv[nr_pages].bv_offset = 0;
	}

	bv[0].bv_offset += offset;
	bv[0].bv_len -= offset;
	if (diff)
		bv[bio->bi_vcnt - 1].bv_len -= diff;

	iov_iter_advance(iter, size);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
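
/*
 * Illustrative sketch (not part of this file): direct I/O code can map a
 * user iov_iter straight into a bio and release the pinned pages on
 * completion. Hypothetical usage:
 *
 *	bio = bio_alloc(GFP_KERNEL, min(nr_pages, BIO_MAX_PAGES));
 *	ret = bio_iov_iter_get_pages(bio, iter);
 *	if (ret)
 *		goto out_put;
 *	(set bi_bdev, bi_iter.bi_sector and the op, then submit)
 *
 * In the completion path every mapped page must be dropped with
 * put_page(), e.g. by walking the bio with bio_for_each_segment_all().
 */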

struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = bio->bi_error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(struct bio *bio)
{
	struct submit_bio_ret ret;

	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	bio->bi_opf |= REQ_SYNC;
	submit_bio(bio);
	wait_for_completion_io(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
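
/*
 * Illustrative sketch (not part of this file): synchronously reading one
 * page. submit_bio_wait() blocks, so this may only be done from process
 * context; bdev, sector and page are hypothetical:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_iter.bi_sector = sector;
 *	bio_add_page(bio, page, PAGE_SIZE, 0);
 *	bio_set_op_attrs(bio, REQ_OP_READ, 0);
 *	err = submit_bio_wait(bio);
 *	bio_put(bio);
 */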

/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);

/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
 * freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);

/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
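
/*
 * Illustrative sketch (not part of this file): bio_alloc_pages() and
 * bio_copy_data() together implement a simple bounce for a write: clone
 * the bio, back the clone with fresh pages, then copy the payload across.
 * bs and the error path are hypothetical:
 *
 *	struct bio *bounce = bio_clone_bioset(bio, GFP_NOIO, bs);
 *
 *	if (bio_alloc_pages(bounce, GFP_NOIO))
 *		goto err;
 *	bio_copy_data(bounce, bio);
 *	generic_make_request(bounce);
 *
 * The completion handler would then release the bounce pages with
 * bio_free_pages() (defined below).
 */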

struct bio_map_data {
	int is_our_pages;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct iovec) * iov_count, gfp_mask);
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  &iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	int i;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, i) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

void bio_free_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		__free_page(bvec->bv_page);
}
EXPORT_SYMBOL(bio_free_pages);

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}

/**
 * bio_copy_user_iov - copy user data to bio
 * @q:		destination block queue
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Prepares and returns a bio for indirect user io, bouncing data
 * to/from kernel pages as necessary. Must be paired with a call to
 * bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct iov_iter *iter,
			      gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	for (i = 0; i < iter->nr_segs; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long) iter->iov[i].iov_base;
		end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = map_data ? 0 : 1;
	memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
	iov_iter_init(&bmd->iter, iter->type, bmd->iov,
		      iter->nr_segs, iter->count);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, *iter);
		if (ret)
			goto cleanup;
	}

	bio->bi_private = bmd;
	return bio;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}
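
/*
 * Illustrative sketch (not part of this file): bio_copy_user_iov() and
 * bio_uncopy_user() pair up around the actual I/O, roughly as the blk-map
 * code uses them. Hypothetical flow:
 *
 *	bio = bio_copy_user_iov(q, NULL, iter, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	(attach the bio to a request, execute it, wait for completion)
 *	ret = bio_uncopy_user(bio);
 *
 * For reads, bio_uncopy_user() copies the data back to user space; it
 * also frees the bounce pages and drops the bio reference.
 */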

/**
 * bio_map_user_iov - map user iovec into bio
 * @q:		the struct request_queue for the bio
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Map the user space address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q,
			     const struct iov_iter *iter,
			     gfp_t gfp_mask)
{
	int j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;
	struct iov_iter i;
	struct iovec iov;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least logical block size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	iov_for_each(iov, i, *iter) {
		unsigned long uaddr = (unsigned long) iov.iov_base;
		unsigned long len = iov.iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
					  (iter->type & WRITE) != WRITE,
					  &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = offset_in_page(uaddr);
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			/*
			 * sorry...
			 */
			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
			    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			put_page(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (iter->type & WRITE)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	bio_set_flag(bio, BIO_USER_MAPPED);

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);
	return bio;

out_unmap:
	for (j = 0; j < nr_pages; j++) {
		if (!pages[j])
			break;
		put_page(pages[j]);
	}
out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}

static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		put_page(bvec->bv_page);
	}

	bio_put(bio);
}

/**
 * bio_unmap_user - unmap a bio
 * @bio:	the bio being unmapped
 *
 * Unmap a bio previously mapped by bio_map_user(). Must be called from
 * process context.
 *
 * bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_put(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_put(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
EXPORT_SYMBOL(bio_map_kern);
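
/*
 * Illustrative sketch (not part of this file): mapping a kernel buffer
 * for a passthrough request, roughly what blk_rq_map_kern() does when the
 * buffer is suitably aligned. buf and len are hypothetical:
 *
 *	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *	(attach the bio to a request and execute it)
 *
 * Because the buffer is used in place, it must stay valid until the I/O
 * completes; bio_map_kern_endio() only drops the bio itself.
 */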

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_put(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(q->bounce_gfp | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_put(bio);
	return ERR_PTR(-ENOMEM);
}

/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe. So what we can do is to
 * mark the pages dirty _before_ performing IO. And in interrupt context,
 * check that the pages are still dirty. If so, fine. If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages. The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all. So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages(). This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */

/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}

static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}

/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine. If, however, some pages are clean then they must
 * have been written out during the direct-IO read. So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on. It will run one put_page() against each page and will run one
 * bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;

/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}

void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			put_page(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}
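
/*
 * Illustrative sketch (not part of this file): the direct-IO read path
 * described above uses these two helpers around an async bio roughly like
 * this (hypothetical code):
 *
 *	bio_set_pages_dirty(bio);
 *	submit_bio(bio);
 *
 * and then, from the bi_end_io handler:
 *
 *	bio_check_pages_dirty(bio);
 *
 * after which the bio and its page references are wholly owned by
 * bio_check_pages_dirty() and must not be touched again.
 */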

void generic_start_io_acct(int rw, unsigned long sectors,
			   struct hd_struct *part)
{
	int cpu = part_stat_lock();

	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], sectors);
	part_inc_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_start_io_acct);

void generic_end_io_acct(int rw, struct hd_struct *part,
			 unsigned long start_time)
{
	unsigned long duration = jiffies - start_time;
	int cpu = part_stat_lock();

	part_stat_add(cpu, part, ticks[rw], duration);
	part_round_stats(cpu, part);
	part_dec_in_flight(part, rw);

	part_stat_unlock();
}
EXPORT_SYMBOL(generic_end_io_acct);

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif

static inline bool bio_remaining_done(struct bio *bio)
{
	/*
	 * If we're not chaining, then ->__bi_remaining is always 1 and
	 * we always end io on the first invocation.
	 */
	if (!bio_flagged(bio, BIO_CHAIN))
		return true;

	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

	if (atomic_dec_and_test(&bio->__bi_remaining)) {
		bio_clear_flag(bio, BIO_CHAIN);
		return true;
	}

	return false;
}

/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
 *   way to end I/O on a bio. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io function.
 **/
void bio_endio(struct bio *bio)
{
again:
	if (!bio_remaining_done(bio))
		return;

	/*
	 * Need to have a real endio function for chained bios, otherwise
	 * various corner cases will break (like stacking block devices that
	 * save/restore bi_end_io) - however, we want to avoid unbounded
	 * recursion and blowing the stack. Tail call optimization would
	 * handle this, but compiling with frame pointers also disables
	 * gcc's sibling call optimization.
	 */
	if (bio->bi_end_io == bio_chain_endio) {
		bio = __bio_chain_endio(bio);
		goto again;
	}

	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}
EXPORT_SYMBOL(bio_endio);
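
/*
 * Illustrative sketch (not part of this file): a driver completing a bio
 * it received, propagating any error through bi_error. my_complete and
 * err are hypothetical:
 *
 *	static void my_complete(struct bio *bio, int err)
 *	{
 *		bio->bi_error = err;
 *		bio_endio(bio);
 *	}
 *
 * bio_endio() handles chained bios and calls bi_end_io, so the driver
 * never invokes bi_end_io directly.
 */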

/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * Unless this is a discard request the newly allocated bio will point
 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
 * @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);
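
/*
 * Illustrative sketch (not part of this file): the canonical split
 * pattern, in the style of blk_queue_split(). The split is chained to the
 * remainder so completions aggregate correctly; sectors and bs are
 * hypothetical:
 *
 *	if (sectors < bio_sectors(bio)) {
 *		struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);
 *
 *		bio_chain(split, bio);
 *		generic_make_request(bio);
 *		bio = split;
 *	}
 */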

/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	bio_clear_flag(bio, BIO_SEG_VALID);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);

/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}

void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);

static struct bio_set *__bioset_create(unsigned int pool_size,
				       unsigned int front_pad,
				       bool create_bvec_pool)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	if (create_bvec_pool) {
		bs->bvec_pool = biovec_create_pool(pool_size);
		if (!bs->bvec_pool)
			goto bad;
	}

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}

/**
 * bioset_create - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, true);
}
EXPORT_SYMBOL(bioset_create);

/**
 * bioset_create_nobvec - Create a bio_set without bio_vec mempool
 * @pool_size:	Number of bio to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Same functionality as bioset_create() except that no mempool is
 *    created for bio_vecs, saving some memory for bio_clone_fast() users.
 */
struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
{
	return __bioset_create(pool_size, front_pad, false);
}
EXPORT_SYMBOL(bioset_create_nobvec);
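
/*
 * Illustrative sketch (not part of this file): using front_pad to embed a
 * bio at the end of a private per-I/O structure, so that one mempool
 * allocation covers both. struct my_io and its users are hypothetical:
 *
 *	struct my_io {
 *		void *private;
 *		struct bio bio;		(must be the last member, see above)
 *	};
 *
 *	bs = bioset_create(BIO_POOL_SIZE, offsetof(struct my_io, bio));
 *	...
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, bs);
 *	io = container_of(bio, struct my_io, bio);
 */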

#ifdef CONFIG_BLK_CGROUP

/**
 * bio_associate_blkcg - associate a bio with the specified blkcg
 * @bio: target bio
 * @blkcg_css: css of the blkcg to associate
 *
 * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
 * treat @bio as if it were issued by a task which belongs to the blkcg.
 *
 * This function takes an extra reference of @blkcg_css which will be put
 * when @bio is released. The caller must own @bio and is responsible for
 * synchronizing calls to this function.
 */
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
{
	if (unlikely(bio->bi_css))
		return -EBUSY;
	css_get(blkcg_css);
	bio->bi_css = blkcg_css;
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_blkcg);

/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet. Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released. The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;

	if (bio->bi_css)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	get_io_context_active(ioc);
	bio->bi_ioc = ioc;
	bio->bi_css = task_get_css(current, io_cgrp_id);
	return 0;
}
EXPORT_SYMBOL_GPL(bio_associate_current);

/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

/**
 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
 * @dst: destination bio
 * @src: source bio
 */
void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
{
	if (src->bi_css)
		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
}

#endif /* CONFIG_BLK_CGROUP */

static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BVEC_POOL_NR; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}

static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);