// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
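
/*
 * bio_copy_user_iov() below builds the bounce bio that bio_uncopy_user()
 * above tears down; the two share their state through the bio_map_data
 * hung off bio->bi_private.
 */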

static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_ALLOC_CACHE) {
		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}
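
/*
 * Bios obtained from blk_rq_map_bio_alloc() must be freed with
 * blk_mq_map_bio_put(): a REQ_ALLOC_CACHE bio comes out of fs_bio_set and is
 * dropped with bio_put(), while a bio_kmalloc()ed one needs bio_uninit()
 * followed by kfree().
 */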

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	unsigned int gup_flags = 0;
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		gup_flags |= FOLL_PCI_P2PDMA;

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
						   nr_vecs, &offs, gup_flags);
		} else {
			bytes = iov_iter_get_pages_alloc(iter, &pages,
						LONG_MAX, &offs, gup_flags);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}
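
/*
 * Note that the zero-copy path above only pins the user pages; nothing is
 * copied.  The references are dropped again in blk_rq_unmap_user() via
 * bio_release_pages(), which also dirties the pages if the request was a
 * READ.
 */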

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}
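
/*
 * bio_copy_kern() above bounces through freshly allocated pages: for a WRITE
 * the data is copied in immediately, for a READ it is copied back to the
 * original buffer by bio_copy_kern_endio_read() once the bio completes.
 */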

/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
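
/*
 * A minimal usage sketch, not taken from an in-tree caller; the variable
 * names and error handling are illustrative only.  A driver that has already
 * built a bio for a passthrough command splices it into a freshly allocated
 * request and executes it:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */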

/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	const struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			goto put_bio;
		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto put_bio;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!iter_is_iovec(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);
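
/*
 * Illustrative sketch of how an SG_IO-style passthrough path might pair the
 * map and unmap helpers; the names are hypothetical and error handling is
 * trimmed.  rq->bio is saved before execution because completion may advance
 * it (see blk_rq_unmap_user() below):
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free_rq;
 *	bio = rq->bio;
 *	blk_execute_rq(rq, false);
 *	ret = blk_rq_unmap_user(bio);
 */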

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
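
/*
 * Illustrative sketch only (hypothetical names, error handling trimmed):
 * mapping a driver-owned buffer for a passthrough read with
 * blk_rq_map_kern().  Unlike the user mapping helpers there is no explicit
 * unmap step; the bio's end_io handler releases everything when the request
 * completes:
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */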