// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	if (iter_is_iovec(data))
		bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void blk_mq_map_bio_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static struct bio *blk_rq_map_bio_alloc(struct request *rq,
		unsigned int nr_vecs, gfp_t gfp_mask)
{
	struct bio *bio;

	if (rq->cmd_flags & REQ_POLLED) {
		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;

		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return NULL;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return NULL;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}
	return bio;
}
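/*
 * Illustrative sketch (not part of this file): bio_copy_user_iov() above can
 * take its bounce pages from a caller-provided struct rq_map_data instead of
 * allocating them itself.  A caller with preallocated pages might fill the
 * structure roughly like this before calling blk_rq_map_user_iov(); the
 * "pages" array and "nr_pages" count are assumptions for the example:
 *
 *	struct rq_map_data map_data = {
 *		.pages		= pages,	// preallocated page array
 *		.page_order	= 0,		// each entry is a single page
 *		.nr_entries	= nr_pages,	// entries in "pages"
 *		.offset		= 0,		// byte offset into first page
 *		.null_mapped	= false,	// a real user buffer exists
 *		.from_user	= false,	// don't force a copy-in
 *	};
 */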
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	unsigned int gup_flags = 0;
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (bio == NULL)
		return -ENOMEM;

	if (blk_queue_pci_p2pdma(rq->q))
		gup_flags |= FOLL_PCI_P2PDMA;

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages(iter, pages, LONG_MAX,
						   nr_vecs, &offs, gup_flags);
		} else {
			bytes = iov_iter_get_pages_alloc(iter, &pages,
						LONG_MAX, &offs, gup_flags);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	blk_mq_map_bio_put(bio);
	return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}
/*
 * Append a bio to a passthrough request. Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
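/*
 * Illustrative sketch (not part of this file): a passthrough caller that has
 * already built a bio can attach it with blk_rq_append_bio().  The request
 * allocation below and the minimal error handling are assumptions made for
 * the sake of the example:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_append_bio(rq, bio);
 *	if (ret)
 *		blk_mq_free_request(rq);
 */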
/* Prepare bio for passthrough IO given ITER_BVEC iter */
static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
{
	struct request_queue *q = rq->q;
	size_t nr_iter = iov_iter_count(iter);
	size_t nr_segs = iter->nr_segs;
	struct bio_vec *bvecs, *bvprvp = NULL;
	const struct queue_limits *lim = &q->limits;
	unsigned int nsegs = 0, bytes = 0;
	struct bio *bio;
	size_t i;

	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
		return -EINVAL;
	if (nr_segs > queue_max_segments(q))
		return -EINVAL;

	/* no iovecs to alloc, as we already have a BVEC iterator */
	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
	if (bio == NULL)
		return -ENOMEM;

	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
	blk_rq_bio_prep(rq, bio, nr_segs);

	/* loop to perform a bunch of sanity checks */
	bvecs = (struct bio_vec *)iter->bvec;
	for (i = 0; i < nr_segs; i++) {
		struct bio_vec *bv = &bvecs[i];

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, fallback to copy.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
			blk_mq_map_bio_put(bio);
			return -EREMOTEIO;
		}
		/* check full condition */
		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
			goto put_bio;
		if (bytes + bv->bv_len > nr_iter)
			goto put_bio;
		if (bv->bv_offset + bv->bv_len > PAGE_SIZE)
			goto put_bio;

		nsegs++;
		bytes += bv->bv_len;
		bvprvp = bv;
	}
	return 0;
put_bio:
	blk_mq_map_bio_put(bio);
	return -EINVAL;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false, map_bvec = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (iov_iter_is_bvec(iter))
		map_bvec = true;
	else if (!user_backed_iter(iter))
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	if (map_bvec) {
		ret = blk_rq_map_user_bvec(rq, iter);
		if (!ret)
			return 0;
		if (ret != -EREMOTEIO)
			goto fail;
		/* fall back to copying the data on limits mismatches */
		copy = true;
	}

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iov_iter i;
	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
		bool vec, int iov_count, bool check_iter_count, int rw)
{
	int ret = 0;

	if (vec) {
		struct iovec fast_iov[UIO_FASTIOV];
		struct iovec *iov = fast_iov;
		struct iov_iter iter;

		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
				UIO_FASTIOV, &iov, &iter);
		if (ret < 0)
			return ret;

		if (iov_count) {
			/* SG_IO howto says that the shorter of the two wins */
			iov_iter_truncate(&iter, buf_len);
			if (check_iter_count && !iov_iter_count(&iter)) {
				kfree(iov);
				return -EINVAL;
			}
		}

		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
				gfp_mask);
		kfree(iov);
	} else if (buf_len) {
		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
				gfp_mask);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_io);
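/*
 * Illustrative sketch (not part of this file): a typical passthrough caller
 * maps a user buffer, executes the request, and unmaps from process context.
 * Saving rq->bio before execution matters because I/O completion may change
 * it; the surrounding names and the omitted error handling are assumptions
 * for the example:
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free_rq;
 *	bio = rq->bio;
 *	blk_execute_rq(rq, false);
 *	ret = blk_rq_unmap_user(bio);
 */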
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		blk_mq_map_bio_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
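/*
 * Illustrative sketch (not part of this file): mapping a kernel buffer to a
 * passthrough request with blk_rq_map_kern().  The request opcode and the
 * surrounding error handling are assumptions for the example:
 *
 *	struct request *rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_kern(q, rq, buf, buf_len, GFP_KERNEL);
 *	if (ret) {
 *		blk_mq_free_request(rq);
 *		return ret;
 *	}
 *	blk_execute_rq(rq, false);
 *	blk_mq_free_request(rq);
 */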