// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}
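/*
 * Illustrative sketch (hypothetical helper, kept under #if 0, not built):
 * why bio_alloc_map_data() above must deep-copy the iovec array.  Callers
 * commonly build the iov_iter from short-lived storage, but
 * bio_uncopy_user() replays the iterator at unmap time, potentially long
 * after the submitter's stack frame is gone.  Real callers go through
 * blk_rq_map_user_iov() below.
 */
#if 0
static int example_submit(struct request *rq, void __user *p, size_t len,
			  gfp_t gfp_mask)
{
	struct iovec iov = { .iov_base = p, .iov_len = len };	/* on stack */
	struct iov_iter iter;

	iov_iter_init(&iter, rq_data_dir(rq), &iov, 1, len);
	/*
	 * bio_copy_user_iov() snapshots iov[] into bmd->iov[], so the
	 * copy-back at completion never dereferences this stack frame.
	 */
	return bio_copy_user_iov(rq, NULL, &iter, gfp_mask);
}
#endif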
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * shortlived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}
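/*
 * Illustrative sketch (hypothetical names, kept under #if 0, not built):
 * driving the copy path above from a driver-owned page pool via
 * struct rq_map_data, the way the SCSI sg/st drivers do.  Payload pages
 * are carved out of the pool instead of alloc_page(), and since
 * is_our_pages is false, bio_uncopy_user() leaves the pool alone at
 * unmap time.
 */
#if 0
static int example_map_with_pool(struct request_queue *q, struct request *rq,
				 struct page **pool_pages,
				 void __user *ubuf, unsigned long len)
{
	struct rq_map_data map_data = {
		.pages		= pool_pages,	/* e.g. 4 order-1 entries */
		.page_order	= 1,		/* 2 contiguous pages each */
		.nr_entries	= 4,
		.offset		= 0,
		.null_mapped	= false,
	};

	/* a non-NULL map_data forces the copy path in blk_rq_map_user_iov() */
	return blk_rq_map_user(q, rq, &map_data, ubuf, len, GFP_KERNEL);
}
#endif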
static void bio_map_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	if (rq->cmd_flags & REQ_POLLED) {
		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;

		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
				       &fs_bio_set);
		if (!bio)
			return -ENOMEM;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return -ENOMEM;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}

	while (iov_iter_count(iter)) {
		struct page **pages, *stack_pages[UIO_FASTIOV];
		ssize_t bytes;
		size_t offs;
		int npages;

		if (nr_vecs <= ARRAY_SIZE(stack_pages)) {
			pages = stack_pages;
			bytes = iov_iter_get_pages2(iter, pages, LONG_MAX,
						    nr_vecs, &offs);
		} else {
			bytes = iov_iter_get_pages_alloc2(iter, &pages,
							  LONG_MAX, &offs);
		}
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		if (pages != stack_pages)
			kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	bio_map_put(bio);
	return ret;
}
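/*
 * Illustrative sketch (hypothetical helper, kept under #if 0, not built):
 * only iterators that satisfy the queue's DMA alignment and virt boundary
 * ever reach bio_map_user_iov() above; blk_rq_map_user_iov() below bounces
 * everything else through bio_copy_user_iov().  This mirrors that gate:
 */
#if 0
static bool example_can_map_directly(struct request_queue *q,
				     struct iov_iter *iter)
{
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

	/* e.g. a buffer at ubuf + 1 fails this for the default 511 mask */
	if (iov_iter_alignment(iter) & align)
		return false;
	/* gaps between iovecs must not straddle the virt boundary */
	if (queue_virt_boundary(q) &&
	    (queue_virt_boundary(q) & iov_iter_gap_alignment(iter)))
		return false;
	return true;
}
#endif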
static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * copy the kernel address into a bio suitable for io to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}
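/*
 * Illustrative sketch (hypothetical names, kept under #if 0, not built):
 * why blk_rq_map_kern() at the end of this file routes some buffers
 * through the bio_copy_kern() bounce above.  With CONFIG_VMAP_STACK the
 * stack may live in a vmap area where virt_to_page() is invalid, so
 * object_is_on_stack() forces a copy instead of a direct mapping.
 */
#if 0
static int example_sense(struct request_queue *q, struct request *rq)
{
	u8 buf[64];	/* on-stack buffer, e.g. for a sense/inquiry reply */

	/* object_is_on_stack(buf) is true: data is bounced, not mapped */
	return blk_rq_map_kern(q, rq, buf, sizeof(buf), GFP_KERNEL);
}
#endif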
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += (bio)->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
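/*
 * Illustrative sketch (hypothetical helper, kept under #if 0, not built):
 * attaching a driver-built bio to a passthrough request.  A failure means
 * the bio did not fit the queue limits and the caller still owns it.
 */
#if 0
static int example_attach_bio(struct request *rq, struct bio *bio)
{
	int ret;

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		/* could not merge within the driver constraints */
		bio_put(bio);
		return ret;
	}
	return 0;
}
#endif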
/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		bio_map_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);
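/*
 * Illustrative end-to-end sketch (hypothetical helper, kept under #if 0,
 * not built): the usage pattern prescribed by the blk_rq_map_user() /
 * blk_rq_unmap_user() kernel-doc above.  Note that rq->bio is saved before
 * execution, because completion may advance it and blk_rq_unmap_user()
 * needs the original head of the bio list.
 */
#if 0
static int example_dma_from_device(struct request_queue *q,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *bio;
	blk_status_t status;
	int ret;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out_free;

	bio = rq->bio;			/* save before blk_execute_rq() */
	status = blk_execute_rq(rq, false);

	ret = blk_rq_unmap_user(bio);	/* always unmap, even on error */
	if (!ret && status)
		ret = blk_status_to_errno(status);
out_free:
	blk_mq_free_request(rq);
	return ret;
}
#endif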