// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

struct bio_map_data {
	bool is_our_pages : 1;
	bool is_null_mapped : 1;
	struct iov_iter iter;
	struct iovec iov[];
};

static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
					       gfp_t gfp_mask)
{
	struct bio_map_data *bmd;

	if (data->nr_segs > UIO_MAXIOV)
		return NULL;

	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
	if (!bmd)
		return NULL;
	memcpy(bmd->iov, data->iov, sizeof(struct iovec) * data->nr_segs);
	bmd->iter = *data;
	bmd->iter.iov = bmd->iov;
	return bmd;
}

/**
 * bio_copy_from_iter - copy all pages from iov_iter to bio
 * @bio: The &struct bio which describes the I/O as destination
 * @iter: iov_iter as source
 *
 * Copy all pages from iov_iter to bio.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_from_iter(bvec->bv_page,
					  bvec->bv_offset,
					  bvec->bv_len,
					  iter);

		if (!iov_iter_count(iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_copy_to_iter - copy all pages from bio to iov_iter
 * @bio: The &struct bio which describes the I/O as source
 * @iter: iov_iter as destination
 *
 * Copy all pages from bio to iov_iter.
 * Returns 0 on success, or error on failure.
 */
static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		ssize_t ret;

		ret = copy_page_to_iter(bvec->bv_page,
					bvec->bv_offset,
					bvec->bv_len,
					&iter);

		if (!iov_iter_count(&iter))
			break;

		if (ret < bvec->bv_len)
			return -EFAULT;
	}

	return 0;
}

/**
 * bio_uncopy_user - finish previously mapped bio
 * @bio: bio being terminated
 *
 * Free pages allocated from bio_copy_user_iov() and write back data
 * to user space in case of a read.
 */
static int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	int ret = 0;

	if (!bmd->is_null_mapped) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free
		 * and return -EINTR so user space doesn't expect any data.
		 */
		if (!current->mm)
			ret = -EINTR;
		else if (bio_data_dir(bio) == READ)
			ret = bio_copy_to_iter(bio, bmd->iter);
		if (bmd->is_our_pages)
			bio_free_pages(bio);
	}
	kfree(bmd);
	return ret;
}

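/*
 * Illustrative note (an editor's sketch, not part of the original file):
 * bio_alloc_map_data() above sizes the flexible iov[] array with
 * struct_size(), which saturates instead of wrapping on arithmetic
 * overflow. The open-coded equivalent would be:
 *
 *	bmd = kmalloc(sizeof(*bmd) + data->nr_segs * sizeof(struct iovec),
 *		      gfp_mask);
 *
 * but that form can overflow to a small allocation for very large
 * nr_segs, which is why struct_size() is the defensive idiom even with
 * the UIO_MAXIOV check above.
 */
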
static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
		struct iov_iter *iter, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct page *page;
	struct bio *bio;
	int i = 0, ret;
	int nr_pages;
	unsigned int len = iter->count;
	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;

	bmd = bio_alloc_map_data(iter, gfp_mask);
	if (!bmd)
		return -ENOMEM;

	/*
	 * We need to do a deep copy of the iov_iter including the iovecs.
	 * The caller provided iov might point to an on-stack or otherwise
	 * short-lived one.
	 */
	bmd->is_our_pages = !map_data;
	bmd->is_null_mapped = (map_data && map_data->null_mapped);

	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));

	ret = -ENOMEM;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		goto out_bmd;
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));

	if (map_data) {
		nr_pages = 1U << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				goto cleanup;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(GFP_NOIO | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				goto cleanup;
			}
		}

		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
			if (!map_data)
				__free_page(page);
			break;
		}

		len -= bytes;
		offset = 0;
	}

	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	/*
	 * success
	 */
	if ((iov_iter_rw(iter) == WRITE &&
	     (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = bio_copy_from_iter(bio, iter);
		if (ret)
			goto cleanup;
	} else {
		if (bmd->is_our_pages)
			zero_fill_bio(bio);
		iov_iter_advance(iter, bio->bi_iter.bi_size);
	}

	bio->bi_private = bmd;

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	if (!map_data)
		bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
out_bmd:
	kfree(bmd);
	return ret;
}

static void bio_map_put(struct bio *bio)
{
	if (bio->bi_opf & REQ_ALLOC_CACHE) {
		bio_put(bio);
	} else {
		bio_uninit(bio);
		kfree(bio);
	}
}

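/*
 * Illustrative sketch (an editor's assumption, not from this file): a
 * caller that owns pre-allocated pages can route them into the bounce
 * path above via struct rq_map_data; my_pages and nents are
 * hypothetical:
 *
 *	struct rq_map_data mdata = {
 *		.pages = my_pages,
 *		.page_order = 0,
 *		.nr_entries = nents,
 *		.offset = 0,
 *	};
 *	ret = blk_rq_map_user(q, rq, &mdata, ubuf, len, GFP_KERNEL);
 *
 * With map_data set, bio_copy_user_iov() reuses those pages instead of
 * calling alloc_page(), and bmd->is_our_pages stays false so they are
 * not freed at unmap time.
 */
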
static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
		gfp_t gfp_mask)
{
	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
	struct bio *bio;
	int ret;
	int j;

	if (!iov_iter_count(iter))
		return -EINVAL;

	if (rq->cmd_flags & REQ_POLLED) {
		blk_opf_t opf = rq->cmd_flags | REQ_ALLOC_CACHE;

		bio = bio_alloc_bioset(NULL, nr_vecs, opf, gfp_mask,
					&fs_bio_set);
		if (!bio)
			return -ENOMEM;
	} else {
		bio = bio_kmalloc(nr_vecs, gfp_mask);
		if (!bio)
			return -ENOMEM;
		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
	}

	while (iov_iter_count(iter)) {
		struct page **pages;
		ssize_t bytes;
		size_t offs, added = 0;
		int npages;

		bytes = iov_iter_get_pages_alloc2(iter, &pages, LONG_MAX, &offs);
		if (unlikely(bytes <= 0)) {
			ret = bytes ? bytes : -EFAULT;
			goto out_unmap;
		}

		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);

		if (unlikely(offs & queue_dma_alignment(rq->q)))
			j = 0;
		else {
			for (j = 0; j < npages; j++) {
				struct page *page = pages[j];
				unsigned int n = PAGE_SIZE - offs;
				bool same_page = false;

				if (n > bytes)
					n = bytes;

				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
						     max_sectors, &same_page)) {
					if (same_page)
						put_page(page);
					break;
				}

				added += n;
				bytes -= n;
				offs = 0;
			}
		}
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < npages)
			put_page(pages[j++]);
		kvfree(pages);
		/* couldn't stuff something into bio? */
		if (bytes) {
			iov_iter_revert(iter, bytes);
			break;
		}
	}

	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		goto out_unmap;
	return 0;

out_unmap:
	bio_release_pages(bio, false);
	bio_map_put(bio);
	return ret;
}

static void bio_invalidate_vmalloc_pages(struct bio *bio)
{
#ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
	if (bio->bi_private && !op_is_write(bio_op(bio))) {
		unsigned long i, len = 0;

		for (i = 0; i < bio->bi_vcnt; i++)
			len += bio->bi_io_vec[i].bv_len;
		invalidate_kernel_vmap_range(bio->bi_private, len);
	}
#endif
}

static void bio_map_kern_endio(struct bio *bio)
{
	bio_invalidate_vmalloc_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

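/*
 * Illustrative note (an editor's sketch, not part of the original file):
 * the zero-copy path in bio_map_user_iov() pins user pages through
 * iov_iter_get_pages_alloc2(), so every page mapped into the bio holds
 * an extra reference. blk_rq_unmap_user() drops those references via:
 *
 *	bio_release_pages(bio, bio_data_dir(bio) == READ);
 *
 * where the second argument marks the pages dirty when the device has
 * written into them.
 */
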
/**
 * bio_map_kern - map kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to map
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
 *
 * Map the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_map_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	bool is_vmalloc = is_vmalloc_addr(data);
	struct page *page;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	if (is_vmalloc) {
		flush_kernel_vmap_range(data, len);
		bio->bi_private = data;
	}

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (!is_vmalloc)
			page = virt_to_page(data);
		else
			page = vmalloc_to_page(data);
		if (bio_add_pc_page(q, bio, page, bytes,
				    offset) < bytes) {
			/* we don't support partial mappings */
			bio_uninit(bio);
			kfree(bio);
			return ERR_PTR(-EINVAL);
		}

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}

static void bio_copy_kern_endio(struct bio *bio)
{
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
}

static void bio_copy_kern_endio_read(struct bio *bio)
{
	char *p = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		memcpy_from_bvec(p, bvec);
		p += bvec->bv_len;
	}

	bio_copy_kern_endio(bio);
}

/**
 * bio_copy_kern - copy kernel address into bio
 * @q: the struct request_queue for the bio
 * @data: pointer to buffer to copy
 * @len: length in bytes
 * @gfp_mask: allocation flags for bio and page allocation
 * @reading: data direction is READ
 *
 * Copy the kernel address into a bio suitable for I/O to a block
 * device. Returns an error pointer in case of error.
 */
static struct bio *bio_copy_kern(struct request_queue *q, void *data,
		unsigned int len, gfp_t gfp_mask, int reading)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	struct bio *bio;
	void *p = data;
	int nr_pages = 0;

	/*
	 * Overflow, abort
	 */
	if (end < start)
		return ERR_PTR(-EINVAL);

	nr_pages = end - start;
	bio = bio_kmalloc(nr_pages, gfp_mask);
	if (!bio)
		return ERR_PTR(-ENOMEM);
	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);

	while (len) {
		struct page *page;
		unsigned int bytes = PAGE_SIZE;

		if (bytes > len)
			bytes = len;

		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
		if (!page)
			goto cleanup;

		if (!reading)
			memcpy(page_address(page), p, bytes);

		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
			break;

		len -= bytes;
		p += bytes;
	}

	if (reading) {
		bio->bi_end_io = bio_copy_kern_endio_read;
		bio->bi_private = data;
	} else {
		bio->bi_end_io = bio_copy_kern_endio;
	}

	return bio;

cleanup:
	bio_free_pages(bio);
	bio_uninit(bio);
	kfree(bio);
	return ERR_PTR(-ENOMEM);
}

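/*
 * Illustrative note (an editor's sketch, not part of the original file):
 * bio_copy_kern() handles the two directions asymmetrically. A WRITE is
 * copied into the bounce pages inline (the memcpy above); a READ stashes
 * the caller's buffer in bio->bi_private and defers the copy-out to
 * completion:
 *
 *	WRITE:	*data --memcpy--> bounce pages --> device
 *	READ:	device --> bounce pages --bio_copy_kern_endio_read()--> *data
 */
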
/*
 * Append a bio to a passthrough request.  Only works if the bio can be merged
 * into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	unsigned int nr_segs = 0;

	bio_for_each_bvec(bv, bio, iter)
		nr_segs++;

	if (!rq->bio) {
		blk_rq_bio_prep(rq, bio, nr_segs);
	} else {
		if (!ll_back_merge_fn(rq, bio, nr_segs))
			return -EINVAL;
		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
		bio_crypt_free_ctx(bio);
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iter: iovec iterator
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret = -EINVAL;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (blk_queue_may_bounce(q))
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		if (copy)
			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
		else
			ret = bio_map_user_iov(rq, &i, gfp_mask);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	return 0;

unmap_rq:
	blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

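/*
 * Illustrative sketch (an editor's assumption, not from this file): a
 * typical passthrough caller brackets request execution with the map
 * and unmap calls; my_q is hypothetical:
 *
 *	struct request *rq = blk_mq_alloc_request(my_q, REQ_OP_DRV_IN, 0);
 *	struct bio *bio;
 *	int ret;
 *
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_user(my_q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free;
 *	bio = rq->bio;			// save: completion may change rq->bio
 *	blk_execute_rq(rq, false);
 *	ret = blk_rq_unmap_user(bio);	// must run in process context
 * out_free:
 *	blk_mq_free_request(rq);
 */
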
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio, as saved after blk_rq_map_user()
 *    returned, since the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *next_bio;
	int ret = 0, ret2;

	while (bio) {
		if (bio->bi_private) {
			ret2 = bio_uncopy_user(bio);
			if (ret2 && !ret)
				ret = ret2;
		} else {
			bio_release_pages(bio, bio_data_dir(bio) == READ);
		}

		next_bio = bio;
		bio = bio->bi_next;
		bio_map_put(next_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
	    blk_queue_may_bounce(q))
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		bio_uninit(bio);
		kfree(bio);
	}
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_kern);

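/*
 * Illustrative sketch (an editor's assumption, not from this file):
 * mapping a kernel buffer with blk_rq_map_kern(). A properly aligned
 * heap buffer goes through the zero-copy bio_map_kern() path; a stack
 * or misaligned buffer is silently bounced through bio_copy_kern():
 *
 *	void *buf = kmalloc(512, GFP_KERNEL);	// hypothetical buffer
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = blk_rq_map_kern(q, rq, buf, 512, GFP_KERNEL);
 *	if (!ret)
 *		blk_execute_rq(rq, false);
 *	kfree(buf);
 */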