1 // SPDX-License-Identifier: GPL-2.0-only 2 #include <crypto/hash.h> 3 #include <linux/export.h> 4 #include <linux/bvec.h> 5 #include <linux/fault-inject-usercopy.h> 6 #include <linux/uio.h> 7 #include <linux/pagemap.h> 8 #include <linux/highmem.h> 9 #include <linux/slab.h> 10 #include <linux/vmalloc.h> 11 #include <linux/splice.h> 12 #include <linux/compat.h> 13 #include <net/checksum.h> 14 #include <linux/scatterlist.h> 15 #include <linux/instrumented.h> 16 17 #define PIPE_PARANOIA /* for now */ 18 19 /* covers iovec and kvec alike */ 20 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ 21 size_t off = 0; \ 22 size_t skip = i->iov_offset; \ 23 do { \ 24 len = min(n, __p->iov_len - skip); \ 25 if (likely(len)) { \ 26 base = __p->iov_base + skip; \ 27 len -= (STEP); \ 28 off += len; \ 29 skip += len; \ 30 n -= len; \ 31 if (skip < __p->iov_len) \ 32 break; \ 33 } \ 34 __p++; \ 35 skip = 0; \ 36 } while (n); \ 37 i->iov_offset = skip; \ 38 n = off; \ 39 } 40 41 #define iterate_bvec(i, n, base, len, off, p, STEP) { \ 42 size_t off = 0; \ 43 unsigned skip = i->iov_offset; \ 44 while (n) { \ 45 unsigned offset = p->bv_offset + skip; \ 46 unsigned left; \ 47 void *kaddr = kmap_local_page(p->bv_page + \ 48 offset / PAGE_SIZE); \ 49 base = kaddr + offset % PAGE_SIZE; \ 50 len = min(min(n, (size_t)(p->bv_len - skip)), \ 51 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ 52 left = (STEP); \ 53 kunmap_local(kaddr); \ 54 len -= left; \ 55 off += len; \ 56 skip += len; \ 57 if (skip == p->bv_len) { \ 58 skip = 0; \ 59 p++; \ 60 } \ 61 n -= len; \ 62 if (left) \ 63 break; \ 64 } \ 65 i->iov_offset = skip; \ 66 n = off; \ 67 } 68 69 #define iterate_xarray(i, n, base, len, __off, STEP) { \ 70 __label__ __out; \ 71 size_t __off = 0; \ 72 struct page *head = NULL; \ 73 loff_t start = i->xarray_start + i->iov_offset; \ 74 unsigned offset = start % PAGE_SIZE; \ 75 pgoff_t index = start / PAGE_SIZE; \ 76 int j; \ 77 \ 78 XA_STATE(xas, i->xarray, index); \ 79 \ 80 rcu_read_lock(); \ 81 xas_for_each(&xas, head, ULONG_MAX) { \ 82 unsigned left; \ 83 if (xas_retry(&xas, head)) \ 84 continue; \ 85 if (WARN_ON(xa_is_value(head))) \ 86 break; \ 87 if (WARN_ON(PageHuge(head))) \ 88 break; \ 89 for (j = (head->index < index) ? index - head->index : 0; \ 90 j < thp_nr_pages(head); j++) { \ 91 void *kaddr = kmap_local_page(head + j); \ 92 base = kaddr + offset; \ 93 len = PAGE_SIZE - offset; \ 94 len = min(n, len); \ 95 left = (STEP); \ 96 kunmap_local(kaddr); \ 97 len -= left; \ 98 __off += len; \ 99 n -= len; \ 100 if (left || n == 0) \ 101 goto __out; \ 102 offset = 0; \ 103 } \ 104 } \ 105 __out: \ 106 rcu_read_unlock(); \ 107 i->iov_offset += __off; \ 108 n = __off; \ 109 } 110 111 #define __iterate_and_advance(i, n, base, len, off, I, K) { \ 112 if (unlikely(i->count < n)) \ 113 n = i->count; \ 114 if (likely(n)) { \ 115 if (likely(iter_is_iovec(i))) { \ 116 const struct iovec *iov = i->iov; \ 117 void __user *base; \ 118 size_t len; \ 119 iterate_iovec(i, n, base, len, off, \ 120 iov, (I)) \ 121 i->nr_segs -= iov - i->iov; \ 122 i->iov = iov; \ 123 } else if (iov_iter_is_bvec(i)) { \ 124 const struct bio_vec *bvec = i->bvec; \ 125 void *base; \ 126 size_t len; \ 127 iterate_bvec(i, n, base, len, off, \ 128 bvec, (K)) \ 129 i->nr_segs -= bvec - i->bvec; \ 130 i->bvec = bvec; \ 131 } else if (iov_iter_is_kvec(i)) { \ 132 const struct kvec *kvec = i->kvec; \ 133 void *base; \ 134 size_t len; \ 135 iterate_iovec(i, n, base, len, off, \ 136 kvec, (K)) \ 137 i->nr_segs -= kvec - i->kvec; \ 138 i->kvec = kvec; \ 139 } else if (iov_iter_is_xarray(i)) { \ 140 void *base; \ 141 size_t len; \ 142 iterate_xarray(i, n, base, len, off, \ 143 (K)) \ 144 } \ 145 i->count -= n; \ 146 } \ 147 } 148 #define iterate_and_advance(i, n, base, len, off, I, K) \ 149 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) 150 151 static int copyout(void __user *to, const void *from, size_t n) 152 { 153 if (should_fail_usercopy()) 154 return n; 155 if (access_ok(to, n)) { 156 instrument_copy_to_user(to, from, n); 157 n = raw_copy_to_user(to, from, n); 158 } 159 return n; 160 } 161 162 static int copyin(void *to, const void __user *from, size_t n) 163 { 164 if (should_fail_usercopy()) 165 return n; 166 if (access_ok(from, n)) { 167 instrument_copy_from_user(to, from, n); 168 n = raw_copy_from_user(to, from, n); 169 } 170 return n; 171 } 172 173 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes, 174 struct iov_iter *i) 175 { 176 size_t skip, copy, left, wanted; 177 const struct iovec *iov; 178 char __user *buf; 179 void *kaddr, *from; 180 181 if (unlikely(bytes > i->count)) 182 bytes = i->count; 183 184 if (unlikely(!bytes)) 185 return 0; 186 187 might_fault(); 188 wanted = bytes; 189 iov = i->iov; 190 skip = i->iov_offset; 191 buf = iov->iov_base + skip; 192 copy = min(bytes, iov->iov_len - skip); 193 194 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) { 195 kaddr = kmap_atomic(page); 196 from = kaddr + offset; 197 198 /* first chunk, usually the only one */ 199 left = copyout(buf, from, copy); 200 copy -= left; 201 skip += copy; 202 from += copy; 203 bytes -= copy; 204 205 while (unlikely(!left && bytes)) { 206 iov++; 207 buf = iov->iov_base; 208 copy = min(bytes, iov->iov_len); 209 left = copyout(buf, from, copy); 210 copy -= left; 211 skip = copy; 212 from += copy; 213 bytes -= copy; 214 } 215 if (likely(!bytes)) { 216 kunmap_atomic(kaddr); 217 goto done; 218 } 219 offset = from - kaddr; 220 buf += copy; 221 kunmap_atomic(kaddr); 222 copy = min(bytes, iov->iov_len - skip); 223 } 224 /* Too bad - revert to non-atomic kmap */ 225 226 kaddr = kmap(page); 227 from = kaddr + offset; 228 left = copyout(buf, from, copy); 229 copy -= left; 230 skip += copy; 231 from += copy; 232 bytes -= copy; 233 while (unlikely(!left && bytes)) { 234 iov++; 235 buf = iov->iov_base; 236 copy = min(bytes, iov->iov_len); 237 left = copyout(buf, from, copy); 238 copy -= left; 239 skip = copy; 240 from += copy; 241 bytes -= copy; 242 } 243 kunmap(page); 244 245 done: 246 if (skip == iov->iov_len) { 247 iov++; 248 skip = 0; 249 } 250 i->count -= wanted - bytes; 251 i->nr_segs -= iov - i->iov; 252 i->iov = iov; 253 i->iov_offset = skip; 254 return wanted - bytes; 255 } 256 257 static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes, 258 struct iov_iter *i) 259 { 260 size_t skip, copy, left, wanted; 261 const struct iovec *iov; 262 char __user *buf; 263 void *kaddr, *to; 264 265 if (unlikely(bytes > i->count)) 266 bytes = i->count; 267 268 if (unlikely(!bytes)) 269 return 0; 270 271 might_fault(); 272 wanted = bytes; 273 iov = i->iov; 274 skip = i->iov_offset; 275 buf = iov->iov_base + skip; 276 copy = min(bytes, iov->iov_len - skip); 277 278 if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) { 279 kaddr = kmap_atomic(page); 280 to = kaddr + offset; 281 282 /* first chunk, usually the only one */ 283 left = copyin(to, buf, copy); 284 copy -= left; 285 skip += copy; 286 to += copy; 287 bytes -= copy; 288 289 while (unlikely(!left && bytes)) { 290 iov++; 291 buf = iov->iov_base; 292 copy = min(bytes, iov->iov_len); 293 left = copyin(to, buf, copy); 294 copy -= left; 295 skip = copy; 296 to += copy; 297 bytes -= copy; 298 } 299 if (likely(!bytes)) { 300 kunmap_atomic(kaddr); 301 goto done; 302 } 303 offset = to - kaddr; 304 buf += copy; 305 kunmap_atomic(kaddr); 306 copy = min(bytes, iov->iov_len - skip); 307 } 308 /* Too bad - revert to non-atomic kmap */ 309 310 kaddr = kmap(page); 311 to = kaddr + offset; 312 left = copyin(to, buf, copy); 313 copy -= left; 314 skip += copy; 315 to += copy; 316 bytes -= copy; 317 while (unlikely(!left && bytes)) { 318 iov++; 319 buf = iov->iov_base; 320 copy = min(bytes, iov->iov_len); 321 left = copyin(to, buf, copy); 322 copy -= left; 323 skip = copy; 324 to += copy; 325 bytes -= copy; 326 } 327 kunmap(page); 328 329 done: 330 if (skip == iov->iov_len) { 331 iov++; 332 skip = 0; 333 } 334 i->count -= wanted - bytes; 335 i->nr_segs -= iov - i->iov; 336 i->iov = iov; 337 i->iov_offset = skip; 338 return wanted - bytes; 339 } 340 341 #ifdef PIPE_PARANOIA 342 static bool sanity(const struct iov_iter *i) 343 { 344 struct pipe_inode_info *pipe = i->pipe; 345 unsigned int p_head = pipe->head; 346 unsigned int p_tail = pipe->tail; 347 unsigned int p_mask = pipe->ring_size - 1; 348 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); 349 unsigned int i_head = i->head; 350 unsigned int idx; 351 352 if (i->iov_offset) { 353 struct pipe_buffer *p; 354 if (unlikely(p_occupancy == 0)) 355 goto Bad; // pipe must be non-empty 356 if (unlikely(i_head != p_head - 1)) 357 goto Bad; // must be at the last buffer... 358 359 p = &pipe->bufs[i_head & p_mask]; 360 if (unlikely(p->offset + p->len != i->iov_offset)) 361 goto Bad; // ... at the end of segment 362 } else { 363 if (i_head != p_head) 364 goto Bad; // must be right after the last buffer 365 } 366 return true; 367 Bad: 368 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset); 369 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", 370 p_head, p_tail, pipe->ring_size); 371 for (idx = 0; idx < pipe->ring_size; idx++) 372 printk(KERN_ERR "[%p %p %d %d]\n", 373 pipe->bufs[idx].ops, 374 pipe->bufs[idx].page, 375 pipe->bufs[idx].offset, 376 pipe->bufs[idx].len); 377 WARN_ON(1); 378 return false; 379 } 380 #else 381 #define sanity(i) true 382 #endif 383 384 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, 385 struct iov_iter *i) 386 { 387 struct pipe_inode_info *pipe = i->pipe; 388 struct pipe_buffer *buf; 389 unsigned int p_tail = pipe->tail; 390 unsigned int p_mask = pipe->ring_size - 1; 391 unsigned int i_head = i->head; 392 size_t off; 393 394 if (unlikely(bytes > i->count)) 395 bytes = i->count; 396 397 if (unlikely(!bytes)) 398 return 0; 399 400 if (!sanity(i)) 401 return 0; 402 403 off = i->iov_offset; 404 buf = &pipe->bufs[i_head & p_mask]; 405 if (off) { 406 if (offset == off && buf->page == page) { 407 /* merge with the last one */ 408 buf->len += bytes; 409 i->iov_offset += bytes; 410 goto out; 411 } 412 i_head++; 413 buf = &pipe->bufs[i_head & p_mask]; 414 } 415 if (pipe_full(i_head, p_tail, pipe->max_usage)) 416 return 0; 417 418 buf->ops = &page_cache_pipe_buf_ops; 419 buf->flags = 0; 420 get_page(page); 421 buf->page = page; 422 buf->offset = offset; 423 buf->len = bytes; 424 425 pipe->head = i_head + 1; 426 i->iov_offset = offset + bytes; 427 i->head = i_head; 428 out: 429 i->count -= bytes; 430 return bytes; 431 } 432 433 /* 434 * fault_in_iov_iter_readable - fault in iov iterator for reading 435 * @i: iterator 436 * @size: maximum length 437 * 438 * Fault in one or more iovecs of the given iov_iter, to a maximum length of 439 * @size. For each iovec, fault in each page that constitutes the iovec. 440 * 441 * Returns the number of bytes not faulted in (like copy_to_user() and 442 * copy_from_user()). 443 * 444 * Always returns 0 for non-userspace iterators. 445 */ 446 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) 447 { 448 if (iter_is_iovec(i)) { 449 size_t count = min(size, iov_iter_count(i)); 450 const struct iovec *p; 451 size_t skip; 452 453 size -= count; 454 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { 455 size_t len = min(count, p->iov_len - skip); 456 size_t ret; 457 458 if (unlikely(!len)) 459 continue; 460 ret = fault_in_readable(p->iov_base + skip, len); 461 count -= len - ret; 462 if (ret) 463 break; 464 } 465 return count + size; 466 } 467 return 0; 468 } 469 EXPORT_SYMBOL(fault_in_iov_iter_readable); 470 471 /* 472 * fault_in_iov_iter_writeable - fault in iov iterator for writing 473 * @i: iterator 474 * @size: maximum length 475 * 476 * Faults in the iterator using get_user_pages(), i.e., without triggering 477 * hardware page faults. This is primarily useful when we already know that 478 * some or all of the pages in @i aren't in memory. 479 * 480 * Returns the number of bytes not faulted in, like copy_to_user() and 481 * copy_from_user(). 482 * 483 * Always returns 0 for non-user-space iterators. 484 */ 485 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) 486 { 487 if (iter_is_iovec(i)) { 488 size_t count = min(size, iov_iter_count(i)); 489 const struct iovec *p; 490 size_t skip; 491 492 size -= count; 493 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { 494 size_t len = min(count, p->iov_len - skip); 495 size_t ret; 496 497 if (unlikely(!len)) 498 continue; 499 ret = fault_in_safe_writeable(p->iov_base + skip, len); 500 count -= len - ret; 501 if (ret) 502 break; 503 } 504 return count + size; 505 } 506 return 0; 507 } 508 EXPORT_SYMBOL(fault_in_iov_iter_writeable); 509 510 void iov_iter_init(struct iov_iter *i, unsigned int direction, 511 const struct iovec *iov, unsigned long nr_segs, 512 size_t count) 513 { 514 WARN_ON(direction & ~(READ | WRITE)); 515 *i = (struct iov_iter) { 516 .iter_type = ITER_IOVEC, 517 .nofault = false, 518 .data_source = direction, 519 .iov = iov, 520 .nr_segs = nr_segs, 521 .iov_offset = 0, 522 .count = count 523 }; 524 } 525 EXPORT_SYMBOL(iov_iter_init); 526 527 static inline bool allocated(struct pipe_buffer *buf) 528 { 529 return buf->ops == &default_pipe_buf_ops; 530 } 531 532 static inline void data_start(const struct iov_iter *i, 533 unsigned int *iter_headp, size_t *offp) 534 { 535 unsigned int p_mask = i->pipe->ring_size - 1; 536 unsigned int iter_head = i->head; 537 size_t off = i->iov_offset; 538 539 if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) || 540 off == PAGE_SIZE)) { 541 iter_head++; 542 off = 0; 543 } 544 *iter_headp = iter_head; 545 *offp = off; 546 } 547 548 static size_t push_pipe(struct iov_iter *i, size_t size, 549 int *iter_headp, size_t *offp) 550 { 551 struct pipe_inode_info *pipe = i->pipe; 552 unsigned int p_tail = pipe->tail; 553 unsigned int p_mask = pipe->ring_size - 1; 554 unsigned int iter_head; 555 size_t off; 556 ssize_t left; 557 558 if (unlikely(size > i->count)) 559 size = i->count; 560 if (unlikely(!size)) 561 return 0; 562 563 left = size; 564 data_start(i, &iter_head, &off); 565 *iter_headp = iter_head; 566 *offp = off; 567 if (off) { 568 left -= PAGE_SIZE - off; 569 if (left <= 0) { 570 pipe->bufs[iter_head & p_mask].len += size; 571 return size; 572 } 573 pipe->bufs[iter_head & p_mask].len = PAGE_SIZE; 574 iter_head++; 575 } 576 while (!pipe_full(iter_head, p_tail, pipe->max_usage)) { 577 struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask]; 578 struct page *page = alloc_page(GFP_USER); 579 if (!page) 580 break; 581 582 buf->ops = &default_pipe_buf_ops; 583 buf->flags = 0; 584 buf->page = page; 585 buf->offset = 0; 586 buf->len = min_t(ssize_t, left, PAGE_SIZE); 587 left -= buf->len; 588 iter_head++; 589 pipe->head = iter_head; 590 591 if (left == 0) 592 return size; 593 } 594 return size - left; 595 } 596 597 static size_t copy_pipe_to_iter(const void *addr, size_t bytes, 598 struct iov_iter *i) 599 { 600 struct pipe_inode_info *pipe = i->pipe; 601 unsigned int p_mask = pipe->ring_size - 1; 602 unsigned int i_head; 603 size_t n, off; 604 605 if (!sanity(i)) 606 return 0; 607 608 bytes = n = push_pipe(i, bytes, &i_head, &off); 609 if (unlikely(!n)) 610 return 0; 611 do { 612 size_t chunk = min_t(size_t, n, PAGE_SIZE - off); 613 memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk); 614 i->head = i_head; 615 i->iov_offset = off + chunk; 616 n -= chunk; 617 addr += chunk; 618 off = 0; 619 i_head++; 620 } while (n); 621 i->count -= bytes; 622 return bytes; 623 } 624 625 static __wsum csum_and_memcpy(void *to, const void *from, size_t len, 626 __wsum sum, size_t off) 627 { 628 __wsum next = csum_partial_copy_nocheck(from, to, len); 629 return csum_block_add(sum, next, off); 630 } 631 632 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, 633 struct iov_iter *i, __wsum *sump) 634 { 635 struct pipe_inode_info *pipe = i->pipe; 636 unsigned int p_mask = pipe->ring_size - 1; 637 __wsum sum = *sump; 638 size_t off = 0; 639 unsigned int i_head; 640 size_t r; 641 642 if (!sanity(i)) 643 return 0; 644 645 bytes = push_pipe(i, bytes, &i_head, &r); 646 while (bytes) { 647 size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r); 648 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); 649 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); 650 kunmap_local(p); 651 i->head = i_head; 652 i->iov_offset = r + chunk; 653 bytes -= chunk; 654 off += chunk; 655 r = 0; 656 i_head++; 657 } 658 *sump = sum; 659 i->count -= off; 660 return off; 661 } 662 663 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 664 { 665 if (unlikely(iov_iter_is_pipe(i))) 666 return copy_pipe_to_iter(addr, bytes, i); 667 if (iter_is_iovec(i)) 668 might_fault(); 669 iterate_and_advance(i, bytes, base, len, off, 670 copyout(base, addr + off, len), 671 memcpy(base, addr + off, len) 672 ) 673 674 return bytes; 675 } 676 EXPORT_SYMBOL(_copy_to_iter); 677 678 #ifdef CONFIG_ARCH_HAS_COPY_MC 679 static int copyout_mc(void __user *to, const void *from, size_t n) 680 { 681 if (access_ok(to, n)) { 682 instrument_copy_to_user(to, from, n); 683 n = copy_mc_to_user((__force void *) to, from, n); 684 } 685 return n; 686 } 687 688 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, 689 struct iov_iter *i) 690 { 691 struct pipe_inode_info *pipe = i->pipe; 692 unsigned int p_mask = pipe->ring_size - 1; 693 unsigned int i_head; 694 unsigned int valid = pipe->head; 695 size_t n, off, xfer = 0; 696 697 if (!sanity(i)) 698 return 0; 699 700 n = push_pipe(i, bytes, &i_head, &off); 701 while (n) { 702 size_t chunk = min_t(size_t, n, PAGE_SIZE - off); 703 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); 704 unsigned long rem; 705 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); 706 chunk -= rem; 707 kunmap_local(p); 708 if (chunk) { 709 i->head = i_head; 710 i->iov_offset = off + chunk; 711 xfer += chunk; 712 valid = i_head + 1; 713 } 714 if (rem) { 715 pipe->bufs[i_head & p_mask].len -= rem; 716 pipe_discard_from(pipe, valid); 717 break; 718 } 719 n -= chunk; 720 off = 0; 721 i_head++; 722 } 723 i->count -= xfer; 724 return xfer; 725 } 726 727 /** 728 * _copy_mc_to_iter - copy to iter with source memory error exception handling 729 * @addr: source kernel address 730 * @bytes: total transfer length 731 * @i: destination iterator 732 * 733 * The pmem driver deploys this for the dax operation 734 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the 735 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes 736 * successfully copied. 737 * 738 * The main differences between this and typical _copy_to_iter(). 739 * 740 * * Typical tail/residue handling after a fault retries the copy 741 * byte-by-byte until the fault happens again. Re-triggering machine 742 * checks is potentially fatal so the implementation uses source 743 * alignment and poison alignment assumptions to avoid re-triggering 744 * hardware exceptions. 745 * 746 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. 747 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return 748 * a short copy. 749 * 750 * Return: number of bytes copied (may be %0) 751 */ 752 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 753 { 754 if (unlikely(iov_iter_is_pipe(i))) 755 return copy_mc_pipe_to_iter(addr, bytes, i); 756 if (iter_is_iovec(i)) 757 might_fault(); 758 __iterate_and_advance(i, bytes, base, len, off, 759 copyout_mc(base, addr + off, len), 760 copy_mc_to_kernel(base, addr + off, len) 761 ) 762 763 return bytes; 764 } 765 EXPORT_SYMBOL_GPL(_copy_mc_to_iter); 766 #endif /* CONFIG_ARCH_HAS_COPY_MC */ 767 768 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 769 { 770 if (unlikely(iov_iter_is_pipe(i))) { 771 WARN_ON(1); 772 return 0; 773 } 774 if (iter_is_iovec(i)) 775 might_fault(); 776 iterate_and_advance(i, bytes, base, len, off, 777 copyin(addr + off, base, len), 778 memcpy(addr + off, base, len) 779 ) 780 781 return bytes; 782 } 783 EXPORT_SYMBOL(_copy_from_iter); 784 785 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) 786 { 787 if (unlikely(iov_iter_is_pipe(i))) { 788 WARN_ON(1); 789 return 0; 790 } 791 iterate_and_advance(i, bytes, base, len, off, 792 __copy_from_user_inatomic_nocache(addr + off, base, len), 793 memcpy(addr + off, base, len) 794 ) 795 796 return bytes; 797 } 798 EXPORT_SYMBOL(_copy_from_iter_nocache); 799 800 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 801 /** 802 * _copy_from_iter_flushcache - write destination through cpu cache 803 * @addr: destination kernel address 804 * @bytes: total transfer length 805 * @i: source iterator 806 * 807 * The pmem driver arranges for filesystem-dax to use this facility via 808 * dax_copy_from_iter() for ensuring that writes to persistent memory 809 * are flushed through the CPU cache. It is differentiated from 810 * _copy_from_iter_nocache() in that guarantees all data is flushed for 811 * all iterator types. The _copy_from_iter_nocache() only attempts to 812 * bypass the cache for the ITER_IOVEC case, and on some archs may use 813 * instructions that strand dirty-data in the cache. 814 * 815 * Return: number of bytes copied (may be %0) 816 */ 817 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 818 { 819 if (unlikely(iov_iter_is_pipe(i))) { 820 WARN_ON(1); 821 return 0; 822 } 823 iterate_and_advance(i, bytes, base, len, off, 824 __copy_from_user_flushcache(addr + off, base, len), 825 memcpy_flushcache(addr + off, base, len) 826 ) 827 828 return bytes; 829 } 830 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); 831 #endif 832 833 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) 834 { 835 struct page *head; 836 size_t v = n + offset; 837 838 /* 839 * The general case needs to access the page order in order 840 * to compute the page size. 841 * However, we mostly deal with order-0 pages and thus can 842 * avoid a possible cache line miss for requests that fit all 843 * page orders. 844 */ 845 if (n <= v && v <= PAGE_SIZE) 846 return true; 847 848 head = compound_head(page); 849 v += (page - head) << PAGE_SHIFT; 850 851 if (likely(n <= v && v <= (page_size(head)))) 852 return true; 853 WARN_ON(1); 854 return false; 855 } 856 857 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 858 struct iov_iter *i) 859 { 860 if (likely(iter_is_iovec(i))) 861 return copy_page_to_iter_iovec(page, offset, bytes, i); 862 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { 863 void *kaddr = kmap_local_page(page); 864 size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); 865 kunmap_local(kaddr); 866 return wanted; 867 } 868 if (iov_iter_is_pipe(i)) 869 return copy_page_to_iter_pipe(page, offset, bytes, i); 870 if (unlikely(iov_iter_is_discard(i))) { 871 if (unlikely(i->count < bytes)) 872 bytes = i->count; 873 i->count -= bytes; 874 return bytes; 875 } 876 WARN_ON(1); 877 return 0; 878 } 879 880 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 881 struct iov_iter *i) 882 { 883 size_t res = 0; 884 if (unlikely(!page_copy_sane(page, offset, bytes))) 885 return 0; 886 page += offset / PAGE_SIZE; // first subpage 887 offset %= PAGE_SIZE; 888 while (1) { 889 size_t n = __copy_page_to_iter(page, offset, 890 min(bytes, (size_t)PAGE_SIZE - offset), i); 891 res += n; 892 bytes -= n; 893 if (!bytes || !n) 894 break; 895 offset += n; 896 if (offset == PAGE_SIZE) { 897 page++; 898 offset = 0; 899 } 900 } 901 return res; 902 } 903 EXPORT_SYMBOL(copy_page_to_iter); 904 905 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, 906 struct iov_iter *i) 907 { 908 if (unlikely(!page_copy_sane(page, offset, bytes))) 909 return 0; 910 if (likely(iter_is_iovec(i))) 911 return copy_page_from_iter_iovec(page, offset, bytes, i); 912 if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) { 913 void *kaddr = kmap_local_page(page); 914 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); 915 kunmap_local(kaddr); 916 return wanted; 917 } 918 WARN_ON(1); 919 return 0; 920 } 921 EXPORT_SYMBOL(copy_page_from_iter); 922 923 static size_t pipe_zero(size_t bytes, struct iov_iter *i) 924 { 925 struct pipe_inode_info *pipe = i->pipe; 926 unsigned int p_mask = pipe->ring_size - 1; 927 unsigned int i_head; 928 size_t n, off; 929 930 if (!sanity(i)) 931 return 0; 932 933 bytes = n = push_pipe(i, bytes, &i_head, &off); 934 if (unlikely(!n)) 935 return 0; 936 937 do { 938 size_t chunk = min_t(size_t, n, PAGE_SIZE - off); 939 char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page); 940 memset(p + off, 0, chunk); 941 kunmap_local(p); 942 i->head = i_head; 943 i->iov_offset = off + chunk; 944 n -= chunk; 945 off = 0; 946 i_head++; 947 } while (n); 948 i->count -= bytes; 949 return bytes; 950 } 951 952 size_t iov_iter_zero(size_t bytes, struct iov_iter *i) 953 { 954 if (unlikely(iov_iter_is_pipe(i))) 955 return pipe_zero(bytes, i); 956 iterate_and_advance(i, bytes, base, len, count, 957 clear_user(base, len), 958 memset(base, 0, len) 959 ) 960 961 return bytes; 962 } 963 EXPORT_SYMBOL(iov_iter_zero); 964 965 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, 966 struct iov_iter *i) 967 { 968 char *kaddr = kmap_atomic(page), *p = kaddr + offset; 969 if (unlikely(!page_copy_sane(page, offset, bytes))) { 970 kunmap_atomic(kaddr); 971 return 0; 972 } 973 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 974 kunmap_atomic(kaddr); 975 WARN_ON(1); 976 return 0; 977 } 978 iterate_and_advance(i, bytes, base, len, off, 979 copyin(p + off, base, len), 980 memcpy(p + off, base, len) 981 ) 982 kunmap_atomic(kaddr); 983 return bytes; 984 } 985 EXPORT_SYMBOL(copy_page_from_iter_atomic); 986 987 static inline void pipe_truncate(struct iov_iter *i) 988 { 989 struct pipe_inode_info *pipe = i->pipe; 990 unsigned int p_tail = pipe->tail; 991 unsigned int p_head = pipe->head; 992 unsigned int p_mask = pipe->ring_size - 1; 993 994 if (!pipe_empty(p_head, p_tail)) { 995 struct pipe_buffer *buf; 996 unsigned int i_head = i->head; 997 size_t off = i->iov_offset; 998 999 if (off) { 1000 buf = &pipe->bufs[i_head & p_mask]; 1001 buf->len = off - buf->offset; 1002 i_head++; 1003 } 1004 while (p_head != i_head) { 1005 p_head--; 1006 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]); 1007 } 1008 1009 pipe->head = p_head; 1010 } 1011 } 1012 1013 static void pipe_advance(struct iov_iter *i, size_t size) 1014 { 1015 struct pipe_inode_info *pipe = i->pipe; 1016 if (size) { 1017 struct pipe_buffer *buf; 1018 unsigned int p_mask = pipe->ring_size - 1; 1019 unsigned int i_head = i->head; 1020 size_t off = i->iov_offset, left = size; 1021 1022 if (off) /* make it relative to the beginning of buffer */ 1023 left += off - pipe->bufs[i_head & p_mask].offset; 1024 while (1) { 1025 buf = &pipe->bufs[i_head & p_mask]; 1026 if (left <= buf->len) 1027 break; 1028 left -= buf->len; 1029 i_head++; 1030 } 1031 i->head = i_head; 1032 i->iov_offset = buf->offset + left; 1033 } 1034 i->count -= size; 1035 /* ... and discard everything past that point */ 1036 pipe_truncate(i); 1037 } 1038 1039 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) 1040 { 1041 struct bvec_iter bi; 1042 1043 bi.bi_size = i->count; 1044 bi.bi_bvec_done = i->iov_offset; 1045 bi.bi_idx = 0; 1046 bvec_iter_advance(i->bvec, &bi, size); 1047 1048 i->bvec += bi.bi_idx; 1049 i->nr_segs -= bi.bi_idx; 1050 i->count = bi.bi_size; 1051 i->iov_offset = bi.bi_bvec_done; 1052 } 1053 1054 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) 1055 { 1056 const struct iovec *iov, *end; 1057 1058 if (!i->count) 1059 return; 1060 i->count -= size; 1061 1062 size += i->iov_offset; // from beginning of current segment 1063 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { 1064 if (likely(size < iov->iov_len)) 1065 break; 1066 size -= iov->iov_len; 1067 } 1068 i->iov_offset = size; 1069 i->nr_segs -= iov - i->iov; 1070 i->iov = iov; 1071 } 1072 1073 void iov_iter_advance(struct iov_iter *i, size_t size) 1074 { 1075 if (unlikely(i->count < size)) 1076 size = i->count; 1077 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { 1078 /* iovec and kvec have identical layouts */ 1079 iov_iter_iovec_advance(i, size); 1080 } else if (iov_iter_is_bvec(i)) { 1081 iov_iter_bvec_advance(i, size); 1082 } else if (iov_iter_is_pipe(i)) { 1083 pipe_advance(i, size); 1084 } else if (unlikely(iov_iter_is_xarray(i))) { 1085 i->iov_offset += size; 1086 i->count -= size; 1087 } else if (iov_iter_is_discard(i)) { 1088 i->count -= size; 1089 } 1090 } 1091 EXPORT_SYMBOL(iov_iter_advance); 1092 1093 void iov_iter_revert(struct iov_iter *i, size_t unroll) 1094 { 1095 if (!unroll) 1096 return; 1097 if (WARN_ON(unroll > MAX_RW_COUNT)) 1098 return; 1099 i->count += unroll; 1100 if (unlikely(iov_iter_is_pipe(i))) { 1101 struct pipe_inode_info *pipe = i->pipe; 1102 unsigned int p_mask = pipe->ring_size - 1; 1103 unsigned int i_head = i->head; 1104 size_t off = i->iov_offset; 1105 while (1) { 1106 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask]; 1107 size_t n = off - b->offset; 1108 if (unroll < n) { 1109 off -= unroll; 1110 break; 1111 } 1112 unroll -= n; 1113 if (!unroll && i_head == i->start_head) { 1114 off = 0; 1115 break; 1116 } 1117 i_head--; 1118 b = &pipe->bufs[i_head & p_mask]; 1119 off = b->offset + b->len; 1120 } 1121 i->iov_offset = off; 1122 i->head = i_head; 1123 pipe_truncate(i); 1124 return; 1125 } 1126 if (unlikely(iov_iter_is_discard(i))) 1127 return; 1128 if (unroll <= i->iov_offset) { 1129 i->iov_offset -= unroll; 1130 return; 1131 } 1132 unroll -= i->iov_offset; 1133 if (iov_iter_is_xarray(i)) { 1134 BUG(); /* We should never go beyond the start of the specified 1135 * range since we might then be straying into pages that 1136 * aren't pinned. 1137 */ 1138 } else if (iov_iter_is_bvec(i)) { 1139 const struct bio_vec *bvec = i->bvec; 1140 while (1) { 1141 size_t n = (--bvec)->bv_len; 1142 i->nr_segs++; 1143 if (unroll <= n) { 1144 i->bvec = bvec; 1145 i->iov_offset = n - unroll; 1146 return; 1147 } 1148 unroll -= n; 1149 } 1150 } else { /* same logics for iovec and kvec */ 1151 const struct iovec *iov = i->iov; 1152 while (1) { 1153 size_t n = (--iov)->iov_len; 1154 i->nr_segs++; 1155 if (unroll <= n) { 1156 i->iov = iov; 1157 i->iov_offset = n - unroll; 1158 return; 1159 } 1160 unroll -= n; 1161 } 1162 } 1163 } 1164 EXPORT_SYMBOL(iov_iter_revert); 1165 1166 /* 1167 * Return the count of just the current iov_iter segment. 1168 */ 1169 size_t iov_iter_single_seg_count(const struct iov_iter *i) 1170 { 1171 if (i->nr_segs > 1) { 1172 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1173 return min(i->count, i->iov->iov_len - i->iov_offset); 1174 if (iov_iter_is_bvec(i)) 1175 return min(i->count, i->bvec->bv_len - i->iov_offset); 1176 } 1177 return i->count; 1178 } 1179 EXPORT_SYMBOL(iov_iter_single_seg_count); 1180 1181 void iov_iter_kvec(struct iov_iter *i, unsigned int direction, 1182 const struct kvec *kvec, unsigned long nr_segs, 1183 size_t count) 1184 { 1185 WARN_ON(direction & ~(READ | WRITE)); 1186 *i = (struct iov_iter){ 1187 .iter_type = ITER_KVEC, 1188 .data_source = direction, 1189 .kvec = kvec, 1190 .nr_segs = nr_segs, 1191 .iov_offset = 0, 1192 .count = count 1193 }; 1194 } 1195 EXPORT_SYMBOL(iov_iter_kvec); 1196 1197 void iov_iter_bvec(struct iov_iter *i, unsigned int direction, 1198 const struct bio_vec *bvec, unsigned long nr_segs, 1199 size_t count) 1200 { 1201 WARN_ON(direction & ~(READ | WRITE)); 1202 *i = (struct iov_iter){ 1203 .iter_type = ITER_BVEC, 1204 .data_source = direction, 1205 .bvec = bvec, 1206 .nr_segs = nr_segs, 1207 .iov_offset = 0, 1208 .count = count 1209 }; 1210 } 1211 EXPORT_SYMBOL(iov_iter_bvec); 1212 1213 void iov_iter_pipe(struct iov_iter *i, unsigned int direction, 1214 struct pipe_inode_info *pipe, 1215 size_t count) 1216 { 1217 BUG_ON(direction != READ); 1218 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); 1219 *i = (struct iov_iter){ 1220 .iter_type = ITER_PIPE, 1221 .data_source = false, 1222 .pipe = pipe, 1223 .head = pipe->head, 1224 .start_head = pipe->head, 1225 .iov_offset = 0, 1226 .count = count 1227 }; 1228 } 1229 EXPORT_SYMBOL(iov_iter_pipe); 1230 1231 /** 1232 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray 1233 * @i: The iterator to initialise. 1234 * @direction: The direction of the transfer. 1235 * @xarray: The xarray to access. 1236 * @start: The start file position. 1237 * @count: The size of the I/O buffer in bytes. 1238 * 1239 * Set up an I/O iterator to either draw data out of the pages attached to an 1240 * inode or to inject data into those pages. The pages *must* be prevented 1241 * from evaporation, either by taking a ref on them or locking them by the 1242 * caller. 1243 */ 1244 void iov_iter_xarray(struct iov_iter *i, unsigned int direction, 1245 struct xarray *xarray, loff_t start, size_t count) 1246 { 1247 BUG_ON(direction & ~1); 1248 *i = (struct iov_iter) { 1249 .iter_type = ITER_XARRAY, 1250 .data_source = direction, 1251 .xarray = xarray, 1252 .xarray_start = start, 1253 .count = count, 1254 .iov_offset = 0 1255 }; 1256 } 1257 EXPORT_SYMBOL(iov_iter_xarray); 1258 1259 /** 1260 * iov_iter_discard - Initialise an I/O iterator that discards data 1261 * @i: The iterator to initialise. 1262 * @direction: The direction of the transfer. 1263 * @count: The size of the I/O buffer in bytes. 1264 * 1265 * Set up an I/O iterator that just discards everything that's written to it. 1266 * It's only available as a READ iterator. 1267 */ 1268 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) 1269 { 1270 BUG_ON(direction != READ); 1271 *i = (struct iov_iter){ 1272 .iter_type = ITER_DISCARD, 1273 .data_source = false, 1274 .count = count, 1275 .iov_offset = 0 1276 }; 1277 } 1278 EXPORT_SYMBOL(iov_iter_discard); 1279 1280 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) 1281 { 1282 unsigned long res = 0; 1283 size_t size = i->count; 1284 size_t skip = i->iov_offset; 1285 unsigned k; 1286 1287 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1288 size_t len = i->iov[k].iov_len - skip; 1289 if (len) { 1290 res |= (unsigned long)i->iov[k].iov_base + skip; 1291 if (len > size) 1292 len = size; 1293 res |= len; 1294 size -= len; 1295 if (!size) 1296 break; 1297 } 1298 } 1299 return res; 1300 } 1301 1302 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) 1303 { 1304 unsigned res = 0; 1305 size_t size = i->count; 1306 unsigned skip = i->iov_offset; 1307 unsigned k; 1308 1309 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1310 size_t len = i->bvec[k].bv_len - skip; 1311 res |= (unsigned long)i->bvec[k].bv_offset + skip; 1312 if (len > size) 1313 len = size; 1314 res |= len; 1315 size -= len; 1316 if (!size) 1317 break; 1318 } 1319 return res; 1320 } 1321 1322 unsigned long iov_iter_alignment(const struct iov_iter *i) 1323 { 1324 /* iovec and kvec have identical layouts */ 1325 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1326 return iov_iter_alignment_iovec(i); 1327 1328 if (iov_iter_is_bvec(i)) 1329 return iov_iter_alignment_bvec(i); 1330 1331 if (iov_iter_is_pipe(i)) { 1332 unsigned int p_mask = i->pipe->ring_size - 1; 1333 size_t size = i->count; 1334 1335 if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask])) 1336 return size | i->iov_offset; 1337 return size; 1338 } 1339 1340 if (iov_iter_is_xarray(i)) 1341 return (i->xarray_start + i->iov_offset) | i->count; 1342 1343 return 0; 1344 } 1345 EXPORT_SYMBOL(iov_iter_alignment); 1346 1347 unsigned long iov_iter_gap_alignment(const struct iov_iter *i) 1348 { 1349 unsigned long res = 0; 1350 unsigned long v = 0; 1351 size_t size = i->count; 1352 unsigned k; 1353 1354 if (WARN_ON(!iter_is_iovec(i))) 1355 return ~0U; 1356 1357 for (k = 0; k < i->nr_segs; k++) { 1358 if (i->iov[k].iov_len) { 1359 unsigned long base = (unsigned long)i->iov[k].iov_base; 1360 if (v) // if not the first one 1361 res |= base | v; // this start | previous end 1362 v = base + i->iov[k].iov_len; 1363 if (size <= i->iov[k].iov_len) 1364 break; 1365 size -= i->iov[k].iov_len; 1366 } 1367 } 1368 return res; 1369 } 1370 EXPORT_SYMBOL(iov_iter_gap_alignment); 1371 1372 static inline ssize_t __pipe_get_pages(struct iov_iter *i, 1373 size_t maxsize, 1374 struct page **pages, 1375 int iter_head, 1376 size_t *start) 1377 { 1378 struct pipe_inode_info *pipe = i->pipe; 1379 unsigned int p_mask = pipe->ring_size - 1; 1380 ssize_t n = push_pipe(i, maxsize, &iter_head, start); 1381 if (!n) 1382 return -EFAULT; 1383 1384 maxsize = n; 1385 n += *start; 1386 while (n > 0) { 1387 get_page(*pages++ = pipe->bufs[iter_head & p_mask].page); 1388 iter_head++; 1389 n -= PAGE_SIZE; 1390 } 1391 1392 return maxsize; 1393 } 1394 1395 static ssize_t pipe_get_pages(struct iov_iter *i, 1396 struct page **pages, size_t maxsize, unsigned maxpages, 1397 size_t *start) 1398 { 1399 unsigned int iter_head, npages; 1400 size_t capacity; 1401 1402 if (!sanity(i)) 1403 return -EFAULT; 1404 1405 data_start(i, &iter_head, start); 1406 /* Amount of free space: some of this one + all after this one */ 1407 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); 1408 capacity = min(npages, maxpages) * PAGE_SIZE - *start; 1409 1410 return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start); 1411 } 1412 1413 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, 1414 pgoff_t index, unsigned int nr_pages) 1415 { 1416 XA_STATE(xas, xa, index); 1417 struct page *page; 1418 unsigned int ret = 0; 1419 1420 rcu_read_lock(); 1421 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1422 if (xas_retry(&xas, page)) 1423 continue; 1424 1425 /* Has the page moved or been split? */ 1426 if (unlikely(page != xas_reload(&xas))) { 1427 xas_reset(&xas); 1428 continue; 1429 } 1430 1431 pages[ret] = find_subpage(page, xas.xa_index); 1432 get_page(pages[ret]); 1433 if (++ret == nr_pages) 1434 break; 1435 } 1436 rcu_read_unlock(); 1437 return ret; 1438 } 1439 1440 static ssize_t iter_xarray_get_pages(struct iov_iter *i, 1441 struct page **pages, size_t maxsize, 1442 unsigned maxpages, size_t *_start_offset) 1443 { 1444 unsigned nr, offset; 1445 pgoff_t index, count; 1446 size_t size = maxsize; 1447 loff_t pos; 1448 1449 if (!size || !maxpages) 1450 return 0; 1451 1452 pos = i->xarray_start + i->iov_offset; 1453 index = pos >> PAGE_SHIFT; 1454 offset = pos & ~PAGE_MASK; 1455 *_start_offset = offset; 1456 1457 count = 1; 1458 if (size > PAGE_SIZE - offset) { 1459 size -= PAGE_SIZE - offset; 1460 count += size >> PAGE_SHIFT; 1461 size &= ~PAGE_MASK; 1462 if (size) 1463 count++; 1464 } 1465 1466 if (count > maxpages) 1467 count = maxpages; 1468 1469 nr = iter_xarray_populate_pages(pages, i->xarray, index, count); 1470 if (nr == 0) 1471 return 0; 1472 1473 return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1474 } 1475 1476 /* must be done on non-empty ITER_IOVEC one */ 1477 static unsigned long first_iovec_segment(const struct iov_iter *i, 1478 size_t *size, size_t *start, 1479 size_t maxsize, unsigned maxpages) 1480 { 1481 size_t skip; 1482 long k; 1483 1484 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { 1485 unsigned long addr = (unsigned long)i->iov[k].iov_base + skip; 1486 size_t len = i->iov[k].iov_len - skip; 1487 1488 if (unlikely(!len)) 1489 continue; 1490 if (len > maxsize) 1491 len = maxsize; 1492 len += (*start = addr % PAGE_SIZE); 1493 if (len > maxpages * PAGE_SIZE) 1494 len = maxpages * PAGE_SIZE; 1495 *size = len; 1496 return addr & PAGE_MASK; 1497 } 1498 BUG(); // if it had been empty, we wouldn't get called 1499 } 1500 1501 /* must be done on non-empty ITER_BVEC one */ 1502 static struct page *first_bvec_segment(const struct iov_iter *i, 1503 size_t *size, size_t *start, 1504 size_t maxsize, unsigned maxpages) 1505 { 1506 struct page *page; 1507 size_t skip = i->iov_offset, len; 1508 1509 len = i->bvec->bv_len - skip; 1510 if (len > maxsize) 1511 len = maxsize; 1512 skip += i->bvec->bv_offset; 1513 page = i->bvec->bv_page + skip / PAGE_SIZE; 1514 len += (*start = skip % PAGE_SIZE); 1515 if (len > maxpages * PAGE_SIZE) 1516 len = maxpages * PAGE_SIZE; 1517 *size = len; 1518 return page; 1519 } 1520 1521 ssize_t iov_iter_get_pages(struct iov_iter *i, 1522 struct page **pages, size_t maxsize, unsigned maxpages, 1523 size_t *start) 1524 { 1525 size_t len; 1526 int n, res; 1527 1528 if (maxsize > i->count) 1529 maxsize = i->count; 1530 if (!maxsize) 1531 return 0; 1532 1533 if (likely(iter_is_iovec(i))) { 1534 unsigned int gup_flags = 0; 1535 unsigned long addr; 1536 1537 if (iov_iter_rw(i) != WRITE) 1538 gup_flags |= FOLL_WRITE; 1539 if (i->nofault) 1540 gup_flags |= FOLL_NOFAULT; 1541 1542 addr = first_iovec_segment(i, &len, start, maxsize, maxpages); 1543 n = DIV_ROUND_UP(len, PAGE_SIZE); 1544 res = get_user_pages_fast(addr, n, gup_flags, pages); 1545 if (unlikely(res <= 0)) 1546 return res; 1547 return (res == n ? len : res * PAGE_SIZE) - *start; 1548 } 1549 if (iov_iter_is_bvec(i)) { 1550 struct page *page; 1551 1552 page = first_bvec_segment(i, &len, start, maxsize, maxpages); 1553 n = DIV_ROUND_UP(len, PAGE_SIZE); 1554 while (n--) 1555 get_page(*pages++ = page++); 1556 return len - *start; 1557 } 1558 if (iov_iter_is_pipe(i)) 1559 return pipe_get_pages(i, pages, maxsize, maxpages, start); 1560 if (iov_iter_is_xarray(i)) 1561 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); 1562 return -EFAULT; 1563 } 1564 EXPORT_SYMBOL(iov_iter_get_pages); 1565 1566 static struct page **get_pages_array(size_t n) 1567 { 1568 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); 1569 } 1570 1571 static ssize_t pipe_get_pages_alloc(struct iov_iter *i, 1572 struct page ***pages, size_t maxsize, 1573 size_t *start) 1574 { 1575 struct page **p; 1576 unsigned int iter_head, npages; 1577 ssize_t n; 1578 1579 if (!sanity(i)) 1580 return -EFAULT; 1581 1582 data_start(i, &iter_head, start); 1583 /* Amount of free space: some of this one + all after this one */ 1584 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); 1585 n = npages * PAGE_SIZE - *start; 1586 if (maxsize > n) 1587 maxsize = n; 1588 else 1589 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); 1590 p = get_pages_array(npages); 1591 if (!p) 1592 return -ENOMEM; 1593 n = __pipe_get_pages(i, maxsize, p, iter_head, start); 1594 if (n > 0) 1595 *pages = p; 1596 else 1597 kvfree(p); 1598 return n; 1599 } 1600 1601 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, 1602 struct page ***pages, size_t maxsize, 1603 size_t *_start_offset) 1604 { 1605 struct page **p; 1606 unsigned nr, offset; 1607 pgoff_t index, count; 1608 size_t size = maxsize; 1609 loff_t pos; 1610 1611 if (!size) 1612 return 0; 1613 1614 pos = i->xarray_start + i->iov_offset; 1615 index = pos >> PAGE_SHIFT; 1616 offset = pos & ~PAGE_MASK; 1617 *_start_offset = offset; 1618 1619 count = 1; 1620 if (size > PAGE_SIZE - offset) { 1621 size -= PAGE_SIZE - offset; 1622 count += size >> PAGE_SHIFT; 1623 size &= ~PAGE_MASK; 1624 if (size) 1625 count++; 1626 } 1627 1628 p = get_pages_array(count); 1629 if (!p) 1630 return -ENOMEM; 1631 *pages = p; 1632 1633 nr = iter_xarray_populate_pages(p, i->xarray, index, count); 1634 if (nr == 0) 1635 return 0; 1636 1637 return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1638 } 1639 1640 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, 1641 struct page ***pages, size_t maxsize, 1642 size_t *start) 1643 { 1644 struct page **p; 1645 size_t len; 1646 int n, res; 1647 1648 if (maxsize > i->count) 1649 maxsize = i->count; 1650 if (!maxsize) 1651 return 0; 1652 1653 if (likely(iter_is_iovec(i))) { 1654 unsigned int gup_flags = 0; 1655 unsigned long addr; 1656 1657 if (iov_iter_rw(i) != WRITE) 1658 gup_flags |= FOLL_WRITE; 1659 if (i->nofault) 1660 gup_flags |= FOLL_NOFAULT; 1661 1662 addr = first_iovec_segment(i, &len, start, maxsize, ~0U); 1663 n = DIV_ROUND_UP(len, PAGE_SIZE); 1664 p = get_pages_array(n); 1665 if (!p) 1666 return -ENOMEM; 1667 res = get_user_pages_fast(addr, n, gup_flags, p); 1668 if (unlikely(res <= 0)) { 1669 kvfree(p); 1670 *pages = NULL; 1671 return res; 1672 } 1673 *pages = p; 1674 return (res == n ? len : res * PAGE_SIZE) - *start; 1675 } 1676 if (iov_iter_is_bvec(i)) { 1677 struct page *page; 1678 1679 page = first_bvec_segment(i, &len, start, maxsize, ~0U); 1680 n = DIV_ROUND_UP(len, PAGE_SIZE); 1681 *pages = p = get_pages_array(n); 1682 if (!p) 1683 return -ENOMEM; 1684 while (n--) 1685 get_page(*p++ = page++); 1686 return len - *start; 1687 } 1688 if (iov_iter_is_pipe(i)) 1689 return pipe_get_pages_alloc(i, pages, maxsize, start); 1690 if (iov_iter_is_xarray(i)) 1691 return iter_xarray_get_pages_alloc(i, pages, maxsize, start); 1692 return -EFAULT; 1693 } 1694 EXPORT_SYMBOL(iov_iter_get_pages_alloc); 1695 1696 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, 1697 struct iov_iter *i) 1698 { 1699 __wsum sum, next; 1700 sum = *csum; 1701 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 1702 WARN_ON(1); 1703 return 0; 1704 } 1705 iterate_and_advance(i, bytes, base, len, off, ({ 1706 next = csum_and_copy_from_user(base, addr + off, len); 1707 sum = csum_block_add(sum, next, off); 1708 next ? 0 : len; 1709 }), ({ 1710 sum = csum_and_memcpy(addr + off, base, len, sum, off); 1711 }) 1712 ) 1713 *csum = sum; 1714 return bytes; 1715 } 1716 EXPORT_SYMBOL(csum_and_copy_from_iter); 1717 1718 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, 1719 struct iov_iter *i) 1720 { 1721 struct csum_state *csstate = _csstate; 1722 __wsum sum, next; 1723 1724 if (unlikely(iov_iter_is_discard(i))) { 1725 WARN_ON(1); /* for now */ 1726 return 0; 1727 } 1728 1729 sum = csum_shift(csstate->csum, csstate->off); 1730 if (unlikely(iov_iter_is_pipe(i))) 1731 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); 1732 else iterate_and_advance(i, bytes, base, len, off, ({ 1733 next = csum_and_copy_to_user(addr + off, base, len); 1734 sum = csum_block_add(sum, next, off); 1735 next ? 0 : len; 1736 }), ({ 1737 sum = csum_and_memcpy(base, addr + off, len, sum, off); 1738 }) 1739 ) 1740 csstate->csum = csum_shift(sum, csstate->off); 1741 csstate->off += bytes; 1742 return bytes; 1743 } 1744 EXPORT_SYMBOL(csum_and_copy_to_iter); 1745 1746 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, 1747 struct iov_iter *i) 1748 { 1749 #ifdef CONFIG_CRYPTO_HASH 1750 struct ahash_request *hash = hashp; 1751 struct scatterlist sg; 1752 size_t copied; 1753 1754 copied = copy_to_iter(addr, bytes, i); 1755 sg_init_one(&sg, addr, copied); 1756 ahash_request_set_crypt(hash, &sg, NULL, copied); 1757 crypto_ahash_update(hash); 1758 return copied; 1759 #else 1760 return 0; 1761 #endif 1762 } 1763 EXPORT_SYMBOL(hash_and_copy_to_iter); 1764 1765 static int iov_npages(const struct iov_iter *i, int maxpages) 1766 { 1767 size_t skip = i->iov_offset, size = i->count; 1768 const struct iovec *p; 1769 int npages = 0; 1770 1771 for (p = i->iov; size; skip = 0, p++) { 1772 unsigned offs = offset_in_page(p->iov_base + skip); 1773 size_t len = min(p->iov_len - skip, size); 1774 1775 if (len) { 1776 size -= len; 1777 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1778 if (unlikely(npages > maxpages)) 1779 return maxpages; 1780 } 1781 } 1782 return npages; 1783 } 1784 1785 static int bvec_npages(const struct iov_iter *i, int maxpages) 1786 { 1787 size_t skip = i->iov_offset, size = i->count; 1788 const struct bio_vec *p; 1789 int npages = 0; 1790 1791 for (p = i->bvec; size; skip = 0, p++) { 1792 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; 1793 size_t len = min(p->bv_len - skip, size); 1794 1795 size -= len; 1796 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1797 if (unlikely(npages > maxpages)) 1798 return maxpages; 1799 } 1800 return npages; 1801 } 1802 1803 int iov_iter_npages(const struct iov_iter *i, int maxpages) 1804 { 1805 if (unlikely(!i->count)) 1806 return 0; 1807 /* iovec and kvec have identical layouts */ 1808 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1809 return iov_npages(i, maxpages); 1810 if (iov_iter_is_bvec(i)) 1811 return bvec_npages(i, maxpages); 1812 if (iov_iter_is_pipe(i)) { 1813 unsigned int iter_head; 1814 int npages; 1815 size_t off; 1816 1817 if (!sanity(i)) 1818 return 0; 1819 1820 data_start(i, &iter_head, &off); 1821 /* some of this one + all after this one */ 1822 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); 1823 return min(npages, maxpages); 1824 } 1825 if (iov_iter_is_xarray(i)) { 1826 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; 1827 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); 1828 return min(npages, maxpages); 1829 } 1830 return 0; 1831 } 1832 EXPORT_SYMBOL(iov_iter_npages); 1833 1834 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) 1835 { 1836 *new = *old; 1837 if (unlikely(iov_iter_is_pipe(new))) { 1838 WARN_ON(1); 1839 return NULL; 1840 } 1841 if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new))) 1842 return NULL; 1843 if (iov_iter_is_bvec(new)) 1844 return new->bvec = kmemdup(new->bvec, 1845 new->nr_segs * sizeof(struct bio_vec), 1846 flags); 1847 else 1848 /* iovec and kvec have identical layout */ 1849 return new->iov = kmemdup(new->iov, 1850 new->nr_segs * sizeof(struct iovec), 1851 flags); 1852 } 1853 EXPORT_SYMBOL(dup_iter); 1854 1855 static int copy_compat_iovec_from_user(struct iovec *iov, 1856 const struct iovec __user *uvec, unsigned long nr_segs) 1857 { 1858 const struct compat_iovec __user *uiov = 1859 (const struct compat_iovec __user *)uvec; 1860 int ret = -EFAULT, i; 1861 1862 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) 1863 return -EFAULT; 1864 1865 for (i = 0; i < nr_segs; i++) { 1866 compat_uptr_t buf; 1867 compat_ssize_t len; 1868 1869 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); 1870 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); 1871 1872 /* check for compat_size_t not fitting in compat_ssize_t .. */ 1873 if (len < 0) { 1874 ret = -EINVAL; 1875 goto uaccess_end; 1876 } 1877 iov[i].iov_base = compat_ptr(buf); 1878 iov[i].iov_len = len; 1879 } 1880 1881 ret = 0; 1882 uaccess_end: 1883 user_access_end(); 1884 return ret; 1885 } 1886 1887 static int copy_iovec_from_user(struct iovec *iov, 1888 const struct iovec __user *uvec, unsigned long nr_segs) 1889 { 1890 unsigned long seg; 1891 1892 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) 1893 return -EFAULT; 1894 for (seg = 0; seg < nr_segs; seg++) { 1895 if ((ssize_t)iov[seg].iov_len < 0) 1896 return -EINVAL; 1897 } 1898 1899 return 0; 1900 } 1901 1902 struct iovec *iovec_from_user(const struct iovec __user *uvec, 1903 unsigned long nr_segs, unsigned long fast_segs, 1904 struct iovec *fast_iov, bool compat) 1905 { 1906 struct iovec *iov = fast_iov; 1907 int ret; 1908 1909 /* 1910 * SuS says "The readv() function *may* fail if the iovcnt argument was 1911 * less than or equal to 0, or greater than {IOV_MAX}. Linux has 1912 * traditionally returned zero for zero segments, so... 1913 */ 1914 if (nr_segs == 0) 1915 return iov; 1916 if (nr_segs > UIO_MAXIOV) 1917 return ERR_PTR(-EINVAL); 1918 if (nr_segs > fast_segs) { 1919 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); 1920 if (!iov) 1921 return ERR_PTR(-ENOMEM); 1922 } 1923 1924 if (compat) 1925 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); 1926 else 1927 ret = copy_iovec_from_user(iov, uvec, nr_segs); 1928 if (ret) { 1929 if (iov != fast_iov) 1930 kfree(iov); 1931 return ERR_PTR(ret); 1932 } 1933 1934 return iov; 1935 } 1936 1937 ssize_t __import_iovec(int type, const struct iovec __user *uvec, 1938 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, 1939 struct iov_iter *i, bool compat) 1940 { 1941 ssize_t total_len = 0; 1942 unsigned long seg; 1943 struct iovec *iov; 1944 1945 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); 1946 if (IS_ERR(iov)) { 1947 *iovp = NULL; 1948 return PTR_ERR(iov); 1949 } 1950 1951 /* 1952 * According to the Single Unix Specification we should return EINVAL if 1953 * an element length is < 0 when cast to ssize_t or if the total length 1954 * would overflow the ssize_t return value of the system call. 1955 * 1956 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the 1957 * overflow case. 1958 */ 1959 for (seg = 0; seg < nr_segs; seg++) { 1960 ssize_t len = (ssize_t)iov[seg].iov_len; 1961 1962 if (!access_ok(iov[seg].iov_base, len)) { 1963 if (iov != *iovp) 1964 kfree(iov); 1965 *iovp = NULL; 1966 return -EFAULT; 1967 } 1968 1969 if (len > MAX_RW_COUNT - total_len) { 1970 len = MAX_RW_COUNT - total_len; 1971 iov[seg].iov_len = len; 1972 } 1973 total_len += len; 1974 } 1975 1976 iov_iter_init(i, type, iov, nr_segs, total_len); 1977 if (iov == *iovp) 1978 *iovp = NULL; 1979 else 1980 *iovp = iov; 1981 return total_len; 1982 } 1983 1984 /** 1985 * import_iovec() - Copy an array of &struct iovec from userspace 1986 * into the kernel, check that it is valid, and initialize a new 1987 * &struct iov_iter iterator to access it. 1988 * 1989 * @type: One of %READ or %WRITE. 1990 * @uvec: Pointer to the userspace array. 1991 * @nr_segs: Number of elements in userspace array. 1992 * @fast_segs: Number of elements in @iov. 1993 * @iovp: (input and output parameter) Pointer to pointer to (usually small 1994 * on-stack) kernel array. 1995 * @i: Pointer to iterator that will be initialized on success. 1996 * 1997 * If the array pointed to by *@iov is large enough to hold all @nr_segs, 1998 * then this function places %NULL in *@iov on return. Otherwise, a new 1999 * array will be allocated and the result placed in *@iov. This means that 2000 * the caller may call kfree() on *@iov regardless of whether the small 2001 * on-stack array was used or not (and regardless of whether this function 2002 * returns an error or not). 2003 * 2004 * Return: Negative error code on error, bytes imported on success 2005 */ 2006 ssize_t import_iovec(int type, const struct iovec __user *uvec, 2007 unsigned nr_segs, unsigned fast_segs, 2008 struct iovec **iovp, struct iov_iter *i) 2009 { 2010 return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i, 2011 in_compat_syscall()); 2012 } 2013 EXPORT_SYMBOL(import_iovec); 2014 2015 int import_single_range(int rw, void __user *buf, size_t len, 2016 struct iovec *iov, struct iov_iter *i) 2017 { 2018 if (len > MAX_RW_COUNT) 2019 len = MAX_RW_COUNT; 2020 if (unlikely(!access_ok(buf, len))) 2021 return -EFAULT; 2022 2023 iov->iov_base = buf; 2024 iov->iov_len = len; 2025 iov_iter_init(i, rw, iov, 1, len); 2026 return 0; 2027 } 2028 EXPORT_SYMBOL(import_single_range); 2029 2030 /** 2031 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when 2032 * iov_iter_save_state() was called. 2033 * 2034 * @i: &struct iov_iter to restore 2035 * @state: state to restore from 2036 * 2037 * Used after iov_iter_save_state() to bring restore @i, if operations may 2038 * have advanced it. 2039 * 2040 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC 2041 */ 2042 void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state) 2043 { 2044 if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) && 2045 !iov_iter_is_kvec(i)) 2046 return; 2047 i->iov_offset = state->iov_offset; 2048 i->count = state->count; 2049 /* 2050 * For the *vec iters, nr_segs + iov is constant - if we increment 2051 * the vec, then we also decrement the nr_segs count. Hence we don't 2052 * need to track both of these, just one is enough and we can deduct 2053 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct 2054 * size, so we can just increment the iov pointer as they are unionzed. 2055 * ITER_BVEC _may_ be the same size on some archs, but on others it is 2056 * not. Be safe and handle it separately. 2057 */ 2058 BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec)); 2059 if (iov_iter_is_bvec(i)) 2060 i->bvec -= state->nr_segs - i->nr_segs; 2061 else 2062 i->iov -= state->nr_segs - i->nr_segs; 2063 i->nr_segs = state->nr_segs; 2064 } 2065