1 // SPDX-License-Identifier: GPL-2.0-only 2 #include <crypto/hash.h> 3 #include <linux/export.h> 4 #include <linux/bvec.h> 5 #include <linux/fault-inject-usercopy.h> 6 #include <linux/uio.h> 7 #include <linux/pagemap.h> 8 #include <linux/highmem.h> 9 #include <linux/slab.h> 10 #include <linux/vmalloc.h> 11 #include <linux/splice.h> 12 #include <linux/compat.h> 13 #include <net/checksum.h> 14 #include <linux/scatterlist.h> 15 #include <linux/instrumented.h> 16 17 #define PIPE_PARANOIA /* for now */ 18 19 /* covers ubuf and kbuf alike */ 20 #define iterate_buf(i, n, base, len, off, __p, STEP) { \ 21 size_t __maybe_unused off = 0; \ 22 len = n; \ 23 base = __p + i->iov_offset; \ 24 len -= (STEP); \ 25 i->iov_offset += len; \ 26 n = len; \ 27 } 28 29 /* covers iovec and kvec alike */ 30 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ 31 size_t off = 0; \ 32 size_t skip = i->iov_offset; \ 33 do { \ 34 len = min(n, __p->iov_len - skip); \ 35 if (likely(len)) { \ 36 base = __p->iov_base + skip; \ 37 len -= (STEP); \ 38 off += len; \ 39 skip += len; \ 40 n -= len; \ 41 if (skip < __p->iov_len) \ 42 break; \ 43 } \ 44 __p++; \ 45 skip = 0; \ 46 } while (n); \ 47 i->iov_offset = skip; \ 48 n = off; \ 49 } 50 51 #define iterate_bvec(i, n, base, len, off, p, STEP) { \ 52 size_t off = 0; \ 53 unsigned skip = i->iov_offset; \ 54 while (n) { \ 55 unsigned offset = p->bv_offset + skip; \ 56 unsigned left; \ 57 void *kaddr = kmap_local_page(p->bv_page + \ 58 offset / PAGE_SIZE); \ 59 base = kaddr + offset % PAGE_SIZE; \ 60 len = min(min(n, (size_t)(p->bv_len - skip)), \ 61 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ 62 left = (STEP); \ 63 kunmap_local(kaddr); \ 64 len -= left; \ 65 off += len; \ 66 skip += len; \ 67 if (skip == p->bv_len) { \ 68 skip = 0; \ 69 p++; \ 70 } \ 71 n -= len; \ 72 if (left) \ 73 break; \ 74 } \ 75 i->iov_offset = skip; \ 76 n = off; \ 77 } 78 79 #define iterate_xarray(i, n, base, len, __off, STEP) { \ 80 __label__ __out; \ 81 size_t __off = 0; \ 82 struct folio *folio; \ 83 loff_t start = i->xarray_start + i->iov_offset; \ 84 pgoff_t index = start / PAGE_SIZE; \ 85 XA_STATE(xas, i->xarray, index); \ 86 \ 87 len = PAGE_SIZE - offset_in_page(start); \ 88 rcu_read_lock(); \ 89 xas_for_each(&xas, folio, ULONG_MAX) { \ 90 unsigned left; \ 91 size_t offset; \ 92 if (xas_retry(&xas, folio)) \ 93 continue; \ 94 if (WARN_ON(xa_is_value(folio))) \ 95 break; \ 96 if (WARN_ON(folio_test_hugetlb(folio))) \ 97 break; \ 98 offset = offset_in_folio(folio, start + __off); \ 99 while (offset < folio_size(folio)) { \ 100 base = kmap_local_folio(folio, offset); \ 101 len = min(n, len); \ 102 left = (STEP); \ 103 kunmap_local(base); \ 104 len -= left; \ 105 __off += len; \ 106 n -= len; \ 107 if (left || n == 0) \ 108 goto __out; \ 109 offset += len; \ 110 len = PAGE_SIZE; \ 111 } \ 112 } \ 113 __out: \ 114 rcu_read_unlock(); \ 115 i->iov_offset += __off; \ 116 n = __off; \ 117 } 118 119 #define __iterate_and_advance(i, n, base, len, off, I, K) { \ 120 if (unlikely(i->count < n)) \ 121 n = i->count; \ 122 if (likely(n)) { \ 123 if (likely(iter_is_ubuf(i))) { \ 124 void __user *base; \ 125 size_t len; \ 126 iterate_buf(i, n, base, len, off, \ 127 i->ubuf, (I)) \ 128 } else if (likely(iter_is_iovec(i))) { \ 129 const struct iovec *iov = i->iov; \ 130 void __user *base; \ 131 size_t len; \ 132 iterate_iovec(i, n, base, len, off, \ 133 iov, (I)) \ 134 i->nr_segs -= iov - i->iov; \ 135 i->iov = iov; \ 136 } else if (iov_iter_is_bvec(i)) { \ 137 const struct bio_vec *bvec = i->bvec; \ 138 
void *base; \ 139 size_t len; \ 140 iterate_bvec(i, n, base, len, off, \ 141 bvec, (K)) \ 142 i->nr_segs -= bvec - i->bvec; \ 143 i->bvec = bvec; \ 144 } else if (iov_iter_is_kvec(i)) { \ 145 const struct kvec *kvec = i->kvec; \ 146 void *base; \ 147 size_t len; \ 148 iterate_iovec(i, n, base, len, off, \ 149 kvec, (K)) \ 150 i->nr_segs -= kvec - i->kvec; \ 151 i->kvec = kvec; \ 152 } else if (iov_iter_is_xarray(i)) { \ 153 void *base; \ 154 size_t len; \ 155 iterate_xarray(i, n, base, len, off, \ 156 (K)) \ 157 } \ 158 i->count -= n; \ 159 } \ 160 } 161 #define iterate_and_advance(i, n, base, len, off, I, K) \ 162 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) 163 164 static int copyout(void __user *to, const void *from, size_t n) 165 { 166 if (should_fail_usercopy()) 167 return n; 168 if (access_ok(to, n)) { 169 instrument_copy_to_user(to, from, n); 170 n = raw_copy_to_user(to, from, n); 171 } 172 return n; 173 } 174 175 static int copyin(void *to, const void __user *from, size_t n) 176 { 177 if (should_fail_usercopy()) 178 return n; 179 if (access_ok(from, n)) { 180 instrument_copy_from_user(to, from, n); 181 n = raw_copy_from_user(to, from, n); 182 } 183 return n; 184 } 185 186 static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe, 187 unsigned int slot) 188 { 189 return &pipe->bufs[slot & (pipe->ring_size - 1)]; 190 } 191 192 #ifdef PIPE_PARANOIA 193 static bool sanity(const struct iov_iter *i) 194 { 195 struct pipe_inode_info *pipe = i->pipe; 196 unsigned int p_head = pipe->head; 197 unsigned int p_tail = pipe->tail; 198 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); 199 unsigned int i_head = i->head; 200 unsigned int idx; 201 202 if (i->iov_offset) { 203 struct pipe_buffer *p; 204 if (unlikely(p_occupancy == 0)) 205 goto Bad; // pipe must be non-empty 206 if (unlikely(i_head != p_head - 1)) 207 goto Bad; // must be at the last buffer... 208 209 p = pipe_buf(pipe, i_head); 210 if (unlikely(p->offset + p->len != i->iov_offset)) 211 goto Bad; // ... 
at the end of segment 212 } else { 213 if (i_head != p_head) 214 goto Bad; // must be right after the last buffer 215 } 216 return true; 217 Bad: 218 printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset); 219 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", 220 p_head, p_tail, pipe->ring_size); 221 for (idx = 0; idx < pipe->ring_size; idx++) 222 printk(KERN_ERR "[%p %p %d %d]\n", 223 pipe->bufs[idx].ops, 224 pipe->bufs[idx].page, 225 pipe->bufs[idx].offset, 226 pipe->bufs[idx].len); 227 WARN_ON(1); 228 return false; 229 } 230 #else 231 #define sanity(i) true 232 #endif 233 234 static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size) 235 { 236 struct page *page = alloc_page(GFP_USER); 237 if (page) { 238 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); 239 *buf = (struct pipe_buffer) { 240 .ops = &default_pipe_buf_ops, 241 .page = page, 242 .offset = 0, 243 .len = size 244 }; 245 } 246 return page; 247 } 248 249 static void push_page(struct pipe_inode_info *pipe, struct page *page, 250 unsigned int offset, unsigned int size) 251 { 252 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); 253 *buf = (struct pipe_buffer) { 254 .ops = &page_cache_pipe_buf_ops, 255 .page = page, 256 .offset = offset, 257 .len = size 258 }; 259 get_page(page); 260 } 261 262 static inline bool allocated(struct pipe_buffer *buf) 263 { 264 return buf->ops == &default_pipe_buf_ops; 265 } 266 267 static struct page *append_pipe(struct iov_iter *i, size_t size, 268 unsigned int *off) 269 { 270 struct pipe_inode_info *pipe = i->pipe; 271 size_t offset = i->iov_offset; 272 struct pipe_buffer *buf; 273 struct page *page; 274 275 if (offset && offset < PAGE_SIZE) { 276 // some space in the last buffer; can we add to it? 277 buf = pipe_buf(pipe, pipe->head - 1); 278 if (allocated(buf)) { 279 size = min_t(size_t, size, PAGE_SIZE - offset); 280 buf->len += size; 281 i->iov_offset += size; 282 i->count -= size; 283 *off = offset; 284 return buf->page; 285 } 286 } 287 // OK, we need a new buffer 288 *off = 0; 289 size = min_t(size_t, size, PAGE_SIZE); 290 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 291 return NULL; 292 page = push_anon(pipe, size); 293 if (!page) 294 return NULL; 295 i->head = pipe->head - 1; 296 i->iov_offset = size; 297 i->count -= size; 298 return page; 299 } 300 301 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, 302 struct iov_iter *i) 303 { 304 struct pipe_inode_info *pipe = i->pipe; 305 unsigned int head = pipe->head; 306 307 if (unlikely(bytes > i->count)) 308 bytes = i->count; 309 310 if (unlikely(!bytes)) 311 return 0; 312 313 if (!sanity(i)) 314 return 0; 315 316 if (offset && i->iov_offset == offset) { // could we merge it? 317 struct pipe_buffer *buf = pipe_buf(pipe, head - 1); 318 if (buf->page == page) { 319 buf->len += bytes; 320 i->iov_offset += bytes; 321 i->count -= bytes; 322 return bytes; 323 } 324 } 325 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 326 return 0; 327 328 push_page(pipe, page, offset, bytes); 329 i->iov_offset = offset + bytes; 330 i->head = head; 331 i->count -= bytes; 332 return bytes; 333 } 334 335 /* 336 * fault_in_iov_iter_readable - fault in iov iterator for reading 337 * @i: iterator 338 * @size: maximum length 339 * 340 * Fault in one or more iovecs of the given iov_iter, to a maximum length of 341 * @size. For each iovec, fault in each page that constitutes the iovec. 
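 *
 * The usual caller pattern looks roughly like this (a sketch only; the
 * buffer, length and return-value names are illustrative, and real callers
 * typically retry in a loop):
 *
 *	if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *		return -EFAULT;		// nothing could be faulted in
 *	copied = copy_from_iter(buf, bytes, i);	// may still come up short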
342 * 343 * Returns the number of bytes not faulted in (like copy_to_user() and 344 * copy_from_user()). 345 * 346 * Always returns 0 for non-userspace iterators. 347 */ 348 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) 349 { 350 if (iter_is_ubuf(i)) { 351 size_t n = min(size, iov_iter_count(i)); 352 n -= fault_in_readable(i->ubuf + i->iov_offset, n); 353 return size - n; 354 } else if (iter_is_iovec(i)) { 355 size_t count = min(size, iov_iter_count(i)); 356 const struct iovec *p; 357 size_t skip; 358 359 size -= count; 360 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { 361 size_t len = min(count, p->iov_len - skip); 362 size_t ret; 363 364 if (unlikely(!len)) 365 continue; 366 ret = fault_in_readable(p->iov_base + skip, len); 367 count -= len - ret; 368 if (ret) 369 break; 370 } 371 return count + size; 372 } 373 return 0; 374 } 375 EXPORT_SYMBOL(fault_in_iov_iter_readable); 376 377 /* 378 * fault_in_iov_iter_writeable - fault in iov iterator for writing 379 * @i: iterator 380 * @size: maximum length 381 * 382 * Faults in the iterator using get_user_pages(), i.e., without triggering 383 * hardware page faults. This is primarily useful when we already know that 384 * some or all of the pages in @i aren't in memory. 385 * 386 * Returns the number of bytes not faulted in, like copy_to_user() and 387 * copy_from_user(). 388 * 389 * Always returns 0 for non-user-space iterators. 390 */ 391 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) 392 { 393 if (iter_is_ubuf(i)) { 394 size_t n = min(size, iov_iter_count(i)); 395 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); 396 return size - n; 397 } else if (iter_is_iovec(i)) { 398 size_t count = min(size, iov_iter_count(i)); 399 const struct iovec *p; 400 size_t skip; 401 402 size -= count; 403 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { 404 size_t len = min(count, p->iov_len - skip); 405 size_t ret; 406 407 if (unlikely(!len)) 408 continue; 409 ret = fault_in_safe_writeable(p->iov_base + skip, len); 410 count -= len - ret; 411 if (ret) 412 break; 413 } 414 return count + size; 415 } 416 return 0; 417 } 418 EXPORT_SYMBOL(fault_in_iov_iter_writeable); 419 420 void iov_iter_init(struct iov_iter *i, unsigned int direction, 421 const struct iovec *iov, unsigned long nr_segs, 422 size_t count) 423 { 424 WARN_ON(direction & ~(READ | WRITE)); 425 *i = (struct iov_iter) { 426 .iter_type = ITER_IOVEC, 427 .nofault = false, 428 .user_backed = true, 429 .data_source = direction, 430 .iov = iov, 431 .nr_segs = nr_segs, 432 .iov_offset = 0, 433 .count = count 434 }; 435 } 436 EXPORT_SYMBOL(iov_iter_init); 437 438 static inline void data_start(const struct iov_iter *i, 439 unsigned int *iter_headp, size_t *offp) 440 { 441 unsigned int iter_head = i->head; 442 size_t off = i->iov_offset; 443 444 if (off && (!allocated(pipe_buf(i->pipe, iter_head)) || 445 off == PAGE_SIZE)) { 446 iter_head++; 447 off = 0; 448 } 449 *iter_headp = iter_head; 450 *offp = off; 451 } 452 453 static size_t copy_pipe_to_iter(const void *addr, size_t bytes, 454 struct iov_iter *i) 455 { 456 unsigned int off, chunk; 457 458 if (unlikely(bytes > i->count)) 459 bytes = i->count; 460 if (unlikely(!bytes)) 461 return 0; 462 463 if (!sanity(i)) 464 return 0; 465 466 for (size_t n = bytes; n; n -= chunk) { 467 struct page *page = append_pipe(i, n, &off); 468 chunk = min_t(size_t, n, PAGE_SIZE - off); 469 if (!page) 470 return bytes - n; 471 memcpy_to_page(page, off, addr, chunk); 472 addr += chunk; 473 
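		/*
		 * Note: append_pipe() has already capped the request at the
		 * page boundary and advanced i->iov_offset and i->count by
		 * exactly @chunk bytes, so only the source pointer needs to
		 * move before the next iteration.
		 */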
} 474 return bytes; 475 } 476 477 static __wsum csum_and_memcpy(void *to, const void *from, size_t len, 478 __wsum sum, size_t off) 479 { 480 __wsum next = csum_partial_copy_nocheck(from, to, len); 481 return csum_block_add(sum, next, off); 482 } 483 484 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, 485 struct iov_iter *i, __wsum *sump) 486 { 487 __wsum sum = *sump; 488 size_t off = 0; 489 unsigned int chunk, r; 490 491 if (unlikely(bytes > i->count)) 492 bytes = i->count; 493 if (unlikely(!bytes)) 494 return 0; 495 496 if (!sanity(i)) 497 return 0; 498 499 while (bytes) { 500 struct page *page = append_pipe(i, bytes, &r); 501 char *p; 502 503 if (!page) 504 break; 505 chunk = min_t(size_t, bytes, PAGE_SIZE - r); 506 p = kmap_local_page(page); 507 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); 508 kunmap_local(p); 509 off += chunk; 510 bytes -= chunk; 511 } 512 *sump = sum; 513 return off; 514 } 515 516 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 517 { 518 if (unlikely(iov_iter_is_pipe(i))) 519 return copy_pipe_to_iter(addr, bytes, i); 520 if (user_backed_iter(i)) 521 might_fault(); 522 iterate_and_advance(i, bytes, base, len, off, 523 copyout(base, addr + off, len), 524 memcpy(base, addr + off, len) 525 ) 526 527 return bytes; 528 } 529 EXPORT_SYMBOL(_copy_to_iter); 530 531 #ifdef CONFIG_ARCH_HAS_COPY_MC 532 static int copyout_mc(void __user *to, const void *from, size_t n) 533 { 534 if (access_ok(to, n)) { 535 instrument_copy_to_user(to, from, n); 536 n = copy_mc_to_user((__force void *) to, from, n); 537 } 538 return n; 539 } 540 541 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, 542 struct iov_iter *i) 543 { 544 size_t xfer = 0; 545 unsigned int off, chunk; 546 547 if (unlikely(bytes > i->count)) 548 bytes = i->count; 549 if (unlikely(!bytes)) 550 return 0; 551 552 if (!sanity(i)) 553 return 0; 554 555 while (bytes) { 556 struct page *page = append_pipe(i, bytes, &off); 557 unsigned long rem; 558 char *p; 559 560 if (!page) 561 break; 562 chunk = min_t(size_t, bytes, PAGE_SIZE - off); 563 p = kmap_local_page(page); 564 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); 565 chunk -= rem; 566 kunmap_local(p); 567 xfer += chunk; 568 bytes -= chunk; 569 if (rem) { 570 iov_iter_revert(i, rem); 571 break; 572 } 573 } 574 return xfer; 575 } 576 577 /** 578 * _copy_mc_to_iter - copy to iter with source memory error exception handling 579 * @addr: source kernel address 580 * @bytes: total transfer length 581 * @i: destination iterator 582 * 583 * The pmem driver deploys this for the dax operation 584 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the 585 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes 586 * successfully copied. 587 * 588 * The main differences between this and typical _copy_to_iter(). 589 * 590 * * Typical tail/residue handling after a fault retries the copy 591 * byte-by-byte until the fault happens again. Re-triggering machine 592 * checks is potentially fatal so the implementation uses source 593 * alignment and poison alignment assumptions to avoid re-triggering 594 * hardware exceptions. 595 * 596 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. 597 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return 598 * a short copy. 
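 *
 * A caller therefore has to check for a short copy; roughly (a sketch with
 * illustrative names):
 *
 *	copied = copy_mc_to_iter(src, len, i);
 *	if (copied != len)
 *		return -EIO;	// poison consumed part of the source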
599 * 600 * Return: number of bytes copied (may be %0) 601 */ 602 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 603 { 604 if (unlikely(iov_iter_is_pipe(i))) 605 return copy_mc_pipe_to_iter(addr, bytes, i); 606 if (user_backed_iter(i)) 607 might_fault(); 608 __iterate_and_advance(i, bytes, base, len, off, 609 copyout_mc(base, addr + off, len), 610 copy_mc_to_kernel(base, addr + off, len) 611 ) 612 613 return bytes; 614 } 615 EXPORT_SYMBOL_GPL(_copy_mc_to_iter); 616 #endif /* CONFIG_ARCH_HAS_COPY_MC */ 617 618 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 619 { 620 if (unlikely(iov_iter_is_pipe(i))) { 621 WARN_ON(1); 622 return 0; 623 } 624 if (user_backed_iter(i)) 625 might_fault(); 626 iterate_and_advance(i, bytes, base, len, off, 627 copyin(addr + off, base, len), 628 memcpy(addr + off, base, len) 629 ) 630 631 return bytes; 632 } 633 EXPORT_SYMBOL(_copy_from_iter); 634 635 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) 636 { 637 if (unlikely(iov_iter_is_pipe(i))) { 638 WARN_ON(1); 639 return 0; 640 } 641 iterate_and_advance(i, bytes, base, len, off, 642 __copy_from_user_inatomic_nocache(addr + off, base, len), 643 memcpy(addr + off, base, len) 644 ) 645 646 return bytes; 647 } 648 EXPORT_SYMBOL(_copy_from_iter_nocache); 649 650 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 651 /** 652 * _copy_from_iter_flushcache - write destination through cpu cache 653 * @addr: destination kernel address 654 * @bytes: total transfer length 655 * @i: source iterator 656 * 657 * The pmem driver arranges for filesystem-dax to use this facility via 658 * dax_copy_from_iter() for ensuring that writes to persistent memory 659 * are flushed through the CPU cache. It is differentiated from 660 * _copy_from_iter_nocache() in that guarantees all data is flushed for 661 * all iterator types. The _copy_from_iter_nocache() only attempts to 662 * bypass the cache for the ITER_IOVEC case, and on some archs may use 663 * instructions that strand dirty-data in the cache. 664 * 665 * Return: number of bytes copied (may be %0) 666 */ 667 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 668 { 669 if (unlikely(iov_iter_is_pipe(i))) { 670 WARN_ON(1); 671 return 0; 672 } 673 iterate_and_advance(i, bytes, base, len, off, 674 __copy_from_user_flushcache(addr + off, base, len), 675 memcpy_flushcache(addr + off, base, len) 676 ) 677 678 return bytes; 679 } 680 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); 681 #endif 682 683 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) 684 { 685 struct page *head; 686 size_t v = n + offset; 687 688 /* 689 * The general case needs to access the page order in order 690 * to compute the page size. 691 * However, we mostly deal with order-0 pages and thus can 692 * avoid a possible cache line miss for requests that fit all 693 * page orders. 
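	 *
	 * E.g. offset == 100 and n == 200 give v == 300: "n <= v" shows that
	 * the unsigned addition did not wrap, and "v <= PAGE_SIZE" shows the
	 * range fits within a single order-0 page, so the compound_head()
	 * lookup can be skipped.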
694 */ 695 if (n <= v && v <= PAGE_SIZE) 696 return true; 697 698 head = compound_head(page); 699 v += (page - head) << PAGE_SHIFT; 700 701 if (likely(n <= v && v <= (page_size(head)))) 702 return true; 703 WARN_ON(1); 704 return false; 705 } 706 707 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 708 struct iov_iter *i) 709 { 710 if (unlikely(iov_iter_is_pipe(i))) { 711 return copy_page_to_iter_pipe(page, offset, bytes, i); 712 } else { 713 void *kaddr = kmap_local_page(page); 714 size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); 715 kunmap_local(kaddr); 716 return wanted; 717 } 718 } 719 720 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 721 struct iov_iter *i) 722 { 723 size_t res = 0; 724 if (unlikely(!page_copy_sane(page, offset, bytes))) 725 return 0; 726 page += offset / PAGE_SIZE; // first subpage 727 offset %= PAGE_SIZE; 728 while (1) { 729 size_t n = __copy_page_to_iter(page, offset, 730 min(bytes, (size_t)PAGE_SIZE - offset), i); 731 res += n; 732 bytes -= n; 733 if (!bytes || !n) 734 break; 735 offset += n; 736 if (offset == PAGE_SIZE) { 737 page++; 738 offset = 0; 739 } 740 } 741 return res; 742 } 743 EXPORT_SYMBOL(copy_page_to_iter); 744 745 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, 746 struct iov_iter *i) 747 { 748 if (page_copy_sane(page, offset, bytes)) { 749 void *kaddr = kmap_local_page(page); 750 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); 751 kunmap_local(kaddr); 752 return wanted; 753 } 754 return 0; 755 } 756 EXPORT_SYMBOL(copy_page_from_iter); 757 758 static size_t pipe_zero(size_t bytes, struct iov_iter *i) 759 { 760 unsigned int chunk, off; 761 762 if (unlikely(bytes > i->count)) 763 bytes = i->count; 764 if (unlikely(!bytes)) 765 return 0; 766 767 if (!sanity(i)) 768 return 0; 769 770 for (size_t n = bytes; n; n -= chunk) { 771 struct page *page = append_pipe(i, n, &off); 772 char *p; 773 774 if (!page) 775 return bytes - n; 776 chunk = min_t(size_t, n, PAGE_SIZE - off); 777 p = kmap_local_page(page); 778 memset(p + off, 0, chunk); 779 kunmap_local(p); 780 } 781 return bytes; 782 } 783 784 size_t iov_iter_zero(size_t bytes, struct iov_iter *i) 785 { 786 if (unlikely(iov_iter_is_pipe(i))) 787 return pipe_zero(bytes, i); 788 iterate_and_advance(i, bytes, base, len, count, 789 clear_user(base, len), 790 memset(base, 0, len) 791 ) 792 793 return bytes; 794 } 795 EXPORT_SYMBOL(iov_iter_zero); 796 797 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, 798 struct iov_iter *i) 799 { 800 char *kaddr = kmap_atomic(page), *p = kaddr + offset; 801 if (unlikely(!page_copy_sane(page, offset, bytes))) { 802 kunmap_atomic(kaddr); 803 return 0; 804 } 805 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 806 kunmap_atomic(kaddr); 807 WARN_ON(1); 808 return 0; 809 } 810 iterate_and_advance(i, bytes, base, len, off, 811 copyin(p + off, base, len), 812 memcpy(p + off, base, len) 813 ) 814 kunmap_atomic(kaddr); 815 return bytes; 816 } 817 EXPORT_SYMBOL(copy_page_from_iter_atomic); 818 819 static inline void pipe_truncate(struct iov_iter *i) 820 { 821 struct pipe_inode_info *pipe = i->pipe; 822 unsigned int p_tail = pipe->tail; 823 unsigned int p_head = pipe->head; 824 unsigned int p_mask = pipe->ring_size - 1; 825 826 if (!pipe_empty(p_head, p_tail)) { 827 struct pipe_buffer *buf; 828 unsigned int i_head = i->head; 829 size_t off = i->iov_offset; 830 831 if (off) { 832 buf = &pipe->bufs[i_head & p_mask]; 833 buf->len = off - 
buf->offset; 834 i_head++; 835 } 836 while (p_head != i_head) { 837 p_head--; 838 pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]); 839 } 840 841 pipe->head = p_head; 842 } 843 } 844 845 static void pipe_advance(struct iov_iter *i, size_t size) 846 { 847 struct pipe_inode_info *pipe = i->pipe; 848 unsigned int off = i->iov_offset; 849 850 if (!off && !size) { 851 pipe_discard_from(pipe, i->start_head); // discard everything 852 return; 853 } 854 i->count -= size; 855 while (1) { 856 struct pipe_buffer *buf = pipe_buf(pipe, i->head); 857 if (off) /* make it relative to the beginning of buffer */ 858 size += off - buf->offset; 859 if (size <= buf->len) { 860 buf->len = size; 861 i->iov_offset = buf->offset + size; 862 break; 863 } 864 size -= buf->len; 865 i->head++; 866 off = 0; 867 } 868 pipe_discard_from(pipe, i->head + 1); // discard everything past this one 869 } 870 871 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) 872 { 873 const struct bio_vec *bvec, *end; 874 875 if (!i->count) 876 return; 877 i->count -= size; 878 879 size += i->iov_offset; 880 881 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { 882 if (likely(size < bvec->bv_len)) 883 break; 884 size -= bvec->bv_len; 885 } 886 i->iov_offset = size; 887 i->nr_segs -= bvec - i->bvec; 888 i->bvec = bvec; 889 } 890 891 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) 892 { 893 const struct iovec *iov, *end; 894 895 if (!i->count) 896 return; 897 i->count -= size; 898 899 size += i->iov_offset; // from beginning of current segment 900 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { 901 if (likely(size < iov->iov_len)) 902 break; 903 size -= iov->iov_len; 904 } 905 i->iov_offset = size; 906 i->nr_segs -= iov - i->iov; 907 i->iov = iov; 908 } 909 910 void iov_iter_advance(struct iov_iter *i, size_t size) 911 { 912 if (unlikely(i->count < size)) 913 size = i->count; 914 if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) { 915 i->iov_offset += size; 916 i->count -= size; 917 } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { 918 /* iovec and kvec have identical layouts */ 919 iov_iter_iovec_advance(i, size); 920 } else if (iov_iter_is_bvec(i)) { 921 iov_iter_bvec_advance(i, size); 922 } else if (iov_iter_is_pipe(i)) { 923 pipe_advance(i, size); 924 } else if (iov_iter_is_discard(i)) { 925 i->count -= size; 926 } 927 } 928 EXPORT_SYMBOL(iov_iter_advance); 929 930 void iov_iter_revert(struct iov_iter *i, size_t unroll) 931 { 932 if (!unroll) 933 return; 934 if (WARN_ON(unroll > MAX_RW_COUNT)) 935 return; 936 i->count += unroll; 937 if (unlikely(iov_iter_is_pipe(i))) { 938 struct pipe_inode_info *pipe = i->pipe; 939 unsigned int p_mask = pipe->ring_size - 1; 940 unsigned int i_head = i->head; 941 size_t off = i->iov_offset; 942 while (1) { 943 struct pipe_buffer *b = &pipe->bufs[i_head & p_mask]; 944 size_t n = off - b->offset; 945 if (unroll < n) { 946 off -= unroll; 947 break; 948 } 949 unroll -= n; 950 if (!unroll && i_head == i->start_head) { 951 off = 0; 952 break; 953 } 954 i_head--; 955 b = &pipe->bufs[i_head & p_mask]; 956 off = b->offset + b->len; 957 } 958 i->iov_offset = off; 959 i->head = i_head; 960 pipe_truncate(i); 961 return; 962 } 963 if (unlikely(iov_iter_is_discard(i))) 964 return; 965 if (unroll <= i->iov_offset) { 966 i->iov_offset -= unroll; 967 return; 968 } 969 unroll -= i->iov_offset; 970 if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) { 971 BUG(); /* We should never go beyond the start of the specified 972 * range 
since we might then be straying into pages that 973 * aren't pinned. 974 */ 975 } else if (iov_iter_is_bvec(i)) { 976 const struct bio_vec *bvec = i->bvec; 977 while (1) { 978 size_t n = (--bvec)->bv_len; 979 i->nr_segs++; 980 if (unroll <= n) { 981 i->bvec = bvec; 982 i->iov_offset = n - unroll; 983 return; 984 } 985 unroll -= n; 986 } 987 } else { /* same logics for iovec and kvec */ 988 const struct iovec *iov = i->iov; 989 while (1) { 990 size_t n = (--iov)->iov_len; 991 i->nr_segs++; 992 if (unroll <= n) { 993 i->iov = iov; 994 i->iov_offset = n - unroll; 995 return; 996 } 997 unroll -= n; 998 } 999 } 1000 } 1001 EXPORT_SYMBOL(iov_iter_revert); 1002 1003 /* 1004 * Return the count of just the current iov_iter segment. 1005 */ 1006 size_t iov_iter_single_seg_count(const struct iov_iter *i) 1007 { 1008 if (i->nr_segs > 1) { 1009 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1010 return min(i->count, i->iov->iov_len - i->iov_offset); 1011 if (iov_iter_is_bvec(i)) 1012 return min(i->count, i->bvec->bv_len - i->iov_offset); 1013 } 1014 return i->count; 1015 } 1016 EXPORT_SYMBOL(iov_iter_single_seg_count); 1017 1018 void iov_iter_kvec(struct iov_iter *i, unsigned int direction, 1019 const struct kvec *kvec, unsigned long nr_segs, 1020 size_t count) 1021 { 1022 WARN_ON(direction & ~(READ | WRITE)); 1023 *i = (struct iov_iter){ 1024 .iter_type = ITER_KVEC, 1025 .data_source = direction, 1026 .kvec = kvec, 1027 .nr_segs = nr_segs, 1028 .iov_offset = 0, 1029 .count = count 1030 }; 1031 } 1032 EXPORT_SYMBOL(iov_iter_kvec); 1033 1034 void iov_iter_bvec(struct iov_iter *i, unsigned int direction, 1035 const struct bio_vec *bvec, unsigned long nr_segs, 1036 size_t count) 1037 { 1038 WARN_ON(direction & ~(READ | WRITE)); 1039 *i = (struct iov_iter){ 1040 .iter_type = ITER_BVEC, 1041 .data_source = direction, 1042 .bvec = bvec, 1043 .nr_segs = nr_segs, 1044 .iov_offset = 0, 1045 .count = count 1046 }; 1047 } 1048 EXPORT_SYMBOL(iov_iter_bvec); 1049 1050 void iov_iter_pipe(struct iov_iter *i, unsigned int direction, 1051 struct pipe_inode_info *pipe, 1052 size_t count) 1053 { 1054 BUG_ON(direction != READ); 1055 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); 1056 *i = (struct iov_iter){ 1057 .iter_type = ITER_PIPE, 1058 .data_source = false, 1059 .pipe = pipe, 1060 .head = pipe->head, 1061 .start_head = pipe->head, 1062 .iov_offset = 0, 1063 .count = count 1064 }; 1065 } 1066 EXPORT_SYMBOL(iov_iter_pipe); 1067 1068 /** 1069 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray 1070 * @i: The iterator to initialise. 1071 * @direction: The direction of the transfer. 1072 * @xarray: The xarray to access. 1073 * @start: The start file position. 1074 * @count: The size of the I/O buffer in bytes. 1075 * 1076 * Set up an I/O iterator to either draw data out of the pages attached to an 1077 * inode or to inject data into those pages. The pages *must* be prevented 1078 * from evaporation, either by taking a ref on them or locking them by the 1079 * caller. 
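 *
 * A minimal usage sketch for the "inject data" direction (the mapping,
 * position, length and kernel buffer names are illustrative):
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *	copied = copy_to_iter(kbuf, len, &iter);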
1080 */ 1081 void iov_iter_xarray(struct iov_iter *i, unsigned int direction, 1082 struct xarray *xarray, loff_t start, size_t count) 1083 { 1084 BUG_ON(direction & ~1); 1085 *i = (struct iov_iter) { 1086 .iter_type = ITER_XARRAY, 1087 .data_source = direction, 1088 .xarray = xarray, 1089 .xarray_start = start, 1090 .count = count, 1091 .iov_offset = 0 1092 }; 1093 } 1094 EXPORT_SYMBOL(iov_iter_xarray); 1095 1096 /** 1097 * iov_iter_discard - Initialise an I/O iterator that discards data 1098 * @i: The iterator to initialise. 1099 * @direction: The direction of the transfer. 1100 * @count: The size of the I/O buffer in bytes. 1101 * 1102 * Set up an I/O iterator that just discards everything that's written to it. 1103 * It's only available as a READ iterator. 1104 */ 1105 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) 1106 { 1107 BUG_ON(direction != READ); 1108 *i = (struct iov_iter){ 1109 .iter_type = ITER_DISCARD, 1110 .data_source = false, 1111 .count = count, 1112 .iov_offset = 0 1113 }; 1114 } 1115 EXPORT_SYMBOL(iov_iter_discard); 1116 1117 static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, 1118 unsigned len_mask) 1119 { 1120 size_t size = i->count; 1121 size_t skip = i->iov_offset; 1122 unsigned k; 1123 1124 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1125 size_t len = i->iov[k].iov_len - skip; 1126 1127 if (len > size) 1128 len = size; 1129 if (len & len_mask) 1130 return false; 1131 if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) 1132 return false; 1133 1134 size -= len; 1135 if (!size) 1136 break; 1137 } 1138 return true; 1139 } 1140 1141 static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, 1142 unsigned len_mask) 1143 { 1144 size_t size = i->count; 1145 unsigned skip = i->iov_offset; 1146 unsigned k; 1147 1148 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1149 size_t len = i->bvec[k].bv_len - skip; 1150 1151 if (len > size) 1152 len = size; 1153 if (len & len_mask) 1154 return false; 1155 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) 1156 return false; 1157 1158 size -= len; 1159 if (!size) 1160 break; 1161 } 1162 return true; 1163 } 1164 1165 /** 1166 * iov_iter_is_aligned() - Check if the addresses and lengths of each segments 1167 * are aligned to the parameters. 
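 *
 * E.g. iov_iter_is_aligned(i, 511, 511) reports whether every segment starts
 * on a 512-byte boundary and covers a multiple of 512 bytes, as a direct I/O
 * path might require (the mask values here are only an illustration).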
1168 * 1169 * @i: &struct iov_iter to restore 1170 * @addr_mask: bit mask to check against the iov element's addresses 1171 * @len_mask: bit mask to check against the iov element's lengths 1172 * 1173 * Return: false if any addresses or lengths intersect with the provided masks 1174 */ 1175 bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, 1176 unsigned len_mask) 1177 { 1178 if (likely(iter_is_ubuf(i))) { 1179 if (i->count & len_mask) 1180 return false; 1181 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) 1182 return false; 1183 return true; 1184 } 1185 1186 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1187 return iov_iter_aligned_iovec(i, addr_mask, len_mask); 1188 1189 if (iov_iter_is_bvec(i)) 1190 return iov_iter_aligned_bvec(i, addr_mask, len_mask); 1191 1192 if (iov_iter_is_pipe(i)) { 1193 unsigned int p_mask = i->pipe->ring_size - 1; 1194 size_t size = i->count; 1195 1196 if (size & len_mask) 1197 return false; 1198 if (size && allocated(&i->pipe->bufs[i->head & p_mask])) { 1199 if (i->iov_offset & addr_mask) 1200 return false; 1201 } 1202 1203 return true; 1204 } 1205 1206 if (iov_iter_is_xarray(i)) { 1207 if (i->count & len_mask) 1208 return false; 1209 if ((i->xarray_start + i->iov_offset) & addr_mask) 1210 return false; 1211 } 1212 1213 return true; 1214 } 1215 EXPORT_SYMBOL_GPL(iov_iter_is_aligned); 1216 1217 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) 1218 { 1219 unsigned long res = 0; 1220 size_t size = i->count; 1221 size_t skip = i->iov_offset; 1222 unsigned k; 1223 1224 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1225 size_t len = i->iov[k].iov_len - skip; 1226 if (len) { 1227 res |= (unsigned long)i->iov[k].iov_base + skip; 1228 if (len > size) 1229 len = size; 1230 res |= len; 1231 size -= len; 1232 if (!size) 1233 break; 1234 } 1235 } 1236 return res; 1237 } 1238 1239 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) 1240 { 1241 unsigned res = 0; 1242 size_t size = i->count; 1243 unsigned skip = i->iov_offset; 1244 unsigned k; 1245 1246 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1247 size_t len = i->bvec[k].bv_len - skip; 1248 res |= (unsigned long)i->bvec[k].bv_offset + skip; 1249 if (len > size) 1250 len = size; 1251 res |= len; 1252 size -= len; 1253 if (!size) 1254 break; 1255 } 1256 return res; 1257 } 1258 1259 unsigned long iov_iter_alignment(const struct iov_iter *i) 1260 { 1261 if (likely(iter_is_ubuf(i))) { 1262 size_t size = i->count; 1263 if (size) 1264 return ((unsigned long)i->ubuf + i->iov_offset) | size; 1265 return 0; 1266 } 1267 1268 /* iovec and kvec have identical layouts */ 1269 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1270 return iov_iter_alignment_iovec(i); 1271 1272 if (iov_iter_is_bvec(i)) 1273 return iov_iter_alignment_bvec(i); 1274 1275 if (iov_iter_is_pipe(i)) { 1276 size_t size = i->count; 1277 1278 if (size && i->iov_offset && allocated(pipe_buf(i->pipe, i->head))) 1279 return size | i->iov_offset; 1280 return size; 1281 } 1282 1283 if (iov_iter_is_xarray(i)) 1284 return (i->xarray_start + i->iov_offset) | i->count; 1285 1286 return 0; 1287 } 1288 EXPORT_SYMBOL(iov_iter_alignment); 1289 1290 unsigned long iov_iter_gap_alignment(const struct iov_iter *i) 1291 { 1292 unsigned long res = 0; 1293 unsigned long v = 0; 1294 size_t size = i->count; 1295 unsigned k; 1296 1297 if (iter_is_ubuf(i)) 1298 return 0; 1299 1300 if (WARN_ON(!iter_is_iovec(i))) 1301 return ~0U; 1302 1303 for (k = 0; k < i->nr_segs; k++) { 1304 if (i->iov[k].iov_len) { 1305 
unsigned long base = (unsigned long)i->iov[k].iov_base; 1306 if (v) // if not the first one 1307 res |= base | v; // this start | previous end 1308 v = base + i->iov[k].iov_len; 1309 if (size <= i->iov[k].iov_len) 1310 break; 1311 size -= i->iov[k].iov_len; 1312 } 1313 } 1314 return res; 1315 } 1316 EXPORT_SYMBOL(iov_iter_gap_alignment); 1317 1318 static inline ssize_t __pipe_get_pages(struct iov_iter *i, 1319 size_t maxsize, 1320 struct page **pages, 1321 size_t off) 1322 { 1323 struct pipe_inode_info *pipe = i->pipe; 1324 ssize_t left = maxsize; 1325 1326 if (off) { 1327 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head - 1); 1328 1329 get_page(*pages++ = buf->page); 1330 left -= PAGE_SIZE - off; 1331 if (left <= 0) { 1332 buf->len += maxsize; 1333 return maxsize; 1334 } 1335 buf->len = PAGE_SIZE; 1336 } 1337 while (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) { 1338 struct page *page = push_anon(pipe, 1339 min_t(ssize_t, left, PAGE_SIZE)); 1340 if (!page) 1341 break; 1342 get_page(*pages++ = page); 1343 left -= PAGE_SIZE; 1344 if (left <= 0) 1345 return maxsize; 1346 } 1347 return maxsize - left ? : -EFAULT; 1348 } 1349 1350 static ssize_t pipe_get_pages(struct iov_iter *i, 1351 struct page **pages, size_t maxsize, unsigned maxpages, 1352 size_t *start) 1353 { 1354 unsigned int iter_head, npages; 1355 size_t capacity; 1356 1357 if (!sanity(i)) 1358 return -EFAULT; 1359 1360 data_start(i, &iter_head, start); 1361 /* Amount of free space: some of this one + all after this one */ 1362 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); 1363 capacity = min(npages, maxpages) * PAGE_SIZE - *start; 1364 1365 return __pipe_get_pages(i, min(maxsize, capacity), pages, *start); 1366 } 1367 1368 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, 1369 pgoff_t index, unsigned int nr_pages) 1370 { 1371 XA_STATE(xas, xa, index); 1372 struct page *page; 1373 unsigned int ret = 0; 1374 1375 rcu_read_lock(); 1376 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1377 if (xas_retry(&xas, page)) 1378 continue; 1379 1380 /* Has the page moved or been split? 
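		 * xas_reload() re-checks the slot under RCU; if the entry has
		 * changed since it was loaded, reset the state and walk that
		 * index again.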
*/ 1381 if (unlikely(page != xas_reload(&xas))) { 1382 xas_reset(&xas); 1383 continue; 1384 } 1385 1386 pages[ret] = find_subpage(page, xas.xa_index); 1387 get_page(pages[ret]); 1388 if (++ret == nr_pages) 1389 break; 1390 } 1391 rcu_read_unlock(); 1392 return ret; 1393 } 1394 1395 static ssize_t iter_xarray_get_pages(struct iov_iter *i, 1396 struct page **pages, size_t maxsize, 1397 unsigned maxpages, size_t *_start_offset) 1398 { 1399 unsigned nr, offset; 1400 pgoff_t index, count; 1401 size_t size = maxsize; 1402 loff_t pos; 1403 1404 if (!size || !maxpages) 1405 return 0; 1406 1407 pos = i->xarray_start + i->iov_offset; 1408 index = pos >> PAGE_SHIFT; 1409 offset = pos & ~PAGE_MASK; 1410 *_start_offset = offset; 1411 1412 count = 1; 1413 if (size > PAGE_SIZE - offset) { 1414 size -= PAGE_SIZE - offset; 1415 count += size >> PAGE_SHIFT; 1416 size &= ~PAGE_MASK; 1417 if (size) 1418 count++; 1419 } 1420 1421 if (count > maxpages) 1422 count = maxpages; 1423 1424 nr = iter_xarray_populate_pages(pages, i->xarray, index, count); 1425 if (nr == 0) 1426 return 0; 1427 1428 return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1429 } 1430 1431 /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */ 1432 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) 1433 { 1434 size_t skip; 1435 long k; 1436 1437 if (iter_is_ubuf(i)) 1438 return (unsigned long)i->ubuf + i->iov_offset; 1439 1440 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { 1441 size_t len = i->iov[k].iov_len - skip; 1442 1443 if (unlikely(!len)) 1444 continue; 1445 if (*size > len) 1446 *size = len; 1447 return (unsigned long)i->iov[k].iov_base + skip; 1448 } 1449 BUG(); // if it had been empty, we wouldn't get called 1450 } 1451 1452 /* must be done on non-empty ITER_BVEC one */ 1453 static struct page *first_bvec_segment(const struct iov_iter *i, 1454 size_t *size, size_t *start) 1455 { 1456 struct page *page; 1457 size_t skip = i->iov_offset, len; 1458 1459 len = i->bvec->bv_len - skip; 1460 if (*size > len) 1461 *size = len; 1462 skip += i->bvec->bv_offset; 1463 page = i->bvec->bv_page + skip / PAGE_SIZE; 1464 *start = skip % PAGE_SIZE; 1465 return page; 1466 } 1467 1468 ssize_t iov_iter_get_pages(struct iov_iter *i, 1469 struct page **pages, size_t maxsize, unsigned maxpages, 1470 size_t *start) 1471 { 1472 int n, res; 1473 1474 if (maxsize > i->count) 1475 maxsize = i->count; 1476 if (!maxsize) 1477 return 0; 1478 if (maxsize > MAX_RW_COUNT) 1479 maxsize = MAX_RW_COUNT; 1480 1481 if (likely(user_backed_iter(i))) { 1482 unsigned int gup_flags = 0; 1483 unsigned long addr; 1484 1485 if (iov_iter_rw(i) != WRITE) 1486 gup_flags |= FOLL_WRITE; 1487 if (i->nofault) 1488 gup_flags |= FOLL_NOFAULT; 1489 1490 addr = first_iovec_segment(i, &maxsize); 1491 *start = addr % PAGE_SIZE; 1492 addr &= PAGE_MASK; 1493 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); 1494 if (n > maxpages) 1495 n = maxpages; 1496 res = get_user_pages_fast(addr, n, gup_flags, pages); 1497 if (unlikely(res <= 0)) 1498 return res; 1499 return min_t(size_t, maxsize, res * PAGE_SIZE - *start); 1500 } 1501 if (iov_iter_is_bvec(i)) { 1502 struct page *page; 1503 1504 page = first_bvec_segment(i, &maxsize, start); 1505 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); 1506 if (n > maxpages) 1507 n = maxpages; 1508 for (int k = 0; k < n; k++) 1509 get_page(*pages++ = page++); 1510 return min_t(size_t, maxsize, n * PAGE_SIZE - *start); 1511 } 1512 if (iov_iter_is_pipe(i)) 1513 return pipe_get_pages(i, pages, maxsize, maxpages, 
start); 1514 if (iov_iter_is_xarray(i)) 1515 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); 1516 return -EFAULT; 1517 } 1518 EXPORT_SYMBOL(iov_iter_get_pages); 1519 1520 static struct page **get_pages_array(size_t n) 1521 { 1522 return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL); 1523 } 1524 1525 static ssize_t pipe_get_pages_alloc(struct iov_iter *i, 1526 struct page ***pages, size_t maxsize, 1527 size_t *start) 1528 { 1529 struct page **p; 1530 unsigned int iter_head, npages; 1531 ssize_t n; 1532 1533 if (!sanity(i)) 1534 return -EFAULT; 1535 1536 data_start(i, &iter_head, start); 1537 /* Amount of free space: some of this one + all after this one */ 1538 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); 1539 n = npages * PAGE_SIZE - *start; 1540 if (maxsize > n) 1541 maxsize = n; 1542 else 1543 npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); 1544 p = get_pages_array(npages); 1545 if (!p) 1546 return -ENOMEM; 1547 n = __pipe_get_pages(i, maxsize, p, *start); 1548 if (n > 0) 1549 *pages = p; 1550 else 1551 kvfree(p); 1552 return n; 1553 } 1554 1555 static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i, 1556 struct page ***pages, size_t maxsize, 1557 size_t *_start_offset) 1558 { 1559 struct page **p; 1560 unsigned nr, offset; 1561 pgoff_t index, count; 1562 size_t size = maxsize; 1563 loff_t pos; 1564 1565 if (!size) 1566 return 0; 1567 1568 pos = i->xarray_start + i->iov_offset; 1569 index = pos >> PAGE_SHIFT; 1570 offset = pos & ~PAGE_MASK; 1571 *_start_offset = offset; 1572 1573 count = 1; 1574 if (size > PAGE_SIZE - offset) { 1575 size -= PAGE_SIZE - offset; 1576 count += size >> PAGE_SHIFT; 1577 size &= ~PAGE_MASK; 1578 if (size) 1579 count++; 1580 } 1581 1582 p = get_pages_array(count); 1583 if (!p) 1584 return -ENOMEM; 1585 *pages = p; 1586 1587 nr = iter_xarray_populate_pages(p, i->xarray, index, count); 1588 if (nr == 0) 1589 return 0; 1590 1591 return min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1592 } 1593 1594 ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, 1595 struct page ***pages, size_t maxsize, 1596 size_t *start) 1597 { 1598 struct page **p; 1599 int n, res; 1600 1601 if (maxsize > i->count) 1602 maxsize = i->count; 1603 if (!maxsize) 1604 return 0; 1605 if (maxsize > MAX_RW_COUNT) 1606 maxsize = MAX_RW_COUNT; 1607 1608 if (likely(user_backed_iter(i))) { 1609 unsigned int gup_flags = 0; 1610 unsigned long addr; 1611 1612 if (iov_iter_rw(i) != WRITE) 1613 gup_flags |= FOLL_WRITE; 1614 if (i->nofault) 1615 gup_flags |= FOLL_NOFAULT; 1616 1617 addr = first_iovec_segment(i, &maxsize); 1618 *start = addr % PAGE_SIZE; 1619 addr &= PAGE_MASK; 1620 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); 1621 p = get_pages_array(n); 1622 if (!p) 1623 return -ENOMEM; 1624 res = get_user_pages_fast(addr, n, gup_flags, p); 1625 if (unlikely(res <= 0)) { 1626 kvfree(p); 1627 *pages = NULL; 1628 return res; 1629 } 1630 *pages = p; 1631 return min_t(size_t, maxsize, res * PAGE_SIZE - *start); 1632 } 1633 if (iov_iter_is_bvec(i)) { 1634 struct page *page; 1635 1636 page = first_bvec_segment(i, &maxsize, start); 1637 n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE); 1638 *pages = p = get_pages_array(n); 1639 if (!p) 1640 return -ENOMEM; 1641 for (int k = 0; k < n; k++) 1642 get_page(*p++ = page++); 1643 return min_t(size_t, maxsize, n * PAGE_SIZE - *start); 1644 } 1645 if (iov_iter_is_pipe(i)) 1646 return pipe_get_pages_alloc(i, pages, maxsize, start); 1647 if (iov_iter_is_xarray(i)) 1648 return iter_xarray_get_pages_alloc(i, pages, 
maxsize, start); 1649 return -EFAULT; 1650 } 1651 EXPORT_SYMBOL(iov_iter_get_pages_alloc); 1652 1653 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, 1654 struct iov_iter *i) 1655 { 1656 __wsum sum, next; 1657 sum = *csum; 1658 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 1659 WARN_ON(1); 1660 return 0; 1661 } 1662 iterate_and_advance(i, bytes, base, len, off, ({ 1663 next = csum_and_copy_from_user(base, addr + off, len); 1664 sum = csum_block_add(sum, next, off); 1665 next ? 0 : len; 1666 }), ({ 1667 sum = csum_and_memcpy(addr + off, base, len, sum, off); 1668 }) 1669 ) 1670 *csum = sum; 1671 return bytes; 1672 } 1673 EXPORT_SYMBOL(csum_and_copy_from_iter); 1674 1675 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, 1676 struct iov_iter *i) 1677 { 1678 struct csum_state *csstate = _csstate; 1679 __wsum sum, next; 1680 1681 if (unlikely(iov_iter_is_discard(i))) { 1682 WARN_ON(1); /* for now */ 1683 return 0; 1684 } 1685 1686 sum = csum_shift(csstate->csum, csstate->off); 1687 if (unlikely(iov_iter_is_pipe(i))) 1688 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); 1689 else iterate_and_advance(i, bytes, base, len, off, ({ 1690 next = csum_and_copy_to_user(addr + off, base, len); 1691 sum = csum_block_add(sum, next, off); 1692 next ? 0 : len; 1693 }), ({ 1694 sum = csum_and_memcpy(base, addr + off, len, sum, off); 1695 }) 1696 ) 1697 csstate->csum = csum_shift(sum, csstate->off); 1698 csstate->off += bytes; 1699 return bytes; 1700 } 1701 EXPORT_SYMBOL(csum_and_copy_to_iter); 1702 1703 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, 1704 struct iov_iter *i) 1705 { 1706 #ifdef CONFIG_CRYPTO_HASH 1707 struct ahash_request *hash = hashp; 1708 struct scatterlist sg; 1709 size_t copied; 1710 1711 copied = copy_to_iter(addr, bytes, i); 1712 sg_init_one(&sg, addr, copied); 1713 ahash_request_set_crypt(hash, &sg, NULL, copied); 1714 crypto_ahash_update(hash); 1715 return copied; 1716 #else 1717 return 0; 1718 #endif 1719 } 1720 EXPORT_SYMBOL(hash_and_copy_to_iter); 1721 1722 static int iov_npages(const struct iov_iter *i, int maxpages) 1723 { 1724 size_t skip = i->iov_offset, size = i->count; 1725 const struct iovec *p; 1726 int npages = 0; 1727 1728 for (p = i->iov; size; skip = 0, p++) { 1729 unsigned offs = offset_in_page(p->iov_base + skip); 1730 size_t len = min(p->iov_len - skip, size); 1731 1732 if (len) { 1733 size -= len; 1734 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1735 if (unlikely(npages > maxpages)) 1736 return maxpages; 1737 } 1738 } 1739 return npages; 1740 } 1741 1742 static int bvec_npages(const struct iov_iter *i, int maxpages) 1743 { 1744 size_t skip = i->iov_offset, size = i->count; 1745 const struct bio_vec *p; 1746 int npages = 0; 1747 1748 for (p = i->bvec; size; skip = 0, p++) { 1749 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; 1750 size_t len = min(p->bv_len - skip, size); 1751 1752 size -= len; 1753 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1754 if (unlikely(npages > maxpages)) 1755 return maxpages; 1756 } 1757 return npages; 1758 } 1759 1760 int iov_iter_npages(const struct iov_iter *i, int maxpages) 1761 { 1762 if (unlikely(!i->count)) 1763 return 0; 1764 if (likely(iter_is_ubuf(i))) { 1765 unsigned offs = offset_in_page(i->ubuf + i->iov_offset); 1766 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); 1767 return min(npages, maxpages); 1768 } 1769 /* iovec and kvec have identical layouts */ 1770 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 
1771 return iov_npages(i, maxpages); 1772 if (iov_iter_is_bvec(i)) 1773 return bvec_npages(i, maxpages); 1774 if (iov_iter_is_pipe(i)) { 1775 unsigned int iter_head; 1776 int npages; 1777 size_t off; 1778 1779 if (!sanity(i)) 1780 return 0; 1781 1782 data_start(i, &iter_head, &off); 1783 /* some of this one + all after this one */ 1784 npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe); 1785 return min(npages, maxpages); 1786 } 1787 if (iov_iter_is_xarray(i)) { 1788 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; 1789 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); 1790 return min(npages, maxpages); 1791 } 1792 return 0; 1793 } 1794 EXPORT_SYMBOL(iov_iter_npages); 1795 1796 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) 1797 { 1798 *new = *old; 1799 if (unlikely(iov_iter_is_pipe(new))) { 1800 WARN_ON(1); 1801 return NULL; 1802 } 1803 if (iov_iter_is_bvec(new)) 1804 return new->bvec = kmemdup(new->bvec, 1805 new->nr_segs * sizeof(struct bio_vec), 1806 flags); 1807 else if (iov_iter_is_kvec(new) || iter_is_iovec(new)) 1808 /* iovec and kvec have identical layout */ 1809 return new->iov = kmemdup(new->iov, 1810 new->nr_segs * sizeof(struct iovec), 1811 flags); 1812 return NULL; 1813 } 1814 EXPORT_SYMBOL(dup_iter); 1815 1816 static int copy_compat_iovec_from_user(struct iovec *iov, 1817 const struct iovec __user *uvec, unsigned long nr_segs) 1818 { 1819 const struct compat_iovec __user *uiov = 1820 (const struct compat_iovec __user *)uvec; 1821 int ret = -EFAULT, i; 1822 1823 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) 1824 return -EFAULT; 1825 1826 for (i = 0; i < nr_segs; i++) { 1827 compat_uptr_t buf; 1828 compat_ssize_t len; 1829 1830 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); 1831 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); 1832 1833 /* check for compat_size_t not fitting in compat_ssize_t .. */ 1834 if (len < 0) { 1835 ret = -EINVAL; 1836 goto uaccess_end; 1837 } 1838 iov[i].iov_base = compat_ptr(buf); 1839 iov[i].iov_len = len; 1840 } 1841 1842 ret = 0; 1843 uaccess_end: 1844 user_access_end(); 1845 return ret; 1846 } 1847 1848 static int copy_iovec_from_user(struct iovec *iov, 1849 const struct iovec __user *uvec, unsigned long nr_segs) 1850 { 1851 unsigned long seg; 1852 1853 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) 1854 return -EFAULT; 1855 for (seg = 0; seg < nr_segs; seg++) { 1856 if ((ssize_t)iov[seg].iov_len < 0) 1857 return -EINVAL; 1858 } 1859 1860 return 0; 1861 } 1862 1863 struct iovec *iovec_from_user(const struct iovec __user *uvec, 1864 unsigned long nr_segs, unsigned long fast_segs, 1865 struct iovec *fast_iov, bool compat) 1866 { 1867 struct iovec *iov = fast_iov; 1868 int ret; 1869 1870 /* 1871 * SuS says "The readv() function *may* fail if the iovcnt argument was 1872 * less than or equal to 0, or greater than {IOV_MAX}. Linux has 1873 * traditionally returned zero for zero segments, so... 
1874 */ 1875 if (nr_segs == 0) 1876 return iov; 1877 if (nr_segs > UIO_MAXIOV) 1878 return ERR_PTR(-EINVAL); 1879 if (nr_segs > fast_segs) { 1880 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); 1881 if (!iov) 1882 return ERR_PTR(-ENOMEM); 1883 } 1884 1885 if (compat) 1886 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); 1887 else 1888 ret = copy_iovec_from_user(iov, uvec, nr_segs); 1889 if (ret) { 1890 if (iov != fast_iov) 1891 kfree(iov); 1892 return ERR_PTR(ret); 1893 } 1894 1895 return iov; 1896 } 1897 1898 ssize_t __import_iovec(int type, const struct iovec __user *uvec, 1899 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, 1900 struct iov_iter *i, bool compat) 1901 { 1902 ssize_t total_len = 0; 1903 unsigned long seg; 1904 struct iovec *iov; 1905 1906 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); 1907 if (IS_ERR(iov)) { 1908 *iovp = NULL; 1909 return PTR_ERR(iov); 1910 } 1911 1912 /* 1913 * According to the Single Unix Specification we should return EINVAL if 1914 * an element length is < 0 when cast to ssize_t or if the total length 1915 * would overflow the ssize_t return value of the system call. 1916 * 1917 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the 1918 * overflow case. 1919 */ 1920 for (seg = 0; seg < nr_segs; seg++) { 1921 ssize_t len = (ssize_t)iov[seg].iov_len; 1922 1923 if (!access_ok(iov[seg].iov_base, len)) { 1924 if (iov != *iovp) 1925 kfree(iov); 1926 *iovp = NULL; 1927 return -EFAULT; 1928 } 1929 1930 if (len > MAX_RW_COUNT - total_len) { 1931 len = MAX_RW_COUNT - total_len; 1932 iov[seg].iov_len = len; 1933 } 1934 total_len += len; 1935 } 1936 1937 iov_iter_init(i, type, iov, nr_segs, total_len); 1938 if (iov == *iovp) 1939 *iovp = NULL; 1940 else 1941 *iovp = iov; 1942 return total_len; 1943 } 1944 1945 /** 1946 * import_iovec() - Copy an array of &struct iovec from userspace 1947 * into the kernel, check that it is valid, and initialize a new 1948 * &struct iov_iter iterator to access it. 1949 * 1950 * @type: One of %READ or %WRITE. 1951 * @uvec: Pointer to the userspace array. 1952 * @nr_segs: Number of elements in userspace array. 1953 * @fast_segs: Number of elements in @iov. 1954 * @iovp: (input and output parameter) Pointer to pointer to (usually small 1955 * on-stack) kernel array. 1956 * @i: Pointer to iterator that will be initialized on success. 1957 * 1958 * If the array pointed to by *@iov is large enough to hold all @nr_segs, 1959 * then this function places %NULL in *@iov on return. Otherwise, a new 1960 * array will be allocated and the result placed in *@iov. This means that 1961 * the caller may call kfree() on *@iov regardless of whether the small 1962 * on-stack array was used or not (and regardless of whether this function 1963 * returns an error or not). 
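 *
 * A sketch of the usual calling pattern (local names are illustrative and
 * error handling is trimmed):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... use the iterator, then:
 *	kfree(iov);	// safe whether or not iovstack was used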
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *	iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are in the
	 * same union. ITER_BVEC _may_ be the same size on some archs, but on
	 * others it is not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
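
/*
 * Usage sketch for the save/restore pair above (illustrative only, not part
 * of the build; the kernel buffer and length are assumed to come from the
 * caller):
 *
 *	struct iov_iter_state state;
 *	size_t copied;
 *
 *	iov_iter_save_state(iter, &state);
 *	copied = copy_to_iter(kbuf, len, iter);
 *	if (copied != len) {
 *		iov_iter_restore(iter, &state);	// rewind the iterator
 *		return -EFAULT;
 *	}
 */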