// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
	struct page *page = alloc_page(GFP_USER);
	if (page) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
			.page = page,
			.offset = 0,
			.len = size
		};
	}
	return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
{
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
		.page = page,
		.offset = offset,
		.len = size
	};
	get_page(page);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static struct page *append_pipe(struct iov_iter *i, size_t size,
				unsigned int *off)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t offset = i->iov_offset;
	struct pipe_buffer *buf;
	struct page *page;

	if (offset && offset < PAGE_SIZE) {
		// some space in the last buffer; can we add to it?
		buf = pipe_buf(pipe, pipe->head - 1);
		if (allocated(buf)) {
			size = min_t(size_t, size, PAGE_SIZE - offset);
			buf->len += size;
			i->iov_offset += size;
			i->count -= size;
			*off = offset;
			return buf->page;
		}
	}
	// OK, we need a new buffer
	*off = 0;
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return NULL;
	page = push_anon(pipe, size);
	if (!page)
		return NULL;
	i->head = pipe->head - 1;
	i->iov_offset = size;
	i->count -= size;
	return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	if (offset && i->iov_offset == offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			buf->len += bytes;
			i->iov_offset += bytes;
			i->count -= bytes;
			return bytes;
		}
	}
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return 0;

	push_page(pipe, page, offset, bytes);
	i->iov_offset = offset + bytes;
	i->head = head;
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

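/*
 * Illustrative sketch (not part of the original file): the fault_in_iov_iter_*()
 * helpers above are normally used in a "copy with page faults disabled, fault
 * pages in on a short copy, retry" loop, much like generic_perform_write().
 * The function name and the bounce buffer here are assumptions made purely for
 * illustration.
 */
static size_t __maybe_unused example_copy_from_user_iter(void *dst, size_t len,
							  struct iov_iter *from)
{
	size_t copied = 0;

	while (copied < len) {
		size_t n;

		/* Try the copy without taking page faults ... */
		pagefault_disable();
		n = _copy_from_iter(dst + copied, len - copied, from);
		pagefault_enable();
		copied += n;
		if (copied == len)
			break;
		/* ... and fault the pages in manually if it came up short. */
		if (fault_in_iov_iter_readable(from, len - copied) == len - copied)
			break;	/* nothing could be faulted in: give up */
	}
	return copied;
}
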
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(pipe_buf(i->pipe, iter_head)) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int chunk, r;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &r);
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		off += chunk;
		bytes -= chunk;
	}
	*sump = sum;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	size_t xfer = 0;
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &off);
		unsigned long rem;
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		xfer += chunk;
		bytes -= chunk;
		if (rem) {
			iov_iter_revert(i, rem);
			break;
		}
	}
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again.  Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types.  The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

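/*
 * Illustrative sketch (not part of the original file): a typical write-path
 * consumer copies from a (usually user-backed) iterator into a kernel buffer
 * with copy_from_iter() and treats a short copy as -EFAULT.  The function
 * name and the kvmalloc'd bounce buffer are assumptions for illustration.
 */
static ssize_t __maybe_unused example_gather_write(struct iov_iter *from)
{
	size_t len = iov_iter_count(from);
	ssize_t ret = len;
	void *buf;

	buf = kvmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* copy_from_iter() advances @from by the amount actually copied */
	if (copy_from_iter(buf, len, from) != len)
		ret = -EFAULT;
	/* ... hand @buf to the device/filesystem here ... */
	kvfree(buf);
	return ret;
}
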
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	} else {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (page_copy_sane(page, offset, bytes)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	unsigned int chunk, off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		char *p;

		if (!page)
			return bytes - n;
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
	}
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

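/*
 * Illustrative sketch (not part of the original file): a read-style path that
 * feeds page-cache pages to an iterator one page at a time with
 * copy_page_to_iter().  The function name and the simple find_get_page()
 * lookup are assumptions; a real caller would use its own pagecache logic.
 */
static ssize_t __maybe_unused example_read_pages(struct address_space *mapping,
						 loff_t pos, struct iov_iter *to)
{
	ssize_t done = 0;

	while (iov_iter_count(to)) {
		pgoff_t index = pos >> PAGE_SHIFT;
		size_t offset = offset_in_page(pos);
		size_t chunk = min_t(size_t, iov_iter_count(to),
				     PAGE_SIZE - offset);
		struct page *page = find_get_page(mapping, index);
		size_t copied;

		if (!page)
			break;
		/* copy_page_to_iter() maps the page and advances @to */
		copied = copy_page_to_iter(page, offset, chunk, to);
		put_page(page);
		done += copied;
		pos += copied;
		if (copied < chunk)	/* fault or full pipe: stop early */
			break;
	}
	return done;
}
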
static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int off = i->iov_offset;

	if (!off && !size) {
		pipe_discard_from(pipe, i->start_head); // discard everything
		return;
	}
	i->count -= size;
	while (1) {
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += off - buf->offset;
		if (size <= buf->len) {
			buf->len = size;
			i->iov_offset = buf->offset + size;
			break;
		}
		size -= buf->len;
		i->head++;
		off = 0;
	}
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int head = pipe->head;

		while (head > i->start_head) {
			struct pipe_buffer *b = pipe_buf(pipe, --head);
			if (unroll < b->len) {
				b->len -= unroll;
				i->iov_offset = b->offset + b->len;
				i->head = head;
				return;
			}
			unroll -= b->len;
			pipe_buf_release(pipe, b);
			pipe->head--;
		}
		i->iov_offset = 0;
		i->head = head;
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

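/*
 * Illustrative sketch (not part of the original file): iov_iter_advance() and
 * iov_iter_revert() let a caller consume part of an iterator and then back up
 * if the operation ultimately fails.  The function name below is hypothetical.
 */
static ssize_t __maybe_unused example_copy_or_rewind(struct iov_iter *from,
						     void *dst, size_t len)
{
	size_t copied = copy_from_iter(dst, len, from);

	if (copied < len) {
		/*
		 * Undo the partial consumption so the caller sees the
		 * iterator exactly as it was before the attempt.
		 */
		iov_iter_revert(from, copied);
		return -EFAULT;
	}
	return copied;
}
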
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The caller *must* prevent the
 * pages from evaporating, either by taking a ref on them or by locking them.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

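/*
 * Illustrative sketch (not part of the original file): kernel-internal callers
 * build an iterator over their own buffers with iov_iter_kvec() (or
 * iov_iter_bvec() for pages) and then use the regular copy helpers.  The
 * function name and two-buffer layout are assumptions for illustration.
 */
static size_t __maybe_unused example_fill_from_two_buffers(void *dst, size_t len,
							    void *a, size_t a_len,
							    void *b, size_t b_len)
{
	struct kvec vec[2] = {
		{ .iov_base = a, .iov_len = a_len },
		{ .iov_base = b, .iov_len = b_len },
	};
	struct iov_iter iter;

	/* WRITE marks the kvec array as the data source for the copy below */
	iov_iter_kvec(&iter, WRITE, vec, 2, a_len + b_len);
	return copy_from_iter(dst, len, &iter);
}
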
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
			if (i->iov_offset & addr_mask)
				return false;
		}

		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

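/*
 * Illustrative sketch (not part of the original file): a direct-I/O style
 * check that falls back to buffered I/O unless every segment is block-aligned.
 * The 512-byte block size and the function name are assumptions.
 */
static bool __maybe_unused example_can_do_direct_io(const struct iov_iter *i,
						    loff_t pos)
{
	const unsigned int blkbits = 9;		/* assume 512-byte blocks */
	const unsigned int mask = (1U << blkbits) - 1;

	if (pos & mask)
		return false;
	/* every segment address and length must have the low bits clear */
	return iov_iter_is_aligned(i, mask, mask);
}
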
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->iov_offset && allocated(pipe_buf(i->pipe, i->head)))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				size_t off)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t left = maxsize;

	if (off) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head - 1);

		get_page(*pages++ = buf->page);
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			buf->len += maxsize;
			return maxsize;
		}
		buf->len = PAGE_SIZE;
	}
	while (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
		struct page *page = push_anon(pipe,
					      min_t(ssize_t, left, PAGE_SIZE));
		if (!page)
			break;
		get_page(*pages++ = page);
		left -= PAGE_SIZE;
		if (left <= 0)
			return maxsize;
	}
	return maxsize - left ? : -EFAULT;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, *start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		for (int k = 0; k < n; k++)
			get_page(*pages++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, *start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		for (int k = 0; k < n; k++)
			get_page(*p++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

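/*
 * Illustrative sketch (not part of the original file): a typical consumer of
 * the page-pinning helpers above (iov_iter_get_pages_alloc()) grabs as many
 * pages as the iterator currently covers, uses them, then drops the page
 * references and frees the array.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_pin_and_release(struct iov_iter *i)
{
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages_alloc(i, &pages, SIZE_MAX, &offset);
	if (bytes <= 0)
		return bytes;

	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	/* ... the pinned pages could be handed to a bio or DMA engine here ... */

	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	kvfree(pages);
	/* the caller normally advances the iterator by what it consumed */
	iov_iter_advance(i, bytes);
	return bytes;
}
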
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

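/*
 * Illustrative sketch (not part of the original file): iov_iter_npages() above
 * is typically used to bound an allocation before pinning pages, e.g. to size
 * a page array or a bio.  The cap of 256 pages and the helper name are
 * assumptions for illustration.
 */
static __maybe_unused struct page **example_alloc_page_array(const struct iov_iter *i,
							      int *npages)
{
	/* never ask for more than we are willing to allocate */
	*npages = iov_iter_npages(i, 256);
	if (!*npages)
		return NULL;
	return kcalloc(*npages, sizeof(struct page *), GFP_KERNEL);
}
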
static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *		on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

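/*
 * Illustrative sketch (not part of the original file): a readv(2)-style body
 * using import_iovec().  Note that *iov must be freed with kfree() whether or
 * not the small on-stack array ended up being used.  The function name and
 * the kernel data source are assumptions for illustration.
 */
static ssize_t __maybe_unused example_readv_fill(const struct iovec __user *uvec,
						 unsigned long nr_segs,
						 const void *src, size_t src_len)
{
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	/* hand the user's buffers some kernel data */
	ret = copy_to_iter(src, min_t(size_t, src_len, iov_iter_count(&iter)),
			   &iter);

	kfree(iov);	/* safe even if the on-stack array was used */
	return ret;
}
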
/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
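
/*
 * Illustrative sketch (not part of the original file): the save/restore pair
 * is used when a consumer may have to retry an operation from the same point,
 * e.g. when resubmitting a short transfer.  Names are hypothetical.
 */
static ssize_t __maybe_unused example_copy_with_retry(void *dst, size_t len,
						      struct iov_iter *src)
{
	struct iov_iter_state state;
	size_t copied;

	iov_iter_save_state(src, &state);
	copied = copy_from_iter(dst, len, src);
	if (copied != len) {
		/* roll the iterator back and let the caller try again */
		iov_iter_restore(src, &state);
		return -EAGAIN;
	}
	return copied;
}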