// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) { \
	size_t __maybe_unused off = 0; \
	len = n; \
	base = __p + i->iov_offset; \
	len -= (STEP); \
	i->iov_offset += len; \
	n = len; \
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) { \
	size_t off = 0; \
	size_t skip = i->iov_offset; \
	do { \
		len = min(n, __p->iov_len - skip); \
		if (likely(len)) { \
			base = __p->iov_base + skip; \
			len -= (STEP); \
			off += len; \
			skip += len; \
			n -= len; \
			if (skip < __p->iov_len) \
				break; \
		} \
		__p++; \
		skip = 0; \
	} while (n); \
	i->iov_offset = skip; \
	n = off; \
}

#define iterate_bvec(i, n, base, len, off, p, STEP) { \
	size_t off = 0; \
	unsigned skip = i->iov_offset; \
	while (n) { \
		unsigned offset = p->bv_offset + skip; \
		unsigned left; \
		void *kaddr = kmap_local_page(p->bv_page + \
					offset / PAGE_SIZE); \
		base = kaddr + offset % PAGE_SIZE; \
		len = min(min(n, (size_t)(p->bv_len - skip)), \
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \
		left = (STEP); \
		kunmap_local(kaddr); \
		len -= left; \
		off += len; \
		skip += len; \
		if (skip == p->bv_len) { \
			skip = 0; \
			p++; \
		} \
		n -= len; \
		if (left) \
			break; \
	} \
	i->iov_offset = skip; \
	n = off; \
}

#define iterate_xarray(i, n, base, len, __off, STEP) { \
	__label__ __out; \
	size_t __off = 0; \
	struct folio *folio; \
	loff_t start = i->xarray_start + i->iov_offset; \
	pgoff_t index = start / PAGE_SIZE; \
	XA_STATE(xas, i->xarray, index); \
 \
	len = PAGE_SIZE - offset_in_page(start); \
	rcu_read_lock(); \
	xas_for_each(&xas, folio, ULONG_MAX) { \
		unsigned left; \
		size_t offset; \
		if (xas_retry(&xas, folio)) \
			continue; \
		if (WARN_ON(xa_is_value(folio))) \
			break; \
		if (WARN_ON(folio_test_hugetlb(folio))) \
			break; \
		offset = offset_in_folio(folio, start + __off); \
		while (offset < folio_size(folio)) { \
			base = kmap_local_folio(folio, offset); \
			len = min(n, len); \
			left = (STEP); \
			kunmap_local(base); \
			len -= left; \
			__off += len; \
			n -= len; \
			if (left || n == 0) \
				goto __out; \
			offset += len; \
			len = PAGE_SIZE; \
		} \
	} \
__out: \
	rcu_read_unlock(); \
	i->iov_offset += __off; \
	n = __off; \
}

#define __iterate_and_advance(i, n, base, len, off, I, K) { \
	if (unlikely(i->count < n)) \
		n = i->count; \
	if (likely(n)) { \
		if (likely(iter_is_ubuf(i))) { \
			void __user *base; \
			size_t len; \
			iterate_buf(i, n, base, len, off, \
						i->ubuf, (I)) \
		} else if (likely(iter_is_iovec(i))) { \
			const struct iovec *iov = i->iov; \
			void __user *base; \
			size_t len; \
			iterate_iovec(i, n, base, len, off, \
						iov, (I)) \
			i->nr_segs -= iov - i->iov; \
			i->iov = iov; \
		} else if (iov_iter_is_bvec(i)) { \
			const struct bio_vec *bvec = i->bvec; \
			void *base; \
			size_t len; \
			iterate_bvec(i, n, base, len, off, \
						bvec, (K)) \
			i->nr_segs -= bvec - i->bvec; \
			i->bvec = bvec; \
		} else if (iov_iter_is_kvec(i)) { \
			const struct kvec *kvec = i->kvec; \
			void *base; \
			size_t len; \
			iterate_iovec(i, n, base, len, off, \
						kvec, (K)) \
			i->nr_segs -= kvec - i->kvec; \
			i->kvec = kvec; \
		} else if (iov_iter_is_xarray(i)) { \
			void *base; \
			size_t len; \
			iterate_xarray(i, n, base, len, off, \
						(K)) \
		} \
		i->count -= n; \
	} \
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->last_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != abs(i->last_offset)))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
	struct page *page = alloc_page(GFP_USER);
	if (page) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
			.page = page,
			.offset = 0,
			.len = size
		};
	}
	return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
{
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
		.page = page,
		.offset = offset,
		.len = size
	};
	get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
	if (buf->ops == &default_pipe_buf_ops)
		return buf->len;	// buf->offset is 0 for those
	else
		return -(buf->offset + buf->len);
}

static struct page *append_pipe(struct iov_iter *i, size_t size,
				unsigned int *off)
{
	struct pipe_inode_info *pipe = i->pipe;
	int offset = i->last_offset;
	struct pipe_buffer *buf;
	struct page *page;

	if (offset > 0 && offset < PAGE_SIZE) {
		// some space in the last buffer; add to it
		buf = pipe_buf(pipe, pipe->head - 1);
		size = min_t(size_t, size, PAGE_SIZE - offset);
		buf->len += size;
		i->last_offset += size;
		i->count -= size;
		*off = offset;
		return buf->page;
	}
	// OK, we need a new buffer
	*off = 0;
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return NULL;
	page = push_anon(pipe, size);
	if (!page)
		return NULL;
	i->head = pipe->head - 1;
	i->last_offset = size;
	i->count -= size;
	return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	if (offset && i->last_offset == -offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			buf->len += bytes;
			i->last_offset -= bytes;
			i->count -= bytes;
			return bytes;
		}
	}
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return 0;

	push_page(pipe, page, offset, bytes);
	i->last_offset = -(offset + bytes);
	i->head = head;
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

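/*
 * Illustrative sketch of the usual caller pattern (compare
 * generic_perform_write()): copy from the iterator with page faults
 * disabled and, on a short copy, fault the next chunk in by hand before
 * retrying.  Not a drop-in helper; error handling is reduced to the bare
 * minimum.
 */
#if 0
static ssize_t example_copy_chunk_from_user_iter(struct page *page,
						 struct iov_iter *from)
{
	size_t chunk = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
	size_t copied;

	do {
		/* returns how much could *not* be faulted in */
		if (fault_in_iov_iter_readable(from, chunk) == chunk)
			return -EFAULT;
		/* kmap_atomic() inside disables page faults for the copy */
		copied = copy_page_from_iter_atomic(page, 0, chunk, from);
	} while (!copied && iov_iter_count(from));

	return copied;
}
#endif
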
/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

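/*
 * Minimal usage sketch (hypothetical driver code): wrap a single user
 * buffer in an ITER_IOVEC iterator and copy kernel data out to it.
 * READ means user space is the destination of the transfer.
 */
#if 0
static int example_fill_user_buffer(void __user *ubuf, size_t len,
				    const void *kbuf)
{
	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_init(&iter, READ, &iov, 1, len);
	if (copy_to_iter(kbuf, len, &iter) != len)
		return -EFAULT;
	return 0;
}
#endif
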
// returns the offset in partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
	struct pipe_inode_info *pipe = i->pipe;
	int used = pipe->head - pipe->tail;
	int off = i->last_offset;

	*npages = max((int)pipe->max_usage - used, 0);

	if (off > 0 && off < PAGE_SIZE) { // anon and not full
		(*npages)++;
		return off;
	}
	return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int chunk, r;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &r);
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		off += chunk;
		bytes -= chunk;
	}
	*sump = sum;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	size_t xfer = 0;
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &off);
		unsigned long rem;
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		xfer += chunk;
		bytes -= chunk;
		if (rem) {
			iov_iter_revert(i, rem);
			break;
		}
	}
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again.  Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

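/*
 * Sketch of the opposite direction (a hypothetical ->write_iter-style
 * consumer): pull the next chunk of payload out of a WRITE iterator into
 * a kernel buffer.  The iterator is advanced by exactly the amount copied.
 */
#if 0
static ssize_t example_consume(void *kbuf, size_t len, struct iov_iter *from)
{
	size_t copied = copy_from_iter(kbuf, len, from);

	if (!copied && iov_iter_count(from))
		return -EFAULT;	/* nothing was copyable */
	return copied;
}
#endif
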
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types.  The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

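/*
 * Illustrative read-side sketch: a caller that already has the data in
 * pages can hand them straight to the iterator; copy_page_to_iter() hides
 * the differences between user copies, bvecs and pipes and reports partial
 * progress.
 */
#if 0
static ssize_t example_send_pages(struct page **pages, unsigned int nr,
				  struct iov_iter *to)
{
	ssize_t done = 0;
	unsigned int k;

	for (k = 0; k < nr && iov_iter_count(to); k++) {
		size_t n = copy_page_to_iter(pages[k], 0, PAGE_SIZE, to);

		done += n;
		if (n < PAGE_SIZE)
			break;	/* faulted or iterator exhausted */
	}
	return done ? done : -EFAULT;
}
#endif
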
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	unsigned int chunk, off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		char *p;

		if (!page)
			return bytes - n;
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
	}
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	int off = i->last_offset;

	if (!off && !size) {
		pipe_discard_from(pipe, i->start_head); // discard everything
		return;
	}
	i->count -= size;
	while (1) {
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += abs(off) - buf->offset;
		if (size <= buf->len) {
			buf->len = size;
			i->last_offset = last_offset(buf);
			break;
		}
		size -= buf->len;
		i->head++;
		off = 0;
	}
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int head = pipe->head;

		while (head > i->start_head) {
			struct pipe_buffer *b = pipe_buf(pipe, --head);
			if (unroll < b->len) {
				b->len -= unroll;
				i->last_offset = last_offset(b);
				i->head = head;
				return;
			}
			unroll -= b->len;
			pipe_buf_release(pipe, b);
			pipe->head--;
		}
		i->last_offset = 0;
		i->head = head;
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

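/*
 * Sketch of the advance/revert pattern: a caller may consume a header and,
 * if the rest of the operation fails, wind the iterator back so that a
 * retry sees the same bytes again.  example_process_body() is a
 * hypothetical helper that is assumed to consume nothing on failure.
 */
#if 0
static int example_with_rollback(struct iov_iter *from, void *hdr,
				 size_t hdr_len)
{
	int err;

	if (copy_from_iter(hdr, hdr_len, from) != hdr_len)
		return -EFAULT;	/* the copy already advanced the iterator */

	err = example_process_body(from);
	if (err)
		iov_iter_revert(from, hdr_len);	/* put the header bytes back */
	return err;
}
#endif
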
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

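/*
 * Sketch: kernel-internal I/O that starts from pages rather than virtual
 * addresses builds an ITER_BVEC iterator.  Here a single page is wrapped
 * so it can be handed to ->read_iter(); loosely modelled on how loop-style
 * drivers read into their own pages.
 */
#if 0
static ssize_t example_read_into_page(struct file *file, loff_t pos,
				      struct page *page)
{
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_len		= PAGE_SIZE,
		.bv_offset	= 0,
	};
	struct kiocb kiocb;
	struct iov_iter iter;

	init_sync_kiocb(&kiocb, file);
	kiocb.ki_pos = pos;
	iov_iter_bvec(&iter, READ, &bvec, 1, PAGE_SIZE);
	return call_read_iter(file, &kiocb, &iter);
}
#endif
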
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.last_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && i->last_offset > 0) {
			if (i->last_offset & addr_mask)
				return false;
		}

		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

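/*
 * Sketch of the kind of check a direct-I/O path might make with
 * iov_iter_is_aligned(): every segment's address and length must be
 * aligned to the logical block size.  The masks are "bits that must be
 * zero", i.e. blocksize - 1 for a power-of-two block size.
 */
#if 0
static bool example_dio_aligned(const struct iov_iter *iter,
				unsigned int blocksize)
{
	unsigned int mask = blocksize - 1;	/* blocksize assumed pow2 */

	return iov_iter_is_aligned(iter, mask, mask);
}
#endif
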
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->last_offset > 0)
			return size | i->last_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page ***pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int npages, count, off, chunk;
	struct page **p;
	size_t left;

	if (!sanity(i))
		return -EFAULT;

	*start = off = pipe_npages(i, &npages);
	if (!npages)
		return -EFAULT;
	count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
	if (!count)
		return -ENOMEM;
	p = *pages;
	for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
		struct page *page = append_pipe(i, left, &off);
		if (!page)
			break;
		chunk = min_t(size_t, left, PAGE_SIZE - off);
		get_page(*p++ = page);
	}
	if (!npages)
		return -EFAULT;
	return maxsize - left;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{
	unsigned int n;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);

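/*
 * Sketch: pinning the pages behind an iterator, e.g. to build a
 * scatterlist for DMA.  iov_iter_get_pages_alloc2() allocates the page
 * array, takes a reference on each page and advances the iterator past
 * what it pinned; the caller owns both the references and the array.
 */
#if 0
static ssize_t example_pin_pages(struct iov_iter *iter)
{
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	int k, npages;

	bytes = iov_iter_get_pages_alloc2(iter, &pages, MAX_RW_COUNT, &offset);
	if (bytes <= 0)
		return bytes;

	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);
	/* ... hand pages[0..npages-1] (data starts at 'offset') to the device ... */
	for (k = 0; k < npages; k++)
		put_page(pages[k]);
	kvfree(pages);
	return bytes;
}
#endif
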
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		int npages;

		if (!sanity(i))
			return 0;

		pipe_npages(i, &npages);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

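/*
 * Sketch of the usual syscall-side pattern (compare do_readv() and
 * friends): import the user iovec array, do the I/O through the resulting
 * iterator, then free whatever import_iovec() may have allocated.  kfree()
 * is safe even when the on-stack array was used, because *iovp is set to
 * NULL in that case.
 */
#if 0
static ssize_t example_writev(struct file *file,
			      const struct iovec __user *vec,
			      unsigned long vlen, loff_t *pos)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(WRITE, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
	if (ret < 0)
		return ret;

	ret = vfs_iter_write(file, &iter, pos, 0);
	kfree(iov);
	return ret;
}
#endif
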
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
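
/*
 * Sketch of the save/restore pattern (io_uring and some network paths use
 * it): snapshot the iterator before an operation that may consume part of
 * it, and roll back to the snapshot if the whole thing must be retried.
 * example_do_io() is a hypothetical helper.
 */
#if 0
static ssize_t example_retryable_op(struct iov_iter *iter)
{
	struct iov_iter_state state;
	ssize_t ret;

	iov_iter_save_state(iter, &state);
	ret = example_do_io(iter);
	if (ret == -EAGAIN)
		iov_iter_restore(iter, &state);	/* retry sees the full range */
	return ret;
}
#endif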