1 // SPDX-License-Identifier: GPL-2.0-only 2 #include <crypto/hash.h> 3 #include <linux/export.h> 4 #include <linux/bvec.h> 5 #include <linux/fault-inject-usercopy.h> 6 #include <linux/uio.h> 7 #include <linux/pagemap.h> 8 #include <linux/highmem.h> 9 #include <linux/slab.h> 10 #include <linux/vmalloc.h> 11 #include <linux/splice.h> 12 #include <linux/compat.h> 13 #include <net/checksum.h> 14 #include <linux/scatterlist.h> 15 #include <linux/instrumented.h> 16 17 #define PIPE_PARANOIA /* for now */ 18 19 /* covers ubuf and kbuf alike */ 20 #define iterate_buf(i, n, base, len, off, __p, STEP) { \ 21 size_t __maybe_unused off = 0; \ 22 len = n; \ 23 base = __p + i->iov_offset; \ 24 len -= (STEP); \ 25 i->iov_offset += len; \ 26 n = len; \ 27 } 28 29 /* covers iovec and kvec alike */ 30 #define iterate_iovec(i, n, base, len, off, __p, STEP) { \ 31 size_t off = 0; \ 32 size_t skip = i->iov_offset; \ 33 do { \ 34 len = min(n, __p->iov_len - skip); \ 35 if (likely(len)) { \ 36 base = __p->iov_base + skip; \ 37 len -= (STEP); \ 38 off += len; \ 39 skip += len; \ 40 n -= len; \ 41 if (skip < __p->iov_len) \ 42 break; \ 43 } \ 44 __p++; \ 45 skip = 0; \ 46 } while (n); \ 47 i->iov_offset = skip; \ 48 n = off; \ 49 } 50 51 #define iterate_bvec(i, n, base, len, off, p, STEP) { \ 52 size_t off = 0; \ 53 unsigned skip = i->iov_offset; \ 54 while (n) { \ 55 unsigned offset = p->bv_offset + skip; \ 56 unsigned left; \ 57 void *kaddr = kmap_local_page(p->bv_page + \ 58 offset / PAGE_SIZE); \ 59 base = kaddr + offset % PAGE_SIZE; \ 60 len = min(min(n, (size_t)(p->bv_len - skip)), \ 61 (size_t)(PAGE_SIZE - offset % PAGE_SIZE)); \ 62 left = (STEP); \ 63 kunmap_local(kaddr); \ 64 len -= left; \ 65 off += len; \ 66 skip += len; \ 67 if (skip == p->bv_len) { \ 68 skip = 0; \ 69 p++; \ 70 } \ 71 n -= len; \ 72 if (left) \ 73 break; \ 74 } \ 75 i->iov_offset = skip; \ 76 n = off; \ 77 } 78 79 #define iterate_xarray(i, n, base, len, __off, STEP) { \ 80 __label__ __out; \ 81 size_t __off = 0; \ 82 struct folio *folio; \ 83 loff_t start = i->xarray_start + i->iov_offset; \ 84 pgoff_t index = start / PAGE_SIZE; \ 85 XA_STATE(xas, i->xarray, index); \ 86 \ 87 len = PAGE_SIZE - offset_in_page(start); \ 88 rcu_read_lock(); \ 89 xas_for_each(&xas, folio, ULONG_MAX) { \ 90 unsigned left; \ 91 size_t offset; \ 92 if (xas_retry(&xas, folio)) \ 93 continue; \ 94 if (WARN_ON(xa_is_value(folio))) \ 95 break; \ 96 if (WARN_ON(folio_test_hugetlb(folio))) \ 97 break; \ 98 offset = offset_in_folio(folio, start + __off); \ 99 while (offset < folio_size(folio)) { \ 100 base = kmap_local_folio(folio, offset); \ 101 len = min(n, len); \ 102 left = (STEP); \ 103 kunmap_local(base); \ 104 len -= left; \ 105 __off += len; \ 106 n -= len; \ 107 if (left || n == 0) \ 108 goto __out; \ 109 offset += len; \ 110 len = PAGE_SIZE; \ 111 } \ 112 } \ 113 __out: \ 114 rcu_read_unlock(); \ 115 i->iov_offset += __off; \ 116 n = __off; \ 117 } 118 119 #define __iterate_and_advance(i, n, base, len, off, I, K) { \ 120 if (unlikely(i->count < n)) \ 121 n = i->count; \ 122 if (likely(n)) { \ 123 if (likely(iter_is_ubuf(i))) { \ 124 void __user *base; \ 125 size_t len; \ 126 iterate_buf(i, n, base, len, off, \ 127 i->ubuf, (I)) \ 128 } else if (likely(iter_is_iovec(i))) { \ 129 const struct iovec *iov = i->iov; \ 130 void __user *base; \ 131 size_t len; \ 132 iterate_iovec(i, n, base, len, off, \ 133 iov, (I)) \ 134 i->nr_segs -= iov - i->iov; \ 135 i->iov = iov; \ 136 } else if (iov_iter_is_bvec(i)) { \ 137 const struct bio_vec *bvec = i->bvec; \ 138 
void *base; \ 139 size_t len; \ 140 iterate_bvec(i, n, base, len, off, \ 141 bvec, (K)) \ 142 i->nr_segs -= bvec - i->bvec; \ 143 i->bvec = bvec; \ 144 } else if (iov_iter_is_kvec(i)) { \ 145 const struct kvec *kvec = i->kvec; \ 146 void *base; \ 147 size_t len; \ 148 iterate_iovec(i, n, base, len, off, \ 149 kvec, (K)) \ 150 i->nr_segs -= kvec - i->kvec; \ 151 i->kvec = kvec; \ 152 } else if (iov_iter_is_xarray(i)) { \ 153 void *base; \ 154 size_t len; \ 155 iterate_xarray(i, n, base, len, off, \ 156 (K)) \ 157 } \ 158 i->count -= n; \ 159 } \ 160 } 161 #define iterate_and_advance(i, n, base, len, off, I, K) \ 162 __iterate_and_advance(i, n, base, len, off, I, ((void)(K),0)) 163 164 static int copyout(void __user *to, const void *from, size_t n) 165 { 166 if (should_fail_usercopy()) 167 return n; 168 if (access_ok(to, n)) { 169 instrument_copy_to_user(to, from, n); 170 n = raw_copy_to_user(to, from, n); 171 } 172 return n; 173 } 174 175 static int copyin(void *to, const void __user *from, size_t n) 176 { 177 if (should_fail_usercopy()) 178 return n; 179 if (access_ok(from, n)) { 180 instrument_copy_from_user(to, from, n); 181 n = raw_copy_from_user(to, from, n); 182 } 183 return n; 184 } 185 186 static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe, 187 unsigned int slot) 188 { 189 return &pipe->bufs[slot & (pipe->ring_size - 1)]; 190 } 191 192 #ifdef PIPE_PARANOIA 193 static bool sanity(const struct iov_iter *i) 194 { 195 struct pipe_inode_info *pipe = i->pipe; 196 unsigned int p_head = pipe->head; 197 unsigned int p_tail = pipe->tail; 198 unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); 199 unsigned int i_head = i->head; 200 unsigned int idx; 201 202 if (i->last_offset) { 203 struct pipe_buffer *p; 204 if (unlikely(p_occupancy == 0)) 205 goto Bad; // pipe must be non-empty 206 if (unlikely(i_head != p_head - 1)) 207 goto Bad; // must be at the last buffer... 208 209 p = pipe_buf(pipe, i_head); 210 if (unlikely(p->offset + p->len != abs(i->last_offset))) 211 goto Bad; // ... 
at the end of segment 212 } else { 213 if (i_head != p_head) 214 goto Bad; // must be right after the last buffer 215 } 216 return true; 217 Bad: 218 printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset); 219 printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", 220 p_head, p_tail, pipe->ring_size); 221 for (idx = 0; idx < pipe->ring_size; idx++) 222 printk(KERN_ERR "[%p %p %d %d]\n", 223 pipe->bufs[idx].ops, 224 pipe->bufs[idx].page, 225 pipe->bufs[idx].offset, 226 pipe->bufs[idx].len); 227 WARN_ON(1); 228 return false; 229 } 230 #else 231 #define sanity(i) true 232 #endif 233 234 static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size) 235 { 236 struct page *page = alloc_page(GFP_USER); 237 if (page) { 238 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); 239 *buf = (struct pipe_buffer) { 240 .ops = &default_pipe_buf_ops, 241 .page = page, 242 .offset = 0, 243 .len = size 244 }; 245 } 246 return page; 247 } 248 249 static void push_page(struct pipe_inode_info *pipe, struct page *page, 250 unsigned int offset, unsigned int size) 251 { 252 struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); 253 *buf = (struct pipe_buffer) { 254 .ops = &page_cache_pipe_buf_ops, 255 .page = page, 256 .offset = offset, 257 .len = size 258 }; 259 get_page(page); 260 } 261 262 static inline int last_offset(const struct pipe_buffer *buf) 263 { 264 if (buf->ops == &default_pipe_buf_ops) 265 return buf->len; // buf->offset is 0 for those 266 else 267 return -(buf->offset + buf->len); 268 } 269 270 static struct page *append_pipe(struct iov_iter *i, size_t size, 271 unsigned int *off) 272 { 273 struct pipe_inode_info *pipe = i->pipe; 274 int offset = i->last_offset; 275 struct pipe_buffer *buf; 276 struct page *page; 277 278 if (offset > 0 && offset < PAGE_SIZE) { 279 // some space in the last buffer; add to it 280 buf = pipe_buf(pipe, pipe->head - 1); 281 size = min_t(size_t, size, PAGE_SIZE - offset); 282 buf->len += size; 283 i->last_offset += size; 284 i->count -= size; 285 *off = offset; 286 return buf->page; 287 } 288 // OK, we need a new buffer 289 *off = 0; 290 size = min_t(size_t, size, PAGE_SIZE); 291 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 292 return NULL; 293 page = push_anon(pipe, size); 294 if (!page) 295 return NULL; 296 i->head = pipe->head - 1; 297 i->last_offset = size; 298 i->count -= size; 299 return page; 300 } 301 302 static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, 303 struct iov_iter *i) 304 { 305 struct pipe_inode_info *pipe = i->pipe; 306 unsigned int head = pipe->head; 307 308 if (unlikely(bytes > i->count)) 309 bytes = i->count; 310 311 if (unlikely(!bytes)) 312 return 0; 313 314 if (!sanity(i)) 315 return 0; 316 317 if (offset && i->last_offset == -offset) { // could we merge it? 318 struct pipe_buffer *buf = pipe_buf(pipe, head - 1); 319 if (buf->page == page) { 320 buf->len += bytes; 321 i->last_offset -= bytes; 322 i->count -= bytes; 323 return bytes; 324 } 325 } 326 if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) 327 return 0; 328 329 push_page(pipe, page, offset, bytes); 330 i->last_offset = -(offset + bytes); 331 i->head = head; 332 i->count -= bytes; 333 return bytes; 334 } 335 336 /* 337 * fault_in_iov_iter_readable - fault in iov iterator for reading 338 * @i: iterator 339 * @size: maximum length 340 * 341 * Fault in one or more iovecs of the given iov_iter, to a maximum length of 342 * @size. For each iovec, fault in each page that constitutes the iovec. 
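 *
 * A minimal sketch of the usual caller pattern (as in a buffered-write path
 * that must not fault while its destination page is locked); names other
 * than the two helpers below are assumed caller context:
 *
 *	if (fault_in_iov_iter_readable(i, bytes) == bytes)
 *		return -EFAULT;		// nothing could be faulted in
 *	// ...lock the destination page, then copy with faults disabled:
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	// on a short copy, shrink 'bytes' and retry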
343 * 344 * Returns the number of bytes not faulted in (like copy_to_user() and 345 * copy_from_user()). 346 * 347 * Always returns 0 for non-userspace iterators. 348 */ 349 size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) 350 { 351 if (iter_is_ubuf(i)) { 352 size_t n = min(size, iov_iter_count(i)); 353 n -= fault_in_readable(i->ubuf + i->iov_offset, n); 354 return size - n; 355 } else if (iter_is_iovec(i)) { 356 size_t count = min(size, iov_iter_count(i)); 357 const struct iovec *p; 358 size_t skip; 359 360 size -= count; 361 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { 362 size_t len = min(count, p->iov_len - skip); 363 size_t ret; 364 365 if (unlikely(!len)) 366 continue; 367 ret = fault_in_readable(p->iov_base + skip, len); 368 count -= len - ret; 369 if (ret) 370 break; 371 } 372 return count + size; 373 } 374 return 0; 375 } 376 EXPORT_SYMBOL(fault_in_iov_iter_readable); 377 378 /* 379 * fault_in_iov_iter_writeable - fault in iov iterator for writing 380 * @i: iterator 381 * @size: maximum length 382 * 383 * Faults in the iterator using get_user_pages(), i.e., without triggering 384 * hardware page faults. This is primarily useful when we already know that 385 * some or all of the pages in @i aren't in memory. 386 * 387 * Returns the number of bytes not faulted in, like copy_to_user() and 388 * copy_from_user(). 389 * 390 * Always returns 0 for non-user-space iterators. 391 */ 392 size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) 393 { 394 if (iter_is_ubuf(i)) { 395 size_t n = min(size, iov_iter_count(i)); 396 n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n); 397 return size - n; 398 } else if (iter_is_iovec(i)) { 399 size_t count = min(size, iov_iter_count(i)); 400 const struct iovec *p; 401 size_t skip; 402 403 size -= count; 404 for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { 405 size_t len = min(count, p->iov_len - skip); 406 size_t ret; 407 408 if (unlikely(!len)) 409 continue; 410 ret = fault_in_safe_writeable(p->iov_base + skip, len); 411 count -= len - ret; 412 if (ret) 413 break; 414 } 415 return count + size; 416 } 417 return 0; 418 } 419 EXPORT_SYMBOL(fault_in_iov_iter_writeable); 420 421 void iov_iter_init(struct iov_iter *i, unsigned int direction, 422 const struct iovec *iov, unsigned long nr_segs, 423 size_t count) 424 { 425 WARN_ON(direction & ~(READ | WRITE)); 426 *i = (struct iov_iter) { 427 .iter_type = ITER_IOVEC, 428 .nofault = false, 429 .user_backed = true, 430 .data_source = direction, 431 .iov = iov, 432 .nr_segs = nr_segs, 433 .iov_offset = 0, 434 .count = count 435 }; 436 } 437 EXPORT_SYMBOL(iov_iter_init); 438 439 // returns the offset in partial buffer (if any) 440 static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages) 441 { 442 struct pipe_inode_info *pipe = i->pipe; 443 int used = pipe->head - pipe->tail; 444 int off = i->last_offset; 445 446 *npages = max((int)pipe->max_usage - used, 0); 447 448 if (off > 0 && off < PAGE_SIZE) { // anon and not full 449 (*npages)++; 450 return off; 451 } 452 return 0; 453 } 454 455 static size_t copy_pipe_to_iter(const void *addr, size_t bytes, 456 struct iov_iter *i) 457 { 458 unsigned int off, chunk; 459 460 if (unlikely(bytes > i->count)) 461 bytes = i->count; 462 if (unlikely(!bytes)) 463 return 0; 464 465 if (!sanity(i)) 466 return 0; 467 468 for (size_t n = bytes; n; n -= chunk) { 469 struct page *page = append_pipe(i, n, &off); 470 chunk = min_t(size_t, n, PAGE_SIZE - off); 471 if (!page) 472 return 
bytes - n; 473 memcpy_to_page(page, off, addr, chunk); 474 addr += chunk; 475 } 476 return bytes; 477 } 478 479 static __wsum csum_and_memcpy(void *to, const void *from, size_t len, 480 __wsum sum, size_t off) 481 { 482 __wsum next = csum_partial_copy_nocheck(from, to, len); 483 return csum_block_add(sum, next, off); 484 } 485 486 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, 487 struct iov_iter *i, __wsum *sump) 488 { 489 __wsum sum = *sump; 490 size_t off = 0; 491 unsigned int chunk, r; 492 493 if (unlikely(bytes > i->count)) 494 bytes = i->count; 495 if (unlikely(!bytes)) 496 return 0; 497 498 if (!sanity(i)) 499 return 0; 500 501 while (bytes) { 502 struct page *page = append_pipe(i, bytes, &r); 503 char *p; 504 505 if (!page) 506 break; 507 chunk = min_t(size_t, bytes, PAGE_SIZE - r); 508 p = kmap_local_page(page); 509 sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); 510 kunmap_local(p); 511 off += chunk; 512 bytes -= chunk; 513 } 514 *sump = sum; 515 return off; 516 } 517 518 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 519 { 520 if (unlikely(iov_iter_is_pipe(i))) 521 return copy_pipe_to_iter(addr, bytes, i); 522 if (user_backed_iter(i)) 523 might_fault(); 524 iterate_and_advance(i, bytes, base, len, off, 525 copyout(base, addr + off, len), 526 memcpy(base, addr + off, len) 527 ) 528 529 return bytes; 530 } 531 EXPORT_SYMBOL(_copy_to_iter); 532 533 #ifdef CONFIG_ARCH_HAS_COPY_MC 534 static int copyout_mc(void __user *to, const void *from, size_t n) 535 { 536 if (access_ok(to, n)) { 537 instrument_copy_to_user(to, from, n); 538 n = copy_mc_to_user((__force void *) to, from, n); 539 } 540 return n; 541 } 542 543 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, 544 struct iov_iter *i) 545 { 546 size_t xfer = 0; 547 unsigned int off, chunk; 548 549 if (unlikely(bytes > i->count)) 550 bytes = i->count; 551 if (unlikely(!bytes)) 552 return 0; 553 554 if (!sanity(i)) 555 return 0; 556 557 while (bytes) { 558 struct page *page = append_pipe(i, bytes, &off); 559 unsigned long rem; 560 char *p; 561 562 if (!page) 563 break; 564 chunk = min_t(size_t, bytes, PAGE_SIZE - off); 565 p = kmap_local_page(page); 566 rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); 567 chunk -= rem; 568 kunmap_local(p); 569 xfer += chunk; 570 bytes -= chunk; 571 if (rem) { 572 iov_iter_revert(i, rem); 573 break; 574 } 575 } 576 return xfer; 577 } 578 579 /** 580 * _copy_mc_to_iter - copy to iter with source memory error exception handling 581 * @addr: source kernel address 582 * @bytes: total transfer length 583 * @i: destination iterator 584 * 585 * The pmem driver deploys this for the dax operation 586 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the 587 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes 588 * successfully copied. 589 * 590 * The main differences between this and typical _copy_to_iter(). 591 * 592 * * Typical tail/residue handling after a fault retries the copy 593 * byte-by-byte until the fault happens again. Re-triggering machine 594 * checks is potentially fatal so the implementation uses source 595 * alignment and poison alignment assumptions to avoid re-triggering 596 * hardware exceptions. 597 * 598 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. 599 * Compare to copy_to_iter() where only ITER_IOVEC attempts might return 600 * a short copy. 
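 *
 *   A short return is therefore residue, not necessarily a bug; a sketch of
 *   the usual caller-side handling (names illustrative only):
 *
 *	copied = _copy_mc_to_iter(src, len, i);
 *	if (copied != len)
 *		return copied ? copied : -EIO;	// partial progress or hard failure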
601 * 602 * Return: number of bytes copied (may be %0) 603 */ 604 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) 605 { 606 if (unlikely(iov_iter_is_pipe(i))) 607 return copy_mc_pipe_to_iter(addr, bytes, i); 608 if (user_backed_iter(i)) 609 might_fault(); 610 __iterate_and_advance(i, bytes, base, len, off, 611 copyout_mc(base, addr + off, len), 612 copy_mc_to_kernel(base, addr + off, len) 613 ) 614 615 return bytes; 616 } 617 EXPORT_SYMBOL_GPL(_copy_mc_to_iter); 618 #endif /* CONFIG_ARCH_HAS_COPY_MC */ 619 620 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) 621 { 622 if (unlikely(iov_iter_is_pipe(i))) { 623 WARN_ON(1); 624 return 0; 625 } 626 if (user_backed_iter(i)) 627 might_fault(); 628 iterate_and_advance(i, bytes, base, len, off, 629 copyin(addr + off, base, len), 630 memcpy(addr + off, base, len) 631 ) 632 633 return bytes; 634 } 635 EXPORT_SYMBOL(_copy_from_iter); 636 637 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i) 638 { 639 if (unlikely(iov_iter_is_pipe(i))) { 640 WARN_ON(1); 641 return 0; 642 } 643 iterate_and_advance(i, bytes, base, len, off, 644 __copy_from_user_inatomic_nocache(addr + off, base, len), 645 memcpy(addr + off, base, len) 646 ) 647 648 return bytes; 649 } 650 EXPORT_SYMBOL(_copy_from_iter_nocache); 651 652 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE 653 /** 654 * _copy_from_iter_flushcache - write destination through cpu cache 655 * @addr: destination kernel address 656 * @bytes: total transfer length 657 * @i: source iterator 658 * 659 * The pmem driver arranges for filesystem-dax to use this facility via 660 * dax_copy_from_iter() for ensuring that writes to persistent memory 661 * are flushed through the CPU cache. It is differentiated from 662 * _copy_from_iter_nocache() in that guarantees all data is flushed for 663 * all iterator types. The _copy_from_iter_nocache() only attempts to 664 * bypass the cache for the ITER_IOVEC case, and on some archs may use 665 * instructions that strand dirty-data in the cache. 666 * 667 * Return: number of bytes copied (may be %0) 668 */ 669 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i) 670 { 671 if (unlikely(iov_iter_is_pipe(i))) { 672 WARN_ON(1); 673 return 0; 674 } 675 iterate_and_advance(i, bytes, base, len, off, 676 __copy_from_user_flushcache(addr + off, base, len), 677 memcpy_flushcache(addr + off, base, len) 678 ) 679 680 return bytes; 681 } 682 EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache); 683 #endif 684 685 static inline bool page_copy_sane(struct page *page, size_t offset, size_t n) 686 { 687 struct page *head; 688 size_t v = n + offset; 689 690 /* 691 * The general case needs to access the page order in order 692 * to compute the page size. 693 * However, we mostly deal with order-0 pages and thus can 694 * avoid a possible cache line miss for requests that fit all 695 * page orders. 
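 * For example, with 4K pages, offset == 1024 and n == 2048 give v == 3072,
 * which passes the PAGE_SIZE check below without ever touching
 * compound_head().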
696 */ 697 if (n <= v && v <= PAGE_SIZE) 698 return true; 699 700 head = compound_head(page); 701 v += (page - head) << PAGE_SHIFT; 702 703 if (likely(n <= v && v <= (page_size(head)))) 704 return true; 705 WARN_ON(1); 706 return false; 707 } 708 709 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 710 struct iov_iter *i) 711 { 712 if (unlikely(iov_iter_is_pipe(i))) { 713 return copy_page_to_iter_pipe(page, offset, bytes, i); 714 } else { 715 void *kaddr = kmap_local_page(page); 716 size_t wanted = _copy_to_iter(kaddr + offset, bytes, i); 717 kunmap_local(kaddr); 718 return wanted; 719 } 720 } 721 722 size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, 723 struct iov_iter *i) 724 { 725 size_t res = 0; 726 if (unlikely(!page_copy_sane(page, offset, bytes))) 727 return 0; 728 page += offset / PAGE_SIZE; // first subpage 729 offset %= PAGE_SIZE; 730 while (1) { 731 size_t n = __copy_page_to_iter(page, offset, 732 min(bytes, (size_t)PAGE_SIZE - offset), i); 733 res += n; 734 bytes -= n; 735 if (!bytes || !n) 736 break; 737 offset += n; 738 if (offset == PAGE_SIZE) { 739 page++; 740 offset = 0; 741 } 742 } 743 return res; 744 } 745 EXPORT_SYMBOL(copy_page_to_iter); 746 747 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, 748 struct iov_iter *i) 749 { 750 if (page_copy_sane(page, offset, bytes)) { 751 void *kaddr = kmap_local_page(page); 752 size_t wanted = _copy_from_iter(kaddr + offset, bytes, i); 753 kunmap_local(kaddr); 754 return wanted; 755 } 756 return 0; 757 } 758 EXPORT_SYMBOL(copy_page_from_iter); 759 760 static size_t pipe_zero(size_t bytes, struct iov_iter *i) 761 { 762 unsigned int chunk, off; 763 764 if (unlikely(bytes > i->count)) 765 bytes = i->count; 766 if (unlikely(!bytes)) 767 return 0; 768 769 if (!sanity(i)) 770 return 0; 771 772 for (size_t n = bytes; n; n -= chunk) { 773 struct page *page = append_pipe(i, n, &off); 774 char *p; 775 776 if (!page) 777 return bytes - n; 778 chunk = min_t(size_t, n, PAGE_SIZE - off); 779 p = kmap_local_page(page); 780 memset(p + off, 0, chunk); 781 kunmap_local(p); 782 } 783 return bytes; 784 } 785 786 size_t iov_iter_zero(size_t bytes, struct iov_iter *i) 787 { 788 if (unlikely(iov_iter_is_pipe(i))) 789 return pipe_zero(bytes, i); 790 iterate_and_advance(i, bytes, base, len, count, 791 clear_user(base, len), 792 memset(base, 0, len) 793 ) 794 795 return bytes; 796 } 797 EXPORT_SYMBOL(iov_iter_zero); 798 799 size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes, 800 struct iov_iter *i) 801 { 802 char *kaddr = kmap_atomic(page), *p = kaddr + offset; 803 if (unlikely(!page_copy_sane(page, offset, bytes))) { 804 kunmap_atomic(kaddr); 805 return 0; 806 } 807 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 808 kunmap_atomic(kaddr); 809 WARN_ON(1); 810 return 0; 811 } 812 iterate_and_advance(i, bytes, base, len, off, 813 copyin(p + off, base, len), 814 memcpy(p + off, base, len) 815 ) 816 kunmap_atomic(kaddr); 817 return bytes; 818 } 819 EXPORT_SYMBOL(copy_page_from_iter_atomic); 820 821 static void pipe_advance(struct iov_iter *i, size_t size) 822 { 823 struct pipe_inode_info *pipe = i->pipe; 824 int off = i->last_offset; 825 826 if (!off && !size) { 827 pipe_discard_from(pipe, i->start_head); // discard everything 828 return; 829 } 830 i->count -= size; 831 while (1) { 832 struct pipe_buffer *buf = pipe_buf(pipe, i->head); 833 if (off) /* make it relative to the beginning of buffer */ 834 size += abs(off) - buf->offset; 835 
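		/*
		 * Note: i->last_offset is positive for anon buffers this
		 * iterator allocated itself and negative for spliced
		 * page-cache buffers (see last_offset()), so abs() above
		 * yields the end of the existing data either way.
		 */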
if (size <= buf->len) { 836 buf->len = size; 837 i->last_offset = last_offset(buf); 838 break; 839 } 840 size -= buf->len; 841 i->head++; 842 off = 0; 843 } 844 pipe_discard_from(pipe, i->head + 1); // discard everything past this one 845 } 846 847 static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) 848 { 849 const struct bio_vec *bvec, *end; 850 851 if (!i->count) 852 return; 853 i->count -= size; 854 855 size += i->iov_offset; 856 857 for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) { 858 if (likely(size < bvec->bv_len)) 859 break; 860 size -= bvec->bv_len; 861 } 862 i->iov_offset = size; 863 i->nr_segs -= bvec - i->bvec; 864 i->bvec = bvec; 865 } 866 867 static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) 868 { 869 const struct iovec *iov, *end; 870 871 if (!i->count) 872 return; 873 i->count -= size; 874 875 size += i->iov_offset; // from beginning of current segment 876 for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { 877 if (likely(size < iov->iov_len)) 878 break; 879 size -= iov->iov_len; 880 } 881 i->iov_offset = size; 882 i->nr_segs -= iov - i->iov; 883 i->iov = iov; 884 } 885 886 void iov_iter_advance(struct iov_iter *i, size_t size) 887 { 888 if (unlikely(i->count < size)) 889 size = i->count; 890 if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) { 891 i->iov_offset += size; 892 i->count -= size; 893 } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) { 894 /* iovec and kvec have identical layouts */ 895 iov_iter_iovec_advance(i, size); 896 } else if (iov_iter_is_bvec(i)) { 897 iov_iter_bvec_advance(i, size); 898 } else if (iov_iter_is_pipe(i)) { 899 pipe_advance(i, size); 900 } else if (iov_iter_is_discard(i)) { 901 i->count -= size; 902 } 903 } 904 EXPORT_SYMBOL(iov_iter_advance); 905 906 void iov_iter_revert(struct iov_iter *i, size_t unroll) 907 { 908 if (!unroll) 909 return; 910 if (WARN_ON(unroll > MAX_RW_COUNT)) 911 return; 912 i->count += unroll; 913 if (unlikely(iov_iter_is_pipe(i))) { 914 struct pipe_inode_info *pipe = i->pipe; 915 unsigned int head = pipe->head; 916 917 while (head > i->start_head) { 918 struct pipe_buffer *b = pipe_buf(pipe, --head); 919 if (unroll < b->len) { 920 b->len -= unroll; 921 i->last_offset = last_offset(b); 922 i->head = head; 923 return; 924 } 925 unroll -= b->len; 926 pipe_buf_release(pipe, b); 927 pipe->head--; 928 } 929 i->last_offset = 0; 930 i->head = head; 931 return; 932 } 933 if (unlikely(iov_iter_is_discard(i))) 934 return; 935 if (unroll <= i->iov_offset) { 936 i->iov_offset -= unroll; 937 return; 938 } 939 unroll -= i->iov_offset; 940 if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) { 941 BUG(); /* We should never go beyond the start of the specified 942 * range since we might then be straying into pages that 943 * aren't pinned. 944 */ 945 } else if (iov_iter_is_bvec(i)) { 946 const struct bio_vec *bvec = i->bvec; 947 while (1) { 948 size_t n = (--bvec)->bv_len; 949 i->nr_segs++; 950 if (unroll <= n) { 951 i->bvec = bvec; 952 i->iov_offset = n - unroll; 953 return; 954 } 955 unroll -= n; 956 } 957 } else { /* same logics for iovec and kvec */ 958 const struct iovec *iov = i->iov; 959 while (1) { 960 size_t n = (--iov)->iov_len; 961 i->nr_segs++; 962 if (unroll <= n) { 963 i->iov = iov; 964 i->iov_offset = n - unroll; 965 return; 966 } 967 unroll -= n; 968 } 969 } 970 } 971 EXPORT_SYMBOL(iov_iter_revert); 972 973 /* 974 * Return the count of just the current iov_iter segment. 
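 * E.g. with several segments remaining and the iterator sitting 1K into a
 * 4K segment, this is min(i->count, 3K) rather than the full remaining count.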
975 */ 976 size_t iov_iter_single_seg_count(const struct iov_iter *i) 977 { 978 if (i->nr_segs > 1) { 979 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 980 return min(i->count, i->iov->iov_len - i->iov_offset); 981 if (iov_iter_is_bvec(i)) 982 return min(i->count, i->bvec->bv_len - i->iov_offset); 983 } 984 return i->count; 985 } 986 EXPORT_SYMBOL(iov_iter_single_seg_count); 987 988 void iov_iter_kvec(struct iov_iter *i, unsigned int direction, 989 const struct kvec *kvec, unsigned long nr_segs, 990 size_t count) 991 { 992 WARN_ON(direction & ~(READ | WRITE)); 993 *i = (struct iov_iter){ 994 .iter_type = ITER_KVEC, 995 .data_source = direction, 996 .kvec = kvec, 997 .nr_segs = nr_segs, 998 .iov_offset = 0, 999 .count = count 1000 }; 1001 } 1002 EXPORT_SYMBOL(iov_iter_kvec); 1003 1004 void iov_iter_bvec(struct iov_iter *i, unsigned int direction, 1005 const struct bio_vec *bvec, unsigned long nr_segs, 1006 size_t count) 1007 { 1008 WARN_ON(direction & ~(READ | WRITE)); 1009 *i = (struct iov_iter){ 1010 .iter_type = ITER_BVEC, 1011 .data_source = direction, 1012 .bvec = bvec, 1013 .nr_segs = nr_segs, 1014 .iov_offset = 0, 1015 .count = count 1016 }; 1017 } 1018 EXPORT_SYMBOL(iov_iter_bvec); 1019 1020 void iov_iter_pipe(struct iov_iter *i, unsigned int direction, 1021 struct pipe_inode_info *pipe, 1022 size_t count) 1023 { 1024 BUG_ON(direction != READ); 1025 WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); 1026 *i = (struct iov_iter){ 1027 .iter_type = ITER_PIPE, 1028 .data_source = false, 1029 .pipe = pipe, 1030 .head = pipe->head, 1031 .start_head = pipe->head, 1032 .last_offset = 0, 1033 .count = count 1034 }; 1035 } 1036 EXPORT_SYMBOL(iov_iter_pipe); 1037 1038 /** 1039 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray 1040 * @i: The iterator to initialise. 1041 * @direction: The direction of the transfer. 1042 * @xarray: The xarray to access. 1043 * @start: The start file position. 1044 * @count: The size of the I/O buffer in bytes. 1045 * 1046 * Set up an I/O iterator to either draw data out of the pages attached to an 1047 * inode or to inject data into those pages. The pages *must* be prevented 1048 * from evaporation, either by taking a ref on them or locking them by the 1049 * caller. 1050 */ 1051 void iov_iter_xarray(struct iov_iter *i, unsigned int direction, 1052 struct xarray *xarray, loff_t start, size_t count) 1053 { 1054 BUG_ON(direction & ~1); 1055 *i = (struct iov_iter) { 1056 .iter_type = ITER_XARRAY, 1057 .data_source = direction, 1058 .xarray = xarray, 1059 .xarray_start = start, 1060 .count = count, 1061 .iov_offset = 0 1062 }; 1063 } 1064 EXPORT_SYMBOL(iov_iter_xarray); 1065 1066 /** 1067 * iov_iter_discard - Initialise an I/O iterator that discards data 1068 * @i: The iterator to initialise. 1069 * @direction: The direction of the transfer. 1070 * @count: The size of the I/O buffer in bytes. 1071 * 1072 * Set up an I/O iterator that just discards everything that's written to it. 1073 * It's only available as a READ iterator. 
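 *
 * A minimal sketch - throwing away @count bytes a caller does not want
 * (the callee name is illustrative only):
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	ret = read_into_iter(file, pos, &iter);	// the data is dropped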
1074 */ 1075 void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count) 1076 { 1077 BUG_ON(direction != READ); 1078 *i = (struct iov_iter){ 1079 .iter_type = ITER_DISCARD, 1080 .data_source = false, 1081 .count = count, 1082 .iov_offset = 0 1083 }; 1084 } 1085 EXPORT_SYMBOL(iov_iter_discard); 1086 1087 static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask, 1088 unsigned len_mask) 1089 { 1090 size_t size = i->count; 1091 size_t skip = i->iov_offset; 1092 unsigned k; 1093 1094 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1095 size_t len = i->iov[k].iov_len - skip; 1096 1097 if (len > size) 1098 len = size; 1099 if (len & len_mask) 1100 return false; 1101 if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask) 1102 return false; 1103 1104 size -= len; 1105 if (!size) 1106 break; 1107 } 1108 return true; 1109 } 1110 1111 static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask, 1112 unsigned len_mask) 1113 { 1114 size_t size = i->count; 1115 unsigned skip = i->iov_offset; 1116 unsigned k; 1117 1118 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1119 size_t len = i->bvec[k].bv_len - skip; 1120 1121 if (len > size) 1122 len = size; 1123 if (len & len_mask) 1124 return false; 1125 if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask) 1126 return false; 1127 1128 size -= len; 1129 if (!size) 1130 break; 1131 } 1132 return true; 1133 } 1134 1135 /** 1136 * iov_iter_is_aligned() - Check if the addresses and lengths of each segments 1137 * are aligned to the parameters. 1138 * 1139 * @i: &struct iov_iter to restore 1140 * @addr_mask: bit mask to check against the iov element's addresses 1141 * @len_mask: bit mask to check against the iov element's lengths 1142 * 1143 * Return: false if any addresses or lengths intersect with the provided masks 1144 */ 1145 bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, 1146 unsigned len_mask) 1147 { 1148 if (likely(iter_is_ubuf(i))) { 1149 if (i->count & len_mask) 1150 return false; 1151 if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask) 1152 return false; 1153 return true; 1154 } 1155 1156 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1157 return iov_iter_aligned_iovec(i, addr_mask, len_mask); 1158 1159 if (iov_iter_is_bvec(i)) 1160 return iov_iter_aligned_bvec(i, addr_mask, len_mask); 1161 1162 if (iov_iter_is_pipe(i)) { 1163 size_t size = i->count; 1164 1165 if (size & len_mask) 1166 return false; 1167 if (size && i->last_offset > 0) { 1168 if (i->last_offset & addr_mask) 1169 return false; 1170 } 1171 1172 return true; 1173 } 1174 1175 if (iov_iter_is_xarray(i)) { 1176 if (i->count & len_mask) 1177 return false; 1178 if ((i->xarray_start + i->iov_offset) & addr_mask) 1179 return false; 1180 } 1181 1182 return true; 1183 } 1184 EXPORT_SYMBOL_GPL(iov_iter_is_aligned); 1185 1186 static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i) 1187 { 1188 unsigned long res = 0; 1189 size_t size = i->count; 1190 size_t skip = i->iov_offset; 1191 unsigned k; 1192 1193 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1194 size_t len = i->iov[k].iov_len - skip; 1195 if (len) { 1196 res |= (unsigned long)i->iov[k].iov_base + skip; 1197 if (len > size) 1198 len = size; 1199 res |= len; 1200 size -= len; 1201 if (!size) 1202 break; 1203 } 1204 } 1205 return res; 1206 } 1207 1208 static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i) 1209 { 1210 unsigned res = 0; 1211 size_t size = i->count; 1212 unsigned skip = i->iov_offset; 1213 unsigned 
k; 1214 1215 for (k = 0; k < i->nr_segs; k++, skip = 0) { 1216 size_t len = i->bvec[k].bv_len - skip; 1217 res |= (unsigned long)i->bvec[k].bv_offset + skip; 1218 if (len > size) 1219 len = size; 1220 res |= len; 1221 size -= len; 1222 if (!size) 1223 break; 1224 } 1225 return res; 1226 } 1227 1228 unsigned long iov_iter_alignment(const struct iov_iter *i) 1229 { 1230 if (likely(iter_is_ubuf(i))) { 1231 size_t size = i->count; 1232 if (size) 1233 return ((unsigned long)i->ubuf + i->iov_offset) | size; 1234 return 0; 1235 } 1236 1237 /* iovec and kvec have identical layouts */ 1238 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1239 return iov_iter_alignment_iovec(i); 1240 1241 if (iov_iter_is_bvec(i)) 1242 return iov_iter_alignment_bvec(i); 1243 1244 if (iov_iter_is_pipe(i)) { 1245 size_t size = i->count; 1246 1247 if (size && i->last_offset > 0) 1248 return size | i->last_offset; 1249 return size; 1250 } 1251 1252 if (iov_iter_is_xarray(i)) 1253 return (i->xarray_start + i->iov_offset) | i->count; 1254 1255 return 0; 1256 } 1257 EXPORT_SYMBOL(iov_iter_alignment); 1258 1259 unsigned long iov_iter_gap_alignment(const struct iov_iter *i) 1260 { 1261 unsigned long res = 0; 1262 unsigned long v = 0; 1263 size_t size = i->count; 1264 unsigned k; 1265 1266 if (iter_is_ubuf(i)) 1267 return 0; 1268 1269 if (WARN_ON(!iter_is_iovec(i))) 1270 return ~0U; 1271 1272 for (k = 0; k < i->nr_segs; k++) { 1273 if (i->iov[k].iov_len) { 1274 unsigned long base = (unsigned long)i->iov[k].iov_base; 1275 if (v) // if not the first one 1276 res |= base | v; // this start | previous end 1277 v = base + i->iov[k].iov_len; 1278 if (size <= i->iov[k].iov_len) 1279 break; 1280 size -= i->iov[k].iov_len; 1281 } 1282 } 1283 return res; 1284 } 1285 EXPORT_SYMBOL(iov_iter_gap_alignment); 1286 1287 static int want_pages_array(struct page ***res, size_t size, 1288 size_t start, unsigned int maxpages) 1289 { 1290 unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE); 1291 1292 if (count > maxpages) 1293 count = maxpages; 1294 WARN_ON(!count); // caller should've prevented that 1295 if (!*res) { 1296 *res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); 1297 if (!*res) 1298 return 0; 1299 } 1300 return count; 1301 } 1302 1303 static ssize_t pipe_get_pages(struct iov_iter *i, 1304 struct page ***pages, size_t maxsize, unsigned maxpages, 1305 size_t *start) 1306 { 1307 unsigned int npages, count, off, chunk; 1308 struct page **p; 1309 size_t left; 1310 1311 if (!sanity(i)) 1312 return -EFAULT; 1313 1314 *start = off = pipe_npages(i, &npages); 1315 if (!npages) 1316 return -EFAULT; 1317 count = want_pages_array(pages, maxsize, off, min(npages, maxpages)); 1318 if (!count) 1319 return -ENOMEM; 1320 p = *pages; 1321 for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) { 1322 struct page *page = append_pipe(i, left, &off); 1323 if (!page) 1324 break; 1325 chunk = min_t(size_t, left, PAGE_SIZE - off); 1326 get_page(*p++ = page); 1327 } 1328 if (!npages) 1329 return -EFAULT; 1330 return maxsize - left; 1331 } 1332 1333 static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, 1334 pgoff_t index, unsigned int nr_pages) 1335 { 1336 XA_STATE(xas, xa, index); 1337 struct page *page; 1338 unsigned int ret = 0; 1339 1340 rcu_read_lock(); 1341 for (page = xas_load(&xas); page; page = xas_next(&xas)) { 1342 if (xas_retry(&xas, page)) 1343 continue; 1344 1345 /* Has the page moved or been split? 
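		 * xas_reload() re-reads the slot under rcu_read_lock(); if it
		 * no longer matches what the walk returned, the state is
		 * reset and that index is looked up again.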
*/ 1346 if (unlikely(page != xas_reload(&xas))) { 1347 xas_reset(&xas); 1348 continue; 1349 } 1350 1351 pages[ret] = find_subpage(page, xas.xa_index); 1352 get_page(pages[ret]); 1353 if (++ret == nr_pages) 1354 break; 1355 } 1356 rcu_read_unlock(); 1357 return ret; 1358 } 1359 1360 static ssize_t iter_xarray_get_pages(struct iov_iter *i, 1361 struct page ***pages, size_t maxsize, 1362 unsigned maxpages, size_t *_start_offset) 1363 { 1364 unsigned nr, offset, count; 1365 pgoff_t index; 1366 loff_t pos; 1367 1368 pos = i->xarray_start + i->iov_offset; 1369 index = pos >> PAGE_SHIFT; 1370 offset = pos & ~PAGE_MASK; 1371 *_start_offset = offset; 1372 1373 count = want_pages_array(pages, maxsize, offset, maxpages); 1374 if (!count) 1375 return -ENOMEM; 1376 nr = iter_xarray_populate_pages(*pages, i->xarray, index, count); 1377 if (nr == 0) 1378 return 0; 1379 1380 maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize); 1381 i->iov_offset += maxsize; 1382 i->count -= maxsize; 1383 return maxsize; 1384 } 1385 1386 /* must be done on non-empty ITER_UBUF or ITER_IOVEC one */ 1387 static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size) 1388 { 1389 size_t skip; 1390 long k; 1391 1392 if (iter_is_ubuf(i)) 1393 return (unsigned long)i->ubuf + i->iov_offset; 1394 1395 for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) { 1396 size_t len = i->iov[k].iov_len - skip; 1397 1398 if (unlikely(!len)) 1399 continue; 1400 if (*size > len) 1401 *size = len; 1402 return (unsigned long)i->iov[k].iov_base + skip; 1403 } 1404 BUG(); // if it had been empty, we wouldn't get called 1405 } 1406 1407 /* must be done on non-empty ITER_BVEC one */ 1408 static struct page *first_bvec_segment(const struct iov_iter *i, 1409 size_t *size, size_t *start) 1410 { 1411 struct page *page; 1412 size_t skip = i->iov_offset, len; 1413 1414 len = i->bvec->bv_len - skip; 1415 if (*size > len) 1416 *size = len; 1417 skip += i->bvec->bv_offset; 1418 page = i->bvec->bv_page + skip / PAGE_SIZE; 1419 *start = skip % PAGE_SIZE; 1420 return page; 1421 } 1422 1423 static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, 1424 struct page ***pages, size_t maxsize, 1425 unsigned int maxpages, size_t *start) 1426 { 1427 unsigned int n; 1428 1429 if (maxsize > i->count) 1430 maxsize = i->count; 1431 if (!maxsize) 1432 return 0; 1433 if (maxsize > MAX_RW_COUNT) 1434 maxsize = MAX_RW_COUNT; 1435 1436 if (likely(user_backed_iter(i))) { 1437 unsigned int gup_flags = 0; 1438 unsigned long addr; 1439 int res; 1440 1441 if (iov_iter_rw(i) != WRITE) 1442 gup_flags |= FOLL_WRITE; 1443 if (i->nofault) 1444 gup_flags |= FOLL_NOFAULT; 1445 1446 addr = first_iovec_segment(i, &maxsize); 1447 *start = addr % PAGE_SIZE; 1448 addr &= PAGE_MASK; 1449 n = want_pages_array(pages, maxsize, *start, maxpages); 1450 if (!n) 1451 return -ENOMEM; 1452 res = get_user_pages_fast(addr, n, gup_flags, *pages); 1453 if (unlikely(res <= 0)) 1454 return res; 1455 maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start); 1456 iov_iter_advance(i, maxsize); 1457 return maxsize; 1458 } 1459 if (iov_iter_is_bvec(i)) { 1460 struct page **p; 1461 struct page *page; 1462 1463 page = first_bvec_segment(i, &maxsize, start); 1464 n = want_pages_array(pages, maxsize, *start, maxpages); 1465 if (!n) 1466 return -ENOMEM; 1467 p = *pages; 1468 for (int k = 0; k < n; k++) 1469 get_page(p[k] = page + k); 1470 maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start); 1471 i->count -= maxsize; 1472 i->iov_offset += maxsize; 1473 if (i->iov_offset == 
i->bvec->bv_len) { 1474 i->iov_offset = 0; 1475 i->bvec++; 1476 i->nr_segs--; 1477 } 1478 return maxsize; 1479 } 1480 if (iov_iter_is_pipe(i)) 1481 return pipe_get_pages(i, pages, maxsize, maxpages, start); 1482 if (iov_iter_is_xarray(i)) 1483 return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); 1484 return -EFAULT; 1485 } 1486 1487 ssize_t iov_iter_get_pages2(struct iov_iter *i, 1488 struct page **pages, size_t maxsize, unsigned maxpages, 1489 size_t *start) 1490 { 1491 if (!maxpages) 1492 return 0; 1493 BUG_ON(!pages); 1494 1495 return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start); 1496 } 1497 EXPORT_SYMBOL(iov_iter_get_pages2); 1498 1499 ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, 1500 struct page ***pages, size_t maxsize, 1501 size_t *start) 1502 { 1503 ssize_t len; 1504 1505 *pages = NULL; 1506 1507 len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); 1508 if (len <= 0) { 1509 kvfree(*pages); 1510 *pages = NULL; 1511 } 1512 return len; 1513 } 1514 EXPORT_SYMBOL(iov_iter_get_pages_alloc2); 1515 1516 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, 1517 struct iov_iter *i) 1518 { 1519 __wsum sum, next; 1520 sum = *csum; 1521 if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) { 1522 WARN_ON(1); 1523 return 0; 1524 } 1525 iterate_and_advance(i, bytes, base, len, off, ({ 1526 next = csum_and_copy_from_user(base, addr + off, len); 1527 sum = csum_block_add(sum, next, off); 1528 next ? 0 : len; 1529 }), ({ 1530 sum = csum_and_memcpy(addr + off, base, len, sum, off); 1531 }) 1532 ) 1533 *csum = sum; 1534 return bytes; 1535 } 1536 EXPORT_SYMBOL(csum_and_copy_from_iter); 1537 1538 size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, 1539 struct iov_iter *i) 1540 { 1541 struct csum_state *csstate = _csstate; 1542 __wsum sum, next; 1543 1544 if (unlikely(iov_iter_is_discard(i))) { 1545 WARN_ON(1); /* for now */ 1546 return 0; 1547 } 1548 1549 sum = csum_shift(csstate->csum, csstate->off); 1550 if (unlikely(iov_iter_is_pipe(i))) 1551 bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); 1552 else iterate_and_advance(i, bytes, base, len, off, ({ 1553 next = csum_and_copy_to_user(addr + off, base, len); 1554 sum = csum_block_add(sum, next, off); 1555 next ? 
0 : len; 1556 }), ({ 1557 sum = csum_and_memcpy(base, addr + off, len, sum, off); 1558 }) 1559 ) 1560 csstate->csum = csum_shift(sum, csstate->off); 1561 csstate->off += bytes; 1562 return bytes; 1563 } 1564 EXPORT_SYMBOL(csum_and_copy_to_iter); 1565 1566 size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp, 1567 struct iov_iter *i) 1568 { 1569 #ifdef CONFIG_CRYPTO_HASH 1570 struct ahash_request *hash = hashp; 1571 struct scatterlist sg; 1572 size_t copied; 1573 1574 copied = copy_to_iter(addr, bytes, i); 1575 sg_init_one(&sg, addr, copied); 1576 ahash_request_set_crypt(hash, &sg, NULL, copied); 1577 crypto_ahash_update(hash); 1578 return copied; 1579 #else 1580 return 0; 1581 #endif 1582 } 1583 EXPORT_SYMBOL(hash_and_copy_to_iter); 1584 1585 static int iov_npages(const struct iov_iter *i, int maxpages) 1586 { 1587 size_t skip = i->iov_offset, size = i->count; 1588 const struct iovec *p; 1589 int npages = 0; 1590 1591 for (p = i->iov; size; skip = 0, p++) { 1592 unsigned offs = offset_in_page(p->iov_base + skip); 1593 size_t len = min(p->iov_len - skip, size); 1594 1595 if (len) { 1596 size -= len; 1597 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1598 if (unlikely(npages > maxpages)) 1599 return maxpages; 1600 } 1601 } 1602 return npages; 1603 } 1604 1605 static int bvec_npages(const struct iov_iter *i, int maxpages) 1606 { 1607 size_t skip = i->iov_offset, size = i->count; 1608 const struct bio_vec *p; 1609 int npages = 0; 1610 1611 for (p = i->bvec; size; skip = 0, p++) { 1612 unsigned offs = (p->bv_offset + skip) % PAGE_SIZE; 1613 size_t len = min(p->bv_len - skip, size); 1614 1615 size -= len; 1616 npages += DIV_ROUND_UP(offs + len, PAGE_SIZE); 1617 if (unlikely(npages > maxpages)) 1618 return maxpages; 1619 } 1620 return npages; 1621 } 1622 1623 int iov_iter_npages(const struct iov_iter *i, int maxpages) 1624 { 1625 if (unlikely(!i->count)) 1626 return 0; 1627 if (likely(iter_is_ubuf(i))) { 1628 unsigned offs = offset_in_page(i->ubuf + i->iov_offset); 1629 int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE); 1630 return min(npages, maxpages); 1631 } 1632 /* iovec and kvec have identical layouts */ 1633 if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) 1634 return iov_npages(i, maxpages); 1635 if (iov_iter_is_bvec(i)) 1636 return bvec_npages(i, maxpages); 1637 if (iov_iter_is_pipe(i)) { 1638 int npages; 1639 1640 if (!sanity(i)) 1641 return 0; 1642 1643 pipe_npages(i, &npages); 1644 return min(npages, maxpages); 1645 } 1646 if (iov_iter_is_xarray(i)) { 1647 unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; 1648 int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); 1649 return min(npages, maxpages); 1650 } 1651 return 0; 1652 } 1653 EXPORT_SYMBOL(iov_iter_npages); 1654 1655 const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) 1656 { 1657 *new = *old; 1658 if (unlikely(iov_iter_is_pipe(new))) { 1659 WARN_ON(1); 1660 return NULL; 1661 } 1662 if (iov_iter_is_bvec(new)) 1663 return new->bvec = kmemdup(new->bvec, 1664 new->nr_segs * sizeof(struct bio_vec), 1665 flags); 1666 else if (iov_iter_is_kvec(new) || iter_is_iovec(new)) 1667 /* iovec and kvec have identical layout */ 1668 return new->iov = kmemdup(new->iov, 1669 new->nr_segs * sizeof(struct iovec), 1670 flags); 1671 return NULL; 1672 } 1673 EXPORT_SYMBOL(dup_iter); 1674 1675 static int copy_compat_iovec_from_user(struct iovec *iov, 1676 const struct iovec __user *uvec, unsigned long nr_segs) 1677 { 1678 const struct compat_iovec __user *uiov = 1679 (const struct 
compat_iovec __user *)uvec; 1680 int ret = -EFAULT, i; 1681 1682 if (!user_access_begin(uiov, nr_segs * sizeof(*uiov))) 1683 return -EFAULT; 1684 1685 for (i = 0; i < nr_segs; i++) { 1686 compat_uptr_t buf; 1687 compat_ssize_t len; 1688 1689 unsafe_get_user(len, &uiov[i].iov_len, uaccess_end); 1690 unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end); 1691 1692 /* check for compat_size_t not fitting in compat_ssize_t .. */ 1693 if (len < 0) { 1694 ret = -EINVAL; 1695 goto uaccess_end; 1696 } 1697 iov[i].iov_base = compat_ptr(buf); 1698 iov[i].iov_len = len; 1699 } 1700 1701 ret = 0; 1702 uaccess_end: 1703 user_access_end(); 1704 return ret; 1705 } 1706 1707 static int copy_iovec_from_user(struct iovec *iov, 1708 const struct iovec __user *uvec, unsigned long nr_segs) 1709 { 1710 unsigned long seg; 1711 1712 if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec))) 1713 return -EFAULT; 1714 for (seg = 0; seg < nr_segs; seg++) { 1715 if ((ssize_t)iov[seg].iov_len < 0) 1716 return -EINVAL; 1717 } 1718 1719 return 0; 1720 } 1721 1722 struct iovec *iovec_from_user(const struct iovec __user *uvec, 1723 unsigned long nr_segs, unsigned long fast_segs, 1724 struct iovec *fast_iov, bool compat) 1725 { 1726 struct iovec *iov = fast_iov; 1727 int ret; 1728 1729 /* 1730 * SuS says "The readv() function *may* fail if the iovcnt argument was 1731 * less than or equal to 0, or greater than {IOV_MAX}. Linux has 1732 * traditionally returned zero for zero segments, so... 1733 */ 1734 if (nr_segs == 0) 1735 return iov; 1736 if (nr_segs > UIO_MAXIOV) 1737 return ERR_PTR(-EINVAL); 1738 if (nr_segs > fast_segs) { 1739 iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL); 1740 if (!iov) 1741 return ERR_PTR(-ENOMEM); 1742 } 1743 1744 if (compat) 1745 ret = copy_compat_iovec_from_user(iov, uvec, nr_segs); 1746 else 1747 ret = copy_iovec_from_user(iov, uvec, nr_segs); 1748 if (ret) { 1749 if (iov != fast_iov) 1750 kfree(iov); 1751 return ERR_PTR(ret); 1752 } 1753 1754 return iov; 1755 } 1756 1757 ssize_t __import_iovec(int type, const struct iovec __user *uvec, 1758 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp, 1759 struct iov_iter *i, bool compat) 1760 { 1761 ssize_t total_len = 0; 1762 unsigned long seg; 1763 struct iovec *iov; 1764 1765 iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat); 1766 if (IS_ERR(iov)) { 1767 *iovp = NULL; 1768 return PTR_ERR(iov); 1769 } 1770 1771 /* 1772 * According to the Single Unix Specification we should return EINVAL if 1773 * an element length is < 0 when cast to ssize_t or if the total length 1774 * would overflow the ssize_t return value of the system call. 1775 * 1776 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the 1777 * overflow case. 1778 */ 1779 for (seg = 0; seg < nr_segs; seg++) { 1780 ssize_t len = (ssize_t)iov[seg].iov_len; 1781 1782 if (!access_ok(iov[seg].iov_base, len)) { 1783 if (iov != *iovp) 1784 kfree(iov); 1785 *iovp = NULL; 1786 return -EFAULT; 1787 } 1788 1789 if (len > MAX_RW_COUNT - total_len) { 1790 len = MAX_RW_COUNT - total_len; 1791 iov[seg].iov_len = len; 1792 } 1793 total_len += len; 1794 } 1795 1796 iov_iter_init(i, type, iov, nr_segs, total_len); 1797 if (iov == *iovp) 1798 *iovp = NULL; 1799 else 1800 *iovp = iov; 1801 return total_len; 1802 } 1803 1804 /** 1805 * import_iovec() - Copy an array of &struct iovec from userspace 1806 * into the kernel, check that it is valid, and initialize a new 1807 * &struct iov_iter iterator to access it. 1808 * 1809 * @type: One of %READ or %WRITE. 
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in the array pointed to by @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
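
/*
 * A minimal, illustrative consumer of this API (a sketch, not part of this
 * file): a read-style path imports the caller's iovec array, copies kernel
 * data out through the resulting iterator, and frees any allocated copy.
 * Names such as 'uvec', 'nr_segs', 'kbuf' and 'kbuf_len' are assumed to be
 * provided by the caller.
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = copy_to_iter(kbuf, min_t(size_t, kbuf_len, ret), &iter);
 *	kfree(iov);	// NULL when the on-stack array was used, so always safe
 *	return ret;
 */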