// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
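
/*
 * Rough guide to the macros above: iterate_and_advance() is the workhorse
 * behind most of the copy helpers below.  It walks the current segment(s)
 * of @i, making @base/@len refer to each contiguous chunk in turn, runs the
 * user-space step (I) or the kernel-space step (K) on that chunk, and then
 * advances the iterator by however much the step actually handled (a step
 * evaluates to the number of bytes it did NOT process).  For example, a
 * call such as
 *
 *	iterate_and_advance(i, bytes, base, len, off,
 *		copyout(base, addr + off, len),
 *		memcpy(base, addr + off, len))
 *
 * loosely expands to a loop that copies @bytes from @addr into the
 * iterator, using copyout() for user-backed segments and memcpy() for
 * kernel ones.  __iterate_and_advance() is the variant whose kernel step
 * is allowed to report a short copy; the plain wrapper forces the kernel
 * step to always complete.
 */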

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	size_t res = n;

	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user_before(to, from, n);
		res = raw_copy_from_user(to, from, n);
		instrument_copy_from_user_after(to, from, n, res);
	}
	return res;
}

static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->last_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != abs(i->last_offset)))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
	struct page *page = alloc_page(GFP_USER);
	if (page) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
			.page = page,
			.offset = 0,
			.len = size
		};
	}
	return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
{
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
		.page = page,
		.offset = offset,
		.len = size
	};
	get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
	if (buf->ops == &default_pipe_buf_ops)
		return buf->len;	// buf->offset is 0 for those
	else
		return -(buf->offset + buf->len);
}

static struct page *append_pipe(struct iov_iter *i, size_t size,
				unsigned int *off)
{
	struct pipe_inode_info *pipe = i->pipe;
	int offset = i->last_offset;
	struct pipe_buffer *buf;
	struct page *page;

	if (offset > 0 && offset < PAGE_SIZE) {
		// some space in the last buffer; add to it
		buf = pipe_buf(pipe, pipe->head - 1);
		size = min_t(size_t, size, PAGE_SIZE - offset);
		buf->len += size;
		i->last_offset += size;
		i->count -= size;
		*off = offset;
		return buf->page;
	}
	// OK, we need a new buffer
	*off = 0;
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return NULL;
	page = push_anon(pipe, size);
	if (!page)
		return NULL;
	i->head = pipe->head - 1;
	i->last_offset = size;
	i->count -= size;
	return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	if (offset && i->last_offset == -offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			buf->len += bytes;
			i->last_offset -= bytes;
			i->count -= bytes;
			return bytes;
		}
	}
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return 0;

	push_page(pipe, page, offset, bytes);
	i->last_offset = -(offset + bytes);
	i->head = head;
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
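
/*
 * Sketch of the usual caller pattern (the names below are illustrative,
 * not taken from any particular filesystem): buffered-write paths copy
 * from the iterator with page faults disabled and, on a short copy, drop
 * their locks, fault the next chunk in and retry:
 *
 *	while (iov_iter_count(from)) {
 *		size_t copied;
 *
 *		copied = copy_chunk_from_iter(dst, chunk, from);
 *		if (unlikely(!copied)) {
 *			if (fault_in_iov_iter_readable(from, chunk) == chunk)
 *				return -EFAULT;	// nothing could be faulted in
 *			continue;		// retry after faulting in
 *		}
 *		...
 *	}
 */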

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
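
/*
 * Sketch of direct initialization (names illustrative): given a kernel copy
 * of the user's iovec array, an ITER_IOVEC iterator describing data the
 * caller will consume from user space is set up with
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, WRITE, iov, nr_segs, total_len);
 *
 * The direction is WRITE because the data will be written *to* the file or
 * device, i.e. copied *from* user memory.  Most callers go through
 * import_iovec()/import_single_range() further down instead of calling
 * this directly.
 */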

// returns the offset in partial buffer (if any)
static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages)
{
	struct pipe_inode_info *pipe = i->pipe;
	int used = pipe->head - pipe->tail;
	int off = i->last_offset;

	*npages = max((int)pipe->max_usage - used, 0);

	if (off > 0 && off < PAGE_SIZE) { // anon and not full
		(*npages)++;
		return off;
	}
	return 0;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int chunk, r;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &r);
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		off += chunk;
		bytes -= chunk;
	}
	*sump = sum;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
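
/*
 * Sketch of a typical consumer (illustrative names): a ->read_iter()
 * implementation that has generated @len bytes in a kernel buffer hands
 * them to whatever the iterator describes (user memory, kvec, bvec, pipe):
 *
 *	size_t copied = copy_to_iter(kbuf, len, to);
 *
 *	if (copied < len)
 *		// short copy: usually -EFAULT unless something was copied
 *
 * copy_to_iter() in <linux/uio.h> wraps _copy_to_iter() above with a
 * hardened usercopy size check on the source object.
 */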

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	size_t xfer = 0;
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &off);
		unsigned long rem;
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		xfer += chunk;
		bytes -= chunk;
		if (rem) {
			iov_iter_revert(i, rem);
			break;
		}
	}
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_to_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);
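
/*
 * Illustrative use (hypothetical names): read paths that already have the
 * data in a page - a page-cache or zero-copy read, say - push it into the
 * iterator a page fragment at a time:
 *
 *	copied = copy_page_to_iter(page, offset_in_page, chunk, to);
 *	if (copied < chunk)
 *		// user buffer faulted / pipe full: stop with a short read
 *
 * Unlike _copy_to_iter(), this handles highmem and compound pages itself
 * and, for ITER_PIPE, can splice the page reference in without copying.
 */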

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (!page_copy_sane(page, offset, bytes))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		void *kaddr = kmap_local_page(page);
		size_t n = min(bytes, (size_t)PAGE_SIZE - offset);
		n = _copy_from_iter(kaddr + offset, n, i);
		kunmap_local(kaddr);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	unsigned int chunk, off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		char *p;

		if (!page)
			return bytes - n;
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
	}
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	int off = i->last_offset;

	if (!off && !size) {
		pipe_discard_from(pipe, i->start_head); // discard everything
		return;
	}
	i->count -= size;
	while (1) {
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += abs(off) - buf->offset;
		if (size <= buf->len) {
			buf->len = size;
			i->last_offset = last_offset(buf);
			break;
		}
		size -= buf->len;
		i->head++;
		off = 0;
	}
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int head = pipe->head;

		while (head > i->start_head) {
			struct pipe_buffer *b = pipe_buf(pipe, --head);
			if (unroll < b->len) {
				b->len -= unroll;
				i->last_offset = last_offset(b);
				i->head = head;
				return;
			}
			unroll -= b->len;
			pipe_buf_release(pipe, b);
			pipe->head--;
		}
		i->last_offset = 0;
		i->head = head;
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
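
/*
 * Rough usage note (names illustrative): a caller that may consume less
 * than it advanced can pair the two helpers:
 *
 *	iov_iter_advance(i, chunk);		// tentatively consume
 *	...
 *	if (actually_used < chunk)
 *		iov_iter_revert(i, chunk - actually_used);
 *
 * For ITER_PIPE the revert also releases any pipe buffers that were
 * appended past the restored position.
 */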

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);
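
/*
 * Sketch (illustrative): wrapping a kernel buffer or a bio_vec array so it
 * can be fed to code that takes an iov_iter:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *
 * READ here means the iterator is a destination (data will be copied into
 * @buf); WRITE would make it a source.  iov_iter_bvec() is the same idea
 * for an array of struct bio_vec.
 */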

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.last_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && i->last_offset > 0) {
			if (i->last_offset & addr_mask)
				return false;
		}

		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
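
/*
 * Illustrative check (the masks are hypothetical): direct I/O code that
 * requires, say, 512-byte alignment of both addresses and lengths can test
 * the whole iterator in one go:
 *
 *	if (!iov_iter_is_aligned(iter, 511, 511))
 *		return -EINVAL;
 *
 * The masks are "offending bits": a segment fails the check if
 * (addr & addr_mask) or (len & len_mask) is non-zero.
 */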

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->last_offset > 0)
			return size | i->last_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static int want_pages_array(struct page ***res, size_t size,
			    size_t start, unsigned int maxpages)
{
	unsigned int count = DIV_ROUND_UP(size + start, PAGE_SIZE);

	if (count > maxpages)
		count = maxpages;
	WARN_ON(!count);	// caller should've prevented that
	if (!*res) {
		*res = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
		if (!*res)
			return 0;
	}
	return count;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page ***pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int npages, count, off, chunk;
	struct page **p;
	size_t left;

	if (!sanity(i))
		return -EFAULT;

	*start = off = pipe_npages(i, &npages);
	if (!npages)
		return -EFAULT;
	count = want_pages_array(pages, maxsize, off, min(npages, maxpages));
	if (!count)
		return -ENOMEM;
	p = *pages;
	for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) {
		struct page *page = append_pipe(i, left, &off);
		if (!page)
			break;
		chunk = min_t(size_t, left, PAGE_SIZE - off);
		get_page(*p++ = page);
	}
	if (!npages)
		return -EFAULT;
	return maxsize - left;
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page ***pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset, count;
	pgoff_t index;
	loff_t pos;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = want_pages_array(pages, maxsize, offset, maxpages);
	if (!count)
		return -ENOMEM;
	nr = iter_xarray_populate_pages(*pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	maxsize = min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
	i->iov_offset += maxsize;
	i->count -= maxsize;
	return maxsize;
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   unsigned int maxpages, size_t *start)
{
	unsigned int n;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;
		int res;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, *pages);
		if (unlikely(res <= 0))
			return res;
		maxsize = min_t(size_t, maxsize, res * PAGE_SIZE - *start);
		iov_iter_advance(i, maxsize);
		return maxsize;
	}
	if (iov_iter_is_bvec(i)) {
		struct page **p;
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = want_pages_array(pages, maxsize, *start, maxpages);
		if (!n)
			return -ENOMEM;
		p = *pages;
		for (int k = 0; k < n; k++)
			get_page(p[k] = page + k);
		maxsize = min_t(size_t, maxsize, n * PAGE_SIZE - *start);
		i->count -= maxsize;
		i->iov_offset += maxsize;
		if (i->iov_offset == i->bvec->bv_len) {
			i->iov_offset = 0;
			i->bvec++;
			i->nr_segs--;
		}
		return maxsize;
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}

ssize_t iov_iter_get_pages2(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (!maxpages)
		return 0;
	BUG_ON(!pages);

	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
}
EXPORT_SYMBOL(iov_iter_get_pages2);

ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	ssize_t len;

	*pages = NULL;

	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
	if (len <= 0) {
		kvfree(*pages);
		*pages = NULL;
	}
	return len;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
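
/*
 * Sketch of a caller (illustrative names): pin the next chunk of the
 * iterator for direct I/O or DMA, use it, then drop the references:
 *
 *	struct page **pages = NULL;
 *	size_t off;
 *	ssize_t got = iov_iter_get_pages_alloc2(i, &pages, maxsize, &off);
 *
 *	if (got <= 0)
 *		return got;
 *	// data starts @off bytes into pages[0]; the iterator has already
 *	// been advanced past the @got bytes that were grabbed
 *	...
 *	for (n = DIV_ROUND_UP(off + got, PAGE_SIZE); n; )
 *		put_page(pages[--n]);
 *	kvfree(pages);
 */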

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		int npages;

		if (!sanity(i))
			return 0;

		pipe_npages(i, &npages);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);
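
/*
 * Sketch of the canonical caller pattern (cf. the readv()/writev() paths;
 * the names below are illustrative):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);	// safe whether or not the on-stack array was used
 *	return ret;
 */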

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
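
/*
 * Illustrative pairing with iov_iter_save_state() (declared in
 * <linux/uio.h>): a caller that may have to retry an operation can snapshot
 * the iterator up front and rewind on failure:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = some_operation(iter);		// may advance iter
 *	if (ret == -EAGAIN)
 *		iov_iter_restore(iter, &state);	// rewind for the retry
 *
 * (some_operation() is a placeholder; this is the pattern io_uring uses to
 * re-issue reads and writes.)
 */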