// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers ubuf and kbuf alike */
#define iterate_buf(i, n, base, len, off, __p, STEP) {		\
	size_t __maybe_unused off = 0;				\
	len = n;						\
	base = __p + i->iov_offset;				\
	len -= (STEP);						\
	i->iov_offset += len;					\
	n = len;						\
}

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

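/*
 * Note on STEP in the iterate_* macros above: it is an expression that
 * consumes up to 'len' bytes at 'base' and evaluates to the number of
 * bytes it could NOT process (0 on full success) - the same convention
 * as raw_copy_to_user()/raw_copy_from_user().  A non-zero result ends
 * the walk early, which is how the iterate_and_advance() users below
 * end up reporting short copies.
 */
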
#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_ubuf(i))) {			\
			void __user *base;			\
			size_t len;				\
			iterate_buf(i, n, base, len, off,	\
						i->ubuf, (I))	\
		} else if (likely(iter_is_iovec(i))) {		\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

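/*
 * Like the raw uaccess primitives they wrap, copyout()/copyin() return
 * the number of bytes *not* copied, never a negative error.  A minimal
 * sketch of a caller (hypothetical, for illustration only):
 *
 *	size_t left = copyin(kbuf, ubuf, len);
 *	size_t copied = len - left;	// bytes actually transferred
 */
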
static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
					   unsigned int slot)
{
	return &pipe->bufs[slot & (pipe->ring_size - 1)];
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->last_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = pipe_buf(pipe, i_head);
		if (unlikely(p->offset + p->len != abs(i->last_offset)))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size)
{
	struct page *page = alloc_page(GFP_USER);
	if (page) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
		*buf = (struct pipe_buffer) {
			.ops = &default_pipe_buf_ops,
			.page = page,
			.offset = 0,
			.len = size
		};
	}
	return page;
}

static void push_page(struct pipe_inode_info *pipe, struct page *page,
			unsigned int offset, unsigned int size)
{
	struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++);
	*buf = (struct pipe_buffer) {
		.ops = &page_cache_pipe_buf_ops,
		.page = page,
		.offset = offset,
		.len = size
	};
	get_page(page);
}

static inline int last_offset(const struct pipe_buffer *buf)
{
	if (buf->ops == &default_pipe_buf_ops)
		return buf->len;	// buf->offset is 0 for those
	else
		return -(buf->offset + buf->len);
}

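/*
 * The encoding produced by last_offset() above is what i->last_offset
 * relies on throughout this file: a positive value marks an anonymous
 * buffer from push_anon() (offset 0, so the value is both its length
 * and the next append position), a negative value -(offset + len)
 * marks a read-only page-cache buffer from push_page() that may only
 * be merged with, never appended to, and 0 means there is no buffer
 * that could be extended at all.
 */
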
static struct page *append_pipe(struct iov_iter *i, size_t size,
				unsigned int *off)
{
	struct pipe_inode_info *pipe = i->pipe;
	int offset = i->last_offset;
	struct pipe_buffer *buf;
	struct page *page;

	if (offset > 0 && offset < PAGE_SIZE) {
		// some space in the last buffer; add to it
		buf = pipe_buf(pipe, pipe->head - 1);
		size = min_t(size_t, size, PAGE_SIZE - offset);
		buf->len += size;
		i->last_offset += size;
		i->count -= size;
		*off = offset;
		return buf->page;
	}
	// OK, we need a new buffer
	*off = 0;
	size = min_t(size_t, size, PAGE_SIZE);
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return NULL;
	page = push_anon(pipe, size);
	if (!page)
		return NULL;
	i->head = pipe->head - 1;
	i->last_offset = size;
	i->count -= size;
	return page;
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int head = pipe->head;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	if (offset && i->last_offset == -offset) { // could we merge it?
		struct pipe_buffer *buf = pipe_buf(pipe, head - 1);
		if (buf->page == page) {
			buf->len += bytes;
			i->last_offset -= bytes;
			i->count -= bytes;
			return bytes;
		}
	}
	if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
		return 0;

	push_page(pipe, page, offset, bytes);
	i->last_offset = -(offset + bytes);
	i->head = head;
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_readable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);

/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_ubuf(i)) {
		size_t n = min(size, iov_iter_count(i));
		n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
		return size - n;
	} else if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

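/*
 * Typical use of the two fault-in helpers above (hypothetical caller;
 * generic_perform_write() follows this shape): attempt the copy with
 * page faults disabled and, on a short copy, fault the pages in and
 * retry:
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes))
 *		return -EFAULT;		// no progress is possible
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	// if 'copied' came up short, loop back to the fault-in above
 */
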
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.user_backed = true,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	int off = i->last_offset;

	if (off > 0 && off < PAGE_SIZE) { // anon and not full
		*iter_headp = i->pipe->head - 1;
		*offp = off;
	} else {
		*iter_headp = i->pipe->head;
		*offp = 0;
	}
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		if (!page)
			return bytes - n;
		memcpy_to_page(page, off, addr, chunk);
		addr += chunk;
	}
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int chunk, r;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &r);
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		p = kmap_local_page(page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		off += chunk;
		bytes -= chunk;
	}
	*sump = sum;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

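/*
 * Sketch of driving _copy_to_iter() through the copy_to_iter() wrapper
 * from <linux/uio.h> (hypothetical kernel-to-kernel copy; 'result' and
 * 'buf' are illustrative names):
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = sizeof(result) };
 *	struct iov_iter to;
 *
 *	iov_iter_kvec(&to, READ, &kv, 1, sizeof(result));
 *	if (copy_to_iter(&result, sizeof(result), &to) != sizeof(result))
 *		return -EFAULT;		// short copy
 */
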
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	size_t xfer = 0;
	unsigned int off, chunk;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	while (bytes) {
		struct page *page = append_pipe(i, bytes, &off);
		unsigned long rem;
		char *p;

		if (!page)
			break;
		chunk = min_t(size_t, bytes, PAGE_SIZE - off);
		p = kmap_local_page(page);
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		xfer += chunk;
		bytes -= chunk;
		if (rem) {
			iov_iter_revert(i, rem);
			break;
		}
	}
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (user_backed_iter(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (user_backed_iter(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
				struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	} else {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

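/*
 * Note that copy_page_to_iter() above may be fed a compound page and an
 * offset/length that cross subpage boundaries (within the limits that
 * page_copy_sane() enforces); it maps and copies one subpage at a time,
 * so the loop is highmem-safe.  A hypothetical read-side caller:
 *
 *	size_t n = copy_page_to_iter(page, offset, count, iter);
 *	if (n < count)
 *		return copied ? copied : -EFAULT;	// short copy
 */
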
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (page_copy_sane(page, offset, bytes)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	unsigned int chunk, off;

	if (unlikely(bytes > i->count))
		bytes = i->count;
	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	for (size_t n = bytes; n; n -= chunk) {
		struct page *page = append_pipe(i, n, &off);
		char *p;

		if (!page)
			return bytes - n;
		chunk = min_t(size_t, n, PAGE_SIZE - off);
		p = kmap_local_page(page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
	}
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	int off = i->last_offset;

	if (!off && !size) {
		pipe_discard_from(pipe, i->start_head); // discard everything
		return;
	}
	i->count -= size;
	while (1) {
		struct pipe_buffer *buf = pipe_buf(pipe, i->head);
		if (off) /* make it relative to the beginning of buffer */
			size += abs(off) - buf->offset;
		if (size <= buf->len) {
			buf->len = size;
			i->last_offset = last_offset(buf);
			break;
		}
		size -= buf->len;
		i->head++;
		off = 0;
	}
	pipe_discard_from(pipe, i->head + 1); // discard everything past this one
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int head = pipe->head;

		while (head > i->start_head) {
			struct pipe_buffer *b = pipe_buf(pipe, --head);
			if (unroll < b->len) {
				b->len -= unroll;
				i->last_offset = last_offset(b);
				i->head = head;
				return;
			}
			unroll -= b->len;
			pipe_buf_release(pipe, b);
			pipe->head--;
		}
		i->last_offset = 0;
		i->head = head;
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

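/*
 * iov_iter_advance() and iov_iter_revert() above are inverses as long
 * as the revert does not go past what has actually been consumed.  A
 * common error-handling pattern (hypothetical caller):
 *
 *	size_t before = iov_iter_count(iter);
 *	ret = do_partial_io(iter);	// advances 'iter' as it goes
 *	if (ret < 0)			// undo whatever was consumed
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 */
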
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

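/*
 * Example of an ITER_BVEC setup over kernel pages (hypothetical
 * two-segment scatter; bio_vec entries may start mid-page):
 *
 *	struct bio_vec bv[2] = {
 *		{ .bv_page = p0, .bv_offset = 512, .bv_len = PAGE_SIZE - 512 },
 *		{ .bv_page = p1, .bv_offset = 0,   .bv_len = 512 },
 *	};
 *	struct iov_iter from;
 *
 *	iov_iter_bvec(&from, WRITE, bv, 2, PAGE_SIZE);
 *	// 'from' now supplies PAGE_SIZE bytes, e.g. to _copy_from_iter()
 */
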
void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.last_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
				   unsigned len_mask)
{
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
				  unsigned len_mask)
{
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;

		if (len > size)
			len = size;
		if (len & len_mask)
			return false;
		if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
			return false;

		size -= len;
		if (!size)
			break;
	}
	return true;
}

/**
 * iov_iter_is_aligned() - Check if the addresses and lengths of each segment
 *	are aligned to the parameters.
 *
 * @i: &struct iov_iter to check
 * @addr_mask: bit mask to check against the iov element's addresses
 * @len_mask: bit mask to check against the iov element's lengths
 *
 * Return: false if any addresses or lengths intersect with the provided masks
 */
bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
			 unsigned len_mask)
{
	if (likely(iter_is_ubuf(i))) {
		if (i->count & len_mask)
			return false;
		if ((unsigned long)(i->ubuf + i->iov_offset) & addr_mask)
			return false;
		return true;
	}

	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_aligned_iovec(i, addr_mask, len_mask);

	if (iov_iter_is_bvec(i))
		return iov_iter_aligned_bvec(i, addr_mask, len_mask);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size & len_mask)
			return false;
		if (size && i->last_offset > 0) {
			if (i->last_offset & addr_mask)
				return false;
		}

		return true;
	}

	if (iov_iter_is_xarray(i)) {
		if (i->count & len_mask)
			return false;
		if ((i->xarray_start + i->iov_offset) & addr_mask)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(iov_iter_is_aligned);

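/*
 * A direct-I/O style use of iov_iter_is_aligned() (hypothetical caller;
 * the masks would normally come from the device, e.g. logical block
 * size - 1):
 *
 *	unsigned int align = bdev_logical_block_size(bdev) - 1;
 *
 *	if (!iov_iter_is_aligned(iter, align, align))
 *		return -EINVAL;	// misaligned address or length
 */
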
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	if (likely(iter_is_ubuf(i))) {
		size_t size = i->count;
		if (size)
			return ((unsigned long)i->ubuf + i->iov_offset) | size;
		return 0;
	}

	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		size_t size = i->count;

		if (size && i->last_offset > 0)
			return size | i->last_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (iter_is_ubuf(i))
		return 0;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				size_t off)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t left = maxsize;

	if (off) {
		struct pipe_buffer *buf = pipe_buf(pipe, pipe->head - 1);

		get_page(*pages++ = buf->page);
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			buf->len += maxsize;
			return maxsize;
		}
		buf->len = PAGE_SIZE;
	}
	while (!pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
		struct page *page = push_anon(pipe,
					      min_t(ssize_t, left, PAGE_SIZE));
		if (!page)
			break;
		get_page(*pages++ = page);
		left -= PAGE_SIZE;
		if (left <= 0)
			return maxsize;
	}
	return maxsize - left ? : -EFAULT;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, *start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
	size_t skip;
	long k;

	if (iter_is_ubuf(i))
		return (unsigned long)i->ubuf + i->iov_offset;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (*size > len)
			*size = len;
		return (unsigned long)i->iov[k].iov_base + skip;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (*size > len)
		*size = len;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		for (int k = 0; k < n; k++)
			get_page(*pages++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);

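/*
 * Sketch of an iov_iter_get_pages() consumer (hypothetical).  The pages
 * come back with elevated refcounts, the data may start at a non-zero
 * offset within pages[0], and the iterator itself is not advanced:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t bytes = iov_iter_get_pages(iter, pages, want, 16, &off);
 *
 *	if (bytes <= 0)
 *		return bytes;
 *	// ... do I/O against the pages ...
 *	for (n = 0; n < DIV_ROUND_UP(off + bytes, PAGE_SIZE); n++)
 *		put_page(pages[n]);
 */
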
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, *start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	return min_t(size_t, nr * PAGE_SIZE - offset, maxsize);
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(user_backed_iter(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &maxsize);
		*start = addr % PAGE_SIZE;
		addr &= PAGE_MASK;
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &maxsize, start);
		n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		for (int k = 0; k < n; k++)
			get_page(*p++ = page++);
		return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

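/*
 * The _alloc variant above differs only in ownership: the page array is
 * kvmalloc'ed on behalf of the caller, who must kvfree() it (on top of
 * dropping the page references) once the I/O completes.  Hypothetical
 * cleanup:
 *
 *	struct page **pages;
 *	ssize_t bytes = iov_iter_get_pages_alloc(iter, &pages, want, &off);
 *
 *	if (bytes > 0) {
 *		// ... use the pages, put_page() each one ...
 *		kvfree(pages);
 *	}
 */
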
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

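/*
 * csum_and_copy_to_iter() above folds the copied bytes into a running
 * checksum carried in a struct csum_state (see <linux/uio.h>).  A
 * hypothetical networking-style caller:
 *
 *	struct csum_state csstate = { .csum = 0, .off = 0 };
 *
 *	if (csum_and_copy_to_iter(data, len, &csstate, iter) != len)
 *		return -EFAULT;		// short copy
 *	// csstate.csum now covers 'len' bytes and csstate.off == len
 */
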
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	if (likely(iter_is_ubuf(i))) {
		unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
		int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else if (iov_iter_is_kvec(new) || iter_is_iovec(new))
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
	return NULL;
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

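/*
 * Typical syscall-side use of import_iovec() (hypothetical, but this is
 * the shape of vfs_readv() and friends):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	// ... perform the I/O on 'iter' ...
 *	kfree(iov);	// safe whether or not the fast array was used
 */
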
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);

/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, ITER_KVEC and ITER_UBUF
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i) &&
			 !iov_iter_is_kvec(i) && !iter_is_ubuf(i)))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	if (iter_is_ubuf(i))
		return;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduct
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}

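/*
 * Sketch of pairing iov_iter_restore() with iov_iter_save_state() from
 * <linux/uio.h> (hypothetical caller that may need to reissue an I/O):
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_io(iter);			// may consume 'iter'
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);	// back to the save point
 *		goto retry;
 *	}
 */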