// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

/* covers iovec and kvec alike */
#define iterate_iovec(i, n, base, len, off, __p, STEP) {	\
	size_t off = 0;						\
	size_t skip = i->iov_offset;				\
	do {							\
		len = min(n, __p->iov_len - skip);		\
		if (likely(len)) {				\
			base = __p->iov_base + skip;		\
			len -= (STEP);				\
			off += len;				\
			skip += len;				\
			n -= len;				\
			if (skip < __p->iov_len)		\
				break;				\
		}						\
		__p++;						\
		skip = 0;					\
	} while (n);						\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_bvec(i, n, base, len, off, p, STEP) {		\
	size_t off = 0;						\
	unsigned skip = i->iov_offset;				\
	while (n) {						\
		unsigned offset = p->bv_offset + skip;		\
		unsigned left;					\
		void *kaddr = kmap_local_page(p->bv_page +	\
					offset / PAGE_SIZE);	\
		base = kaddr + offset % PAGE_SIZE;		\
		len = min(min(n, (size_t)(p->bv_len - skip)),	\
		     (size_t)(PAGE_SIZE - offset % PAGE_SIZE));	\
		left = (STEP);					\
		kunmap_local(kaddr);				\
		len -= left;					\
		off += len;					\
		skip += len;					\
		if (skip == p->bv_len) {			\
			skip = 0;				\
			p++;					\
		}						\
		n -= len;					\
		if (left)					\
			break;					\
	}							\
	i->iov_offset = skip;					\
	n = off;						\
}

#define iterate_xarray(i, n, base, len, __off, STEP) {		\
	__label__ __out;					\
	size_t __off = 0;					\
	struct folio *folio;					\
	loff_t start = i->xarray_start + i->iov_offset;		\
	pgoff_t index = start / PAGE_SIZE;			\
	XA_STATE(xas, i->xarray, index);			\
								\
	len = PAGE_SIZE - offset_in_page(start);		\
	rcu_read_lock();					\
	xas_for_each(&xas, folio, ULONG_MAX) {			\
		unsigned left;					\
		size_t offset;					\
		if (xas_retry(&xas, folio))			\
			continue;				\
		if (WARN_ON(xa_is_value(folio)))		\
			break;					\
		if (WARN_ON(folio_test_hugetlb(folio)))		\
			break;					\
		offset = offset_in_folio(folio, start + __off);	\
		while (offset < folio_size(folio)) {		\
			base = kmap_local_folio(folio, offset);	\
			len = min(n, len);			\
			left = (STEP);				\
			kunmap_local(base);			\
			len -= left;				\
			__off += len;				\
			n -= len;				\
			if (left || n == 0)			\
				goto __out;			\
			offset += len;				\
			len = PAGE_SIZE;			\
		}						\
	}							\
__out:								\
	rcu_read_unlock();					\
	i->iov_offset += __off;					\
	n = __off;						\
}

#define __iterate_and_advance(i, n, base, len, off, I, K) {	\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (likely(n)) {					\
		if (likely(iter_is_iovec(i))) {			\
			const struct iovec *iov = i->iov;	\
			void __user *base;			\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						iov, (I))	\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		} else if (iov_iter_is_bvec(i)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			void *base;				\
			size_t len;				\
			iterate_bvec(i, n, base, len, off,	\
						bvec, (K))	\
			i->nr_segs -= bvec - i->bvec;		\
			i->bvec = bvec;				\
		} else if (iov_iter_is_kvec(i)) {		\
			const struct kvec *kvec = i->kvec;	\
			void *base;				\
			size_t len;				\
			iterate_iovec(i, n, base, len, off,	\
						kvec, (K))	\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (iov_iter_is_xarray(i)) {		\
			void *base;				\
			size_t len;				\
			iterate_xarray(i, n, base, len, off,	\
							(K))	\
		}						\
		i->count -= n;					\
	}							\
}
#define iterate_and_advance(i, n, base, len, off, I, K) \
	__iterate_and_advance(i, n, base, len, off, I, ((void)(K),0))
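
/*
 * Usage sketch (drawn from the callers below): iterate_and_advance() takes
 * two step expressions - "I" runs with "base" pointing into user memory
 * (the iovec case), "K" runs with a kernel pointer (kvec/bvec/xarray).
 * Each step must evaluate to the number of bytes it failed to process,
 * which is how a faulting user copy terminates the walk early.
 * iov_iter_zero(), for example, passes:
 *
 *	iterate_and_advance(i, bytes, base, len, count,
 *		clear_user(base, len),	// returns bytes not cleared
 *		memset(base, 0, len)	// kernel side cannot fail
 *	)
 *
 * The kernel-side value is discarded by the ((void)(K),0) wrapper above;
 * __iterate_and_advance() keeps it for callers like _copy_mc_to_iter()
 * where a kernel-to-kernel copy can legitimately be short.
 */
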
static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	buf->flags = 0;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * fault_in_iov_iter_readable - fault in iov iterator for reading
 * @i: iterator
 * @size: maximum length
 *
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * @size.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Returns the number of bytes not faulted in (like copy_to_user() and
 * copy_from_user()).
 *
 * Always returns 0 for non-userspace iterators.
 */
size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_readable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_readable);
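
/*
 * Usage sketch (illustrative, not from this file): buffered-write paths
 * typically pre-fault the user buffer so the later atomic copy has a good
 * chance of succeeding, then loop on a short copy, roughly:
 *
 *	if (unlikely(fault_in_iov_iter_readable(i, bytes) == bytes)) {
 *		status = -EFAULT;	// nothing could be faulted in
 *		break;
 *	}
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	// a short copy here => drop the locks, fault in again, retry
 *
 * The return convention ("bytes not faulted in") mirrors copy_from_user().
 */
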
/*
 * fault_in_iov_iter_writeable - fault in iov iterator for writing
 * @i: iterator
 * @size: maximum length
 *
 * Faults in the iterator using get_user_pages(), i.e., without triggering
 * hardware page faults.  This is primarily useful when we already know that
 * some or all of the pages in @i aren't in memory.
 *
 * Returns the number of bytes not faulted in, like copy_to_user() and
 * copy_from_user().
 *
 * Always returns 0 for non-user-space iterators.
 */
size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
{
	if (iter_is_iovec(i)) {
		size_t count = min(size, iov_iter_count(i));
		const struct iovec *p;
		size_t skip;

		size -= count;
		for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) {
			size_t len = min(count, p->iov_len - skip);
			size_t ret;

			if (unlikely(!len))
				continue;
			ret = fault_in_safe_writeable(p->iov_base + skip, len);
			count -= len - ret;
			if (ret)
				break;
		}
		return count + size;
	}
	return 0;
}
EXPORT_SYMBOL(fault_in_iov_iter_writeable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter) {
		.iter_type = ITER_IOVEC,
		.nofault = false,
		.data_source = direction,
		.iov = iov,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_init);
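
/*
 * Usage sketch (illustrative; "ubuf", "kbuf" and "len" are made up):
 * wrapping a single user buffer in an ITER_IOVEC iterator and draining it
 * into a kernel buffer.  The direction is that of the data transfer as
 * seen from the iterator: WRITE means the iterator is the source of a
 * write (data is copied *from* it), READ means it is the destination of a
 * read (data is copied *to* it).
 *
 *	struct iovec iov = { .iov_base = ubuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, WRITE, &iov, 1, len);
 *	if (copy_from_iter(kbuf, len, &iter) != len)
 *		return -EFAULT;
 */
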
static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->flags = 0;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct iov_iter *i, __wsum *sump)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = *sump;
	size_t off = 0;
	unsigned int i_head;
	size_t r;

	if (!sanity(i))
		return 0;

	bytes = push_pipe(i, bytes, &i_head, &r);
	while (bytes) {
		size_t chunk = min_t(size_t, bytes, PAGE_SIZE - r);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		bytes -= chunk;
		off += chunk;
		r = 0;
		i_head++;
	}
	*sump = sum;
	i->count -= off;
	return off;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyout(base, addr + off, len),
		memcpy(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);
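
/*
 * Usage sketch (illustrative; "dev_buf" and "len" are made up): a
 * read-style path hands kernel data back to whatever the iterator
 * describes - user memory, a kvec, a bvec or a pipe - without caring
 * which:
 *
 *	static ssize_t my_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		size_t n = copy_to_iter(dev_buf, min(len, iov_iter_count(to)), to);
 *
 *		return n ? n : -EFAULT;	// 0 => nothing could be copied
 *	}
 *
 * copy_to_iter() (linux/uio.h) is the size-checked wrapper around
 * _copy_to_iter().
 */
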
#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	n = push_pipe(i, bytes, &i_head, &off);
	while (n) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		unsigned long rem;
		rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
		chunk -= rem;
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		xfer += chunk;
		if (rem)
			break;
		n -= chunk;
		off = 0;
		i_head++;
	}
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC, read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	__iterate_and_advance(i, bytes, base, len, off,
		copyout_mc(base, addr + off, len),
		copy_mc_to_kernel(base, addr + off, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */
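
/*
 * Usage sketch (illustrative; the error policy shown is an assumption of
 * this sketch, not mandated here): unlike copy_to_iter(), a short return
 * from _copy_mc_to_iter() can mean poisoned *source* memory rather than a
 * faulting destination, so callers such as a dax/pmem read path tend to
 * turn the residue into -EIO instead of retrying:
 *
 *	size_t copied = _copy_mc_to_iter(kaddr, len, iter);
 *
 *	if (copied != len)
 *		return copied ? copied : -EIO;
 */
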
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, base, len, off,
		copyin(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_inatomic_nocache(addr + off, base, len),
		memcpy(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types.  _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 *
 * Return: number of bytes copied (may be %0)
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		__copy_from_user_flushcache(addr + off, base, len),
		memcpy_flushcache(addr + off, base, len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i))) {
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	} else {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (page_copy_sane(page, offset, bytes)) {
		void *kaddr = kmap_local_page(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_local(kaddr);
		return wanted;
	}
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		char *p = kmap_local_page(pipe->bufs[i_head & p_mask].page);
		memset(p + off, 0, chunk);
		kunmap_local(p);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, base, len, count,
		clear_user(base, len),
		memset(base, 0, len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);
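
/*
 * Usage sketch (illustrative; "buf", "have" and "want" are made up): a
 * read path that hits a hole, or gets less data from the device than
 * requested, can pad the remainder of the destination with zeroes
 * whatever the iterator type is:
 *
 *	size_t copied = copy_to_iter(buf, have, to);
 *
 *	if (copied == have && want > have)
 *		copied += iov_iter_zero(want - have, to);
 *	return copied;
 */
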
size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t bytes,
				  struct iov_iter *i)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off,
		copyin(p + off, base, len),
		memcpy(p + off, base, len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(copy_page_from_iter_atomic);
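
/*
 * Usage sketch (illustrative, not from this file): buffered writes copy
 * user data into a locked pagecache page with page faults disabled (the
 * kmap_atomic() above disables them), so a fault shows up as a short
 * return rather than a sleep, roughly:
 *
 *	copied = copy_page_from_iter_atomic(page, offset, bytes, i);
 *	if (unlikely(copied < bytes)) {
 *		// unlock, fault_in_iov_iter_readable(), then retry
 *	}
 */
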
static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	const struct bio_vec *bvec, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset;

	for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
		if (likely(size < bvec->bv_len))
			break;
		size -= bvec->bv_len;
	}
	i->iov_offset = size;
	i->nr_segs -= bvec - i->bvec;
	i->bvec = bvec;
}

static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
{
	const struct iovec *iov, *end;

	if (!i->count)
		return;
	i->count -= size;

	size += i->iov_offset; // from beginning of current segment
	for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) {
		if (likely(size < iov->iov_len))
			break;
		size -= iov->iov_len;
	}
	i->iov_offset = size;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
		/* iovec and kvec have identical layouts */
		iov_iter_iovec_advance(i, size);
	} else if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
	} else if (iov_iter_is_pipe(i)) {
		pipe_advance(i, size);
	} else if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
	} else if (iov_iter_is_discard(i)) {
		i->count -= size;
	}
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logic for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);
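
/*
 * Usage sketch (illustrative; do_transmit() is a made-up helper that
 * advances the iterator as it goes): a sender that must not consume data
 * it failed to hand off can revert the unsent tail afterwards:
 *
 *	size_t before = iov_iter_count(iter);
 *	int sent = do_transmit(iter);
 *
 *	if (sent < 0)
 *		iov_iter_revert(iter, before - iov_iter_count(iter));
 *	else if (sent < before - iov_iter_count(iter))
 *		iov_iter_revert(iter, before - iov_iter_count(iter) - sent);
 */
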
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_KVEC,
		.data_source = direction,
		.kvec = kvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	*i = (struct iov_iter){
		.iter_type = ITER_BVEC,
		.data_source = direction,
		.bvec = bvec,
		.nr_segs = nr_segs,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	*i = (struct iov_iter){
		.iter_type = ITER_PIPE,
		.data_source = false,
		.pipe = pipe,
		.head = pipe->head,
		.start_head = pipe->head,
		.iov_offset = 0,
		.count = count
	};
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	*i = (struct iov_iter) {
		.iter_type = ITER_XARRAY,
		.data_source = direction,
		.xarray = xarray,
		.xarray_start = start,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_xarray);
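
/*
 * Usage sketch (illustrative; "kbuf", "len", "file" and "iocb" are made
 * up): kernel-internal I/O wraps its own buffers the same way user
 * buffers are wrapped above - a kernel_read()-style helper uses a kvec,
 * a bvec iterator covers pages already pinned for e.g. a network send:
 *
 *	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
 *	struct iov_iter iter;
 *
 *	iov_iter_kvec(&iter, READ, &kv, 1, len);
 *	ret = call_read_iter(file, &iocb, &iter);	// fills kbuf;
 *							// iocb set up with
 *							// init_sync_kiocb()
 */
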
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	*i = (struct iov_iter){
		.iter_type = ITER_DISCARD,
		.data_source = false,
		.count = count,
		.iov_offset = 0
	};
}
EXPORT_SYMBOL(iov_iter_discard);

static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;
	size_t skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->iov[k].iov_len - skip;
		if (len) {
			res |= (unsigned long)i->iov[k].iov_base + skip;
			if (len > size)
				len = size;
			res |= len;
			size -= len;
			if (!size)
				break;
		}
	}
	return res;
}

static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
{
	unsigned res = 0;
	size_t size = i->count;
	unsigned skip = i->iov_offset;
	unsigned k;

	for (k = 0; k < i->nr_segs; k++, skip = 0) {
		size_t len = i->bvec[k].bv_len - skip;
		res |= (unsigned long)i->bvec[k].bv_offset + skip;
		if (len > size)
			len = size;
		res |= len;
		size -= len;
		if (!size)
			break;
	}
	return res;
}

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_iter_alignment_iovec(i);

	if (iov_iter_is_bvec(i))
		return iov_iter_alignment_bvec(i);

	if (iov_iter_is_pipe(i)) {
		unsigned int p_mask = i->pipe->ring_size - 1;
		size_t size = i->count;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}

	if (iov_iter_is_xarray(i))
		return (i->xarray_start + i->iov_offset) | i->count;

	return 0;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	unsigned long v = 0;
	size_t size = i->count;
	unsigned k;

	if (WARN_ON(!iter_is_iovec(i)))
		return ~0U;

	for (k = 0; k < i->nr_segs; k++) {
		if (i->iov[k].iov_len) {
			unsigned long base = (unsigned long)i->iov[k].iov_base;
			if (v) // if not the first one
				res |= base | v; // this start | previous end
			v = base + i->iov[k].iov_len;
			if (size <= i->iov[k].iov_len)
				break;
			size -= i->iov[k].iov_len;
		}
	}
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);
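
/*
 * Usage sketch (illustrative): the result is an OR of every segment
 * address and length, so a direct-I/O path can reject a misaligned
 * request with a single mask test:
 *
 *	if (iov_iter_alignment(iter) & (bdev_logical_block_size(bdev) - 1))
 *		return -EINVAL;		// some segment breaks the alignment
 */
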
static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

/* must be done on non-empty ITER_IOVEC one */
static unsigned long first_iovec_segment(const struct iov_iter *i,
					 size_t *size, size_t *start,
					 size_t maxsize)
{
	size_t skip;
	long k;

	for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
		unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
		size_t len = i->iov[k].iov_len - skip;

		if (unlikely(!len))
			continue;
		if (len > maxsize)
			len = maxsize;
		*start = addr % PAGE_SIZE;
		*size = len;
		return addr & PAGE_MASK;
	}
	BUG(); // if it had been empty, we wouldn't get called
}

/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
				       size_t *size, size_t *start,
				       size_t maxsize)
{
	struct page *page;
	size_t skip = i->iov_offset, len;

	len = i->bvec->bv_len - skip;
	if (len > maxsize)
		len = maxsize;
	skip += i->bvec->bv_offset;
	page = i->bvec->bv_page + skip / PAGE_SIZE;
	*start = skip % PAGE_SIZE;
	*size = len;
	return page;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize);
		n = DIV_ROUND_UP(len + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		res = get_user_pages_fast(addr, n, gup_flags, pages);
		if (unlikely(res <= 0))
			return res;
		return min_t(size_t, len, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize);
		n = DIV_ROUND_UP(len + *start, PAGE_SIZE);
		if (n > maxpages)
			n = maxpages;
		for (int k = 0; k < n; k++)
			get_page(*pages++ = page++);
		return min_t(size_t, len, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages);
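
/*
 * Usage sketch (illustrative): direct-I/O style code extracts page
 * references a batch at a time, advancing the iterator by however much of
 * the request the returned pages actually cover; the iterator itself is
 * not advanced by iov_iter_get_pages():
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t got = iov_iter_get_pages(iter, pages, maxsize, 16, &off);
 *
 *	if (got <= 0)
 *		return got;
 *	iov_iter_advance(iter, got);
 *	// data starts at "off" within pages[0]; put_page() each page
 *	// once the I/O built from them completes.
 */
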
static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t len;
	int n, res;

	if (maxsize > i->count)
		maxsize = i->count;
	if (!maxsize)
		return 0;
	if (maxsize > MAX_RW_COUNT)
		maxsize = MAX_RW_COUNT;

	if (likely(iter_is_iovec(i))) {
		unsigned int gup_flags = 0;
		unsigned long addr;

		if (iov_iter_rw(i) != WRITE)
			gup_flags |= FOLL_WRITE;
		if (i->nofault)
			gup_flags |= FOLL_NOFAULT;

		addr = first_iovec_segment(i, &len, start, maxsize);
		n = DIV_ROUND_UP(len + *start, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, gup_flags, p);
		if (unlikely(res <= 0)) {
			kvfree(p);
			*pages = NULL;
			return res;
		}
		*pages = p;
		return min_t(size_t, len, res * PAGE_SIZE - *start);
	}
	if (iov_iter_is_bvec(i)) {
		struct page *page;

		page = first_bvec_segment(i, &len, start, maxsize);
		n = DIV_ROUND_UP(len + *start, PAGE_SIZE);
		*pages = p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		for (int k = 0; k < n; k++)
			get_page(*p++ = page++);
		return min_t(size_t, len, n * PAGE_SIZE - *start);
	}
	if (iov_iter_is_pipe(i))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (iov_iter_is_xarray(i))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	return -EFAULT;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
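
/*
 * Usage sketch (illustrative): the _alloc variant sizes the page array
 * for the caller; on success the caller owns both the array (free it with
 * kvfree()) and a reference on every returned page:
 *
 *	struct page **pages;
 *	size_t off;
 *	ssize_t got = iov_iter_get_pages_alloc(iter, &pages, maxsize, &off);
 *
 *	if (got > 0) {
 *		unsigned int npages = DIV_ROUND_UP(off + got, PAGE_SIZE);
 *		// ... use the pages ...
 *		for (unsigned int k = 0; k < npages; k++)
 *			put_page(pages[k]);
 *		kvfree(pages);
 *	}
 */
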
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	__wsum sum, next;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_from_user(base, addr + off, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(addr + off, base, len, sum, off);
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	__wsum sum, next;

	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}

	sum = csum_shift(csstate->csum, csstate->off);
	if (unlikely(iov_iter_is_pipe(i)))
		bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum);
	else iterate_and_advance(i, bytes, base, len, off, ({
		next = csum_and_copy_to_user(addr + off, base, len);
		sum = csum_block_add(sum, next, off);
		next ? 0 : len;
	}), ({
		sum = csum_and_memcpy(base, addr + off, len, sum, off);
	})
	)
	csstate->csum = csum_shift(sum, csstate->off);
	csstate->off += bytes;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

static int iov_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct iovec *p;
	int npages = 0;

	for (p = i->iov; size; skip = 0, p++) {
		unsigned offs = offset_in_page(p->iov_base + skip);
		size_t len = min(p->iov_len - skip, size);

		if (len) {
			size -= len;
			npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
			if (unlikely(npages > maxpages))
				return maxpages;
		}
	}
	return npages;
}

static int bvec_npages(const struct iov_iter *i, int maxpages)
{
	size_t skip = i->iov_offset, size = i->count;
	const struct bio_vec *p;
	int npages = 0;

	for (p = i->bvec; size; skip = 0, p++) {
		unsigned offs = (p->bv_offset + skip) % PAGE_SIZE;
		size_t len = min(p->bv_len - skip, size);

		size -= len;
		npages += DIV_ROUND_UP(offs + len, PAGE_SIZE);
		if (unlikely(npages > maxpages))
			return maxpages;
	}
	return npages;
}

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	if (unlikely(!i->count))
		return 0;
	/* iovec and kvec have identical layouts */
	if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
		return iov_npages(i, maxpages);
	if (iov_iter_is_bvec(i))
		return bvec_npages(i, maxpages);
	if (iov_iter_is_pipe(i)) {
		unsigned int iter_head;
		int npages;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
		return min(npages, maxpages);
	}
	if (iov_iter_is_xarray(i)) {
		unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE;
		int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE);
		return min(npages, maxpages);
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_npages);
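
/*
 * Usage sketch (illustrative; the bio_alloc() call shows the two-argument
 * 5.x-era signature): iov_iter_npages() is the sizing companion to
 * iov_iter_get_pages() - e.g. a direct-I/O submitter caps a bio by the
 * number of pages the remaining iterator can touch:
 *
 *	int nr = iov_iter_npages(iter, BIO_MAX_VECS);
 *	struct bio *bio = bio_alloc(GFP_KERNEL, nr);
 */
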
const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
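
/*
 * Usage sketch (illustrative; do_the_io() is a made-up helper): the
 * typical readv()/writev()-style caller pattern, with a small on-stack
 * array that import_iovec() transparently falls back from:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(&iter);
 *	kfree(iov);		// safe: NULL if the stack array was used
 *	return ret;
 */
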
/**
 * iov_iter_restore() - Restore a &struct iov_iter to the same state as when
 *     iov_iter_save_state() was called.
 *
 * @i: &struct iov_iter to restore
 * @state: state to restore from
 *
 * Used after iov_iter_save_state() to restore @i, if operations may have
 * advanced it.
 *
 * Note: only works on ITER_IOVEC, ITER_BVEC, and ITER_KVEC
 */
void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
{
	if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
			 !iov_iter_is_kvec(i))
		return;
	i->iov_offset = state->iov_offset;
	i->count = state->count;
	/*
	 * For the *vec iters, nr_segs + iov is constant - if we increment
	 * the vec, then we also decrement the nr_segs count. Hence we don't
	 * need to track both of these, just one is enough and we can deduce
	 * the other from that. ITER_KVEC and ITER_IOVEC are the same struct
	 * size, so we can just increment the iov pointer as they are unionized.
	 * ITER_BVEC _may_ be the same size on some archs, but on others it is
	 * not. Be safe and handle it separately.
	 */
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	if (iov_iter_is_bvec(i))
		i->bvec -= state->nr_segs - i->nr_segs;
	else
		i->iov -= state->nr_segs - i->nr_segs;
	i->nr_segs = state->nr_segs;
}
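
/*
 * Usage sketch (illustrative; do_the_io() is a made-up helper):
 * io_uring-style retry logic snapshots the iterator before issuing I/O
 * and rewinds it if the attempt has to be redone.  iov_iter_save_state()
 * is the inline counterpart declared in <linux/uio.h>:
 *
 *	struct iov_iter_state state;
 *
 *	iov_iter_save_state(iter, &state);
 *	ret = do_the_io(iter);			// may advance the iterator
 *	if (ret == -EAGAIN) {
 *		iov_iter_restore(iter, &state);
 *		// requeue and try again later
 *	}
 */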