// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) { \
	size_t left; \
	size_t wanted = n; \
	__p = i->iov; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} else { \
		left = 0; \
	} \
	while (unlikely(!left && n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		left = (STEP); \
		__v.iov_len -= left; \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted - n; \
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
	size_t wanted = n; \
	__p = i->kvec; \
	__v.iov_len = min(n, __p->iov_len - skip); \
	if (likely(__v.iov_len)) { \
		__v.iov_base = __p->iov_base + skip; \
		(void)(STEP); \
		skip += __v.iov_len; \
		n -= __v.iov_len; \
	} \
	while (unlikely(n)) { \
		__p++; \
		__v.iov_len = min(n, __p->iov_len); \
		if (unlikely(!__v.iov_len)) \
			continue; \
		__v.iov_base = __p->iov_base; \
		(void)(STEP); \
		skip = __v.iov_len; \
		n -= __v.iov_len; \
	} \
	n = wanted; \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) { \
	struct bvec_iter __start; \
	__start.bi_size = n; \
	__start.bi_bvec_done = skip; \
	__start.bi_idx = 0; \
	for_each_bvec(__v, i->bvec, __bi, __start) { \
		(void)(STEP); \
	} \
}

#define iterate_xarray(i, n, __v, skip, STEP) { \
	struct page *head = NULL; \
	size_t wanted = n, seg, offset; \
	loff_t start = i->xarray_start + skip; \
	pgoff_t index = start >> PAGE_SHIFT; \
	int j; \
	\
	XA_STATE(xas, i->xarray, index); \
	\
	rcu_read_lock(); \
	xas_for_each(&xas, head, ULONG_MAX) { \
		if (xas_retry(&xas, head)) \
			continue; \
		if (WARN_ON(xa_is_value(head))) \
			break; \
		if (WARN_ON(PageHuge(head))) \
			break; \
		for (j = (head->index < index) ? index - head->index : 0; \
		     j < thp_nr_pages(head); j++) { \
			__v.bv_page = head + j; \
			offset = (i->xarray_start + skip) & ~PAGE_MASK; \
			seg = PAGE_SIZE - offset; \
			__v.bv_offset = offset; \
			__v.bv_len = min(n, seg); \
			(void)(STEP); \
			n -= __v.bv_len; \
			skip += __v.bv_len; \
			if (n == 0) \
				break; \
		} \
		if (n == 0) \
			break; \
	} \
	rcu_read_unlock(); \
	n = wanted - n; \
}

#define iterate_all_kinds(i, n, v, I, B, K, X) { \
	if (likely(n)) { \
		size_t skip = i->iov_offset; \
		if (likely(iter_is_iovec(i))) { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
		} else if (iov_iter_is_bvec(i)) { \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
		} else if (iov_iter_is_kvec(i)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
		} else if (iov_iter_is_xarray(i)) { \
			struct bio_vec v; \
			iterate_xarray(i, n, v, skip, (X)); \
		} \
	} \
}

#define iterate_and_advance(i, n, v, I, B, K, X) { \
	if (unlikely(i->count < n)) \
		n = i->count; \
	if (i->count) { \
		size_t skip = i->iov_offset; \
		if (likely(iter_is_iovec(i))) { \
			const struct iovec *iov; \
			struct iovec v; \
			iterate_iovec(i, n, v, iov, skip, (I)) \
			if (skip == iov->iov_len) { \
				iov++; \
				skip = 0; \
			} \
			i->nr_segs -= iov - i->iov; \
			i->iov = iov; \
		} else if (iov_iter_is_bvec(i)) { \
			const struct bio_vec *bvec = i->bvec; \
			struct bio_vec v; \
			struct bvec_iter __bi; \
			iterate_bvec(i, n, v, __bi, skip, (B)) \
			i->bvec = __bvec_iter_bvec(i->bvec, __bi); \
			i->nr_segs -= i->bvec - bvec; \
			skip = __bi.bi_bvec_done; \
		} else if (iov_iter_is_kvec(i)) { \
			const struct kvec *kvec; \
			struct kvec v; \
			iterate_kvec(i, n, v, kvec, skip, (K)) \
			if (skip == kvec->iov_len) { \
				kvec++; \
				skip = 0; \
			} \
			i->nr_segs -= kvec - i->kvec; \
			i->kvec = kvec; \
		} else if (iov_iter_is_xarray(i)) { \
			struct bio_vec v; \
			iterate_xarray(i, n, v, skip, (X)) \
		} else if (iov_iter_is_discard(i)) { \
			skip += n; \
		} \
		i->count -= n; \
		i->iov_offset = skip; \
	} \
}

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (iter_is_iovec(i)) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
			return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

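/*
 * Example (illustrative sketch, not taken from this file): a buffered write
 * path typically pre-faults the source iovec before taking page locks, so
 * that the later atomic copy is unlikely to fault with a lock held.  The
 * surrounding loop and the names "status" and "copied" are hypothetical:
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
 *		status = -EFAULT;
 *		break;
 *	}
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */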
void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct csum_state *csstate,
					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = csstate->csum;
	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
				     const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				   struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer).  Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter().
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again.  Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			rcu_read_unlock();
			i->iov_offset += bytes;
			i->count -= bytes;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

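/*
 * Example (illustrative sketch, assuming a pmem-style dax read path): a
 * caller that may read from poisoned source memory uses the _mc_ variant
 * and turns a short copy into an error.  The names and error policy below
 * are hypothetical:
 *
 *	size_t done = _copy_mc_to_iter(kaddr, len, iter);
 *
 *	if (done != len)
 *		return done ? done : -EIO;
 */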
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache.  It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types.  The _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

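/*
 * Example (illustrative sketch): a filesystem-dax write path that must
 * guarantee the incoming data reaches the persistence domain would use the
 * flushcache variant rather than _copy_from_iter_nocache().  The names
 * below are hypothetical:
 *
 *	size_t done = _copy_from_iter_flushcache(dax_addr, len, iter);
 *
 *	if (done < len)
 *		... handle the short copy ...
 */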
static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (likely(iter_is_iovec(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	}
	if (iov_iter_is_pipe(i))
		return copy_page_to_iter_pipe(page, offset, bytes, i);
	if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	}
	WARN_ON(1);
	return 0;
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (likely(iter_is_iovec(i)))
		return copy_page_from_iter_iovec(page, offset, bytes, i);
	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	}
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
		return;
	}
	if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (i->nr_segs > 1) {
		if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
			return min(i->count, i->iov->iov_len - i->iov_offset);
		if (iov_iter_is_bvec(i))
			return min(i->count, i->bvec->bv_len - i->iov_offset);
	}
	return i->count;
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	i->type = ITER_XARRAY | (direction & (READ | WRITE));
	i->xarray = xarray;
	i->xarray_start = start;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_xarray);

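/*
 * Example (illustrative sketch): a network-filesystem read helper can point
 * an iterator at the pages already attached to an inode's mapping and let
 * the copy routines above fill them.  As noted above, the caller must keep
 * the pages pinned or locked for the duration; "mapping", "pos", "len" and
 * "buf" are hypothetical caller state:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_xarray(&iter, READ, &mapping->i_pages, pos, len);
 *	copy_to_iter(buf, len, &iter);
 */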
/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);

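/*
 * Example (illustrative sketch): a caller that wants to drain and throw away
 * a number of bytes from a data source can hand it a discard iterator.  The
 * read routine and its arguments are hypothetical:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	ret = some_read_op(src, &iter);
 */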
unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	if (unlikely(iov_iter_is_xarray(i)))
		return (i->xarray_start + i->iov_offset) | i->count;
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len,
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split? */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}),
	0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}), 0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	const char *from = addr;
	__wsum sum, next;
	size_t off;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);

	sum = csstate->csum;
	off = csstate->off;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				     (from += v.iov_len) - v.iov_len,
				     v.iov_len, sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

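/*
 * Example (illustrative sketch): a caller that needs the data it hands out
 * to be checksummed on the fly keeps the running checksum in a
 * struct csum_state and passes it through every chunk.  "buf", "len",
 * "iter" and the initial checksum value are hypothetical:
 *
 *	struct csum_state csstate = { .csum = 0, .off = 0 };
 *
 *	copied = csum_and_copy_to_iter(buf, len, &csstate, iter);
 *	(csstate.csum now covers the copied bytes.)
 */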
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else if (unlikely(iov_iter_is_xarray(i))) {
		unsigned offset;

		offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;

		npages = 1;
		if (size > PAGE_SIZE - offset) {
			size -= PAGE_SIZE - offset;
			npages += size >> PAGE_SHIFT;
			size &= ~PAGE_MASK;
			if (size)
				npages++;
		}
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	}),
	0
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}.  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @fast_iov.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

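/*
 * Example (illustrative sketch): the usual calling pattern from a
 * readv()/writev()-style path.  Note that, as documented above, kfree(iov)
 * is safe whether or not the on-stack array ended up being used.  The I/O
 * routine "do_the_io" is hypothetical:
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	ssize_t ret;
 *
 *	ret = import_iovec(READ, uvec, nr_segs, ARRAY_SIZE(iovstack),
 *			   &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(file, &iter);
 *	kfree(iov);
 */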
int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);