// SPDX-License-Identifier: GPL-2.0-only
#include <crypto/hash.h>
#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/fault-inject-usercopy.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>
#include <linux/instrumented.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		(void)(STEP);				\
	}						\
}

#define iterate_xarray(i, n, __v, skip, STEP) {		\
	struct page *head = NULL;			\
	size_t wanted = n, seg, offset;			\
	loff_t start = i->xarray_start + skip;		\
	pgoff_t index = start >> PAGE_SHIFT;		\
	int j;						\
							\
	XA_STATE(xas, i->xarray, index);		\
							\
	rcu_read_lock();					\
	xas_for_each(&xas, head, ULONG_MAX) {			\
		if (xas_retry(&xas, head))			\
			continue;				\
		if (WARN_ON(xa_is_value(head)))			\
			break;					\
		if (WARN_ON(PageHuge(head)))			\
			break;					\
		for (j = (head->index < index) ? index - head->index : 0; \
		     j < thp_nr_pages(head); j++) {		\
			__v.bv_page = head + j;			\
			offset = (i->xarray_start + skip) & ~PAGE_MASK;	\
			seg = PAGE_SIZE - offset;		\
			__v.bv_offset = offset;			\
			__v.bv_len = min(n, seg);		\
			(void)(STEP);				\
			n -= __v.bv_len;			\
			skip += __v.bv_len;			\
			if (n == 0)				\
				break;				\
		}						\
		if (n == 0)					\
			break;					\
	}							\
	rcu_read_unlock();					\
	n = wanted - n;						\
}

#define iterate_all_kinds(i, n, v, I, B, K, X) {		\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
		} else if (unlikely(i->type & ITER_XARRAY)) {	\
			struct bio_vec v;			\
			iterate_xarray(i, n, v, skip, (X));	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K, X) {		\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else if (unlikely(i->type & ITER_DISCARD)) {	\
			skip += n;				\
		} else if (unlikely(i->type & ITER_XARRAY)) {	\
			struct bio_vec v;			\
			iterate_xarray(i, n, v, skip, (X))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}

static int copyout(void __user *to, const void *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = raw_copy_to_user(to, from, n);
	}
	return n;
}

static int copyin(void *to, const void __user *from, size_t n)
{
	if (should_fail_usercopy())
		return n;
	if (access_ok(from, n)) {
		instrument_copy_from_user(to, from, n);
		n = raw_copy_from_user(to, from, n);
	}
	return n;
}

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyout(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyout(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = copyout(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyout(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	might_fault();
	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = copyin(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = copyin(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = copyin(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = copyin(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_head = pipe->head;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
	unsigned int i_head = i->head;
	unsigned int idx;

	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(p_occupancy == 0))
			goto Bad;	// pipe must be non-empty
		if (unlikely(i_head != p_head - 1))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[i_head & p_mask];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (i_head != p_head)
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i_head, i->iov_offset);
	printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n",
			p_head, p_tail, pipe->ring_size);
	for (idx = 0; idx < pipe->ring_size; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head = i->head;
	size_t off;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	buf = &pipe->bufs[i_head & p_mask];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		i_head++;
		buf = &pipe->bufs[i_head & p_mask];
	}
	if (pipe_full(i_head, p_tail, pipe->max_usage))
		return 0;

	buf->ops = &page_cache_pipe_buf_ops;
	get_page(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = bytes;

	pipe->head = i_head + 1;
	i->iov_offset = offset + bytes;
	i->head = i_head;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (iter_is_iovec(i)) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, unsigned int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	direction &= READ | WRITE;

	/* It will get better.  Eventually... */
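	/*
	 * uaccess_kernel() is true when the caller runs under
	 * set_fs(KERNEL_DS), i.e. the "user" pointers in @iov are really
	 * kernel addresses, so degrade the iterator to a kvec-backed one.
	 */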
	if (uaccess_kernel()) {
		i->type = ITER_KVEC | direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = ITER_IOVEC | direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i,
			      unsigned int *iter_headp, size_t *offp)
{
	unsigned int p_mask = i->pipe->ring_size - 1;
	unsigned int iter_head = i->head;
	size_t off = i->iov_offset;

	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
		    off == PAGE_SIZE)) {
		iter_head++;
		off = 0;
	}
	*iter_headp = iter_head;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *iter_headp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int iter_head;
	size_t off;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &iter_head, &off);
	*iter_headp = iter_head;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[iter_head & p_mask].len += size;
			return size;
		}
		pipe->bufs[iter_head & p_mask].len = PAGE_SIZE;
		iter_head++;
	}
	while (!pipe_full(iter_head, p_tail, pipe->max_usage)) {
		struct pipe_buffer *buf = &pipe->bufs[iter_head & p_mask];
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;

		buf->ops = &default_pipe_buf_ops;
		buf->page = page;
		buf->offset = 0;
		buf->len = min_t(ssize_t, left, PAGE_SIZE);
		left -= buf->len;
		iter_head++;
		pipe->head = iter_head;

		if (left == 0)
			return size;
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[i_head & p_mask].page, off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
			      __wsum sum, size_t off)
{
	__wsum next = csum_partial_copy_nocheck(from, to, len);
	return csum_block_add(sum, next, off);
}

static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
					 struct csum_state *csstate,
					 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	__wsum sum = csstate->csum;
	size_t off = csstate->off;
	unsigned int i_head;
	size_t n, r;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &r);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
		char *p = kmap_atomic(pipe->bufs[i_head & p_mask].page);
		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
		kunmap_atomic(p);
		i->head = i_head;
		i->iov_offset = r + chunk;
		n -= chunk;
		off += chunk;
		addr += chunk;
		r = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}

size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(iov_iter_is_pipe(i)))
		return copy_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_to_iter);

#ifdef CONFIG_ARCH_HAS_COPY_MC
static int copyout_mc(void __user *to, const void *from, size_t n)
{
	if (access_ok(to, n)) {
		instrument_copy_to_user(to, from, n);
		n = copy_mc_to_user((__force void *) to, from, n);
	}
	return n;
}

static unsigned long copy_mc_to_page(struct page *page, size_t offset,
				     const char *from, size_t len)
{
	unsigned long ret;
	char *to;

	to = kmap_atomic(page);
	ret = copy_mc_to_kernel(to + offset, from, len);
	kunmap_atomic(to);

	return ret;
}

static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off, xfer = 0;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;
	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		unsigned long rem;

		rem = copy_mc_to_page(pipe->bufs[i_head & p_mask].page,
				      off, addr, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk - rem;
		xfer += chunk - rem;
		if (rem)
			break;
		n -= chunk;
		addr += chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= xfer;
	return xfer;
}

/**
 * _copy_mc_to_iter - copy to iter with source memory error exception handling
 * @addr: source kernel address
 * @bytes: total transfer length
 * @i: destination iterator
 *
 * The pmem driver deploys this for the dax operation
 * (dax_copy_to_iter()) for dax reads (bypass page-cache and the
 * block-layer). Upon #MC read(2) aborts and returns EIO or the bytes
 * successfully copied.
 *
 * The main differences between this and typical _copy_to_iter() are:
 *
 * * Typical tail/residue handling after a fault retries the copy
 *   byte-by-byte until the fault happens again. Re-triggering machine
 *   checks is potentially fatal so the implementation uses source
 *   alignment and poison alignment assumptions to avoid re-triggering
 *   hardware exceptions.
 *
 * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
 *   Compare to copy_to_iter() where only ITER_IOVEC attempts might return
 *   a short copy.
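 *
 * A caller typically turns a short return into -EIO; an illustrative
 * sketch (the names below are made up, not taken from this file):
 *
 *	size_t copied = _copy_mc_to_iter(src, len, iter);
 *
 *	if (copied != len)
 *		return copied ? copied : -EIO;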
 */
size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;

	if (unlikely(iov_iter_is_pipe(i)))
		return copy_mc_pipe_to_iter(addr, bytes, i);
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
			   v.iov_len),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
					- v.iov_len, v.iov_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			return bytes;
		}
		}),
		({
		rem = copy_mc_to_page(v.bv_page, v.bv_offset,
				      (from += v.bv_len) - v.bv_len, v.bv_len);
		if (rem) {
			curr_addr = (unsigned long) from;
			bytes = curr_addr - s_addr - rem;
			rcu_read_unlock();
			i->iov_offset += bytes;
			i->count -= bytes;
			return bytes;
		}
		})
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
#endif /* CONFIG_ARCH_HAS_COPY_MC */

size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	if (iter_is_iovec(i))
		might_fault();
	iterate_and_advance(i, bytes, v,
		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter);

size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(_copy_from_iter_nocache);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * _copy_from_iter_flushcache - write destination through cpu cache
 * @addr: destination kernel address
 * @bytes: total transfer length
 * @i: source iterator
 *
 * The pmem driver arranges for filesystem-dax to use this facility via
 * dax_copy_from_iter() for ensuring that writes to persistent memory
 * are flushed through the CPU cache. It is differentiated from
 * _copy_from_iter_nocache() in that it guarantees all data is flushed for
 * all iterator types. _copy_from_iter_nocache() only attempts to
 * bypass the cache for the ITER_IOVEC case, and on some archs may use
 * instructions that strand dirty-data in the cache.
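 *
 * An illustrative sketch of a caller (pmem_addr is an assumed name, not
 * taken from this file): a dax ->copy_from_iter() method can forward
 * directly,
 *
 *	return _copy_from_iter_flushcache(pmem_addr, bytes, i);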
 */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(iov_iter_is_pipe(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
			v.iov_len),
		memcpy_page_flushcache((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL_GPL(_copy_from_iter_flushcache);
#endif

static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
{
	struct page *head;
	size_t v = n + offset;

	/*
	 * The general case needs to access the page order in order
	 * to compute the page size.
	 * However, we mostly deal with order-0 pages and thus can
	 * avoid a possible cache line miss for requests that fit all
	 * page orders.
	 */
	if (n <= v && v <= PAGE_SIZE)
		return true;

	head = compound_head(page);
	v += (page - head) << PAGE_SHIFT;

	if (likely(n <= v && v <= (page_size(head))))
		return true;
	WARN_ON(1);
	return false;
}

static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
				  struct iov_iter *i)
{
	if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (unlikely(iov_iter_is_discard(i))) {
		if (unlikely(i->count < bytes))
			bytes = i->count;
		i->count -= bytes;
		return bytes;
	} else if (likely(!iov_iter_is_pipe(i)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t res = 0;
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	page += offset / PAGE_SIZE; // first subpage
	offset %= PAGE_SIZE;
	while (1) {
		size_t n = __copy_page_to_iter(page, offset,
				min(bytes, (size_t)PAGE_SIZE - offset), i);
		res += n;
		bytes -= n;
		if (!bytes || !n)
			break;
		offset += n;
		if (offset == PAGE_SIZE) {
			page++;
			offset = 0;
		}
	}
	return res;
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(!page_copy_sane(page, offset, bytes)))
		return 0;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC | ITER_KVEC | ITER_XARRAY)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	unsigned int i_head;
	size_t n, off;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &i_head, &off);
	if (unlikely(!n))
		return 0;

	do {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[i_head & p_mask].page, off, chunk);
		i->head = i_head;
		i->iov_offset = off + chunk;
		n -= chunk;
		off = 0;
		i_head++;
	} while (n);
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(!page_copy_sane(page, offset, bytes))) {
		kunmap_atomic(kaddr);
		return 0;
	}
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_tail = pipe->tail;
	unsigned int p_head = pipe->head;
	unsigned int p_mask = pipe->ring_size - 1;

	if (!pipe_empty(p_head, p_tail)) {
		struct pipe_buffer *buf;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;

		if (off) {
			buf = &pipe->bufs[i_head & p_mask];
			buf->len = off - buf->offset;
			i_head++;
		}
		while (p_head != i_head) {
			p_head--;
			pipe_buf_release(pipe, &pipe->bufs[p_head & p_mask]);
		}

		pipe->head = p_head;
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (size) {
		struct pipe_buffer *buf;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset, left = size;

		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[i_head & p_mask].offset;
		while (1) {
			buf = &pipe->bufs[i_head & p_mask];
			if (left <= buf->len)
				break;
			left -= buf->len;
			i_head++;
		}
		i->head = i_head;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
	struct bvec_iter bi;

	bi.bi_size = i->count;
	bi.bi_bvec_done = i->iov_offset;
	bi.bi_idx = 0;
	bvec_iter_advance(i->bvec, &bi, size);

	i->bvec += bi.bi_idx;
	i->nr_segs -= bi.bi_idx;
	i->count = bi.bi_size;
	i->iov_offset = bi.bi_bvec_done;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->count < size))
		size = i->count;
	if (unlikely(iov_iter_is_pipe(i))) {
		pipe_advance(i, size);
		return;
	}
	if (unlikely(iov_iter_is_discard(i))) {
		i->count -= size;
		return;
	}
	if (unlikely(iov_iter_is_xarray(i))) {
		i->iov_offset += size;
		i->count -= size;
		return;
	}
	if (iov_iter_is_bvec(i)) {
		iov_iter_bvec_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
	if (!unroll)
		return;
	if (WARN_ON(unroll > MAX_RW_COUNT))
		return;
	i->count += unroll;
	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int p_mask = pipe->ring_size - 1;
		unsigned int i_head = i->head;
		size_t off = i->iov_offset;
		while (1) {
			struct pipe_buffer *b = &pipe->bufs[i_head & p_mask];
			size_t n = off - b->offset;
			if (unroll < n) {
				off -= unroll;
				break;
			}
			unroll -= n;
			if (!unroll && i_head == i->start_head) {
				off = 0;
				break;
			}
			i_head--;
			b = &pipe->bufs[i_head & p_mask];
			off = b->offset + b->len;
		}
		i->iov_offset = off;
		i->head = i_head;
		pipe_truncate(i);
		return;
	}
	if (unlikely(iov_iter_is_discard(i)))
		return;
	if (unroll <= i->iov_offset) {
		i->iov_offset -= unroll;
		return;
	}
	unroll -= i->iov_offset;
	if (iov_iter_is_xarray(i)) {
		BUG(); /* We should never go beyond the start of the specified
			* range since we might then be straying into pages that
			* aren't pinned.
			*/
	} else if (iov_iter_is_bvec(i)) {
		const struct bio_vec *bvec = i->bvec;
		while (1) {
			size_t n = (--bvec)->bv_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->bvec = bvec;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	} else { /* same logics for iovec and kvec */
		const struct iovec *iov = i->iov;
		while (1) {
			size_t n = (--iov)->iov_len;
			i->nr_segs++;
			if (unroll <= n) {
				i->iov = iov;
				i->iov_offset = n - unroll;
				return;
			}
			unroll -= n;
		}
	}
}
EXPORT_SYMBOL(iov_iter_revert);

/*
 * Return the count of just the current iov_iter segment.
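 *
 * Pipe, discard and xarray iterators have no meaningful notion of a
 * current segment here, so the full remaining count is returned for them.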
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(iov_iter_is_pipe(i)))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	if (unlikely(iov_iter_is_discard(i) || iov_iter_is_xarray(i)))
		return i->count;
	if (iov_iter_is_bvec(i))
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, unsigned int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_KVEC | (direction & (READ | WRITE));
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, unsigned int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	WARN_ON(direction & ~(READ | WRITE));
	i->type = ITER_BVEC | (direction & (READ | WRITE));
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, unsigned int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != READ);
	WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size));
	i->type = ITER_PIPE | READ;
	i->pipe = pipe;
	i->head = pipe->head;
	i->iov_offset = 0;
	i->count = count;
	i->start_head = i->head;
}
EXPORT_SYMBOL(iov_iter_pipe);

/**
 * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @xarray: The xarray to access.
 * @start: The start file position.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator to either draw data out of the pages attached to an
 * inode or to inject data into those pages.  The pages *must* be prevented
 * from evaporation, either by taking a ref on them or locking them by the
 * caller.
 */
void iov_iter_xarray(struct iov_iter *i, unsigned int direction,
		     struct xarray *xarray, loff_t start, size_t count)
{
	BUG_ON(direction & ~1);
	i->type = ITER_XARRAY | (direction & (READ | WRITE));
	i->xarray = xarray;
	i->xarray_start = start;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_xarray);

/**
 * iov_iter_discard - Initialise an I/O iterator that discards data
 * @i: The iterator to initialise.
 * @direction: The direction of the transfer.
 * @count: The size of the I/O buffer in bytes.
 *
 * Set up an I/O iterator that just discards everything that's written to it.
 * It's only available as a READ iterator.
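 *
 * Example (an illustrative sketch, not from this file; drain_from_dev()
 * is a made-up helper): throwing away @count bytes a device must produce:
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_discard(&iter, READ, count);
 *	ret = drain_from_dev(dev, &iter);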
 */
void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
{
	BUG_ON(direction != READ);
	i->type = ITER_DISCARD | READ;
	i->count = count;
	i->iov_offset = 0;
}
EXPORT_SYMBOL(iov_iter_discard);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i))) {
		unsigned int p_mask = i->pipe->ring_size - 1;

		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
			return size | i->iov_offset;
		return size;
	}
	if (unlikely(iov_iter_is_xarray(i)))
		return (i->xarray_start + i->iov_offset) | i->count;
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len,
		res |= v.bv_offset | v.bv_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline ssize_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int iter_head,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	unsigned int p_mask = pipe->ring_size - 1;
	ssize_t n = push_pipe(i, maxsize, &iter_head, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[iter_head & p_mask].page);
		iter_head++;
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned int iter_head, npages;
	size_t capacity;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, iter_head, start);
}

static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa,
					  pgoff_t index, unsigned int nr_pages)
{
	XA_STATE(xas, xa, index);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
		if (xas_retry(&xas, page))
			continue;

		/* Has the page moved or been split?
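		 * The RCU-protected walk can race with truncation or a
		 * huge page split; xas_reload() re-reads the slot, and a
		 * changed entry makes us retry this index from scratch.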
		 */
		if (unlikely(page != xas_reload(&xas))) {
			xas_reset(&xas);
			continue;
		}

		pages[ret] = find_subpage(page, xas.xa_index);
		get_page(pages[ret]);
		if (++ret == nr_pages)
			break;
	}
	rcu_read_unlock();
	return ret;
}

static ssize_t iter_xarray_get_pages(struct iov_iter *i,
				     struct page **pages, size_t maxsize,
				     unsigned maxpages, size_t *_start_offset)
{
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size || !maxpages)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	if (count > maxpages)
		count = maxpages;

	nr = iter_xarray_populate_pages(pages, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages(i, pages, maxsize, maxpages, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0,
				pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}),
	0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	return kvmalloc_array(n, sizeof(struct page *), GFP_KERNEL);
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	unsigned int iter_head, npages;
	ssize_t n;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &iter_head, start);
	/* Amount of free space: some of this one + all after this one */
	npages = pipe_space_for_user(iter_head, i->pipe->tail, i->pipe);
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, iter_head, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

static ssize_t iter_xarray_get_pages_alloc(struct iov_iter *i,
					   struct page ***pages, size_t maxsize,
					   size_t *_start_offset)
{
	struct page **p;
	unsigned nr, offset;
	pgoff_t index, count;
	size_t size = maxsize, actual;
	loff_t pos;

	if (!size)
		return 0;

	pos = i->xarray_start + i->iov_offset;
	index = pos >> PAGE_SHIFT;
	offset = pos & ~PAGE_MASK;
	*_start_offset = offset;

	count = 1;
	if (size > PAGE_SIZE - offset) {
		size -= PAGE_SIZE - offset;
		count += size >> PAGE_SHIFT;
		size &= ~PAGE_MASK;
		if (size)
			count++;
	}

	p = get_pages_array(count);
	if (!p)
		return -ENOMEM;
	*pages = p;

	nr = iter_xarray_populate_pages(p, i->xarray, index, count);
	if (nr == 0)
		return 0;

	actual = PAGE_SIZE * nr;
	actual -= offset;
	if (nr == count && size > 0) {
		unsigned last_offset = (nr > 1) ? 0 : offset;
		actual -= PAGE_SIZE - (last_offset + size);
	}
	return actual;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(iov_iter_is_pipe(i)))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_xarray(i)))
		return iter_xarray_get_pages_alloc(i, pages, maxsize, start);
	if (unlikely(iov_iter_is_discard(i)))
		return -EFAULT;

	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n,
				iov_iter_rw(i) != WRITE ?  FOLL_WRITE : 0, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	}), 0
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
				      v.iov_base, v.iov_len,
				      sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
				      p + v.bv_offset, v.bv_len,
				      sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
			     struct iov_iter *i)
{
	struct csum_state *csstate = _csstate;
	const char *from = addr;
	__wsum sum, next;
	size_t off;

	if (unlikely(iov_iter_is_pipe(i)))
		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);

	sum = csstate->csum;
	off = csstate->off;
	if (unlikely(iov_iter_is_discard(i))) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len);
		if (next) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		next ? 0 : v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	}),({
		sum = csum_and_memcpy(v.iov_base,
				      (from += v.iov_len) - v.iov_len,
				      v.iov_len, sum, off);
		off += v.iov_len;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		sum = csum_and_memcpy(p + v.bv_offset,
				      (from += v.bv_len) - v.bv_len,
				      v.bv_len, sum, off);
		kunmap_atomic(p);
		off += v.bv_len;
	})
	)
	csstate->csum = sum;
	csstate->off = off;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
		struct iov_iter *i)
{
#ifdef CONFIG_CRYPTO_HASH
	struct ahash_request *hash = hashp;
	struct scatterlist sg;
	size_t copied;

	copied = copy_to_iter(addr, bytes, i);
	sg_init_one(&sg, addr, copied);
	ahash_request_set_crypt(hash, &sg, NULL, copied);
	crypto_ahash_update(hash);
	return copied;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;
	if (unlikely(iov_iter_is_discard(i)))
		return 0;

	if (unlikely(iov_iter_is_pipe(i))) {
		struct pipe_inode_info *pipe = i->pipe;
		unsigned int iter_head;
		size_t off;

		if (!sanity(i))
			return 0;

		data_start(i, &iter_head, &off);
		/* some of this one + all after this one */
		npages = pipe_space_for_user(iter_head, pipe->tail, pipe);
		if (npages >= maxpages)
			return maxpages;
	} else if (unlikely(iov_iter_is_xarray(i))) {
		unsigned offset;

		offset = (i->xarray_start + i->iov_offset) & ~PAGE_MASK;

		npages = 1;
		if (size > PAGE_SIZE - offset) {
			size -= PAGE_SIZE - offset;
			npages += size >> PAGE_SHIFT;
			size &= ~PAGE_MASK;
			if (size)
				npages++;
		}
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	}),
	0
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(iov_iter_is_pipe(new))) {
		WARN_ON(1);
		return NULL;
	}
	if (unlikely(iov_iter_is_discard(new) || iov_iter_is_xarray(new)))
		return NULL;
	if (iov_iter_is_bvec(new))
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

static int copy_compat_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	const struct compat_iovec __user *uiov =
		(const struct compat_iovec __user *)uvec;
	int ret = -EFAULT, i;

	if (!user_access_begin(uiov, nr_segs * sizeof(*uiov)))
		return -EFAULT;

	for (i = 0; i < nr_segs; i++) {
		compat_uptr_t buf;
		compat_ssize_t len;

		unsafe_get_user(len, &uiov[i].iov_len, uaccess_end);
		unsafe_get_user(buf, &uiov[i].iov_base, uaccess_end);

		/* check for compat_size_t not fitting in compat_ssize_t .. */
		if (len < 0) {
			ret = -EINVAL;
			goto uaccess_end;
		}
		iov[i].iov_base = compat_ptr(buf);
		iov[i].iov_len = len;
	}

	ret = 0;
uaccess_end:
	user_access_end();
	return ret;
}

static int copy_iovec_from_user(struct iovec *iov,
		const struct iovec __user *uvec, unsigned long nr_segs)
{
	unsigned long seg;

	if (copy_from_user(iov, uvec, nr_segs * sizeof(*uvec)))
		return -EFAULT;
	for (seg = 0; seg < nr_segs; seg++) {
		if ((ssize_t)iov[seg].iov_len < 0)
			return -EINVAL;
	}

	return 0;
}

struct iovec *iovec_from_user(const struct iovec __user *uvec,
		unsigned long nr_segs, unsigned long fast_segs,
		struct iovec *fast_iov, bool compat)
{
	struct iovec *iov = fast_iov;
	int ret;

	/*
	 * SuS says "The readv() function *may* fail if the iovcnt argument was
	 * less than or equal to 0, or greater than {IOV_MAX}."  Linux has
	 * traditionally returned zero for zero segments, so...
	 */
	if (nr_segs == 0)
		return iov;
	if (nr_segs > UIO_MAXIOV)
		return ERR_PTR(-EINVAL);
	if (nr_segs > fast_segs) {
		iov = kmalloc_array(nr_segs, sizeof(struct iovec), GFP_KERNEL);
		if (!iov)
			return ERR_PTR(-ENOMEM);
	}

	if (compat)
		ret = copy_compat_iovec_from_user(iov, uvec, nr_segs);
	else
		ret = copy_iovec_from_user(iov, uvec, nr_segs);
	if (ret) {
		if (iov != fast_iov)
			kfree(iov);
		return ERR_PTR(ret);
	}

	return iov;
}

ssize_t __import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs, struct iovec **iovp,
		 struct iov_iter *i, bool compat)
{
	ssize_t total_len = 0;
	unsigned long seg;
	struct iovec *iov;

	iov = iovec_from_user(uvec, nr_segs, fast_segs, *iovp, compat);
	if (IS_ERR(iov)) {
		*iovp = NULL;
		return PTR_ERR(iov);
	}

	/*
	 * According to the Single Unix Specification we should return EINVAL if
	 * an element length is < 0 when cast to ssize_t or if the total length
	 * would overflow the ssize_t return value of the system call.
	 *
	 * Linux caps all read/write calls to MAX_RW_COUNT, and avoids the
	 * overflow case.
	 */
	for (seg = 0; seg < nr_segs; seg++) {
		ssize_t len = (ssize_t)iov[seg].iov_len;

		if (!access_ok(iov[seg].iov_base, len)) {
			if (iov != *iovp)
				kfree(iov);
			*iovp = NULL;
			return -EFAULT;
		}

		if (len > MAX_RW_COUNT - total_len) {
			len = MAX_RW_COUNT - total_len;
			iov[seg].iov_len = len;
		}
		total_len += len;
	}

	iov_iter_init(i, type, iov, nr_segs, total_len);
	if (iov == *iovp)
		*iovp = NULL;
	else
		*iovp = iov;
	return total_len;
}

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvec: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in the array pointed to by *@iovp.
 * @iovp: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iovp is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iovp on return. Otherwise, a new
 * array will be allocated and the result placed in *@iovp. This means that
 * the caller may call kfree() on *@iovp regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: Negative error code on error, bytes imported on success
 */
ssize_t import_iovec(int type, const struct iovec __user *uvec,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iovp, struct iov_iter *i)
{
	return __import_iovec(type, uvec, nr_segs, fast_segs, iovp, i,
			      in_compat_syscall());
}
EXPORT_SYMBOL(import_iovec);

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
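
/*
 * Example usage of the import helpers (an illustrative sketch, not part of
 * this file; do_read() and its variables are made up):
 *
 *	static ssize_t do_read(const struct iovec __user *uvec, unsigned nr)
 *	{
 *		struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *		struct iov_iter iter;
 *		ssize_t ret;
 *
 *		ret = import_iovec(READ, uvec, nr, UIO_FASTIOV, &iov, &iter);
 *		if (ret < 0)
 *			return ret;
 *		... fill the buffers with copy_to_iter() and friends ...
 *		kfree(iov);	(safe whether or not iovstack was used)
 *		return ret;
 *	}
 */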