#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;					\
	size_t wanted = n;				\
	__p = i->iov;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	} else {					\
		left = 0;				\
	}						\
	while (unlikely(!left && n)) {			\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		left = (STEP);				\
		__v.iov_len -= left;			\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted - n;					\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;				\
	__p = i->kvec;					\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {			\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);				\
		skip += __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	while (unlikely(n)) {				\
		__p++;					\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))		\
			continue;			\
		__v.iov_base = __p->iov_base;		\
		(void)(STEP);				\
		skip = __v.iov_len;			\
		n -= __v.iov_len;			\
	}						\
	n = wanted;					\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;			\
	__start.bi_size = n;				\
	__start.bi_bvec_done = skip;			\
	__start.bi_idx = 0;				\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)			\
			continue;			\
		(void)(STEP);				\
	}						\
}

#define iterate_all_kinds(i, n, v, I, B, K) {			\
	if (likely(n)) {					\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}						\
	}							\
}

#define iterate_and_advance(i, n, v, I, B, K) {			\
	if (unlikely(i->count < n))				\
		n = i->count;					\
	if (i->count) {						\
		size_t skip = i->iov_offset;			\
		if (unlikely(i->type & ITER_BVEC)) {		\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;			\
			struct bvec_iter __bi;			\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;		\
			skip = __bi.bi_bvec_done;		\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;		\
			struct kvec v;				\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {		\
				kvec++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= kvec - i->kvec;		\
			i->kvec = kvec;				\
		} else {					\
			const struct iovec *iov;		\
			struct iovec v;				\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {		\
				iov++;				\
				skip = 0;			\
			}					\
			i->nr_segs -= iov - i->iov;		\
			i->iov = iov;				\
		}						\
		i->count -= n;					\
		i->iov_offset = skip;				\
	}							\
}
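
/*
 * The iterate_* helpers above dispatch on the iterator flavour (user
 * iovec, bvec or kvec) and evaluate the matching STEP expression once
 * per segment; iterate_and_advance() additionally consumes what it
 * walked by updating ->count, ->iov_offset and the segment pointers.
 * The copy primitives below therefore only have to supply three short
 * STEP bodies each.
 */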

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}
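
/*
 * For a pipe-backed iterator, i->idx and i->iov_offset always describe
 * the point where the next byte would be appended: either just past the
 * end of the data in the last occupied buffer (iov_offset != 0) or the
 * first free slot after it (iov_offset == 0).  sanity() below checks
 * exactly that invariant and complains loudly when a caller breaks it.
 */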

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better. Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);
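
/*
 * Illustrative sketch of a typical iov_iter_init() caller (not taken
 * from this file): the iterator is built directly over the iovec array
 * the caller was handed, e.g.
 *
 *	struct iov_iter iter;
 *
 *	iov_iter_init(&iter, READ, iov, nr_segs, tot_len);
 *	ret = filp->f_op->read_iter(&kiocb, &iter);
 *
 * Note that under set_fs(KERNEL_DS) the same call quietly produces an
 * ITER_KVEC iterator, which is what the segment_eq() check above is for.
 */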

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
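
/*
 * Note on the `(from += v.iov_len) - v.iov_len' construct used in the
 * STEP arguments above and below: it advances the linear-buffer cursor
 * as a side effect while still passing the pre-advance address to the
 * copy routine, so successive segments are copied from/to consecutive
 * chunks of @addr.
 */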

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (pipe->nrbufs) {
		size_t off = i->iov_offset;
		int idx = i->idx;
		int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
		if (off) {
			pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
			idx = next_idx(idx, pipe);
			nrbufs++;
		}
		while (pipe->nrbufs > nrbufs) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	if (unlikely(i->count < size))
		size = i->count;
	if (size) {
		struct pipe_buffer *buf;
		size_t off = i->iov_offset, left = size;
		int idx = i->idx;
		if (off) /* make it relative to the beginning of buffer */
			left += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (left <= buf->len)
				break;
			left -= buf->len;
			idx = next_idx(idx, pipe);
		}
		i->idx = idx;
		i->iov_offset = buf->offset + left;
	}
	i->count -= size;
	/* ... and discard everything past that point */
	pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);
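
/*
 * Illustrative sketch (not from this file) of how the primitives above
 * are typically combined on the buffered write path, along the lines of
 * generic_perform_write():
 *
 *	if (unlikely(iov_iter_fault_in_readable(i, bytes)))
 *		return -EFAULT;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 *	...
 *	iov_iter_advance(i, copied);
 *
 * Faulting the user pages in up front keeps the inatomic copy from
 * failing while the destination page is kmapped and locked.
 */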

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	WARN_ON(pipe->nrbufs == pipe->buffers);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);
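
/*
 * iov_iter_alignment() ORs together every segment's base address and
 * length, so the low bits of the result expose the worst misalignment
 * anywhere in the iterator.  A caller that needs, say, block-size
 * alignment for direct I/O can simply test something like
 *
 *	if (iov_iter_alignment(iter) & (block_size - 1))
 *		... fall back to buffered I/O ...
 *
 * (block_size here is just an illustrative variable, not an API.)
 */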

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}
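
/*
 * iov_iter_get_pages() and iov_iter_get_pages_alloc() below return the
 * number of bytes covered by the pages they pinned, with *start set to
 * the offset of the data within the first page.  The caller ends up
 * owning a reference on every returned page and is expected to drop it
 * with put_page() when done.
 */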

ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);
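
/*
 * The csum_and_copy_* helpers below fold the Internet checksum of the
 * copied data into *csum as they go, using csum_block_add() to account
 * for the offset of each chunk.  They exist mainly for the networking
 * code, which wants the copy and the checksum done in a single pass.
 */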

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
			unsigned nr_segs, unsigned fast_segs,
			struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
					 *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
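
/*
 * Illustrative caller-side sketch of the import_iovec() contract (this
 * mirrors what the readv()/writev() paths do; it is not part of this
 * file):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(READ, uvector, nr_segs, UIO_FASTIOV, &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	... use the iterator ...
 *	kfree(iov);
 *
 * The unconditional kfree() is safe because *iov is set to NULL whenever
 * the on-stack array was used, and kfree(NULL) is a no-op.
 */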