#include <linux/export.h>
#include <linux/bvec.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
	size_t left;	\
	size_t wanted = n;	\
	__p = i->iov;	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {	\
		__v.iov_base = __p->iov_base + skip;	\
		left = (STEP);	\
		__v.iov_len -= left;	\
		skip += __v.iov_len;	\
		n -= __v.iov_len;	\
	} else {	\
		left = 0;	\
	}	\
	while (unlikely(!left && n)) {	\
		__p++;	\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))	\
			continue;	\
		__v.iov_base = __p->iov_base;	\
		left = (STEP);	\
		__v.iov_len -= left;	\
		skip = __v.iov_len;	\
		n -= __v.iov_len;	\
	}	\
	n = wanted - n;	\
}

#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
	size_t wanted = n;	\
	__p = i->kvec;	\
	__v.iov_len = min(n, __p->iov_len - skip);	\
	if (likely(__v.iov_len)) {	\
		__v.iov_base = __p->iov_base + skip;	\
		(void)(STEP);	\
		skip += __v.iov_len;	\
		n -= __v.iov_len;	\
	}	\
	while (unlikely(n)) {	\
		__p++;	\
		__v.iov_len = min(n, __p->iov_len);	\
		if (unlikely(!__v.iov_len))	\
			continue;	\
		__v.iov_base = __p->iov_base;	\
		(void)(STEP);	\
		skip = __v.iov_len;	\
		n -= __v.iov_len;	\
	}	\
	n = wanted;	\
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {	\
	struct bvec_iter __start;	\
	__start.bi_size = n;	\
	__start.bi_bvec_done = skip;	\
	__start.bi_idx = 0;	\
	for_each_bvec(__v, i->bvec, __bi, __start) {	\
		if (!__v.bv_len)	\
			continue;	\
		(void)(STEP);	\
	}	\
}

#define iterate_all_kinds(i, n, v, I, B, K) {	\
	if (likely(n)) {	\
		size_t skip = i->iov_offset;	\
		if (unlikely(i->type & ITER_BVEC)) {	\
			struct bio_vec v;	\
			struct bvec_iter __bi;	\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;	\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
		} else {	\
			const struct iovec *iov;	\
			struct iovec v;	\
			iterate_iovec(i, n, v, iov, skip, (I))	\
		}	\
	}	\
}

#define iterate_and_advance(i, n, v, I, B, K) {	\
	if (unlikely(i->count < n))	\
		n = i->count;	\
	if (i->count) {	\
		size_t skip = i->iov_offset;	\
		if (unlikely(i->type & ITER_BVEC)) {	\
			const struct bio_vec *bvec = i->bvec;	\
			struct bio_vec v;	\
			struct bvec_iter __bi;	\
			iterate_bvec(i, n, v, __bi, skip, (B))	\
			i->bvec = __bvec_iter_bvec(i->bvec, __bi);	\
			i->nr_segs -= i->bvec - bvec;	\
			skip = __bi.bi_bvec_done;	\
		} else if (unlikely(i->type & ITER_KVEC)) {	\
			const struct kvec *kvec;	\
			struct kvec v;	\
			iterate_kvec(i, n, v, kvec, skip, (K))	\
			if (skip == kvec->iov_len) {	\
				kvec++;	\
				skip = 0;	\
			}	\
			i->nr_segs -= kvec - i->kvec;	\
			i->kvec = kvec;	\
		} else {	\
			const struct iovec *iov;	\
			struct iovec v;	\
			iterate_iovec(i, n, v, iov, skip, (I))	\
			if (skip == iov->iov_len) {	\
				iov++;	\
				skip = 0;	\
			}	\
			i->nr_segs -= iov - i->iov;	\
			i->iov = iov;	\
		}	\
		i->count -= n;	\
		i->iov_offset = skip;	\
	}	\
}

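/*
 * Note on the iterate_* helpers above: each STEP expression is evaluated
 * with a local 'v' describing one contiguous segment (an iovec, kvec or
 * bio_vec slice).  For the user-space (iovec) case the expression must
 * evaluate to the number of bytes it failed to copy, so a short copy
 * terminates the walk; for the kernel-space cases the result is discarded
 * because those copies cannot fault.  iterate_and_advance() additionally
 * moves the iterator past whatever was processed, while iterate_all_kinds()
 * leaves it untouched.
 */
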
static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *from;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
		kaddr = kmap_atomic(page);
		from = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_to_user_inatomic(buf, from, copy);
		copy -= left;
		skip += copy;
		from += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_to_user_inatomic(buf, from, copy);
			copy -= left;
			skip = copy;
			from += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = from - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	from = kaddr + offset;
	left = __copy_to_user(buf, from, copy);
	copy -= left;
	skip += copy;
	from += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_to_user(buf, from, copy);
		copy -= left;
		skip = copy;
		from += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

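/*
 * copy_page_from_iter_iovec() below mirrors copy_page_to_iter_iovec() above
 * with the copy direction reversed: on HIGHMEM configs it pre-faults the
 * user buffer and copies under kmap_atomic() (page faults are not allowed
 * while the atomic mapping is held), and only falls back to the sleeping
 * kmap() path when the atomic copy comes up short.
 */
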
static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	size_t skip, copy, left, wanted;
	const struct iovec *iov;
	char __user *buf;
	void *kaddr, *to;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	wanted = bytes;
	iov = i->iov;
	skip = i->iov_offset;
	buf = iov->iov_base + skip;
	copy = min(bytes, iov->iov_len - skip);

	if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
		kaddr = kmap_atomic(page);
		to = kaddr + offset;

		/* first chunk, usually the only one */
		left = __copy_from_user_inatomic(to, buf, copy);
		copy -= left;
		skip += copy;
		to += copy;
		bytes -= copy;

		while (unlikely(!left && bytes)) {
			iov++;
			buf = iov->iov_base;
			copy = min(bytes, iov->iov_len);
			left = __copy_from_user_inatomic(to, buf, copy);
			copy -= left;
			skip = copy;
			to += copy;
			bytes -= copy;
		}
		if (likely(!bytes)) {
			kunmap_atomic(kaddr);
			goto done;
		}
		offset = to - kaddr;
		buf += copy;
		kunmap_atomic(kaddr);
		copy = min(bytes, iov->iov_len - skip);
	}
	/* Too bad - revert to non-atomic kmap */

	kaddr = kmap(page);
	to = kaddr + offset;
	left = __copy_from_user(to, buf, copy);
	copy -= left;
	skip += copy;
	to += copy;
	bytes -= copy;
	while (unlikely(!left && bytes)) {
		iov++;
		buf = iov->iov_base;
		copy = min(bytes, iov->iov_len);
		left = __copy_from_user(to, buf, copy);
		copy -= left;
		skip = copy;
		to += copy;
		bytes -= copy;
	}
	kunmap(page);

done:
	if (skip == iov->iov_len) {
		iov++;
		skip = 0;
	}
	i->count -= wanted - bytes;
	i->nr_segs -= iov - i->iov;
	i->iov = iov;
	i->iov_offset = skip;
	return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	int idx = i->idx;
	int next = pipe->curbuf + pipe->nrbufs;
	if (i->iov_offset) {
		struct pipe_buffer *p;
		if (unlikely(!pipe->nrbufs))
			goto Bad;	// pipe must be non-empty
		if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
			goto Bad;	// must be at the last buffer...

		p = &pipe->bufs[idx];
		if (unlikely(p->offset + p->len != i->iov_offset))
			goto Bad;	// ... at the end of segment
	} else {
		if (idx != (next & (pipe->buffers - 1)))
			goto Bad;	// must be right after the last buffer
	}
	return true;
Bad:
	printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
	printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
			pipe->curbuf, pipe->nrbufs, pipe->buffers);
	for (idx = 0; idx < pipe->buffers; idx++)
		printk(KERN_ERR "[%p %p %d %d]\n",
			pipe->bufs[idx].ops,
			pipe->bufs[idx].page,
			pipe->bufs[idx].offset,
			pipe->bufs[idx].len);
	WARN_ON(1);
	return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
	return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	size_t off;
	int idx;

	if (unlikely(bytes > i->count))
		bytes = i->count;

	if (unlikely(!bytes))
		return 0;

	if (!sanity(i))
		return 0;

	off = i->iov_offset;
	idx = i->idx;
	buf = &pipe->bufs[idx];
	if (off) {
		if (offset == off && buf->page == page) {
			/* merge with the last one */
			buf->len += bytes;
			i->iov_offset += bytes;
			goto out;
		}
		idx = next_idx(idx, pipe);
		buf = &pipe->bufs[idx];
	}
	if (idx == pipe->curbuf && pipe->nrbufs)
		return 0;
	pipe->nrbufs++;
	buf->ops = &page_cache_pipe_buf_ops;
	get_page(buf->page = page);
	buf->offset = offset;
	buf->len = bytes;
	i->iov_offset = offset + bytes;
	i->idx = idx;
out:
	i->count -= bytes;
	return bytes;
}

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes. For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
	size_t skip = i->iov_offset;
	const struct iovec *iov;
	int err;
	struct iovec v;

	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
		iterate_iovec(i, bytes, v, iov, skip, ({
			err = fault_in_pages_readable(v.iov_base, v.iov_len);
			if (unlikely(err))
				return err;
		0;}))
	}
	return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);

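/*
 * Illustrative sketch of the intended calling pattern for
 * iov_iter_fault_in_readable() together with iov_iter_copy_from_user_atomic()
 * further down in this file, loosely modelled on a generic_perform_write()
 * style loop.  Everything except the iov_iter helpers (the page, the
 * prepare/commit steps, 'offset' and 'status') is hypothetical glue shown
 * only for context; this is not part of the file's code.
 */
#if 0
	while (iov_iter_count(i)) {
		size_t bytes = min_t(size_t, iov_iter_count(i),
				     PAGE_SIZE - offset);
		size_t copied;

		/* pre-fault so the atomic copy below is unlikely to fail */
		if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
			status = -EFAULT;
			break;
		}

		/* ... get and prepare a locked pagecache page ... */
		copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
		/* ... commit 'copied' bytes to the page ... */
		iov_iter_advance(i, copied);
		if (unlikely(!copied))
			continue;	/* faulted despite the pre-fault; retry */
		offset = (offset + copied) & (PAGE_SIZE - 1);
	}
#endif
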
void iov_iter_init(struct iov_iter *i, int direction,
			const struct iovec *iov, unsigned long nr_segs,
			size_t count)
{
	/* It will get better.  Eventually... */
	if (segment_eq(get_fs(), KERNEL_DS)) {
		direction |= ITER_KVEC;
		i->type = direction;
		i->kvec = (struct kvec *)iov;
	} else {
		i->type = direction;
		i->iov = iov;
	}
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
	char *from = kmap_atomic(page);
	memcpy(to, from + offset, len);
	kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
	char *to = kmap_atomic(page);
	memcpy(to + offset, from, len);
	kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
	char *addr = kmap_atomic(page);
	memset(addr + offset, 0, len);
	kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
	return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
	size_t off = i->iov_offset;
	int idx = i->idx;
	if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
		idx = next_idx(idx, i->pipe);
		off = 0;
	}
	*idxp = idx;
	*offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
			int *idxp, size_t *offp)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t off;
	int idx;
	ssize_t left;

	if (unlikely(size > i->count))
		size = i->count;
	if (unlikely(!size))
		return 0;

	left = size;
	data_start(i, &idx, &off);
	*idxp = idx;
	*offp = off;
	if (off) {
		left -= PAGE_SIZE - off;
		if (left <= 0) {
			pipe->bufs[idx].len += size;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	while (idx != pipe->curbuf || !pipe->nrbufs) {
		struct page *page = alloc_page(GFP_USER);
		if (!page)
			break;
		pipe->nrbufs++;
		pipe->bufs[idx].ops = &default_pipe_buf_ops;
		pipe->bufs[idx].page = page;
		pipe->bufs[idx].offset = 0;
		if (left <= PAGE_SIZE) {
			pipe->bufs[idx].len = left;
			return size;
		}
		pipe->bufs[idx].len = PAGE_SIZE;
		left -= PAGE_SIZE;
		idx = next_idx(idx, pipe);
	}
	return size - left;
}

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
				struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;
	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
		addr += chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	const char *from = addr;
	if (unlikely(i->type & ITER_PIPE))
		return copy_pipe_to_iter(addr, bytes, i);
	iterate_and_advance(i, bytes, v,
		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
			       v.iov_len),
		memcpy_to_page(v.bv_page, v.bv_offset,
			       (from += v.bv_len) - v.bv_len, v.bv_len),
		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_to_iter);

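/*
 * Illustrative sketch (not part of this file): a read-style helper that
 * copies a kernel buffer out to whatever the iterator describes, using
 * iov_iter_init() and copy_to_iter().  The function name and its arguments
 * are hypothetical; only the iov_iter calls are real.
 */
#if 0
static ssize_t example_read_from_kernel_buf(const void *kbuf, size_t len,
					    const struct iovec *uvec,
					    unsigned long nr_segs)
{
	struct iov_iter iter;
	size_t copied;

	iov_iter_init(&iter, READ, uvec, nr_segs, len);
	copied = copy_to_iter(kbuf, len, &iter);	/* may be short */
	return copied ? copied : -EFAULT;
}
#endif
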
size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
				 v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

bool copy_from_iter_full(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;

	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user((to += v.iov_len) - v.iov_len,
				     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v,
		__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					 v.iov_base, v.iov_len),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		if (__copy_from_user_nocache((to += v.iov_len) - v.iov_len,
					     v.iov_base, v.iov_len))
			return false;
		0;}),
		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)

	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(copy_from_iter_full_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else if (likely(!(i->type & ITER_PIPE)))
		return copy_page_to_iter_iovec(page, offset, bytes, i);
	else
		return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
			 struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	if (i->type & (ITER_BVEC|ITER_KVEC)) {
		void *kaddr = kmap_atomic(page);
		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
		kunmap_atomic(kaddr);
		return wanted;
	} else
		return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

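/*
 * Illustrative sketch: copying a fixed-size header out of an iterator with
 * copy_from_iter_full(), which either consumes exactly sizeof(hdr) bytes or
 * leaves the iterator untouched.  'struct example_hdr' and the surrounding
 * code are hypothetical; only the iov_iter call is real.
 */
#if 0
	struct example_hdr hdr;

	if (!copy_from_iter_full(&hdr, sizeof(hdr), iter))
		return -EFAULT;	/* short copy or not enough data queued */
	/* the iterator now points at the payload following the header */
#endif
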
static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
	struct pipe_inode_info *pipe = i->pipe;
	size_t n, off;
	int idx;

	if (!sanity(i))
		return 0;

	bytes = n = push_pipe(i, bytes, &idx, &off);
	if (unlikely(!n))
		return 0;

	for ( ; n; idx = next_idx(idx, pipe), off = 0) {
		size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
		memzero_page(pipe->bufs[idx].page, off, chunk);
		i->idx = idx;
		i->iov_offset = off + chunk;
		n -= chunk;
	}
	i->count -= bytes;
	return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return pipe_zero(bytes, i);
	iterate_and_advance(i, bytes, v,
		__clear_user(v.iov_base, v.iov_len),
		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
		memset(v.iov_base, 0, v.iov_len)
	)

	return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
		struct iov_iter *i, unsigned long offset, size_t bytes)
{
	char *kaddr = kmap_atomic(page), *p = kaddr + offset;
	if (unlikely(i->type & ITER_PIPE)) {
		kunmap_atomic(kaddr);
		WARN_ON(1);
		return 0;
	}
	iterate_all_kinds(i, bytes, v,
		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
					  v.iov_base, v.iov_len),
		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
				 v.bv_offset, v.bv_len),
		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
	)
	kunmap_atomic(kaddr);
	return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static void pipe_advance(struct iov_iter *i, size_t size)
{
	struct pipe_inode_info *pipe = i->pipe;
	struct pipe_buffer *buf;
	int idx = i->idx;
	size_t off = i->iov_offset, orig_sz;

	if (unlikely(i->count < size))
		size = i->count;
	orig_sz = size;

	if (size) {
		if (off) /* make it relative to the beginning of buffer */
			size += off - pipe->bufs[idx].offset;
		while (1) {
			buf = &pipe->bufs[idx];
			if (size <= buf->len)
				break;
			size -= buf->len;
			idx = next_idx(idx, pipe);
		}
		buf->len = size;
		i->idx = idx;
		off = i->iov_offset = buf->offset + size;
	}
	if (off)
		idx = next_idx(idx, pipe);
	if (pipe->nrbufs) {
		int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		/* [curbuf,unused) is in use.  Free [idx,unused) */
		while (idx != unused) {
			pipe_buf_release(pipe, &pipe->bufs[idx]);
			idx = next_idx(idx, pipe);
			pipe->nrbufs--;
		}
	}
	i->count -= orig_sz;
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
	if (unlikely(i->type & ITER_PIPE)) {
		pipe_advance(i, size);
		return;
	}
	iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

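/*
 * Illustrative sketch: a read path that has only 'nr' bytes of real data
 * left before EOF can satisfy the rest of the request with zeroes via
 * iov_iter_zero().  'kbuf' and 'nr' are hypothetical; the iov_iter calls
 * are real.
 */
#if 0
	size_t copied = copy_to_iter(kbuf, nr, iter);

	if (copied == nr && iov_iter_count(iter))
		copied += iov_iter_zero(iov_iter_count(iter), iter);
#endif
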
/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
	if (unlikely(i->type & ITER_PIPE))
		return i->count;	// it is a silly place, anyway
	if (i->nr_segs == 1)
		return i->count;
	else if (i->type & ITER_BVEC)
		return min(i->count, i->bvec->bv_len - i->iov_offset);
	else
		return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
			const struct kvec *kvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_KVEC));
	i->type = direction;
	i->kvec = kvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
			const struct bio_vec *bvec, unsigned long nr_segs,
			size_t count)
{
	BUG_ON(!(direction & ITER_BVEC));
	i->type = direction;
	i->bvec = bvec;
	i->nr_segs = nr_segs;
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
			struct pipe_inode_info *pipe,
			size_t count)
{
	BUG_ON(direction != ITER_PIPE);
	i->type = direction;
	i->pipe = pipe;
	i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
	i->iov_offset = 0;
	i->count = count;
}
EXPORT_SYMBOL(iov_iter_pipe);

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
			return size | i->iov_offset;
		return size;
	}
	iterate_all_kinds(i, size, v,
		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
		res |= v.bv_offset | v.bv_len,
		res |= (unsigned long)v.iov_base | v.iov_len
	)
	return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

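/*
 * Illustrative sketch: wrapping a kernel buffer in an ITER_KVEC iterator and
 * using iov_iter_alignment() to decide whether it is suitable for a
 * block-size-aligned (direct I/O style) transfer.  'kbuf', 'len' and
 * 'blocksize' are hypothetical; the iov_iter calls are real.  Note that
 * iov_iter_kvec() requires the ITER_KVEC bit in its direction argument.
 */
#if 0
	struct kvec kv = { .iov_base = kbuf, .iov_len = len };
	struct iov_iter iter;

	iov_iter_kvec(&iter, WRITE | ITER_KVEC, &kv, 1, len);
	if (iov_iter_alignment(&iter) & (blocksize - 1))
		return -EINVAL;	/* fall back to a buffered path instead */
#endif
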
unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
	unsigned long res = 0;
	size_t size = i->count;

	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return ~0U;
	}

	iterate_all_kinds(i, size, v,
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0), 0),
		(res |= (!res ? 0 : (unsigned long)v.bv_offset) |
			(size != v.bv_len ? size : 0)),
		(res |= (!res ? 0 : (unsigned long)v.iov_base) |
			(size != v.iov_len ? size : 0))
		);
	return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
				size_t maxsize,
				struct page **pages,
				int idx,
				size_t *start)
{
	struct pipe_inode_info *pipe = i->pipe;
	ssize_t n = push_pipe(i, maxsize, &idx, start);
	if (!n)
		return -EFAULT;

	maxsize = n;
	n += *start;
	while (n > 0) {
		get_page(*pages++ = pipe->bufs[idx].page);
		idx = next_idx(idx, pipe);
		n -= PAGE_SIZE;
	}

	return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	unsigned npages;
	size_t capacity;
	int idx;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	capacity = min(npages, maxpages) * PAGE_SIZE - *start;

	return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

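/*
 * iov_iter_get_pages() and iov_iter_get_pages_alloc() below return the
 * number of bytes covered by the pages they grabbed (which may be less than
 * maxsize), with *start set to the offset of the data within the first
 * page; 0 means an empty request and a negative value is an error.  For
 * ITER_KVEC there is no backing page to hand out, so they fail with
 * -EFAULT.  The caller owns a reference on every returned page and must
 * drop it with put_page() once the I/O is done.
 */
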
ssize_t iov_iter_get_pages(struct iov_iter *i,
		   struct page **pages, size_t maxsize, unsigned maxpages,
		   size_t *start)
{
	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages(i, pages, maxsize, maxpages, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		if (len > maxpages * PAGE_SIZE)
			len = maxpages * PAGE_SIZE;
		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
		if (unlikely(res < 0))
			return res;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		get_page(*pages = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);

static struct page **get_pages_array(size_t n)
{
	struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
	if (!p)
		p = vmalloc(n * sizeof(struct page *));
	return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;
	size_t n;
	int idx;
	int npages;

	if (!maxsize)
		return 0;

	if (!sanity(i))
		return -EFAULT;

	data_start(i, &idx, start);
	/* some of this one + all after this one */
	npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
	n = npages * PAGE_SIZE - *start;
	if (maxsize > n)
		maxsize = n;
	else
		npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
	p = get_pages_array(npages);
	if (!p)
		return -ENOMEM;
	n = __pipe_get_pages(i, maxsize, p, idx, start);
	if (n > 0)
		*pages = p;
	else
		kvfree(p);
	return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
		   struct page ***pages, size_t maxsize,
		   size_t *start)
{
	struct page **p;

	if (maxsize > i->count)
		maxsize = i->count;

	if (unlikely(i->type & ITER_PIPE))
		return pipe_get_pages_alloc(i, pages, maxsize, start);
	iterate_all_kinds(i, maxsize, v, ({
		unsigned long addr = (unsigned long)v.iov_base;
		size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
		int n;
		int res;

		addr &= ~(PAGE_SIZE - 1);
		n = DIV_ROUND_UP(len, PAGE_SIZE);
		p = get_pages_array(n);
		if (!p)
			return -ENOMEM;
		res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
		if (unlikely(res < 0)) {
			kvfree(p);
			return res;
		}
		*pages = p;
		return (res == n ? len : res * PAGE_SIZE) - *start;
	0;}),({
		/* can't be more than PAGE_SIZE */
		*start = v.bv_offset;
		*pages = p = get_pages_array(1);
		if (!p)
			return -ENOMEM;
		get_page(*p = v.bv_page);
		return v.bv_len;
	}),({
		return -EFAULT;
	})
	)
	return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

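/*
 * Illustrative sketch: pinning the pages behind an iterator for a direct-I/O
 * style transfer with iov_iter_get_pages_alloc(), then releasing them.  The
 * actual I/O submission and its error handling are omitted; the iov_iter,
 * put_page() and kvfree() calls are real.
 */
#if 0
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	int j, npages;

	bytes = iov_iter_get_pages_alloc(iter, &pages, maxsize, &offset);
	if (bytes <= 0)
		return bytes;
	npages = DIV_ROUND_UP(offset + bytes, PAGE_SIZE);

	/* ... submit I/O against pages[0..npages-1], starting at 'offset' ... */

	for (j = 0; j < npages; j++)
		put_page(pages[j]);
	kvfree(pages);
	iov_iter_advance(iter, bytes);
#endif
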
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
			       struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
				  struct iov_iter *i)
{
	char *to = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);
		return false;
	}
	if (unlikely(i->count < bytes))
		return false;
	iterate_all_kinds(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_from_user(v.iov_base,
					       (to += v.iov_len) - v.iov_len,
					       v.iov_len, 0, &err);
		if (err)
			return false;
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
		0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck(p + v.bv_offset,
						 (to += v.bv_len) - v.bv_len,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck(v.iov_base,
						 (to += v.iov_len) - v.iov_len,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	iov_iter_advance(i, bytes);
	return true;
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

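/*
 * Illustrative sketch: a sendmsg()-style path folding the copy and the
 * checksum into one pass with csum_and_copy_from_iter_full().  'skb_data'
 * and 'len' are hypothetical; the iov_iter and checksum calls are real.
 */
#if 0
	__wsum csum = 0;

	if (!csum_and_copy_from_iter_full(skb_data, len, &csum, iter))
		return -EFAULT;
	/* 'csum' now covers the 'len' bytes just copied */
#endif
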
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
			     struct iov_iter *i)
{
	const char *from = addr;
	__wsum sum, next;
	size_t off = 0;
	sum = *csum;
	if (unlikely(i->type & ITER_PIPE)) {
		WARN_ON(1);	/* for now */
		return 0;
	}
	iterate_and_advance(i, bytes, v, ({
		int err = 0;
		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
					     v.iov_base,
					     v.iov_len, 0, &err);
		if (!err) {
			sum = csum_block_add(sum, next, off);
			off += v.iov_len;
		}
		err ? v.iov_len : 0;
	}), ({
		char *p = kmap_atomic(v.bv_page);
		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
						 p + v.bv_offset,
						 v.bv_len, 0);
		kunmap_atomic(p);
		sum = csum_block_add(sum, next, off);
		off += v.bv_len;
	}),({
		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
						 v.iov_base,
						 v.iov_len, 0);
		sum = csum_block_add(sum, next, off);
		off += v.iov_len;
	})
	)
	*csum = sum;
	return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
	size_t size = i->count;
	int npages = 0;

	if (!size)
		return 0;

	if (unlikely(i->type & ITER_PIPE)) {
		struct pipe_inode_info *pipe = i->pipe;
		size_t off;
		int idx;

		if (!sanity(i))
			return 0;

		data_start(i, &idx, &off);
		/* some of this one + all after this one */
		npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
		if (npages >= maxpages)
			return maxpages;
	} else iterate_all_kinds(i, size, v, ({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	0;}),({
		npages++;
		if (npages >= maxpages)
			return maxpages;
	}),({
		unsigned long p = (unsigned long)v.iov_base;
		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
			- p / PAGE_SIZE;
		if (npages >= maxpages)
			return maxpages;
	})
	)
	return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
	*new = *old;
	if (unlikely(new->type & ITER_PIPE)) {
		WARN_ON(1);
		return NULL;
	}
	if (new->type & ITER_BVEC)
		return new->bvec = kmemdup(new->bvec,
				    new->nr_segs * sizeof(struct bio_vec),
				    flags);
	else
		/* iovec and kvec have identical layout */
		return new->iov = kmemdup(new->iov,
				   new->nr_segs * sizeof(struct iovec),
				   flags);
}
EXPORT_SYMBOL(dup_iter);

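/*
 * Illustrative sketch: using iov_iter_npages() to bound the number of
 * bio_vec slots needed before allocating a bio for the data currently in
 * the iterator.  BIO_MAX_PAGES and bio_alloc() are the usual block-layer
 * names; the surrounding context is hypothetical.
 */
#if 0
	int nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
	struct bio *bio = bio_alloc(GFP_KERNEL, nr_pages);

	if (!bio)
		return -ENOMEM;
	/* ... add pages obtained via iov_iter_get_pages() to the bio ... */
#endif
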
/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
EXPORT_SYMBOL(import_iovec);

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
		 unsigned nr_segs, unsigned fast_segs,
		 struct iovec **iov, struct iov_iter *i)
{
	ssize_t n;
	struct iovec *p;
	n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
				  *iov, &p);
	if (n < 0) {
		if (p != *iov)
			kfree(p);
		*iov = NULL;
		return n;
	}
	iov_iter_init(i, type, p, nr_segs, n);
	*iov = p == *iov ? NULL : p;
	return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
		 struct iovec *iov, struct iov_iter *i)
{
	if (len > MAX_RW_COUNT)
		len = MAX_RW_COUNT;
	if (unlikely(!access_ok(!rw, buf, len)))
		return -EFAULT;

	iov->iov_base = buf;
	iov->iov_len = len;
	iov_iter_init(i, rw, iov, 1, len);
	return 0;
}
EXPORT_SYMBOL(import_single_range);
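
/*
 * Illustrative sketch: the usual calling convention for import_iovec() in a
 * readv()/writev()-style handler, with a small on-stack array that
 * import_iovec() replaces by an allocated one only when @nr_segs does not
 * fit.  do_the_transfer() and the surrounding variables are hypothetical;
 * the import_iovec()/kfree() pattern is real.
 */
#if 0
	struct iovec iovstack[UIO_FASTIOV];
	struct iovec *iov = iovstack;
	struct iov_iter iter;
	ssize_t ret;

	ret = import_iovec(READ, uvector, nr_segs, ARRAY_SIZE(iovstack),
			   &iov, &iter);
	if (ret < 0)
		return ret;

	ret = do_the_transfer(&iter);	/* hypothetical consumer */

	kfree(iov);	/* safe: iov is NULL when the stack array was used */
	return ret;
#endif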