/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg: The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg: The scatterlist
 *
 * Description:
 *   Returns the number of entries in @sg, taking chaining into
 *   account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg: The scatterlist
 * @len: The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 *
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl: First entry in the scatterlist
 * @nents: Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);
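/*
 * Illustrative sketch (kept under "#if 0" so it is not built): how a caller
 * might walk a possibly-chained list with for_each_sg() -- which uses
 * sg_next() internally -- and size a request with sg_nents_for_len().
 * The function name and the 4096-byte figure are made up for the example.
 */
#if 0
static void example_walk_sgl(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;
	int needed;

	/* Visit every entry; chain links are followed transparently. */
	for_each_sg(sgl, sg, nents, i)
		pr_info("entry %u: %u bytes\n", i, sg->length);

	/* How many leading entries cover the first 4096 bytes? */
	needed = sg_nents_for_len(sgl, 4096);
	if (needed < 0)
		pr_warn("list is shorter than 4096 bytes\n");
}
#endif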
/**
 * sg_init_table - Initialize SG table
 * @sgl: The SG table
 * @nents: Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg: SG entry
 * @buf: Virtual address for IO
 * @buflen: IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
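/*
 * Illustrative sketch (not built): wrapping a single kmalloc'ed buffer in a
 * one-entry scatterlist with sg_init_one(). The buffer size and names are
 * hypothetical; the buffer must not live on the stack if it will be DMA'd.
 */
#if 0
static int example_single_entry(void)
{
	struct scatterlist sg;
	void *buf;

	buf = kmalloc(512, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Sets up the entry and marks it as the end of the list. */
	sg_init_one(&sg, buf, 512);
	/* ... pass &sg (nents == 1) to code expecting a scatterlist ... */
	kfree(buf);
	return 0;
}
#endif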
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table: The sg table header to use
 * @max_ents: The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn: Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table: The mapped sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @max_ents: The maximum number of entries the allocator returns per call
 * @first_chunk: first preallocated scatterlist chunk, or %NULL
 * @gfp_mask: GFP allocation mask
 * @alloc_fn: Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (e.g. failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table: The sg table header to use
 * @nents: Number of entries in sg list
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
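/*
 * Illustrative sketch (not built): the usual allocate/populate/free cycle
 * with sg_alloc_table(). Chaining in units of SG_MAX_SINGLE_ALLOC happens
 * behind the caller's back. The page array argument is hypothetical.
 */
#if 0
static int example_alloc_table(struct page **pages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, 4, GFP_KERNEL);
	if (ret)
		return ret;

	/* Point each entry at one full page. */
	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... use the table ... */
	sg_free_table(&table);
	return 0;
}
#endif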
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                               an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at
 *   the start and a size of valid data in a buffer specified by the page
 *   array. The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *                             an array of pages
 * @sgt: The sg table header to use
 * @pages: Pointer to an array of page pointers
 * @n_pages: Number of pages in the pages array
 * @offset: Offset from start of the first page to the start of a buffer
 * @size: Number of valid bytes in the buffer (after offset)
 * @gfp_mask: GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at the start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
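/*
 * Illustrative sketch (not built): building a table straight from a page
 * array, letting contiguous pages collapse into single entries. The device
 * pointer and the dma_map_sg() step (from <linux/dma-mapping.h>) are
 * assumptions for the example, as is the -EIO error choice.
 */
#if 0
static int example_table_from_pages(struct device *dev, struct page **pages,
				    unsigned int n_pages)
{
	struct sg_table sgt;
	int ret;

	/* Whole pages, no leading offset. */
	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0,
					(unsigned long)n_pages * PAGE_SIZE,
					GFP_KERNEL);
	if (ret)
		return ret;

	if (!dma_map_sg(dev, sgt.sgl, sgt.orig_nents, DMA_TO_DEVICE)) {
		sg_free_table(&sgt);
		return -EIO;
	}
	/* ... run the DMA ... */
	dma_unmap_sg(dev, sgt.sgl, sgt.orig_nents, DMA_TO_DEVICE);
	sg_free_table(&sgt);
	return 0;
}
#endif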
#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %llu\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
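/*
 * Illustrative sketch (not built): sgl_alloc() both allocates the entries
 * and backs them with pages, so a single sgl_free() tears everything down.
 * The 64 KiB size is arbitrary.
 */
#if 0
static void example_sgl_alloc(void)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = sgl_alloc(64 * 1024, GFP_KERNEL, &nents);
	if (!sgl)
		return;
	/* ... nents entries backed by order-0 pages are ready for use ... */
	sgl_free(sgl);
}
#endif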
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set @nents correctly to avoid
 *   freeing a page twice.
 * - All pages in a chained scatterlist can be freed at once by setting @nents
 *   to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
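/*
 * Illustrative sketch (not built): stepping page by page with the raw
 * iterator primitives above. Callers normally use the for_each_sg_page()
 * wrapper from <linux/scatterlist.h>, which expands to essentially this
 * pattern.
 */
#if 0
static void example_page_iter(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_page_iter piter;

	__sg_page_iter_start(&piter, sgl, nents, 0);
	while (__sg_page_iter_next(&piter))
		pr_info("pfn %lu\n", page_to_pfn(sg_page_iter_page(&piter)));
}
#endif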
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				     (pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to skip past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been advanced by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped or has not been advanced yet.
 *   Otherwise, preemption disabled if SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset are adjusted by sg_miter_stop().
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
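/*
 * Illustrative sketch (not built): a typical mapping-iterator loop. With
 * SG_MITER_ATOMIC the mappings use kmap_atomic(), so nothing in the loop
 * may sleep. The byte sum computed here is just a stand-in workload.
 */
#if 0
static u32 example_sum_bytes(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	u32 sum = 0;
	size_t i;

	sg_miter_start(&miter, sgl, nents,
		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		const u8 *p = miter.addr;

		for (i = 0; i < miter.length; i++)
			sum += p[i];
	}
	sg_miter_stop(&miter);	/* drops the last kmap_atomic() */
	return sum;
}
#endif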
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: The linear buffer to copy to/from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 * @to_buffer: transfer direction (true == from an sg list to a
 *	       buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
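/*
 * Illustrative sketch (not built): round-tripping data through an sg list
 * with the copy helpers above. Buffer names and sizes are made up; both
 * calls return how many bytes were actually copied.
 */
#if 0
static void example_copy(struct scatterlist *sgl, unsigned int nents)
{
	char src[64] = "payload";
	char dst[64];
	size_t n;

	n = sg_copy_from_buffer(sgl, nents, src, sizeof(src));
	n = sg_copy_to_buffer(sgl, nents, dst, sizeof(dst));
	(void)n;
}
#endif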
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy from
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buf: Where to copy to
 * @buflen: The number of bytes to copy
 * @skip: Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl: The SG list
 * @nents: Number of SG entries
 * @buflen: The number of bytes to zero out
 * @skip: Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);
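/*
 * Illustrative sketch (not built): the @skip variants operate at a byte
 * offset into the list, here reading a 16-byte header at offset 512 and
 * then clearing it in place. The offsets and sizes are arbitrary.
 */
#if 0
static void example_pcopy_skip(struct scatterlist *sgl, unsigned int nents)
{
	char hdr[16];

	sg_pcopy_to_buffer(sgl, nents, hdr, sizeof(hdr), 512);
	sg_zero_buffer(sgl, nents, sizeof(hdr), 512);
}
#endif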