/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 *
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Returns the number of sg entries in @sg, taking chaining into
 *   account.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;

	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl@ pointer passed in need not be the first one,
 *   the important bit is that @nents@ denotes the number of entries that
 *   exist from @sgl@.
 *
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 *
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
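
/*
 * Illustrative sketch, not part of the exported API: the typical pattern
 * for describing a single kmalloc'ed buffer with a one-entry list. The
 * function and parameter names here are hypothetical; @buf must live in
 * the kernel's linear mapping, since sg_set_buf() uses virt_to_page().
 */
static void __maybe_unused sg_init_one_example(void *buf, unsigned int buflen)
{
	struct scatterlist sg;

	/* Initializes the entry, sets the buffer and marks the end. */
	sg_init_one(&sg, buf, buflen);

	/* A one-entry list has no successor. */
	WARN_ON(sg_next(&sg) != NULL);
	WARN_ON(sg_nents(&sg) != 1);
}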

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * As we chain together a list of pages and then a normal
		 * kmalloc (tracked by kmemleak), in order for that last
		 * allocation not to become decoupled (and thus a
		 * false-positive) we need to inform kmemleak of all the
		 * intermediate allocations.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously allocated sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and set up with
 *   __sg_alloc_table(). The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 *
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The allocated sg table header
 *
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
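
/*
 * Illustrative sketch of the usual allocate/use/free lifecycle built on
 * the helpers above. The entry count of 128 is hypothetical; note that
 * sg_alloc_table() (below) already unwinds partial allocations on
 * failure, so the error path only propagates the return value.
 */
static int __maybe_unused sg_table_lifecycle_example(void)
{
	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, 128, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... fill table.sgl via sg_set_page()/sg_set_buf() and use it ... */

	sg_free_table(&table);
	return 0;
}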

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (i.e. failure), the caller must call
 *   __sg_free_table() to clean up any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage. Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
 *
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
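
/*
 * Illustrative sketch: pairing __sg_alloc_table() with a caller-supplied
 * allocator. The allocator below is hypothetical and simply forwards to
 * kmalloc()/kfree(); real users (e.g. SCSI) carve entries out of their
 * own pools. The same @max_ents must be used for allocation and free.
 */
static struct scatterlist *example_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void example_sg_free(struct scatterlist *sg, unsigned int nents)
{
	kfree(sg);
}

static int __maybe_unused sg_custom_alloc_example(struct sg_table *table,
						  unsigned int nents)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       GFP_KERNEL, example_sg_alloc);
	if (ret)
		/* per the Notes above, the caller cleans up on failure */
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, example_sg_free);

	return ret;
}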

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. The
 *   caller may provide an offset into the first page and the size of the
 *   valid data in the buffer described by the page array. The returned sg
 *   table is released by sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
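
/*
 * Illustrative sketch: counting the pages an sg list covers with
 * for_each_sg_page(), the scatterlist.h iterator built on
 * __sg_page_iter_start()/__sg_page_iter_next() above.
 */
static unsigned int __maybe_unused sg_count_pages_example(struct scatterlist *sgl,
							  unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int npages = 0;

	for_each_sg_page(sgl, &piter, nents, 0)
		npages++;

	return npages;
}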

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iteration flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If the mapping iterator @miter has been proceeded by sg_miter_next(),
 *   this stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping. false if end of sg
 *   list is reached.
 */
static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start(). On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC. Preemption must stay disabled
 *   till @miter is stopped. May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping. false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
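
/*
 * Illustrative sketch: zeroing every byte described by an sg list using
 * the mapping iterator. With SG_MITER_ATOMIC the mapping uses
 * kmap_atomic(), so nothing may sleep between sg_miter_next() and
 * sg_miter_stop(); SG_MITER_TO_SG makes sg_miter_stop() flush the dcache
 * where required.
 */
static void __maybe_unused sg_zero_example(struct scatterlist *sgl,
					   unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);

	sg_miter_stop(&miter);
}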

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter. @miter should have been started
 *   using sg_miter_start(). A stopped iteration can be resumed by
 *   calling sg_miter_next() on it. This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set. Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 The buffer to copy from or to
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 * @to_buffer:	 transfer direction (true == from an sg list to a
 *		 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, off_t skip,
			     bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy to
 * @buflen:	 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
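
/*
 * Illustrative sketch: extracting a fixed-size header from the start of
 * an sg list into a stack buffer. The 16-byte header size is
 * hypothetical; a short copy means the list held less data than asked
 * for.
 */
static bool __maybe_unused sg_read_header_example(struct scatterlist *sgl,
						  unsigned int nents)
{
	u8 hdr[16];

	if (sg_copy_to_buffer(sgl, nents, hdr, sizeof(hdr)) != sizeof(hdr))
		return false;

	/* ... parse hdr ... */
	return true;
}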

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy from
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	 The SG list
 * @nents:	 Number of SG entries
 * @buf:	 Where to copy to
 * @buflen:	 The number of bytes to copy
 * @skip:	 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 *
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
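
/*
 * Illustrative sketch: patching a 4-byte field at a byte offset into the
 * data an sg list describes, without mapping pages by hand. The offset
 * and field type are hypothetical.
 */
static bool __maybe_unused sg_patch_field_example(struct scatterlist *sgl,
						  unsigned int nents,
						  __le32 value, off_t skip)
{
	return sg_pcopy_from_buffer(sgl, nents, &value, sizeof(value), skip)
		== sizeof(value);
}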