xref: /openbmc/linux/lib/scatterlist.c (revision 42bc47b3)
1 /*
2  * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
3  *
4  * Scatterlist handling helpers.
5  *
6  * This source code is licensed under the GNU General Public License,
7  * Version 2. See the file COPYING for more details.
8  */
9 #include <linux/export.h>
10 #include <linux/slab.h>
11 #include <linux/scatterlist.h>
12 #include <linux/highmem.h>
13 #include <linux/kmemleak.h>
14 
15 /**
16  * sg_next - return the next scatterlist entry in a list
17  * @sg:		The current sg entry
18  *
19  * Description:
20  *   Usually the next entry will be @sg@ + 1, but if this sg element is part
21  *   of a chained scatterlist, it could jump to the start of a new
22  *   scatterlist array.
23  *
24  **/
25 struct scatterlist *sg_next(struct scatterlist *sg)
26 {
27 #ifdef CONFIG_DEBUG_SG
28 	BUG_ON(sg->sg_magic != SG_MAGIC);
29 #endif
30 	if (sg_is_last(sg))
31 		return NULL;
32 
33 	sg++;
34 	if (unlikely(sg_is_chain(sg)))
35 		sg = sg_chain_ptr(sg);
36 
37 	return sg;
38 }
39 EXPORT_SYMBOL(sg_next);
40 
41 /**
42  * sg_nents - return total count of entries in scatterlist
43  * @sg:		The scatterlist
44  *
45  * Description:
46  * Returns the total number of entries in @sg, taking chaining into
47  * account as well.
48  *
49  **/
50 int sg_nents(struct scatterlist *sg)
51 {
52 	int nents;
53 	for (nents = 0; sg; sg = sg_next(sg))
54 		nents++;
55 	return nents;
56 }
57 EXPORT_SYMBOL(sg_nents);
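
/*
 * Illustrative sketch, not part of the upstream source: walking a possibly
 * chained list by hand with sg_next() yields the same count that sg_nents()
 * returns.  The list "sgl" is assumed to have been built elsewhere.
 *
 *	struct scatterlist *sg;
 *	int n = 0;
 *
 *	for (sg = sgl; sg; sg = sg_next(sg))
 *		n++;
 *	WARN_ON(n != sg_nents(sgl));
 */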
58 
59 /**
60  * sg_nents_for_len - return total count of entries in scatterlist
61  *                    needed to satisfy the supplied length
62  * @sg:		The scatterlist
63  * @len:	The total required length
64  *
65  * Description:
66  * Determines the number of entries in @sg that are required to meet
67  * the supplied length, taking chaining into account as well.
68  *
69  * Returns:
70  *   the number of sg entries needed, negative error on failure
71  *
72  **/
73 int sg_nents_for_len(struct scatterlist *sg, u64 len)
74 {
75 	int nents;
76 	u64 total;
77 
78 	if (!len)
79 		return 0;
80 
81 	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
82 		nents++;
83 		total += sg->length;
84 		if (total >= len)
85 			return nents;
86 	}
87 
88 	return -EINVAL;
89 }
90 EXPORT_SYMBOL(sg_nents_for_len);
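
/*
 * Illustrative sketch, not part of the upstream source: a driver that only
 * hands the first "len" bytes of a list to hardware could size its
 * descriptor ring with sg_nents_for_len().  "sgl", "len" and
 * setup_hw_descriptors() are assumed, hypothetical names.
 *
 *	int nents = sg_nents_for_len(sgl, len);
 *
 *	if (nents < 0)
 *		return nents;
 *	setup_hw_descriptors(sgl, nents);
 */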
91 
92 /**
93  * sg_last - return the last scatterlist entry in a list
94  * @sgl:	First entry in the scatterlist
95  * @nents:	Number of entries in the scatterlist
96  *
97  * Description:
98  *   Should only be used sparingly, since it (currently) scans the entire
99  *   list to find the last entry.
100  *
101  *   Note that the @sgl@ pointer passed in need not be the first one,
102  *   the important bit is that @nents@ denotes the number of entries that
103  *   exist from @sgl@.
104  *
105  **/
106 struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
107 {
108 	struct scatterlist *sg, *ret = NULL;
109 	unsigned int i;
110 
111 	for_each_sg(sgl, sg, nents, i)
112 		ret = sg;
113 
114 #ifdef CONFIG_DEBUG_SG
115 	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
116 	BUG_ON(!sg_is_last(ret));
117 #endif
118 	return ret;
119 }
120 EXPORT_SYMBOL(sg_last);
121 
122 /**
123  * sg_init_table - Initialize SG table
124  * @sgl:	   The SG table
125  * @nents:	   Number of entries in table
126  *
127  * Notes:
128  *   If this is part of a chained sg table, sg_mark_end() should be
129  *   used only on the last table part.
130  *
131  **/
132 void sg_init_table(struct scatterlist *sgl, unsigned int nents)
133 {
134 	memset(sgl, 0, sizeof(*sgl) * nents);
135 	sg_init_marker(sgl, nents);
136 }
137 EXPORT_SYMBOL(sg_init_table);
138 
139 /**
140  * sg_init_one - Initialize a single entry sg list
141  * @sg:		 SG entry
142  * @buf:	 Virtual address for IO
143  * @buflen:	 IO length
144  *
145  **/
146 void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
147 {
148 	sg_init_table(sg, 1);
149 	sg_set_buf(sg, buf, buflen);
150 }
151 EXPORT_SYMBOL(sg_init_one);
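
/*
 * Illustrative sketch, not part of the upstream source: the common pattern of
 * mapping a single kernel buffer for DMA.  "dev", "buf" and "len" are assumed
 * to come from the caller.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE) == 0)
 *		return -ENOMEM;
 */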
152 
153 /*
154  * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
155  * helpers.
156  */
157 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
158 {
159 	if (nents == SG_MAX_SINGLE_ALLOC) {
160 		/*
161 		 * Kmemleak doesn't track page allocations as they are not
162 		 * commonly used (in a raw form) for kernel data structures.
163 		 * As we chain together a list of pages and then a normal
164 		 * kmalloc (tracked by kmemleak), in order for that last
165 		 * allocation not to become decoupled (and thus a
166 		 * false-positive) we need to inform kmemleak of all the
167 		 * intermediate allocations.
168 		 */
169 		void *ptr = (void *) __get_free_page(gfp_mask);
170 		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
171 		return ptr;
172 	} else
173 		return kmalloc_array(nents, sizeof(struct scatterlist),
174 				     gfp_mask);
175 }
176 
177 static void sg_kfree(struct scatterlist *sg, unsigned int nents)
178 {
179 	if (nents == SG_MAX_SINGLE_ALLOC) {
180 		kmemleak_free(sg);
181 		free_page((unsigned long) sg);
182 	} else
183 		kfree(sg);
184 }
185 
186 /**
187  * __sg_free_table - Free a previously allocated sg table
188  * @table:	The sg table header to use
189  * @max_ents:	The maximum number of entries per single scatterlist
190  * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
191  * @free_fn:	Free function
192  *
193  *  Description:
194  *    Free an sg table previously allocated and set up with
195  *    __sg_alloc_table().  The @max_ents value must be identical to
196  *    that previously used with __sg_alloc_table().
197  *
198  **/
199 void __sg_free_table(struct sg_table *table, unsigned int max_ents,
200 		     bool skip_first_chunk, sg_free_fn *free_fn)
201 {
202 	struct scatterlist *sgl, *next;
203 
204 	if (unlikely(!table->sgl))
205 		return;
206 
207 	sgl = table->sgl;
208 	while (table->orig_nents) {
209 		unsigned int alloc_size = table->orig_nents;
210 		unsigned int sg_size;
211 
212 		/*
213 		 * If we have more than max_ents segments left,
214 		 * then assign 'next' to the sg table after the current one.
215 		 * sg_size is then one less than alloc size, since the last
216 		 * element is the chain pointer.
217 		 */
218 		if (alloc_size > max_ents) {
219 			next = sg_chain_ptr(&sgl[max_ents - 1]);
220 			alloc_size = max_ents;
221 			sg_size = alloc_size - 1;
222 		} else {
223 			sg_size = alloc_size;
224 			next = NULL;
225 		}
226 
227 		table->orig_nents -= sg_size;
228 		if (skip_first_chunk)
229 			skip_first_chunk = false;
230 		else
231 			free_fn(sgl, alloc_size);
232 		sgl = next;
233 	}
234 
235 	table->sgl = NULL;
236 }
237 EXPORT_SYMBOL(__sg_free_table);
238 
239 /**
240  * sg_free_table - Free a previously allocated sg table
241  * @table:	The mapped sg table header
242  *
243  **/
244 void sg_free_table(struct sg_table *table)
245 {
246 	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
247 }
248 EXPORT_SYMBOL(sg_free_table);
249 
250 /**
251  * __sg_alloc_table - Allocate and initialize an sg table with given allocator
252  * @table:	The sg table header to use
253  * @nents:	Number of entries in sg list
254  * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first preallocated SGL chunk to use, or %NULL
255  * @gfp_mask:	GFP allocation mask
256  * @alloc_fn:	Allocator to use
257  *
258  * Description:
259  *   This function allocates a @table that is @nents entries long. The allocator is
260  *   defined to return scatterlist chunks of maximum size @max_ents.
261  *   Thus if @nents is bigger than @max_ents, the scatterlists will be
262  *   chained in units of @max_ents.
263  *
264  * Notes:
265  *   If this function returns non-0 (i.e. failure), the caller must call
266  *   __sg_free_table() to clean up any leftover allocations.
267  *
268  **/
269 int __sg_alloc_table(struct sg_table *table, unsigned int nents,
270 		     unsigned int max_ents, struct scatterlist *first_chunk,
271 		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
272 {
273 	struct scatterlist *sg, *prv;
274 	unsigned int left;
275 
276 	memset(table, 0, sizeof(*table));
277 
278 	if (nents == 0)
279 		return -EINVAL;
280 #ifndef CONFIG_ARCH_HAS_SG_CHAIN
281 	if (WARN_ON_ONCE(nents > max_ents))
282 		return -EINVAL;
283 #endif
284 
285 	left = nents;
286 	prv = NULL;
287 	do {
288 		unsigned int sg_size, alloc_size = left;
289 
290 		if (alloc_size > max_ents) {
291 			alloc_size = max_ents;
292 			sg_size = alloc_size - 1;
293 		} else
294 			sg_size = alloc_size;
295 
296 		left -= sg_size;
297 
298 		if (first_chunk) {
299 			sg = first_chunk;
300 			first_chunk = NULL;
301 		} else {
302 			sg = alloc_fn(alloc_size, gfp_mask);
303 		}
304 		if (unlikely(!sg)) {
305 			/*
306 			 * Adjust entry count to reflect that the last
307 			 * entry of the previous table won't be used for
308 			 * linkage.  Without this, sg_kfree() may get
309 			 * confused.
310 			 */
311 			if (prv)
312 				table->nents = ++table->orig_nents;
313 
314  			return -ENOMEM;
315 		}
316 
317 		sg_init_table(sg, alloc_size);
318 		table->nents = table->orig_nents += sg_size;
319 
320 		/*
321 		 * If this is the first mapping, assign the sg table header.
322 		 * If this is not the first mapping, chain previous part.
323 		 */
324 		if (prv)
325 			sg_chain(prv, max_ents, sg);
326 		else
327 			table->sgl = sg;
328 
329 		/*
330 		 * If no more entries after this one, mark the end
331 		 */
332 		if (!left)
333 			sg_mark_end(&sg[sg_size - 1]);
334 
335 		prv = sg;
336 	} while (left);
337 
338 	return 0;
339 }
340 EXPORT_SYMBOL(__sg_alloc_table);
341 
342 /**
343  * sg_alloc_table - Allocate and initialize an sg table
344  * @table:	The sg table header to use
345  * @nents:	Number of entries in sg list
346  * @gfp_mask:	GFP allocation mask
347  *
348  *  Description:
349  *    Allocate and initialize an sg table. If @nents@ is larger than
350  *    SG_MAX_SINGLE_ALLOC a chained sg table will be set up.
351  *
352  **/
353 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
354 {
355 	int ret;
356 
357 	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
358 			       NULL, gfp_mask, sg_kmalloc);
359 	if (unlikely(ret))
360 		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
361 
362 	return ret;
363 }
364 EXPORT_SYMBOL(sg_alloc_table);
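
/*
 * Illustrative sketch, not part of the upstream source: allocating a table,
 * pointing its entries at pages and releasing it again.  "pages" and
 * "n_pages" are assumed; each entry here covers one full page.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = sg_alloc_table(&table, n_pages, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *	...
 *	sg_free_table(&table);
 */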
365 
366 /**
367  * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
368  *			         an array of pages
369  * @sgt:	 The sg table header to use
370  * @pages:	 Pointer to an array of page pointers
371  * @n_pages:	 Number of pages in the pages array
372  * @offset:      Offset from start of the first page to the start of a buffer
373  * @size:        Number of valid bytes in the buffer (after offset)
374  * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
375  * @gfp_mask:	 GFP allocation mask
376  *
377  *  Description:
378  *    Allocate and initialize an sg table from a list of pages. Contiguous
379  *    ranges of the pages are squashed into a single scatterlist node up to the
380  *    maximum size specified in @max_segment. A user may provide an offset at
381  *    the start and a size of valid data in a buffer specified by the page
382  *    array. The returned sg table should be released with sg_free_table().
383  *
384  * Returns:
385  *   0 on success, negative error on failure
386  */
387 int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
388 				unsigned int n_pages, unsigned int offset,
389 				unsigned long size, unsigned int max_segment,
390 				gfp_t gfp_mask)
391 {
392 	unsigned int chunks, cur_page, seg_len, i;
393 	int ret;
394 	struct scatterlist *s;
395 
396 	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
397 		return -EINVAL;
398 
399 	/* compute number of contiguous chunks */
400 	chunks = 1;
401 	seg_len = 0;
402 	for (i = 1; i < n_pages; i++) {
403 		seg_len += PAGE_SIZE;
404 		if (seg_len >= max_segment ||
405 		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
406 			chunks++;
407 			seg_len = 0;
408 		}
409 	}
410 
411 	ret = sg_alloc_table(sgt, chunks, gfp_mask);
412 	if (unlikely(ret))
413 		return ret;
414 
415 	/* merging chunks and putting them into the scatterlist */
416 	cur_page = 0;
417 	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
418 		unsigned int j, chunk_size;
419 
420 		/* look for the end of the current chunk */
421 		seg_len = 0;
422 		for (j = cur_page + 1; j < n_pages; j++) {
423 			seg_len += PAGE_SIZE;
424 			if (seg_len >= max_segment ||
425 			    page_to_pfn(pages[j]) !=
426 			    page_to_pfn(pages[j - 1]) + 1)
427 				break;
428 		}
429 
430 		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
431 		sg_set_page(s, pages[cur_page],
432 			    min_t(unsigned long, size, chunk_size), offset);
433 		size -= chunk_size;
434 		offset = 0;
435 		cur_page = j;
436 	}
437 
438 	return 0;
439 }
440 EXPORT_SYMBOL(__sg_alloc_table_from_pages);
441 
442 /**
443  * sg_alloc_table_from_pages - Allocate and initialize an sg table from
444  *			       an array of pages
445  * @sgt:	 The sg table header to use
446  * @pages:	 Pointer to an array of page pointers
447  * @n_pages:	 Number of pages in the pages array
448  * @offset:      Offset from start of the first page to the start of a buffer
449  * @size:        Number of valid bytes in the buffer (after offset)
450  * @gfp_mask:	 GFP allocation mask
451  *
452  *  Description:
453  *    Allocate and initialize an sg table from a list of pages. Contiguous
454  *    ranges of the pages are squashed into a single scatterlist node. A user
455  *    may provide an offset at the start and a size of valid data in a buffer
456  *    specified by the page array. The returned sg table should be released
457  *    with sg_free_table().
458  *
459  * Returns:
460  *   0 on success, negative error on failure
461  */
462 int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
463 			      unsigned int n_pages, unsigned int offset,
464 			      unsigned long size, gfp_t gfp_mask)
465 {
466 	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
467 					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
468 }
469 EXPORT_SYMBOL(sg_alloc_table_from_pages);
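
/*
 * Illustrative sketch, not part of the upstream source: building a table
 * straight from an array of already pinned pages, e.g. obtained via
 * get_user_pages().  "pages", "n_pages", "offset" and "size" are assumed.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset, size,
 *					GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...
 *	sg_free_table(&sgt);
 */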
470 
471 #ifdef CONFIG_SGL_ALLOC
472 
473 /**
474  * sgl_alloc_order - allocate a scatterlist and its pages
475  * @length: Length in bytes of the scatterlist. Must be at least one
476  * @order: Second argument for alloc_pages()
477  * @chainable: Whether or not to allocate an extra element in the scatterlist
478  *	for scatterlist chaining purposes
479  * @gfp: Memory allocation flags
480  * @nent_p: [out] Number of entries in the scatterlist that have pages
481  *
482  * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
483  */
484 struct scatterlist *sgl_alloc_order(unsigned long long length,
485 				    unsigned int order, bool chainable,
486 				    gfp_t gfp, unsigned int *nent_p)
487 {
488 	struct scatterlist *sgl, *sg;
489 	struct page *page;
490 	unsigned int nent, nalloc;
491 	u32 elem_len;
492 
493 	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
494 	/* Check for integer overflow */
495 	if (length > (nent << (PAGE_SHIFT + order)))
496 		return NULL;
497 	nalloc = nent;
498 	if (chainable) {
499 		/* Check for integer overflow */
500 		if (nalloc + 1 < nalloc)
501 			return NULL;
502 		nalloc++;
503 	}
504 	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
505 			    (gfp & ~GFP_DMA) | __GFP_ZERO);
506 	if (!sgl)
507 		return NULL;
508 
509 	sg_init_table(sgl, nalloc);
510 	sg = sgl;
511 	while (length) {
512 		elem_len = min_t(u64, length, PAGE_SIZE << order);
513 		page = alloc_pages(gfp, order);
514 		if (!page) {
515 			sgl_free(sgl);
516 			return NULL;
517 		}
518 
519 		sg_set_page(sg, page, elem_len, 0);
520 		length -= elem_len;
521 		sg = sg_next(sg);
522 	}
523 	WARN_ONCE(length, "length = %llu\n", length);
524 	if (nent_p)
525 		*nent_p = nent;
526 	return sgl;
527 }
528 EXPORT_SYMBOL(sgl_alloc_order);
529 
530 /**
531  * sgl_alloc - allocate a scatterlist and its pages
532  * @length: Length in bytes of the scatterlist
533  * @gfp: Memory allocation flags
534  * @nent_p: [out] Number of entries in the scatterlist
535  *
536  * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
537  */
538 struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
539 			      unsigned int *nent_p)
540 {
541 	return sgl_alloc_order(length, 0, false, gfp, nent_p);
542 }
543 EXPORT_SYMBOL(sgl_alloc);
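
/*
 * Illustrative sketch, not part of the upstream source: allocating backing
 * pages together with the scatterlist and freeing both in one go.  "len" is
 * assumed.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(len, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...
 *	sgl_free(sgl);
 */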
544 
545 /**
546  * sgl_free_n_order - free a scatterlist and its pages
547  * @sgl: Scatterlist with one or more elements
548  * @nents: Maximum number of elements to free
549  * @order: Second argument for __free_pages()
550  *
551  * Notes:
552  * - If several scatterlists have been chained and each chain element is
553  *   freed separately then it's essential to set @nents correctly to avoid
554  *   freeing a page twice.
555  * - All pages in a chained scatterlist can be freed at once by setting @nents
556  *   to a high number.
557  */
558 void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
559 {
560 	struct scatterlist *sg;
561 	struct page *page;
562 	int i;
563 
564 	for_each_sg(sgl, sg, nents, i) {
565 		if (!sg)
566 			break;
567 		page = sg_page(sg);
568 		if (page)
569 			__free_pages(page, order);
570 	}
571 	kfree(sgl);
572 }
573 EXPORT_SYMBOL(sgl_free_n_order);
574 
575 /**
576  * sgl_free_order - free a scatterlist and its pages
577  * @sgl: Scatterlist with one or more elements
578  * @order: Second argument for __free_pages()
579  */
580 void sgl_free_order(struct scatterlist *sgl, int order)
581 {
582 	sgl_free_n_order(sgl, INT_MAX, order);
583 }
584 EXPORT_SYMBOL(sgl_free_order);
585 
586 /**
587  * sgl_free - free a scatterlist and its pages
588  * @sgl: Scatterlist with one or more elements
589  */
590 void sgl_free(struct scatterlist *sgl)
591 {
592 	sgl_free_order(sgl, 0);
593 }
594 EXPORT_SYMBOL(sgl_free);
595 
596 #endif /* CONFIG_SGL_ALLOC */
597 
598 void __sg_page_iter_start(struct sg_page_iter *piter,
599 			  struct scatterlist *sglist, unsigned int nents,
600 			  unsigned long pgoffset)
601 {
602 	piter->__pg_advance = 0;
603 	piter->__nents = nents;
604 
605 	piter->sg = sglist;
606 	piter->sg_pgoffset = pgoffset;
607 }
608 EXPORT_SYMBOL(__sg_page_iter_start);
609 
610 static int sg_page_count(struct scatterlist *sg)
611 {
612 	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
613 }
614 
615 bool __sg_page_iter_next(struct sg_page_iter *piter)
616 {
617 	if (!piter->__nents || !piter->sg)
618 		return false;
619 
620 	piter->sg_pgoffset += piter->__pg_advance;
621 	piter->__pg_advance = 1;
622 
623 	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
624 		piter->sg_pgoffset -= sg_page_count(piter->sg);
625 		piter->sg = sg_next(piter->sg);
626 		if (!--piter->__nents || !piter->sg)
627 			return false;
628 	}
629 
630 	return true;
631 }
632 EXPORT_SYMBOL(__sg_page_iter_next);
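
/*
 * Illustrative sketch, not part of the upstream source: drivers normally use
 * these helpers through the for_each_sg_page() macro rather than calling them
 * directly.  "sgl" and "nents" are assumed.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgl, &piter, nents, 0) {
 *		struct page *page = sg_page_iter_page(&piter);
 *
 *		...
 *	}
 */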
633 
634 /**
635  * sg_miter_start - start mapping iteration over a sg list
636  * @miter: sg mapping iter to be started
637  * @sgl: sg list to iterate over
638  * @nents: number of sg entries
639  *
640  * Description:
641  *   Starts mapping iterator @miter.
642  *
643  * Context:
644  *   Don't care.
645  */
646 void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
647 		    unsigned int nents, unsigned int flags)
648 {
649 	memset(miter, 0, sizeof(struct sg_mapping_iter));
650 
651 	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
652 	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
653 	miter->__flags = flags;
654 }
655 EXPORT_SYMBOL(sg_miter_start);
656 
657 static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
658 {
659 	if (!miter->__remaining) {
660 		struct scatterlist *sg;
661 		unsigned long pgoffset;
662 
663 		if (!__sg_page_iter_next(&miter->piter))
664 			return false;
665 
666 		sg = miter->piter.sg;
667 		pgoffset = miter->piter.sg_pgoffset;
668 
669 		miter->__offset = pgoffset ? 0 : sg->offset;
670 		miter->__remaining = sg->offset + sg->length -
671 				(pgoffset << PAGE_SHIFT) - miter->__offset;
672 		miter->__remaining = min_t(unsigned long, miter->__remaining,
673 					   PAGE_SIZE - miter->__offset);
674 	}
675 
676 	return true;
677 }
678 
679 /**
680  * sg_miter_skip - reposition mapping iterator
681  * @miter: sg mapping iter to be skipped
682  * @offset: number of bytes to advance past the current location
683  *
684  * Description:
685  *   Sets the offset of @miter to its current location plus @offset bytes.
686  *   If mapping iterator @miter has already been advanced by sg_miter_next(),
687  *   this stops @miter first.
688  *
689  * Context:
690  *   Don't care if @miter is stopped, or has not been started yet.
691  *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
692  *
693  * Returns:
694  *   true if @miter contains the valid mapping.  false if end of sg
695  *   list is reached.
696  */
697 bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
698 {
699 	sg_miter_stop(miter);
700 
701 	while (offset) {
702 		off_t consumed;
703 
704 		if (!sg_miter_get_next_page(miter))
705 			return false;
706 
707 		consumed = min_t(off_t, offset, miter->__remaining);
708 		miter->__offset += consumed;
709 		miter->__remaining -= consumed;
710 		offset -= consumed;
711 	}
712 
713 	return true;
714 }
715 EXPORT_SYMBOL(sg_miter_skip);
716 
717 /**
717  * sg_miter_next - advance mapping iterator to the next mapping
718  * @miter: sg mapping iter to advance
720  *
721  * Description:
721  *   Advances @miter to the next mapping.  @miter should have been started
723  *   using sg_miter_start().  On successful return, @miter->page,
724  *   @miter->addr and @miter->length point to the current mapping.
725  *
726  * Context:
727  *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
728  *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
729  *
730  * Returns:
731  *   true if @miter contains the next mapping.  false if end of sg
732  *   list is reached.
733  */
734 bool sg_miter_next(struct sg_mapping_iter *miter)
735 {
736 	sg_miter_stop(miter);
737 
738 	/*
739 	 * Get to the next page if necessary.
740 	 * __remaining and __offset are adjusted by sg_miter_stop
741 	 */
742 	if (!sg_miter_get_next_page(miter))
743 		return false;
744 
745 	miter->page = sg_page_iter_page(&miter->piter);
746 	miter->consumed = miter->length = miter->__remaining;
747 
748 	if (miter->__flags & SG_MITER_ATOMIC)
749 		miter->addr = kmap_atomic(miter->page) + miter->__offset;
750 	else
751 		miter->addr = kmap(miter->page) + miter->__offset;
752 
753 	return true;
754 }
755 EXPORT_SYMBOL(sg_miter_next);
756 
757 /**
758  * sg_miter_stop - stop mapping iteration
759  * @miter: sg mapping iter to be stopped
760  *
761  * Description:
762  *   Stops mapping iterator @miter.  @miter should have been started
763  *   using sg_miter_start().  A stopped iteration can be resumed by
764  *   calling sg_miter_next() on it.  This is useful when resources (kmap)
765  *   need to be released during iteration.
766  *
767  * Context:
768  *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
769  *   otherwise.
770  */
771 void sg_miter_stop(struct sg_mapping_iter *miter)
772 {
773 	WARN_ON(miter->consumed > miter->length);
774 
775 	/* drop resources from the last iteration */
776 	if (miter->addr) {
777 		miter->__offset += miter->consumed;
778 		miter->__remaining -= miter->consumed;
779 
780 		if ((miter->__flags & SG_MITER_TO_SG) &&
781 		    !PageSlab(miter->page))
782 			flush_kernel_dcache_page(miter->page);
783 
784 		if (miter->__flags & SG_MITER_ATOMIC) {
785 			WARN_ON_ONCE(preemptible());
786 			kunmap_atomic(miter->addr);
787 		} else
788 			kunmap(miter->page);
789 
790 		miter->page = NULL;
791 		miter->addr = NULL;
792 		miter->length = 0;
793 		miter->consumed = 0;
794 	}
795 }
796 EXPORT_SYMBOL(sg_miter_stop);
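
/*
 * Illustrative sketch, not part of the upstream source: the usual
 * start/next/stop pattern for reading the bytes of an sg list, here feeding
 * them into crc32().  "sgl" and "nents" are assumed.
 *
 *	struct sg_mapping_iter miter;
 *	u32 crc = 0;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter))
 *		crc = crc32(crc, miter.addr, miter.length);
 *	sg_miter_stop(&miter);
 */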
797 
798 /**
799  * sg_copy_buffer - Copy data between a linear buffer and an SG list
800  * @sgl:		 The SG list
801  * @nents:		 Number of SG entries
802  * @buf:		 Where to copy from
803  * @buflen:		 The number of bytes to copy
804  * @skip:		 Number of bytes to skip before copying
805  * @to_buffer:		 transfer direction (true == from an sg list to a
806  *			 buffer, false == from a buffer to an sg list)
807  *
808  * Returns the number of copied bytes.
809  *
810  **/
811 size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
812 		      size_t buflen, off_t skip, bool to_buffer)
813 {
814 	unsigned int offset = 0;
815 	struct sg_mapping_iter miter;
816 	unsigned int sg_flags = SG_MITER_ATOMIC;
817 
818 	if (to_buffer)
819 		sg_flags |= SG_MITER_FROM_SG;
820 	else
821 		sg_flags |= SG_MITER_TO_SG;
822 
823 	sg_miter_start(&miter, sgl, nents, sg_flags);
824 
825 	if (!sg_miter_skip(&miter, skip))
826 		return 0;
827 
828 	while ((offset < buflen) && sg_miter_next(&miter)) {
829 		unsigned int len;
830 
831 		len = min(miter.length, buflen - offset);
832 
833 		if (to_buffer)
834 			memcpy(buf + offset, miter.addr, len);
835 		else
836 			memcpy(miter.addr, buf + offset, len);
837 
838 		offset += len;
839 	}
840 
841 	sg_miter_stop(&miter);
842 
843 	return offset;
844 }
845 EXPORT_SYMBOL(sg_copy_buffer);
846 
847 /**
848  * sg_copy_from_buffer - Copy from a linear buffer to an SG list
849  * @sgl:		 The SG list
850  * @nents:		 Number of SG entries
851  * @buf:		 Where to copy from
852  * @buflen:		 The number of bytes to copy
853  *
854  * Returns the number of copied bytes.
855  *
856  **/
857 size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
858 			   const void *buf, size_t buflen)
859 {
860 	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
861 }
862 EXPORT_SYMBOL(sg_copy_from_buffer);
863 
864 /**
865  * sg_copy_to_buffer - Copy from an SG list to a linear buffer
866  * @sgl:		 The SG list
867  * @nents:		 Number of SG entries
868  * @buf:		 Where to copy to
869  * @buflen:		 The number of bytes to copy
870  *
871  * Returns the number of copied bytes.
872  *
873  **/
874 size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
875 			 void *buf, size_t buflen)
876 {
877 	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
878 }
879 EXPORT_SYMBOL(sg_copy_to_buffer);
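
/*
 * Illustrative sketch, not part of the upstream source: pushing a command
 * from a linear bounce buffer into an sg list and pulling the response back
 * out.  "sgl", "nents", "cmd", "cmd_len", "resp" and "resp_len" are assumed.
 *
 *	sg_copy_from_buffer(sgl, nents, cmd, cmd_len);
 *	...
 *	sg_copy_to_buffer(sgl, nents, resp, resp_len);
 */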
880 
881 /**
882  * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
883  * @sgl:		 The SG list
884  * @nents:		 Number of SG entries
885  * @buf:		 Where to copy from
886  * @buflen:		 The number of bytes to copy
887  * @skip:		 Number of bytes to skip before copying
888  *
889  * Returns the number of copied bytes.
890  *
891  **/
892 size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
893 			    const void *buf, size_t buflen, off_t skip)
894 {
895 	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
896 }
897 EXPORT_SYMBOL(sg_pcopy_from_buffer);
898 
899 /**
900  * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
901  * @sgl:		 The SG list
902  * @nents:		 Number of SG entries
903  * @buf:		 Where to copy to
904  * @buflen:		 The number of bytes to copy
905  * @skip:		 Number of bytes to skip before copying
906  *
907  * Returns the number of copied bytes.
908  *
909  **/
910 size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
911 			  void *buf, size_t buflen, off_t skip)
912 {
913 	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
914 }
915 EXPORT_SYMBOL(sg_pcopy_to_buffer);
916 
917 /**
917  * sg_zero_buffer - Zero out a part of an SG list
919  * @sgl:		 The SG list
920  * @nents:		 Number of SG entries
921  * @buflen:		 The number of bytes to zero out
922  * @skip:		 Number of bytes to skip before zeroing
923  *
924  * Returns the number of bytes zeroed.
925  **/
926 size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
927 		       size_t buflen, off_t skip)
928 {
929 	unsigned int offset = 0;
930 	struct sg_mapping_iter miter;
931 	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;
932 
933 	sg_miter_start(&miter, sgl, nents, sg_flags);
934 
935 	if (!sg_miter_skip(&miter, skip))
936 		return 0;
937 
938 	while (offset < buflen && sg_miter_next(&miter)) {
939 		unsigned int len;
940 
941 		len = min(miter.length, buflen - offset);
942 		memset(miter.addr, 0, len);
943 
944 		offset += len;
945 	}
946 
947 	sg_miter_stop(&miter);
948 	return offset;
949 }
950 EXPORT_SYMBOL(sg_zero_buffer);
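
/*
 * Illustrative sketch, not part of the upstream source: clearing the tail of
 * a mapped buffer past "valid_len" so stale data is not handed out.  "sgl",
 * "nents", "valid_len" and "total_len" are assumed.
 *
 *	sg_zero_buffer(sgl, nents, total_len - valid_len, valid_len);
 */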
951