xref: /openbmc/linux/include/crypto/scatterwalk.h (revision f1575595)
1 /*
2  * Cryptographic scatter and gather helpers.
3  *
4  * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
5  * Copyright (c) 2002 Adam J. Richter <adam@yggdrasil.com>
6  * Copyright (c) 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
7  * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms of the GNU General Public License as published by the Free
11  * Software Foundation; either version 2 of the License, or (at your option)
12  * any later version.
13  *
14  */
15 
16 #ifndef _CRYPTO_SCATTERWALK_H
17 #define _CRYPTO_SCATTERWALK_H
18 
19 #include <crypto/algapi.h>
20 #include <linux/highmem.h>
21 #include <linux/kernel.h>
22 #include <linux/scatterlist.h>
23 
/*
 * Chain @sg onto @head at entry @num, or terminate @head there when
 * no follow-on scatterlist is supplied.
 */
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
					    struct scatterlist *sg, int num)
{
	if (!sg) {
		sg_mark_end(head);
		return;
	}

	sg_chain(head, num, sg);
}
32 
33 static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
34 {
35 	unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
36 	unsigned int len_this_page = offset_in_page(~walk->offset) + 1;
37 	return len_this_page > len ? len : len_this_page;
38 }
39 
/* Clamp a requested byte count to what the current page/entry allows. */
static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk,
					     unsigned int nbytes)
{
	unsigned int limit = scatterwalk_pagelen(walk);

	return nbytes < limit ? nbytes : limit;
}
46 
47 static inline void scatterwalk_advance(struct scatter_walk *walk,
48 				       unsigned int nbytes)
49 {
50 	walk->offset += nbytes;
51 }
52 
53 static inline unsigned int scatterwalk_aligned(struct scatter_walk *walk,
54 					       unsigned int alignmask)
55 {
56 	return !(walk->offset & alignmask);
57 }
58 
59 static inline struct page *scatterwalk_page(struct scatter_walk *walk)
60 {
61 	return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
62 }
63 
/* Undo a scatterwalk_map(); @vaddr is the pointer that map returned. */
static inline void scatterwalk_unmap(void *vaddr)
{
	kunmap_atomic(vaddr);
}
68 
69 static inline void scatterwalk_start(struct scatter_walk *walk,
70 				     struct scatterlist *sg)
71 {
72 	walk->sg = sg;
73 	walk->offset = sg->offset;
74 }
75 
76 static inline void *scatterwalk_map(struct scatter_walk *walk)
77 {
78 	return kmap_atomic(scatterwalk_page(walk)) +
79 	       offset_in_page(walk->offset);
80 }
81 
82 static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
83 					unsigned int more)
84 {
85 	if (out) {
86 		struct page *page;
87 
88 		page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT);
89 		/* Test ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE first as
90 		 * PageSlab cannot be optimised away per se due to
91 		 * use of volatile pointer.
92 		 */
93 		if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE && !PageSlab(page))
94 			flush_dcache_page(page);
95 	}
96 
97 	if (more && walk->offset >= walk->sg->offset + walk->sg->length)
98 		scatterwalk_start(walk, sg_next(walk->sg));
99 }
100 
101 static inline void scatterwalk_done(struct scatter_walk *walk, int out,
102 				    int more)
103 {
104 	if (!more || walk->offset >= walk->sg->offset + walk->sg->length ||
105 	    !(walk->offset & (PAGE_SIZE - 1)))
106 		scatterwalk_pagedone(walk, out, more);
107 }
108 
109 void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
110 			    size_t nbytes, int out);
111 void *scatterwalk_map(struct scatter_walk *walk);
112 
113 void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
114 			      unsigned int start, unsigned int nbytes, int out);
115 
116 struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
117 				     struct scatterlist *src,
118 				     unsigned int len);
119 
120 #endif  /* _CRYPTO_SCATTERWALK_H */
121