// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Glue code for accelerated AES-GCM stitched implementation for ppc64le.
 *
 * Copyright 2022- IBM Inc. All rights reserved
 */

#include <asm/unaligned.h>
#include <asm/simd.h>
#include <asm/switch_to.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/simd.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/types.h>

#define PPC_MODULE_FEATURE_P10	(32 + ilog2(PPC_FEATURE2_ARCH_3_1))
#define	PPC_ALIGN		16
#define GCM_IV_SIZE		12

MODULE_DESCRIPTION("PPC64le AES-GCM with Stitched implementation");
MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("aes");

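/*
 * Assembly routines: the aes_p8_* helpers are the Power8 VSX AES
 * primitives, aes_p10_gcm_* implement the Power10 stitched loop that
 * interleaves AES-CTR with the GHASH update, and gcm_init_htable()/
 * gcm_ghash_p8() build and use the GHASH key table.
 */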
asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
				      void *key);
asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
		unsigned char *aad, unsigned int alen);

struct aes_key {
	u8 key[AES_MAX_KEYLENGTH];
	u64 rounds;
};

struct gcm_ctx {
	u8 iv[16];
	u8 ivtag[16];
	u8 aad_hash[16];
	u64 aadLen;
	u64 Plen;	/* offset 56 - used in aes_p10_gcm_{en/de}crypt */
};
struct Hash_ctx {
	u8 H[16];	/* subkey */
	u8 Htable[256];	/* Xi, Hash table(offset 32) */
};

struct p10_aes_gcm_ctx {
	struct aes_key enc_key;
};

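/*
 * Bracket the assembly calls with enable/disable_kernel_vsx() and
 * preemption off; VSX register state is not otherwise preserved for
 * kernel code across context switches.
 */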
static void vsx_begin(void)
{
	preempt_disable();
	enable_kernel_vsx();
}

static void vsx_end(void)
{
	disable_kernel_vsx();
	preempt_enable();
}

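/*
 * Byte-swap the hash subkey H into two host-endian (little-endian
 * here) 64-bit halves, the layout expected by gcm_init_htable().
 */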
static void set_subkey(unsigned char *hash)
{
	*(u64 *)&hash[0] = be64_to_cpup((__be64 *)&hash[0]);
	*(u64 *)&hash[8] = be64_to_cpup((__be64 *)&hash[8]);
}

/*
 * Compute the AAD hash, if any.
 *   - GHASH the AAD and copy the result to Xi.
 */
static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
		    unsigned char *aad, int alen)
{
	int i;
	u8 nXi[16] = {0, };

	gctx->aadLen = alen;
	i = alen & ~0xf;
	if (i) {
		gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
		aad += i;
		alen -= i;
	}
	if (alen) {
		for (i = 0; i < alen; i++)
			nXi[i] ^= aad[i];

		memset(gctx->aad_hash, 0, 16);
		gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
	} else {
		memcpy(gctx->aad_hash, nXi, 16);
	}

	memcpy(hash->Htable, gctx->aad_hash, 16);
}

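/*
 * Set up the per-request GCM state: derive the hash subkey H = E_K(0),
 * build the GHASH key table, encrypt the initial counter block as the
 * tag mask (ivtag), advance the counter to 2 for the bulk pass, and
 * hash any associated data.
 */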
static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
			struct Hash_ctx *hash, u8 *assoc, unsigned int assoclen)
{
	__be32 counter = cpu_to_be32(1);

	aes_p8_encrypt(hash->H, hash->H, rdkey);
	set_subkey(hash->H);
	gcm_init_htable(hash->Htable+32, hash->H);

	*((__be32 *)(iv+12)) = counter;

	gctx->Plen = 0;

	/*
	 * Encrypt counter vector as iv tag and increment counter.
	 */
	aes_p8_encrypt(iv, gctx->ivtag, rdkey);

	counter = cpu_to_be32(2);
	*((__be32 *)(iv+12)) = counter;
	memcpy(gctx->iv, iv, 16);

	gctx->aadLen = assoclen;
	memset(gctx->aad_hash, 0, 16);
	if (assoclen)
		set_aad(gctx, hash, assoc, assoclen);
}

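/*
 * Compute the final authentication tag: GHASH the bit lengths of the
 * AAD and the ciphertext, then XOR with the encrypted initial counter
 * block (ivtag).
 */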
static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
{
	int i;
	unsigned char len_ac[16 + PPC_ALIGN];
	unsigned char *aclen = PTR_ALIGN((void *)len_ac, PPC_ALIGN);
	__be64 clen = cpu_to_be64((u64)len << 3);
	__be64 alen = cpu_to_be64(gctx->aadLen << 3);

	if (len == 0 && gctx->aadLen == 0) {
		memcpy(hash->Htable, gctx->ivtag, 16);
		return;
	}

	/*
	 * Lengths are in bits.
	 */
	*((__be64 *)(aclen)) = alen;
	*((__be64 *)(aclen+8)) = clen;

	/*
	 * hash (AAD len and len)
	 */
	gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);

	for (i = 0; i < 16; i++)
		hash->Htable[i] ^= gctx->ivtag[i];
}

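/*
 * GCM permits tag lengths of 4, 8 and 12..16 bytes.
 */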
static int set_authsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

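/*
 * Expand the AES key under VSX; the expanded schedule lives in the
 * transform context and is shared by the Power8 and Power10 routines.
 */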
static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	vsx_begin();
	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	vsx_end();

	return ret ? -EINVAL : 0;
}

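/*
 * Common encrypt/decrypt path: initialize the GCM state, run the
 * stitched assembly over the scatterwalk chunks, then compute and
 * append (encrypt) or verify (decrypt) the authentication tag.
 */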
static int p10_aes_gcm_crypt(struct aead_request *req, int enc)
{
	struct crypto_tfm *tfm = req->base.tfm;
	struct p10_aes_gcm_ctx *ctx = crypto_tfm_ctx(tfm);
	u8 databuf[sizeof(struct gcm_ctx) + PPC_ALIGN];
	struct gcm_ctx *gctx = PTR_ALIGN((void *)databuf, PPC_ALIGN);
	u8 hashbuf[sizeof(struct Hash_ctx) + PPC_ALIGN];
	struct Hash_ctx *hash = PTR_ALIGN((void *)hashbuf, PPC_ALIGN);
	struct scatter_walk assoc_sg_walk;
	struct skcipher_walk walk;
	u8 *assocmem = NULL;
	u8 *assoc;
	unsigned int assoclen = req->assoclen;
	unsigned int cryptlen = req->cryptlen;
	unsigned char ivbuf[AES_BLOCK_SIZE+PPC_ALIGN];
	unsigned char *iv = PTR_ALIGN((void *)ivbuf, PPC_ALIGN);
	int ret;
	unsigned long auth_tag_len = crypto_aead_authsize(__crypto_aead_cast(tfm));
	u8 otag[16];
	int total_processed = 0;

	memset(databuf, 0, sizeof(databuf));
	memset(hashbuf, 0, sizeof(hashbuf));
	memset(ivbuf, 0, sizeof(ivbuf));
	memcpy(iv, req->iv, GCM_IV_SIZE);

	/* Linearize assoc, if not already linear */
	if (req->src->length >= assoclen && req->src->length) {
		scatterwalk_start(&assoc_sg_walk, req->src);
		assoc = scatterwalk_map(&assoc_sg_walk);
	} else {
		gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
			      GFP_KERNEL : GFP_ATOMIC;

		/* assoc can be any length, so must be on heap */
		assocmem = kmalloc(assoclen, flags);
		if (unlikely(!assocmem))
			return -ENOMEM;
		assoc = assocmem;

		scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
	}

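	/* Derive H, the GHASH table and the tag mask; hash the AAD. */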
	vsx_begin();
	gcmp10_init(gctx, iv, (unsigned char *) &ctx->enc_key, hash, assoc, assoclen);
	vsx_end();

	if (!assocmem)
		scatterwalk_unmap(assoc);
	else
		kfree(assocmem);

	if (enc)
		ret = skcipher_walk_aead_encrypt(&walk, req, false);
	else
		ret = skcipher_walk_aead_decrypt(&walk, req, false);
	if (ret)
		return ret;

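	/*
	 * Bulk pass: the stitched assembly advances gctx->iv and
	 * accumulates the running GHASH value in hash->Htable.
	 */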
	while (walk.nbytes > 0 && ret == 0) {
		vsx_begin();
		if (enc)
			aes_p10_gcm_encrypt(walk.src.virt.addr,
					    walk.dst.virt.addr,
					    walk.nbytes,
					    &ctx->enc_key, gctx->iv, hash->Htable);
		else
			aes_p10_gcm_decrypt(walk.src.virt.addr,
					    walk.dst.virt.addr,
					    walk.nbytes,
					    &ctx->enc_key, gctx->iv, hash->Htable);
		vsx_end();

		total_processed += walk.nbytes;
		ret = skcipher_walk_done(&walk, 0);
	}

	if (ret)
		return ret;

	/* Finalize hash */
	vsx_begin();
	finish_tag(gctx, hash, total_processed);
	vsx_end();

	/* copy Xi to end of dst */
	if (enc) {
		scatterwalk_map_and_copy(hash->Htable, req->dst, req->assoclen + cryptlen,
					 auth_tag_len, 1);
	} else {
		scatterwalk_map_and_copy(otag, req->src,
					 req->assoclen + cryptlen - auth_tag_len,
					 auth_tag_len, 0);

		if (crypto_memneq(otag, hash->Htable, auth_tag_len)) {
			memzero_explicit(hash->Htable, 16);
			return -EBADMSG;
		}
	}

	return 0;
}

static int p10_aes_gcm_encrypt(struct aead_request *req)
{
	return p10_aes_gcm_crypt(req, 1);
}

static int p10_aes_gcm_decrypt(struct aead_request *req)
{
	return p10_aes_gcm_crypt(req, 0);
}

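/*
 * High cra_priority so this implementation is preferred over the
 * generic software gcm(aes) when the hardware supports it.
 */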
static struct aead_alg gcm_aes_alg = {
	.ivsize			= GCM_IV_SIZE,
	.maxauthsize		= 16,

	.setauthsize		= set_authsize,
	.setkey			= p10_aes_gcm_setkey,
	.encrypt		= p10_aes_gcm_encrypt,
	.decrypt		= p10_aes_gcm_decrypt,

	.base.cra_name		= "gcm(aes)",
	.base.cra_driver_name	= "aes_gcm_p10",
	.base.cra_priority	= 2100,
	.base.cra_blocksize	= 1,
	.base.cra_ctxsize	= sizeof(struct p10_aes_gcm_ctx),
	.base.cra_module	= THIS_MODULE,
};

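/*
 * module_cpu_feature_match() loads the module only when the CPU
 * advertises ISA 3.1 (Power10 and later).
 */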
static int __init p10_init(void)
{
	return crypto_register_aead(&gcm_aes_alg);
}

static void __exit p10_exit(void)
{
	crypto_unregister_aead(&gcm_aes_alg);
}

module_cpu_feature_match(PPC_MODULE_FEATURE_P10, p10_init);
module_exit(p10_exit);