/*
 * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>

#include "ccp-crypto.h"

struct ccp_aes_xts_def {
	const char *name;
	const char *drv_name;
};

static struct ccp_aes_xts_def aes_xts_algs[] = {
	{
		.name		= "xts(aes)",
		.drv_name	= "xts-aes-ccp",
	},
};

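/* The CCP processes XTS data in fixed power-of-two unit sizes. Sizes the
 * engine cannot handle map to CCP_XTS_AES_UNIT_SIZE__LAST, which routes
 * the request to the software fallback. Keep this table sorted largest
 * first: the lookup in ccp_aes_xts_crypt() takes the first entry that
 * evenly divides the request length.
 */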
struct ccp_unit_size_map {
	unsigned int size;
	u32 value;
};

static struct ccp_unit_size_map unit_size_map[] = {
	{
		.size	= 4096,
		.value	= CCP_XTS_AES_UNIT_SIZE_4096,
	},
	{
		.size	= 2048,
		.value	= CCP_XTS_AES_UNIT_SIZE_2048,
	},
	{
		.size	= 1024,
		.value	= CCP_XTS_AES_UNIT_SIZE_1024,
	},
	{
		.size	= 512,
		.value	= CCP_XTS_AES_UNIT_SIZE_512,
	},
	{
		.size	= 256,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 128,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 64,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 32,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
	{
		.size	= 16,
		.value	= CCP_XTS_AES_UNIT_SIZE_16,
	},
	{
		.size	= 1,
		.value	= CCP_XTS_AES_UNIT_SIZE__LAST,
	},
};

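/* Completion callback: hand the (possibly updated) IV back to the caller
 * so that chained requests can continue from the current tweak value.
 */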
static int ccp_aes_xts_complete(struct crypto_async_request *async_req, int ret)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);

	if (ret)
		return ret;

	memcpy(req->info, rctx->iv, AES_BLOCK_SIZE);

	return 0;
}

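/* An XTS key is the AES cipher key and the tweak key concatenated, so the
 * key_len seen here covers both. The key is cached for the CCP and also
 * programmed into the fallback tfm, since either path may service a
 * given request.
 */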
static int ccp_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			      unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));

	/* Only a 128-bit AES key with a 128-bit tweak key is supported;
	 * any other size is handled by the fallback
	 */
	switch (key_len) {
	case AES_KEYSIZE_128 * 2:
		memcpy(ctx->u.aes.key, key, key_len);
		break;
	}
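	/* key_len spans both keys; track the length of a single key */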
	ctx->u.aes.key_len = key_len / 2;
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

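	/* Key the fallback tfm too, in case requests are later routed to
	 * software (its setkey should also apply the usual XTS key checks)
	 */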
	return crypto_skcipher_setkey(ctx->u.aes.tfm_skcipher, key, key_len);
}

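/* Encrypt or decrypt a request on the CCP, or hand it to the software
 * fallback when the unit size or key size is not supported by the engine.
 */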
static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
			     unsigned int encrypt)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req);
	unsigned int unit;
	u32 unit_size;
	int ret;

	if (!ctx->u.aes.key_len)
		return -EINVAL;

	if (req->nbytes & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (!req->info)
		return -EINVAL;

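	/* Find the largest supported unit size that evenly divides the
	 * request; if none does, or the request exceeds the largest unit,
	 * unit_size stays at __LAST and the fallback is used below.
	 */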
	unit_size = CCP_XTS_AES_UNIT_SIZE__LAST;
	if (req->nbytes <= unit_size_map[0].size) {
		for (unit = 0; unit < ARRAY_SIZE(unit_size_map); unit++) {
			if (!(req->nbytes & (unit_size_map[unit].size - 1))) {
				unit_size = unit_size_map[unit].value;
				break;
			}
		}
	}

	if ((unit_size == CCP_XTS_AES_UNIT_SIZE__LAST) ||
	    (ctx->u.aes.key_len != AES_KEYSIZE_128)) {
		SKCIPHER_REQUEST_ON_STACK(subreq, ctx->u.aes.tfm_skcipher);

		/* Use the fallback to process the request for any
		 * unsupported unit sizes or key sizes
		 */
		skcipher_request_set_tfm(subreq, ctx->u.aes.tfm_skcipher);
		skcipher_request_set_callback(subreq, req->base.flags,
					      NULL, NULL);
		skcipher_request_set_crypt(subreq, req->src, req->dst,
					   req->nbytes, req->info);
		ret = encrypt ? crypto_skcipher_encrypt(subreq) :
				crypto_skcipher_decrypt(subreq);
		skcipher_request_zero(subreq);
		return ret;
	}

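	/* The command takes the IV as a scatterlist, so keep a copy in the
	 * request context that remains valid until completion.
	 */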
	memcpy(rctx->iv, req->info, AES_BLOCK_SIZE);
	sg_init_one(&rctx->iv_sg, rctx->iv, AES_BLOCK_SIZE);

	memset(&rctx->cmd, 0, sizeof(rctx->cmd));
	INIT_LIST_HEAD(&rctx->cmd.entry);
	rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
	rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
	rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
					   : CCP_AES_ACTION_DECRYPT;
	rctx->cmd.u.xts.unit_size = unit_size;
	rctx->cmd.u.xts.key = &ctx->u.aes.key_sg;
	rctx->cmd.u.xts.key_len = ctx->u.aes.key_len;
	rctx->cmd.u.xts.iv = &rctx->iv_sg;
	rctx->cmd.u.xts.iv_len = AES_BLOCK_SIZE;
	rctx->cmd.u.xts.src = req->src;
	rctx->cmd.u.xts.src_len = req->nbytes;
	rctx->cmd.u.xts.dst = req->dst;

	ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);

	return ret;
}

static int ccp_aes_xts_encrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 1);
}

static int ccp_aes_xts_decrypt(struct ablkcipher_request *req)
{
	return ccp_aes_xts_crypt(req, 0);
}

static int ccp_aes_xts_cra_init(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_skcipher *fallback_tfm;

	ctx->complete = ccp_aes_xts_complete;
	ctx->u.aes.key_len = 0;

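	/* Ask for a synchronous fallback (needed for the on-stack
	 * subrequest in ccp_aes_xts_crypt()) that does not itself require
	 * a fallback, which prevents this driver from selecting itself.
	 */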
	fallback_tfm = crypto_alloc_skcipher("xts(aes)", 0,
					     CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("could not load fallback driver xts(aes)\n");
		return PTR_ERR(fallback_tfm);
	}
	ctx->u.aes.tfm_skcipher = fallback_tfm;

	tfm->crt_ablkcipher.reqsize = sizeof(struct ccp_aes_req_ctx);

	return 0;
}

static void ccp_aes_xts_cra_exit(struct crypto_tfm *tfm)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_skcipher(ctx->u.aes.tfm_skcipher);
}

static int ccp_register_aes_xts_alg(struct list_head *head,
				    const struct ccp_aes_xts_def *def)
{
	struct ccp_crypto_ablkcipher_alg *ccp_alg;
	struct crypto_alg *alg;
	int ret;

	ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL);
	if (!ccp_alg)
		return -ENOMEM;

	INIT_LIST_HEAD(&ccp_alg->entry);

	alg = &ccp_alg->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 def->drv_name);
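	/* KERN_DRIVER_ONLY marks a hardware implementation reachable only
	 * through the kernel; NEED_FALLBACK notes that some inputs are
	 * handed off to software.
	 */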
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
			 CRYPTO_ALG_KERN_DRIVER_ONLY |
			 CRYPTO_ALG_NEED_FALLBACK;
	alg->cra_blocksize = AES_BLOCK_SIZE;
	alg->cra_ctxsize = sizeof(struct ccp_ctx);
	alg->cra_priority = CCP_CRA_PRIORITY;
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_ablkcipher.setkey = ccp_aes_xts_setkey;
	alg->cra_ablkcipher.encrypt = ccp_aes_xts_encrypt;
	alg->cra_ablkcipher.decrypt = ccp_aes_xts_decrypt;
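	/* XTS keys are two AES keys concatenated, hence the doubled sizes */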
	alg->cra_ablkcipher.min_keysize = AES_MIN_KEY_SIZE * 2;
	alg->cra_ablkcipher.max_keysize = AES_MAX_KEY_SIZE * 2;
	alg->cra_ablkcipher.ivsize = AES_BLOCK_SIZE;
	alg->cra_init = ccp_aes_xts_cra_init;
	alg->cra_exit = ccp_aes_xts_cra_exit;
	alg->cra_module = THIS_MODULE;

	ret = crypto_register_alg(alg);
	if (ret) {
		pr_err("%s ablkcipher algorithm registration error (%d)\n",
		       alg->cra_name, ret);
		kfree(ccp_alg);
		return ret;
	}

	list_add(&ccp_alg->entry, head);

	return 0;
}

int ccp_register_aes_xts_algs(struct list_head *head)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(aes_xts_algs); i++) {
		ret = ccp_register_aes_xts_alg(head, &aes_xts_algs[i]);
		if (ret)
			return ret;
	}

	return 0;
}