/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");


/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);

/* For any tfm, requests for that tfm on the same CPU must be returned
 * in the order received.  With multiple queues available, the CCP can
 * process more than one cmd at a time.  Therefore we must maintain
 * a cmd list to ensure the proper ordering of requests on a given tfm/cpu
 * combination.
 */
struct ccp_crypto_cpu_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN	50

struct ccp_crypto_percpu_queue {
	struct ccp_crypto_cpu_queue __percpu *cpu_queue;
};
static struct ccp_crypto_percpu_queue req_queue;

struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;

	int cpu;
};

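/* Per-completion work item used to bounce a cmd completion back onto the
 * CPU the cmd was originally submitted from (see ccp_crypto_complete()).
 */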
struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};


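/* An enqueue result of -EINPROGRESS or -EBUSY still counts as success for
 * an asynchronous request; anything else is a real error.
 */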
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}

/*
 * ccp_crypto_cmd_complete must be called while running on the appropriate
 * CPU and the caller must have done a get_cpu() to disable preemption
 */
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	struct ccp_crypto_cmd *held = NULL, *tmp;

	*backlog = NULL;

	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &cpu_queue->cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (cpu_queue->backlog != &cpu_queue->cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (cpu_queue->backlog == &crypto_cmd->entry)
			cpu_queue->backlog = crypto_cmd->entry.next;

		*backlog = container_of(cpu_queue->backlog,
					struct ccp_crypto_cmd, entry);
		cpu_queue->backlog = cpu_queue->backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (cpu_queue->backlog == &crypto_cmd->entry)
			cpu_queue->backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	cpu_queue->cmd_count--;
	list_del(&crypto_cmd->entry);

	return held;
}

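/* Work handler run on the submitting CPU when a cmd completes: update the
 * queue, invoke the completion callbacks and submit the next cmd (if any)
 * that is held waiting on the same tfm.
 */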
static void ccp_crypto_complete_on_cpu(struct work_struct *work)
{
	struct ccp_crypto_cpu *cpu_work =
		container_of(work, struct ccp_crypto_cpu, work);
	struct ccp_crypto_cmd *crypto_cmd = cpu_work->crypto_cmd;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int cpu, ret;

	cpu = get_cpu();

	if (cpu_work->err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		goto e_cpu;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = cpu_work->err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);

e_cpu:
	put_cpu();

	complete(&cpu_work->completion);
}

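/* Callback invoked by the CCP driver when a cmd finishes.  The actual
 * completion work must run on the CPU the cmd was submitted from, so it
 * is pushed to that CPU and waited on synchronously.
 */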
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cpu cpu_work;

	INIT_WORK(&cpu_work.work, ccp_crypto_complete_on_cpu);
	init_completion(&cpu_work.completion);
	cpu_work.crypto_cmd = crypto_cmd;
	cpu_work.err = err;

	schedule_work_on(crypto_cmd->cpu, &cpu_work.work);

	/* Keep the completion call synchronous */
	wait_for_completion(&cpu_work.completion);
}

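/* Add a cmd to this CPU's queue and, if no cmd for the same tfm is already
 * queued, submit it to the CCP immediately; otherwise it is held until the
 * active cmd for that tfm completes.
 */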
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	struct ccp_crypto_cmd *active = NULL, *tmp;
	int cpu, ret;

	cpu = get_cpu();
	crypto_cmd->cpu = cpu;

	cpu_queue = this_cpu_ptr(req_queue.cpu_queue);

	/* Check if the cmd can/should be queued */
	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
			goto e_cpu;
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list for this cpu then the current
	 * cmd cannot be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &cpu_queue->cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_cpu;
	}

	if (cpu_queue->cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (cpu_queue->backlog == &cpu_queue->cmds)
			cpu_queue->backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	cpu_queue->cmd_count++;
	list_add_tail(&crypto_cmd->entry, &cpu_queue->cmds);

e_cpu:
	put_cpu();

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
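 *
 * Return: -EINPROGRESS or -EBUSY if the request was accepted and will be
 *	   completed asynchronously, or a negative error code otherwise.
 *
 * Illustrative use (a sketch only; the request-context layout shown is
 * hypothetical and depends on the individual algorithm implementation):
 *
 *	rctx->cmd.engine = CCP_ENGINE_AES;
 *	... fill in the engine-specific portion of rctx->cmd ...
 *	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);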
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;
	int ret;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	ret = ccp_crypto_enqueue_cmd(crypto_cmd);
	if (!ccp_crypto_success(ret))
		kfree(crypto_cmd);

	return ret;
}

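/**
 * ccp_crypto_sg_table_add - append a scatterlist to a pre-built sg_table
 *
 * @table: sg_table containing enough unused entries to hold @sg_add
 * @sg_add: scatterlist entries to copy into @table
 *
 * Copies the page, length and offset of each @sg_add entry into the first
 * unused entries of @table and returns the last entry that was filled in.
 */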
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	BUG_ON(!sg);

	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	BUG_ON(sg_add);

	return sg_last;
}

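/* Register the supported AES and SHA algorithms with the crypto API.  Each
 * registration helper adds the algorithms it registers to the appropriate
 * list so they can be torn down again in ccp_unregister_algs().
 */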
static int ccp_register_algs(void)
{
	int ret;

	ret = ccp_register_aes_algs(&cipher_algs);
	if (ret)
		return ret;

	ret = ccp_register_aes_cmac_algs(&hash_algs);
	if (ret)
		return ret;

	ret = ccp_register_aes_xts_algs(&cipher_algs);
	if (ret)
		return ret;

	ret = ccp_register_sha_algs(&hash_algs);
	if (ret)
		return ret;

	return 0;
}

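/* Unregister and free every algorithm that was registered above */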
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}
}

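/* Allocate the per-cpu cmd queues; each queue starts out empty with its
 * backlog pointer parked on the list head (meaning no backlog).
 */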
static int ccp_init_queues(void)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	int cpu;

	req_queue.cpu_queue = alloc_percpu(struct ccp_crypto_cpu_queue);
	if (!req_queue.cpu_queue)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
		INIT_LIST_HEAD(&cpu_queue->cmds);
		cpu_queue->backlog = &cpu_queue->cmds;
		cpu_queue->cmd_count = 0;
	}

	return 0;
}

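/* All per-cpu cmd queues must be empty before the memory can be freed */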
static void ccp_fini_queue(void)
{
	struct ccp_crypto_cpu_queue *cpu_queue;
	int cpu;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(req_queue.cpu_queue, cpu);
		BUG_ON(!list_empty(&cpu_queue->cmds));
	}
	free_percpu(req_queue.cpu_queue);
}

static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_init_queues();
	if (ret)
		return ret;

	ret = ccp_register_algs();
	if (ret) {
		ccp_unregister_algs();
		ccp_fini_queue();
	}

	return ret;
}

static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
	ccp_fini_queue();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);