// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

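/* Default software queue length, used by crypto_engine_alloc_init() below */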
#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;
	bool finalize_req = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			finalize_req = true;
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	if (finalize_req || engine->retry_support) {
		enginectx = crypto_tfm_ctx(req->tfm);
		if (enginectx->op.prepare_request &&
		    enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine, req);
			if (ret)
				dev_err(engine->dev, "failed to unprepare request\n");
		}
	}
	lockdep_assert_in_softirq();
	req->complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

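/*
 * Example (hypothetical driver, for illustration only): drivers do not
 * call crypto_finalize_request() directly, but go through one of the
 * crypto_finalize_*_request() wrappers below, typically from a softirq
 * completion path such as a tasklet (note the lockdep assertion above):
 *
 *	static void foo_done_tasklet(struct tasklet_struct *t)
 *	{
 *		struct foo_dev *fdev = from_tasklet(fdev, t, done_task);
 *		int err = foo_read_status(fdev) ? -EIO : 0;
 *
 *		crypto_finalize_skcipher_request(fdev->engine,
 *						 fdev->cur_req, err);
 *	}
 *
 * foo_dev, foo_read_status() and the ->cur_req bookkeeping are
 * assumptions; only the crypto_finalize_skcipher_request() call is part
 * of this API.
 */
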
/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	bool was_busy = false;
	int ret;
	struct crypto_engine_ctx *enginectx;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* By this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_2;
		}
	}

	enginectx = crypto_tfm_ctx(async_req->tfm);

	if (enginectx->op.prepare_request) {
		ret = enginectx->op.prepare_request(engine, async_req);
		if (ret) {
			dev_err(engine->dev, "failed to prepare request: %d\n",
				ret);
			goto req_err_2;
		}
	}
	if (!enginectx->op.do_one_request) {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = enginectx->op.do_one_request(engine, async_req);

	/* The hardware failed to execute the request */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request, regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		/*
		 * If the retry mechanism is supported,
		 * unprepare the current request and
		 * enqueue it back into the crypto-engine queue.
		 */
		if (enginectx->op.unprepare_request) {
			ret = enginectx->op.unprepare_request(engine,
							      async_req);
			if (ret)
				dev_err(engine->dev,
					"failed to unprepare request\n");
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back at the front of the crypto-engine queue, to keep
		 * the order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	if (enginectx->op.unprepare_request) {
		ret = enginectx->op.unprepare_request(engine, async_req);
		if (ret)
			dev_err(engine->dev, "failed to unprepare request\n");
	}

req_err_2:
	async_req->complete(async_req, ret);

retry:
	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

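/*
 * Sketch (hypothetical names, for illustration only): the per-request
 * callbacks invoked above are the ones a driver registers through a
 * struct crypto_engine_ctx placed at the start of its tfm context, so
 * that the crypto_tfm_ctx() casts in crypto_pump_requests() and
 * crypto_finalize_request() find it:
 *
 *	struct foo_ctx {
 *		struct crypto_engine_ctx enginectx;
 *		struct crypto_engine *engine;
 *	};
 *
 *	static int foo_skcipher_init(struct crypto_skcipher *tfm)
 *	{
 *		struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = foo_prepare_req;
 *		ctx->enginectx.op.unprepare_request = foo_unprepare_req;
 *		ctx->enginectx.op.do_one_request = foo_do_one_req;
 *		return 0;
 *	}
 *
 * foo_ctx and the foo_* callbacks are assumptions, not part of this
 * file; what matters is that enginectx is the first member of the tfm
 * context, since the engine reaches it via crypto_tfm_ctx().
 */
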
static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 * @need_pump: if true, schedule the request pump when the engine is not busy
 *
 * Return: the crypto_enqueue_request() result, or -ESHUTDOWN if the engine
 * is not running.
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the engine
 * queue and schedule the pump
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request to be put into the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

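/*
 * Example usage (hypothetical names, for illustration only): a driver's
 * skcipher ->encrypt() handler usually just hands the request off to the
 * engine:
 *
 *	static int foo_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct foo_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->engine,
 *								  req);
 *	}
 *
 * foo_ctx and its ->engine pointer are assumptions. The return value
 * comes from crypto_enqueue_request(): normally -EINPROGRESS, -EBUSY if
 * the request was backlogged, or -ENOSPC if the queue is full and the
 * request may not be backlogged.
 */
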
/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

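/*
 * Sketch of the pairing (hypothetical driver, for illustration only):
 * ->do_one_request() normally just starts the hardware and returns 0;
 * the matching crypto_finalize_*_request() call happens later, from the
 * driver's completion path (see the tasklet example further up):
 *
 *	static int foo_do_one_req(struct crypto_engine *engine, void *areq)
 *	{
 *		struct skcipher_request *req =
 *			container_of(areq, struct skcipher_request, base);
 *		struct foo_dev *fdev = foo_dev_from(engine);
 *
 *		foo_start_dma(fdev, req);
 *		return 0;
 *	}
 *
 * foo_dev_from() and foo_start_dma() are assumptions. Returning -ENOSPC
 * here instead asks crypto_pump_requests() to put the request back at
 * the head of the queue when retry_support is set.
 */
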
/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, wait a
	 * while (at most 500 * 20ms here) for the remaining requests to be
	 * pumped out of the engine queue.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate a crypto hardware engine
 * structure and initialize it, setting the maximum number of entries in
 * the software crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate a crypto hardware engine structure
 * and initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

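/*
 * Example probe-time usage (hypothetical driver, for illustration only):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dev *fdev;
 *		int ret;
 *		...
 *		fdev->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!fdev->engine)
 *			return -ENOMEM;
 *
 *		ret = crypto_engine_start(fdev->engine);
 *		if (ret) {
 *			crypto_engine_exit(fdev->engine);
 *			return ret;
 *		}
 *		...
 *	}
 *
 * foo_probe() and foo_dev are assumptions. The engine structure itself
 * is devm-allocated, but the kworker must still be torn down with
 * crypto_engine_exit() on the error and remove paths.
 */
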
/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return: 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

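/*
 * Example remove-time usage (hypothetical driver, for illustration only):
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_dev *fdev = platform_get_drvdata(pdev);
 *
 *		crypto_unregister_skcipher(&foo_alg);
 *		crypto_engine_exit(fdev->engine);
 *		return 0;
 *	}
 *
 * foo_remove(), foo_dev and foo_alg are assumptions. Note that
 * crypto_engine_exit() only stops the engine and destroys its kworker;
 * the engine structure itself is freed with the device.
 */
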
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");