// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block request by crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <baolin.wang@linaro.org>
 */

#include <linux/err.h>
#include <linux/delay.h>
#include <crypto/engine.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
                                    struct crypto_async_request *req, int err)
{
        unsigned long flags;
        bool finalize_cur_req = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);
        if (engine->cur_req == req)
                finalize_cur_req = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (finalize_cur_req) {
                enginectx = crypto_tfm_ctx(req->tfm);
                if (engine->cur_req_prepared &&
                    enginectx->op.unprepare_request) {
                        ret = enginectx->op.unprepare_request(engine, req);
                        if (ret)
                                dev_err(engine->dev, "failed to unprepare request\n");
                }
                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->cur_req = NULL;
                engine->cur_req_prepared = false;
                spin_unlock_irqrestore(&engine->queue_lock, flags);
        }

        req->complete(req, err);

        kthread_queue_work(engine->kworker, &engine->pump_requests);
}
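
/*
 * Note for drivers (illustrative sketch, not used by this file): both
 * crypto_finalize_request() and crypto_pump_requests() obtain the engine
 * callbacks by casting crypto_tfm_ctx(req->tfm) to struct crypto_engine_ctx,
 * so a driver's transform context must embed struct crypto_engine_ctx as its
 * first member. The foo_* names below are hypothetical:
 *
 *	struct foo_tfm_ctx {
 *		struct crypto_engine_ctx enginectx;	// must be first
 *		struct foo_device *fdev;		// driver-private state
 *		u8 key[AES_MAX_KEY_SIZE];
 *		unsigned int keylen;
 *	};
 */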

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
                                 bool in_kthread)
{
        struct crypto_async_request *async_req, *backlog;
        unsigned long flags;
        bool was_busy = false;
        int ret;
        struct crypto_engine_ctx *enginectx;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /* Make sure we are not already running a request */
        if (engine->cur_req)
                goto out;

        /* If another context is idling then defer */
        if (engine->idling) {
                kthread_queue_work(engine->kworker, &engine->pump_requests);
                goto out;
        }

        /* Check if the engine queue is idle */
        if (!crypto_queue_len(&engine->queue) || !engine->running) {
                if (!engine->busy)
                        goto out;

                /* Only do teardown in the thread */
                if (!in_kthread) {
                        kthread_queue_work(engine->kworker,
                                           &engine->pump_requests);
                        goto out;
                }

                engine->busy = false;
                engine->idling = true;
                spin_unlock_irqrestore(&engine->queue_lock, flags);

                if (engine->unprepare_crypt_hardware &&
                    engine->unprepare_crypt_hardware(engine))
                        dev_err(engine->dev, "failed to unprepare crypt hardware\n");

                spin_lock_irqsave(&engine->queue_lock, flags);
                engine->idling = false;
                goto out;
        }

        /* Get the first request from the engine queue to handle */
        backlog = crypto_get_backlog(&engine->queue);
        async_req = crypto_dequeue_request(&engine->queue);
        if (!async_req)
                goto out;

        engine->cur_req = async_req;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        if (engine->busy)
                was_busy = true;
        else
                engine->busy = true;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        /* At this point we have successfully dequeued a request to process */
        if (!was_busy && engine->prepare_crypt_hardware) {
                ret = engine->prepare_crypt_hardware(engine);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare crypt hardware\n");
                        goto req_err;
                }
        }

        enginectx = crypto_tfm_ctx(async_req->tfm);

        if (enginectx->op.prepare_request) {
                ret = enginectx->op.prepare_request(engine, async_req);
                if (ret) {
                        dev_err(engine->dev, "failed to prepare request: %d\n",
                                ret);
                        goto req_err;
                }
                engine->cur_req_prepared = true;
        }
        if (!enginectx->op.do_one_request) {
                dev_err(engine->dev, "failed to do request\n");
                ret = -EINVAL;
                goto req_err;
        }
        ret = enginectx->op.do_one_request(engine, async_req);
        if (ret) {
                dev_err(engine->dev, "Failed to do one request from queue: %d\n", ret);
                goto req_err;
        }
        return;

req_err:
        crypto_finalize_request(engine, async_req, ret);
        return;

out:
        spin_unlock_irqrestore(&engine->queue_lock, flags);
}

static void crypto_pump_work(struct kthread_work *work)
{
        struct crypto_engine *engine =
                container_of(work, struct crypto_engine, pump_requests);

        crypto_pump_requests(engine, true);
}
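
/*
 * Example (illustrative sketch with hypothetical foo_* names): a driver
 * points the per-transform struct crypto_engine_ctx at its own callbacks,
 * typically from its skcipher init_tfm hook, so that crypto_pump_requests()
 * can prepare and run each dequeued request:
 *
 *	static int foo_skcipher_init_tfm(struct crypto_skcipher *tfm)
 *	{
 *		struct foo_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		ctx->enginectx.op.prepare_request = foo_prepare_req;	  // optional
 *		ctx->enginectx.op.unprepare_request = foo_unprepare_req; // optional
 *		ctx->enginectx.op.do_one_request = foo_do_one_request;	  // mandatory
 *		return 0;
 *	}
 *
 * do_one_request() is the only mandatory callback; crypto_pump_requests()
 * fails the request with -EINVAL if it is missing.
 */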

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 * @need_pump: whether to queue the request pump after enqueueing
 */
static int crypto_transfer_request(struct crypto_engine *engine,
                                   struct crypto_async_request *req,
                                   bool need_pump)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (!engine->running) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -ESHUTDOWN;
        }

        ret = crypto_enqueue_request(&engine->queue, req);

        if (!engine->busy && need_pump)
                kthread_queue_work(engine->kworker, &engine->pump_requests);

        spin_unlock_irqrestore(&engine->queue_lock, flags);
        return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request into the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
                                             struct crypto_async_request *req)
{
        return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
                                           struct aead_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
                                               struct akcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
                                           struct ahash_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be added to the engine queue
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
                                               struct skcipher_request *req)
{
        return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
                                  struct aead_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
                                      struct akcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);
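
/*
 * Example (illustrative sketch with hypothetical foo_* names): an skcipher
 * .encrypt entry point usually just hands the request over to the engine and
 * returns the enqueue status (-EINPROGRESS, or -EBUSY for a backlogged
 * request) to the caller:
 *
 *	static int foo_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct foo_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return crypto_transfer_skcipher_request_to_engine(ctx->fdev->engine,
 *								  req);
 *	}
 */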

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
                                  struct ahash_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
                                      struct skcipher_request *req, int err)
{
        return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
        unsigned long flags;

        spin_lock_irqsave(&engine->queue_lock, flags);

        if (engine->running || engine->busy) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                return -EBUSY;
        }

        engine->running = true;
        spin_unlock_irqrestore(&engine->queue_lock, flags);

        kthread_queue_work(engine->kworker, &engine->pump_requests);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
        unsigned long flags;
        unsigned int limit = 500;
        int ret = 0;

        spin_lock_irqsave(&engine->queue_lock, flags);

        /*
         * If the engine queue is not empty or the engine is busy, wait
         * for a while so that the queued requests can be pumped.
         */
        while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
                spin_unlock_irqrestore(&engine->queue_lock, flags);
                msleep(20);
                spin_lock_irqsave(&engine->queue_lock, flags);
        }

        if (crypto_queue_len(&engine->queue) || engine->busy)
                ret = -EBUSY;
        else
                engine->running = false;

        spin_unlock_irqrestore(&engine->queue_lock, flags);

        if (ret)
                dev_warn(engine->dev, "could not stop engine\n");

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);
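
/*
 * Example (illustrative sketch with hypothetical foo_* names): once the
 * hardware has finished the request that do_one_request() started, the
 * driver reports the result back to the engine, which completes the request
 * and pumps the next one. This is typically done from the completion
 * interrupt path or a tasklet; fdev->req is assumed to be the request saved
 * by the driver's do_one_request() callback:
 *
 *	static void foo_done_task(struct foo_device *fdev)
 *	{
 *		int err = foo_read_status(fdev) ? -EIO : 0;	// hypothetical status read
 *
 *		crypto_finalize_skcipher_request(fdev->engine, fdev->req, err);
 *	}
 */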

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
        struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };
        struct crypto_engine *engine;

        if (!dev)
                return NULL;

        engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
        if (!engine)
                return NULL;

        engine->dev = dev;
        engine->rt = rt;
        engine->running = false;
        engine->busy = false;
        engine->idling = false;
        engine->cur_req_prepared = false;
        engine->priv_data = dev;
        snprintf(engine->name, sizeof(engine->name),
                 "%s-engine", dev_name(dev));

        crypto_init_queue(&engine->queue, CRYPTO_ENGINE_MAX_QLEN);
        spin_lock_init(&engine->queue_lock);

        engine->kworker = kthread_create_worker(0, "%s", engine->name);
        if (IS_ERR(engine->kworker)) {
                dev_err(dev, "failed to create crypto request pump task\n");
                return NULL;
        }
        kthread_init_work(&engine->pump_requests, crypto_pump_work);

        if (engine->rt) {
                dev_info(dev, "will run requests pump with realtime priority\n");
                sched_setscheduler(engine->kworker->task, SCHED_FIFO, &param);
        }

        return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of hardware engine when exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
        int ret;

        ret = crypto_engine_stop(engine);
        if (ret)
                return ret;

        kthread_destroy_worker(engine->kworker);

        return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");
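
/*
 * Example (illustrative sketch with hypothetical foo_* names): typical
 * engine lifecycle in a driver's probe/remove path. crypto_engine_alloc_init()
 * allocates the engine with devm, so only the kworker has to be torn down
 * explicitly via crypto_engine_exit():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_device *fdev = ...;	// driver setup elided
 *
 *		fdev->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!fdev->engine)
 *			return -ENOMEM;
 *		return crypto_engine_start(fdev->engine);
 *	}
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_device *fdev = platform_get_drvdata(pdev);
 *
 *		crypto_engine_exit(fdev->engine);
 *		return 0;
 *	}
 */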