// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <crypto/des.h>
#include <crypto/xts.h>
#include <crypto/scatterwalk.h>

#include "cc_driver.h"
#include "cc_lli_defs.h"
#include "cc_buffer_mgr.h"
#include "cc_cipher.h"
#include "cc_request_mgr.h"

#define MAX_ABLKCIPHER_SEQ_LEN 6

#define template_skcipher template_u.skcipher

#define CC_MIN_AES_XTS_SIZE 0x10
#define CC_MAX_AES_XTS_SIZE 0x2000

struct cc_cipher_handle {
	struct list_head alg_list;
};

struct cc_user_key_info {
	u8 *key;
	dma_addr_t key_dma_addr;
};

struct cc_hw_key_info {
	enum cc_hw_crypto_key key1_slot;
	enum cc_hw_crypto_key key2_slot;
};

struct cc_cipher_ctx {
	struct cc_drvdata *drvdata;
	int keylen;
	int key_round_number;
	int cipher_mode;
	int flow_mode;
	unsigned int flags;
	struct cc_user_key_info user;
	struct cc_hw_key_info hw;
	struct crypto_shash *shash_tfm;
};

static void cc_cipher_complete(struct device *dev, void *cc_req, int err);

static int validate_keys_sizes(struct cc_cipher_ctx *ctx_p, u32 size)
{
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		switch (size) {
		case CC_AES_128_BIT_KEY_SIZE:
		case CC_AES_192_BIT_KEY_SIZE:
			if (ctx_p->cipher_mode != DRV_CIPHER_XTS &&
			    ctx_p->cipher_mode != DRV_CIPHER_ESSIV &&
			    ctx_p->cipher_mode != DRV_CIPHER_BITLOCKER)
				return 0;
			break;
		case CC_AES_256_BIT_KEY_SIZE:
			return 0;
		case (CC_AES_192_BIT_KEY_SIZE * 2):
		case (CC_AES_256_BIT_KEY_SIZE * 2):
			if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
			    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
			    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER)
				return 0;
			break;
		default:
			break;
		}
		break;
	case S_DIN_to_DES:
		if (size == DES3_EDE_KEY_SIZE || size == DES_KEY_SIZE)
			return 0;
		break;
	default:
		break;
	}
	return -EINVAL;
}

static int validate_data_size(struct cc_cipher_ctx *ctx_p,
			      unsigned int size)
{
	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		switch (ctx_p->cipher_mode) {
		case DRV_CIPHER_XTS:
			if (size >= CC_MIN_AES_XTS_SIZE &&
			    size <= CC_MAX_AES_XTS_SIZE &&
			    IS_ALIGNED(size, AES_BLOCK_SIZE))
				return 0;
			break;
		case DRV_CIPHER_CBC_CTS:
			if (size >= AES_BLOCK_SIZE)
				return 0;
			break;
		case DRV_CIPHER_OFB:
		case DRV_CIPHER_CTR:
			return 0;
		case DRV_CIPHER_ECB:
		case DRV_CIPHER_CBC:
		case DRV_CIPHER_ESSIV:
		case DRV_CIPHER_BITLOCKER:
			if (IS_ALIGNED(size, AES_BLOCK_SIZE))
				return 0;
			break;
		default:
			break;
		}
		break;
	case S_DIN_to_DES:
		if (IS_ALIGNED(size, DES_BLOCK_SIZE))
			return 0;
		break;
	default:
		break;
	}
	return -EINVAL;
}
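/*
 * Per-tfm context setup: copy the cipher/flow mode from the registering
 * template, allocate a key staging buffer sized for the algorithm's
 * maximum keysize and map it for DMA, and allocate a software sha256
 * tfm when the mode is ESSIV (used to derive the second key in setkey).
 */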
static int cc_cipher_init(struct crypto_tfm *tfm)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     skcipher_alg.base);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);
	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
	int rc = 0;

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx_p,
		crypto_tfm_alg_name(tfm));

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct cipher_req_ctx));

	ctx_p->cipher_mode = cc_alg->cipher_mode;
	ctx_p->flow_mode = cc_alg->flow_mode;
	ctx_p->drvdata = cc_alg->drvdata;

	/* Allocate key buffer, cache line aligned */
	ctx_p->user.key = kmalloc(max_key_buf_size, GFP_KERNEL);
	if (!ctx_p->user.key)
		return -ENOMEM;

	dev_dbg(dev, "Allocated key buffer in context. key=@%p\n",
		ctx_p->user.key);

	/* Map key buffer */
	ctx_p->user.key_dma_addr = dma_map_single(dev, (void *)ctx_p->user.key,
						  max_key_buf_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx_p->user.key_dma_addr)) {
		dev_err(dev, "Mapping Key %u B at va=%pK for DMA failed\n",
			max_key_buf_size, ctx_p->user.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped key %u B at va=%pK to dma=%pad\n",
		max_key_buf_size, ctx_p->user.key, &ctx_p->user.key_dma_addr);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* Alloc hash tfm for essiv */
		ctx_p->shash_tfm = crypto_alloc_shash("sha256-generic", 0, 0);
		if (IS_ERR(ctx_p->shash_tfm)) {
			dev_err(dev, "Error allocating hash tfm for ESSIV.\n");
			return PTR_ERR(ctx_p->shash_tfm);
		}
	}

	return rc;
}

static void cc_cipher_exit(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct cc_crypto_alg *cc_alg =
		container_of(alg, struct cc_crypto_alg,
			     skcipher_alg.base);
	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);

	dev_dbg(dev, "Clearing context @%p for %s\n",
		crypto_tfm_ctx(tfm), crypto_tfm_alg_name(tfm));

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* Free hash tfm for essiv */
		crypto_free_shash(ctx_p->shash_tfm);
		ctx_p->shash_tfm = NULL;
	}

	/* Unmap key buffer */
	dma_unmap_single(dev, ctx_p->user.key_dma_addr, max_key_buf_size,
			 DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key buffer key_dma_addr=%pad\n",
		&ctx_p->user.key_dma_addr);

	/* Free key buffer in context */
	kzfree(ctx_p->user.key);
	dev_dbg(dev, "Free key buffer in context. key=@%p\n", ctx_p->user.key);
}

struct tdes_keys {
	u8	key1[DES_KEY_SIZE];
	u8	key2[DES_KEY_SIZE];
	u8	key3[DES_KEY_SIZE];
};

static enum cc_hw_crypto_key hw_key_to_cc_hw_key(int slot_num)
{
	switch (slot_num) {
	case 0:
		return KFDE0_KEY;
	case 1:
		return KFDE1_KEY;
	case 2:
		return KFDE2_KEY;
	case 3:
		return KFDE3_KEY;
	}
	return END_OF_KEYS;
}
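/*
 * setkey handles two flavours of key material: a tfm flagged as using
 * HW keys receives a struct arm_hw_key_info naming the KFDE key slots
 * to program, while a regular key is validated (DES weak-key and XTS
 * key checks), copied into the DMA-mapped staging buffer and, for
 * ESSIV, extended with a SHA-256 digest of the first half that serves
 * as the second key.
 */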
static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	u32 tmp[DES3_EDE_EXPKEY_WORDS];
	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     skcipher_alg.base);
	unsigned int max_key_buf_size = cc_alg->skcipher_alg.max_keysize;

	dev_dbg(dev, "Setting key in context @%p for %s. keylen=%u\n",
		ctx_p, crypto_tfm_alg_name(tfm), keylen);
	dump_byte_array("key", (u8 *)key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (validate_keys_sizes(ctx_p, keylen)) {
		dev_err(dev, "Unsupported key size %d.\n", keylen);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (cc_is_hw_key(tfm)) {
		/* setting HW key slots */
		struct arm_hw_key_info *hki = (struct arm_hw_key_info *)key;

		if (ctx_p->flow_mode != S_DIN_to_AES) {
			dev_err(dev, "HW key not supported for non-AES flows\n");
			return -EINVAL;
		}

		ctx_p->hw.key1_slot = hw_key_to_cc_hw_key(hki->hw_key1);
		if (ctx_p->hw.key1_slot == END_OF_KEYS) {
			dev_err(dev, "Unsupported hw key1 number (%d)\n",
				hki->hw_key1);
			return -EINVAL;
		}

		if (ctx_p->cipher_mode == DRV_CIPHER_XTS ||
		    ctx_p->cipher_mode == DRV_CIPHER_ESSIV ||
		    ctx_p->cipher_mode == DRV_CIPHER_BITLOCKER) {
			if (hki->hw_key1 == hki->hw_key2) {
				dev_err(dev, "Illegal hw key numbers (%d,%d)\n",
					hki->hw_key1, hki->hw_key2);
				return -EINVAL;
			}
			ctx_p->hw.key2_slot =
				hw_key_to_cc_hw_key(hki->hw_key2);
			if (ctx_p->hw.key2_slot == END_OF_KEYS) {
				dev_err(dev, "Unsupported hw key2 number (%d)\n",
					hki->hw_key2);
				return -EINVAL;
			}
		}

		ctx_p->keylen = keylen;
		dev_dbg(dev, "cc_is_hw_key ret 0");

		return 0;
	}

	/*
	 * Verify DES weak keys
	 * Note that we're dropping the expanded key since the
	 * HW does the expansion on its own.
	 */
	if (ctx_p->flow_mode == S_DIN_to_DES) {
		if (keylen == DES3_EDE_KEY_SIZE &&
		    __des3_ede_setkey(tmp, &tfm->crt_flags, key,
				      DES3_EDE_KEY_SIZE)) {
			dev_dbg(dev, "weak 3DES key");
			return -EINVAL;
		} else if (!des_ekey(tmp, key) &&
			   (crypto_tfm_get_flags(tfm) & CRYPTO_TFM_REQ_WEAK_KEY)) {
			tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
			dev_dbg(dev, "weak DES key");
			return -EINVAL;
		}
	}

	if (ctx_p->cipher_mode == DRV_CIPHER_XTS &&
	    xts_check_key(tfm, key, keylen)) {
		dev_dbg(dev, "weak XTS key");
		return -EINVAL;
	}

	/* STAT_PHASE_1: Copy key to ctx */
	dma_sync_single_for_cpu(dev, ctx_p->user.key_dma_addr,
				max_key_buf_size, DMA_TO_DEVICE);

	memcpy(ctx_p->user.key, key, keylen);
	if (keylen == 24)
		memset(ctx_p->user.key + 24, 0, CC_AES_KEY_SIZE_MAX - 24);

	if (ctx_p->cipher_mode == DRV_CIPHER_ESSIV) {
		/* sha256 for key2 - use sw implementation */
		int key_len = keylen >> 1;
		int err;

		SHASH_DESC_ON_STACK(desc, ctx_p->shash_tfm);

		desc->tfm = ctx_p->shash_tfm;

		err = crypto_shash_digest(desc, ctx_p->user.key, key_len,
					  ctx_p->user.key + key_len);
		if (err) {
			dev_err(dev, "Failed to hash ESSIV key.\n");
			return err;
		}
	}
	dma_sync_single_for_device(dev, ctx_p->user.key_dma_addr,
				   max_key_buf_size, DMA_TO_DEVICE);
	ctx_p->keylen = keylen;

	dev_dbg(dev, "return safely");
	return 0;
}
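/*
 * Build the setup descriptors that program the engine before the data
 * pass: CBC/CTS/CTR/OFB load the IV as cipher state and then the key;
 * ECB loads only the key; XTS/ESSIV/BITLOCKER load the first half of
 * the key, then the second (XEX/tweak) half with its data unit size,
 * and finally the 16-byte tweak/IV as state.
 */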
static void cc_setup_cipher_desc(struct crypto_tfm *tfm,
				 struct cipher_req_ctx *req_ctx,
				 unsigned int ivsize, unsigned int nbytes,
				 struct cc_hw_desc desc[],
				 unsigned int *seq_size)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	int cipher_mode = ctx_p->cipher_mode;
	int flow_mode = ctx_p->flow_mode;
	int direction = req_ctx->gen_ctx.op_type;
	dma_addr_t key_dma_addr = ctx_p->user.key_dma_addr;
	unsigned int key_len = ctx_p->keylen;
	dma_addr_t iv_dma_addr = req_ctx->gen_ctx.iv_dma_addr;
	unsigned int du_size = nbytes;

	struct cc_crypto_alg *cc_alg =
		container_of(tfm->__crt_alg, struct cc_crypto_alg,
			     skcipher_alg.base);

	if (cc_alg->data_unit)
		du_size = cc_alg->data_unit;

	switch (cipher_mode) {
	case DRV_CIPHER_CBC:
	case DRV_CIPHER_CBC_CTS:
	case DRV_CIPHER_CTR:
	case DRV_CIPHER_OFB:
		/* Load cipher state */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr, ivsize,
			     NS_BIT);
		set_cipher_config0(&desc[*seq_size], direction);
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		if (cipher_mode == DRV_CIPHER_CTR ||
		    cipher_mode == DRV_CIPHER_OFB) {
			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
		} else {
			set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE0);
		}
		(*seq_size)++;
		/*FALLTHROUGH*/
	case DRV_CIPHER_ECB:
		/* Load key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (flow_mode == S_DIN_to_AES) {
			if (cc_is_hw_key(tfm)) {
				set_hw_crypto_key(&desc[*seq_size],
						  ctx_p->hw.key1_slot);
			} else {
				set_din_type(&desc[*seq_size], DMA_DLLI,
					     key_dma_addr, ((key_len == 24) ?
							    AES_MAX_KEY_SIZE :
							    key_len), NS_BIT);
			}
			set_key_size_aes(&desc[*seq_size], key_len);
		} else {
			/*des*/
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     key_len, NS_BIT);
			set_key_size_des(&desc[*seq_size], key_len);
		}
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;
		break;
	case DRV_CIPHER_XTS:
	case DRV_CIPHER_ESSIV:
	case DRV_CIPHER_BITLOCKER:
		/* Load AES key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (cc_is_hw_key(tfm)) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key1_slot);
		} else {
			set_din_type(&desc[*seq_size], DMA_DLLI, key_dma_addr,
				     (key_len / 2), NS_BIT);
		}
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_KEY0);
		(*seq_size)++;

		/* load XEX key */
		hw_desc_init(&desc[*seq_size]);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		if (cc_is_hw_key(tfm)) {
			set_hw_crypto_key(&desc[*seq_size],
					  ctx_p->hw.key2_slot);
		} else {
			set_din_type(&desc[*seq_size], DMA_DLLI,
				     (key_dma_addr + (key_len / 2)),
				     (key_len / 2), NS_BIT);
		}
		set_xex_data_unit_size(&desc[*seq_size], du_size);
		set_flow_mode(&desc[*seq_size], S_DIN_to_AES2);
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_XEX_KEY);
		(*seq_size)++;

		/* Set state */
		hw_desc_init(&desc[*seq_size]);
		set_setup_mode(&desc[*seq_size], SETUP_LOAD_STATE1);
		set_cipher_mode(&desc[*seq_size], cipher_mode);
		set_cipher_config0(&desc[*seq_size], direction);
		set_key_size_aes(&desc[*seq_size], (key_len / 2));
		set_flow_mode(&desc[*seq_size], flow_mode);
		set_din_type(&desc[*seq_size], DMA_DLLI, iv_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		(*seq_size)++;
		break;
	default:
		dev_err(dev, "Unsupported cipher mode (%d)\n", cipher_mode);
	}
}

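/*
 * Build the data-flow descriptors. For a single contiguous mapping
 * (DLLI) one descriptor moves src to dst directly; otherwise a BYPASS
 * descriptor first copies the MLLI table into SRAM and a second
 * descriptor processes the data through the scatter/gather lists.
 */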
static void cc_setup_cipher_data(struct crypto_tfm *tfm,
				 struct cipher_req_ctx *req_ctx,
				 struct scatterlist *dst,
				 struct scatterlist *src, unsigned int nbytes,
				 void *areq, struct cc_hw_desc desc[],
				 unsigned int *seq_size)
{
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	unsigned int flow_mode = ctx_p->flow_mode;

	switch (ctx_p->flow_mode) {
	case S_DIN_to_AES:
		flow_mode = DIN_AES_DOUT;
		break;
	case S_DIN_to_DES:
		flow_mode = DIN_DES_DOUT;
		break;
	default:
		dev_err(dev, "invalid flow mode, flow_mode = %d\n", flow_mode);
		return;
	}
	/* Process */
	if (req_ctx->dma_buf_type == CC_DMA_BUF_DLLI) {
		dev_dbg(dev, " data params addr %pad length 0x%X\n",
			&sg_dma_address(src), nbytes);
		dev_dbg(dev, " data params addr %pad length 0x%X\n",
			&sg_dma_address(dst), nbytes);
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI, sg_dma_address(src),
			     nbytes, NS_BIT);
		set_dout_dlli(&desc[*seq_size], sg_dma_address(dst),
			      nbytes, NS_BIT, (!areq ? 0 : 1));
		if (areq)
			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
		(*seq_size)++;
	} else {
		/* bypass */
		dev_dbg(dev, " bypass params addr %pad length 0x%X addr 0x%08X\n",
			&req_ctx->mlli_params.mlli_dma_addr,
			req_ctx->mlli_params.mlli_len,
			(unsigned int)ctx_p->drvdata->mlli_sram_addr);
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI,
			     req_ctx->mlli_params.mlli_dma_addr,
			     req_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[*seq_size],
			      ctx_p->drvdata->mlli_sram_addr,
			      req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;

		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_MLLI,
			     ctx_p->drvdata->mlli_sram_addr,
			     req_ctx->in_mlli_nents, NS_BIT);
		if (req_ctx->out_nents == 0) {
			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
				(unsigned int)ctx_p->drvdata->mlli_sram_addr);
			set_dout_mlli(&desc[*seq_size],
				      ctx_p->drvdata->mlli_sram_addr,
				      req_ctx->in_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
		} else {
			dev_dbg(dev, " din/dout params addr 0x%08X addr 0x%08X\n",
				(unsigned int)ctx_p->drvdata->mlli_sram_addr,
				(unsigned int)ctx_p->drvdata->mlli_sram_addr +
				(u32)LLI_ENTRY_BYTE_SIZE * req_ctx->in_nents);
			set_dout_mlli(&desc[*seq_size],
				      (ctx_p->drvdata->mlli_sram_addr +
				       (LLI_ENTRY_BYTE_SIZE *
					req_ctx->in_mlli_nents)),
				      req_ctx->out_mlli_nents, NS_BIT,
				      (!areq ? 0 : 1));
		}
		if (areq)
			set_queue_last_ind(ctx_p->drvdata, &desc[*seq_size]);

		set_flow_mode(&desc[*seq_size], flow_mode);
		(*seq_size)++;
	}
}

static void cc_cipher_complete(struct device *dev, void *cc_req, int err)
{
	struct skcipher_request *req = (struct skcipher_request *)cc_req;
	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);

	cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	kzfree(req_ctx->iv);

	/*
	 * The crypto API expects us to set the req->iv to the last
	 * ciphertext block. For encrypt, simply copy from the result.
	 * For decrypt, we must copy from a saved buffer since this
	 * could be an in-place decryption operation and the src is
	 * lost by this point.
	 */
	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		memcpy(req->iv, req_ctx->backup_info, ivsize);
		kzfree(req_ctx->backup_info);
	} else if (!err) {
		scatterwalk_map_and_copy(req->iv, req->dst,
					 (req->cryptlen - ivsize),
					 ivsize, 0);
	}

	skcipher_request_complete(req, err);
}

static int cc_cipher_process(struct skcipher_request *req,
			     enum drv_crypto_direction direction)
{
	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(sk_tfm);
	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
	struct scatterlist *dst = req->dst;
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->cryptlen;
	void *iv = req->iv;
	struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx_p->drvdata);
	struct cc_hw_desc desc[MAX_ABLKCIPHER_SEQ_LEN];
	struct cc_crypto_req cc_req = {};
	int rc, cts_restore_flag = 0;
	unsigned int seq_len = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "%s req=%p iv=%p nbytes=%d\n",
		((direction == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
		 "Encrypt" : "Decrypt"), req, iv, nbytes);

	/* STAT_PHASE_0: Init and sanity checks */

	/* TODO: check data length according to mode */
	if (validate_data_size(ctx_p, nbytes)) {
		dev_err(dev, "Unsupported data size %d.\n", nbytes);
		crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
		rc = -EINVAL;
		goto exit_process;
	}
	if (nbytes == 0) {
		/* No data to process is valid */
		rc = 0;
		goto exit_process;
	}

	/* The IV we are handed may be allocated from the stack so
	 * we must copy it to a DMAable buffer before use.
	 */
	req_ctx->iv = kmemdup(iv, ivsize, flags);
	if (!req_ctx->iv) {
		rc = -ENOMEM;
		goto exit_process;
	}

	/* For CTS, in case of data size aligned to 16, use CBC mode */
	if (((nbytes % AES_BLOCK_SIZE) == 0) &&
	    ctx_p->cipher_mode == DRV_CIPHER_CBC_CTS) {
		ctx_p->cipher_mode = DRV_CIPHER_CBC;
		cts_restore_flag = 1;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_cipher_complete;
	cc_req.user_arg = (void *)req;

#ifdef ENABLE_CYCLE_COUNT
	cc_req.op_type = (direction == DRV_CRYPTO_DIRECTION_DECRYPT) ?
			STAT_OP_TYPE_DECODE : STAT_OP_TYPE_ENCODE;

#endif

	/* Setup request context */
	req_ctx->gen_ctx.op_type = direction;

	/* STAT_PHASE_1: Map buffers */

	rc = cc_map_cipher_request(ctx_p->drvdata, req_ctx, ivsize, nbytes,
				   req_ctx->iv, src, dst, flags);
	if (rc) {
		dev_err(dev, "map_request() failed\n");
		goto exit_process;
	}

	/* STAT_PHASE_2: Create sequence */

	/* Setup processing */
	cc_setup_cipher_desc(tfm, req_ctx, ivsize, nbytes, desc, &seq_len);
	/* Data processing */
	cc_setup_cipher_data(tfm, req_ctx, dst, src, nbytes, req, desc,
			     &seq_len);

	/* do we need to generate IV? */
	if (req_ctx->is_giv) {
		cc_req.ivgen_dma_addr[0] = req_ctx->gen_ctx.iv_dma_addr;
		cc_req.ivgen_dma_addr_len = 1;
		/* set the IV size (8/16 B long) */
		cc_req.ivgen_size = ivsize;
	}

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = cc_send_request(ctx_p->drvdata, &cc_req, desc, seq_len,
			     &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		/* Failed to send the request or request completed
		 * synchronously
		 */
		cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst);
	}

exit_process:
	if (cts_restore_flag)
		ctx_p->cipher_mode = DRV_CIPHER_CBC_CTS;

	if (rc != -EINPROGRESS && rc != -EBUSY) {
		kzfree(req_ctx->backup_info);
		kzfree(req_ctx->iv);
	}

	return rc;
}

static int cc_cipher_encrypt(struct skcipher_request *req)
{
	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);

	req_ctx->is_giv = false;
	req_ctx->backup_info = NULL;

	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
}

static int cc_cipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *sk_tfm = crypto_skcipher_reqtfm(req);
	struct cipher_req_ctx *req_ctx = skcipher_request_ctx(req);
	unsigned int ivsize = crypto_skcipher_ivsize(sk_tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	/*
	 * Allocate and save the last IV sized bytes of the source, which will
	 * be lost in case of in-place decryption and might be needed for CTS.
	 */
	req_ctx->backup_info = kmalloc(ivsize, flags);
	if (!req_ctx->backup_info)
		return -ENOMEM;

	scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
				 (req->cryptlen - ivsize), ivsize, 0);
	req_ctx->is_giv = false;

	return cc_cipher_process(req, DRV_CRYPTO_DIRECTION_DECRYPT);
}

/* Block cipher alg */
static const struct cc_alg_template skcipher_algs[] = {
	{
		.name = "xts(aes)",
		.driver_name = "xts-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_XTS,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "xts512(aes)",
		.driver_name = "xts-aes-du512-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_XTS,
		.flow_mode = S_DIN_to_AES,
		.data_unit = 512,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "xts4096(aes)",
		.driver_name = "xts-aes-du4096-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_XTS,
		.flow_mode = S_DIN_to_AES,
		.data_unit = 4096,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "essiv(aes)",
		.driver_name = "essiv-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_ESSIV,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "essiv512(aes)",
		.driver_name = "essiv-aes-du512-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_ESSIV,
		.flow_mode = S_DIN_to_AES,
		.data_unit = 512,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "essiv4096(aes)",
		.driver_name = "essiv-aes-du4096-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_ESSIV,
		.flow_mode = S_DIN_to_AES,
		.data_unit = 4096,
		.min_hw_rev = CC_HW_REV_712,
	},
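	/* BITLOCKER mode uses the same dual-key XEX setup as XTS; 712+ HW only */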
	{
		.name = "bitlocker(aes)",
		.driver_name = "bitlocker-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_BITLOCKER,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "bitlocker512(aes)",
		.driver_name = "bitlocker-aes-du512-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_BITLOCKER,
		.flow_mode = S_DIN_to_AES,
		.data_unit = 512,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "bitlocker4096(aes)",
		.driver_name = "bitlocker-aes-du4096-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE * 2,
			.max_keysize = AES_MAX_KEY_SIZE * 2,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_BITLOCKER,
		.flow_mode = S_DIN_to_AES,
		.data_unit = 4096,
		.min_hw_rev = CC_HW_REV_712,
	},
	{
		.name = "ecb(aes)",
		.driver_name = "ecb-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = 0,
		},
		.cipher_mode = DRV_CIPHER_ECB,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "cbc(aes)",
		.driver_name = "cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "ofb(aes)",
		.driver_name = "ofb-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_OFB,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "cts1(cbc(aes))",
		.driver_name = "cts1-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC_CTS,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
	},
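	/* CTR is a stream mode: blocksize is 1 and any request length is valid */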
	{
		.name = "ctr(aes)",
		.driver_name = "ctr-aes-ccree",
		.blocksize = 1,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "cbc(des3_ede)",
		.driver_name = "cbc-3des-ccree",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "ecb(des3_ede)",
		.driver_name = "ecb-3des-ccree",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = 0,
		},
		.cipher_mode = DRV_CIPHER_ECB,
		.flow_mode = S_DIN_to_DES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "cbc(des)",
		.driver_name = "cbc-des-ccree",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.min_hw_rev = CC_HW_REV_630,
	},
	{
		.name = "ecb(des)",
		.driver_name = "ecb-des-ccree",
		.blocksize = DES_BLOCK_SIZE,
		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.template_skcipher = {
			.setkey = cc_cipher_setkey,
			.encrypt = cc_cipher_encrypt,
			.decrypt = cc_cipher_decrypt,
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = 0,
		},
		.cipher_mode = DRV_CIPHER_ECB,
		.flow_mode = S_DIN_to_DES,
		.min_hw_rev = CC_HW_REV_630,
	},
};

static struct cc_crypto_alg *cc_create_alg(const struct cc_alg_template *tmpl,
					   struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct skcipher_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &t_alg->skcipher_alg;

	memcpy(alg, &tmpl->template_skcipher, sizeof(*alg));

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;
	alg->base.cra_blocksize = tmpl->blocksize;
	alg->base.cra_alignmask = 0;
	alg->base.cra_ctxsize = sizeof(struct cc_cipher_ctx);

	alg->base.cra_init = cc_cipher_init;
	alg->base.cra_exit = cc_cipher_exit;
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
				CRYPTO_ALG_TYPE_SKCIPHER;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->data_unit = tmpl->data_unit;

	return t_alg;
}

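/*
 * Unregister every skcipher alg registered by cc_cipher_alloc() and
 * release the per-driver cipher handle.
 */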
int cc_cipher_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_cipher_handle *cipher_handle = drvdata->cipher_handle;

	if (cipher_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &cipher_handle->alg_list,
					 entry) {
			crypto_unregister_skcipher(&t_alg->skcipher_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(cipher_handle);
		drvdata->cipher_handle = NULL;
	}
	return 0;
}

int cc_cipher_alloc(struct cc_drvdata *drvdata)
{
	struct cc_cipher_handle *cipher_handle;
	struct cc_crypto_alg *t_alg;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = -ENOMEM;
	int alg;

	cipher_handle = kmalloc(sizeof(*cipher_handle), GFP_KERNEL);
	if (!cipher_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&cipher_handle->alg_list);
	drvdata->cipher_handle = cipher_handle;

	/* Linux crypto */
	dev_dbg(dev, "Number of algorithms = %zu\n",
		ARRAY_SIZE(skcipher_algs));
	for (alg = 0; alg < ARRAY_SIZE(skcipher_algs); alg++) {
		if (skcipher_algs[alg].min_hw_rev > drvdata->hw_rev)
			continue;

		dev_dbg(dev, "creating %s\n", skcipher_algs[alg].driver_name);
		t_alg = cc_create_alg(&skcipher_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				skcipher_algs[alg].driver_name);
			goto fail0;
		}
		t_alg->drvdata = drvdata;

		dev_dbg(dev, "registering %s\n",
			skcipher_algs[alg].driver_name);
		rc = crypto_register_skcipher(&t_alg->skcipher_alg);
		dev_dbg(dev, "%s alg registration rc = %x\n",
			t_alg->skcipher_alg.base.cra_driver_name, rc);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->skcipher_alg.base.cra_driver_name);
			kfree(t_alg);
			goto fail0;
		} else {
			list_add_tail(&t_alg->entry,
				      &cipher_handle->alg_list);
			dev_dbg(dev, "Registered %s\n",
				t_alg->skcipher_alg.base.cra_driver_name);
		}
	}
	return 0;

fail0:
	cc_cipher_free(drvdata);
	return rc;
}