/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* ================= Device Structure ================== */

struct device_private iproc_priv;

/* ==================== Parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * The value of these module parameters is used to set the priority for each
 * algo type when this driver registers algos with the kernel crypto API.
 * To use a priority other than the default, set the priority in the insmod or
 * modprobe. Changing the module priority after init time has no effect.
 *
 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
 * algos, but more preferred than generic software algos.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
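
/*
 * Illustrative example (module name and values are hypothetical): to make
 * this driver's algos more preferred than the defaults, the priorities can
 * be raised when the module is loaded, e.g.
 *
 *   modprobe <spu-crypto-module> cipher_pri=300 hash_pri=300 aead_pri=300
 */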

/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
 * 0x60 - ring 0
 * 0x68 - ring 1
 * 0x70 - ring 2
 * 0x78 - ring 3
 */
char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN	iproc_priv.bcm_hdr_len

/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN	800
#define MBOX_SLEEP_MAX	1000

/**
 * select_channel() - Select a SPU channel to handle a crypto request. Selects
 * channel in round robin order.
 *
 * Return: channel index
 */
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}

/**
 * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ablkcipher request. Includes buffers
 * to catch SPU message headers and the response data.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @chunksize:	Number of bytes of response data expected
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 rx_frag_num,
			    unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		/* Add buffer to catch 260-byte SUPDT field for RC4 */
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}
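
/*
 * For reference, the receive scatterlist assembled above is, in order:
 *   SPU response header | [XTS tweak] | response data (dst sg entries) |
 *   [RC4 SUPDT] | [STAT padding] | STAT
 * where bracketed entries are present only when the corresponding condition
 * in spu_ablkcipher_rx_sg_create() applies.
 */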

/**
 * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an ablkcipher request. Includes SPU message
 * headers and the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @chunksize:	Number of bytes of request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
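
/*
 * For reference, the transmit scatterlist assembled above is, in order:
 *   BCM header + SPU request header | [XTS tweak from IV] |
 *   request data (src sg entries) | [request padding] | [TX STAT]
 */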

/**
 * handle_ablkcipher_req() - Submit as much of a block cipher request as fits
 * in a single SPU request message, starting at the current position in the
 * request data.
 * @rctx:	Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into multiple SPU messages, on the thread used to invoke
 * the response callback. When requests are broken into multiple SPU
 * messages, we assume subsequent messages depend on previous results, and
 * thus always wait for previous results before submitting the next message.
 * Because requests are submitted in lock step like this, there is no need
 * to synchronize access to request data structures.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *	   Any other value indicates an error
 */
static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req =
	    container_of(areq, struct ablkcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err = 0;
	unsigned int chunksize = 0;	/* Num bytes of request to submit */
	int remaining = 0;	/* Bytes of request still to process */
	int chunk_start;	/* Beginning of data for current SPU msg */

	/* IV or ctr value to use in this SPU msg */
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;	/* num bytes to align status field */
	u32 pad_len;		/* total length of all padding */
	bool update_key = false;
	struct brcm_message *mssg;	/* mailbox message */
	int retry_cnt = 0;

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;

	/* Count number of sg entries to be included in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);

	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting a non-first chunk. Copy last block of
		 * previous result to IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
				 rctx->iv_ctr_len);

		/* generate the next IV if possible */
		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC Decrypt: next IV is the last ciphertext block in
			 * this chunk
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * The SPU hardware increments the counter once for
			 * each AES block of 16 bytes. So update the counter
			 * for the next chunk, if there is one. Note that for
			 * this chunk, the counter has already been copied to
			 * local_iv_ctr. We can assume a block size of 16,
			 * because we only support CTR mode for AES, not for
			 * any other cipher alg.
			 */
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}
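
	/*
	 * Worked example (illustrative numbers only): for a 70000-byte AES-CTR
	 * request with a 65536-byte max_payload, this pass submits 65536 bytes
	 * and add_to_ctr() above advances the counter by 65536 / 16 = 4096
	 * blocks; the remaining 4464 bytes are submitted from the response
	 * callback.
	 */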

	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
		rx_frag_num++;
		if (chunk_start) {
			/*
			 * for non-first RC4 chunks, use SUPDT from previous
			 * response as key for this chunk.
			 */
			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_UPDT;
		} else if (!rctx->is_encrypt) {
			/*
			 * First RC4 chunk. For decrypt, key in pre-built msg
			 * header may have been changed if encrypt required
			 * multiple chunks. So revert the key to the
			 * ctx->enckey value.
			 */
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_INIT;
		}
	}

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	/*
	 * Pass SUPDT field as key. Key field in finish() call is only used
	 * when update_key has been set above for RC4. Will be ignored in
	 * all other cases.
	 */
	spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
				   ctx->spu_req_hdr_len, !(rctx->is_encrypt),
				   &cipher_parms, update_key, chunksize);

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	stat_pad_len = spu->spu_wordalign_padlen(chunksize);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0,
				     0, ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      ctx->spu_req_hdr_len);
	packet_log("payload:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		rx_frag_num++;	/* extra sg to insert tweak */

	err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize,
					  stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;

	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		tx_frag_num++;	/* extra sg to insert tweak */

	err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize,
					  pad_len);
	if (err)
		return err;

	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (unlikely(err < 0)) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	return -EINPROGRESS;
}

/**
 * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the
 * total received count for the request and updates global stats.
 * @rctx:	Crypto request context
 */
static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req = ablkcipher_request_cast(areq);
#endif
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);

	/*
	 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the
	 * encrypted tweak ("i") value; we don't count those.
	 */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload() &&
	    (payload_len >= SPU_XTS_TWEAK_SIZE))
		payload_len -= SPU_XTS_TWEAK_SIZE;

	atomic64_add(payload_len, &iproc_priv.bytes_in);

	flow_log("%s() offset: %u, bd_len: %u BD:\n",
		 __func__, rctx->total_received, payload_len);

	dump_sg(req->dst, rctx->total_received, payload_len);
	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak,
			    SPU_SUPDT_LEN);

	rctx->total_received += payload_len;
	if (rctx->total_received == rctx->total_todo) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]);
		atomic_inc(
		   &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]);
	}
}

/**
 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ahash request.
 * @mssg:	mailbox message containing the receive sg
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @digestsize: length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_rx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 rx_frag_num, unsigned int digestsize,
		       u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* Space for digest */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());
	return 0;
}

/**
 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send
 * a SPU request message for an ahash request. Includes SPU message headers and
 * the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length in bytes of SPU message header
 * @hash_carry_len: Number of bytes of data carried over from previous req
 * @new_data_len: Number of bytes of new request data
 * @pad_len:	Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ahash_tx_sg_create(struct brcm_message *mssg,
		       struct iproc_reqctx_s *rctx,
		       u8 tx_frag_num,
		       u32 spu_hdr_len,
		       unsigned int hash_carry_len,
		       unsigned int new_data_len, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	u32 datalen;		/* Number of bytes of response data expected */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (hash_carry_len)
		sg_set_buf(sg++, rctx->hash_carry, hash_carry_len);

	if (new_data_len) {
		/* Copy in each src sg entry from request, up to chunksize */
		datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, new_data_len);
		if (datalen < new_data_len) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}

	return 0;
}

/**
 * handle_ahash_req() - Process an asynchronous hash request from the crypto
 * API.
 * @rctx:	Crypto request context
 *
 * Builds a SPU request message embedded in a mailbox message and submits the
 * mailbox message on a selected mailbox channel. The SPU request message is
 * constructed as a scatterlist, including entries from the crypto API's
 * src scatterlist to avoid copying the data to be hashed. This function is
 * called either on the thread from the crypto API, or, in the case that the
 * crypto API request is too large to fit in a single SPU request message,
 * on the thread that invokes the receive callback with a response message.
 * Because some operations require the response from one chunk before the next
 * chunk can be submitted, we always wait for the response for the previous
 * chunk before submitting the next chunk. Because requests are submitted in
 * lock step like this, there is no need to synchronize access to request data
 * structures.
 *
 * Return:
 *   -EINPROGRESS: request has been submitted to SPU and response will be
 *		   returned asynchronously
 *   -EAGAIN:	   non-final request included a small amount of data, which for
 *		   efficiency we did not submit to the SPU, but instead stored
 *		   to be submitted to the SPU with the next part of the request
 *   other:	   an error code
 */
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err = 0;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int chunk_start = 0;
	u32 db_size;	/* Length of data field, incl gcm and hash padding */
	int pad_len = 0;	/* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;
	int retry_cnt = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	/*
	 * For hash algorithms, the assignment below looks a bit odd, but it's
	 * needed for the AES-XCBC and AES-CMAC hash algorithms to
	 * differentiate between 128, 192, and 256 bit key values. Based on the
	 * key value, the hash algorithm is selected. For example, for a 128
	 * bit key, the hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a
	 * multiple of a full block, then simply park the extra data and
	 * prefix it to the data for the next request.
	 */
	if (!rctx->is_final) {
		u8 *dest = rctx->hash_carry + rctx->hash_carry_len;
		u16 new_len;	/* len of data to add to hash carry */

		rem = chunksize % blocksize;	/* remainder */
		if (rem) {
			/* chunksize not a multiple of blocksize */
			chunksize -= rem;
			if (chunksize == 0) {
				/* Don't have a full block to submit to hw */
				new_len = rem - rctx->hash_carry_len;
				sg_copy_part_to_buf(req->src, dest, new_len,
						    rctx->src_sent);
				rctx->hash_carry_len = rem;
				flow_log("Exiting with hash carry len: %u\n",
					 rctx->hash_carry_len);
				packet_dump(" buf: ",
					    rctx->hash_carry,
					    rctx->hash_carry_len);
				return -EAGAIN;
			}
		}
	}
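
	/*
	 * For example (SHA-256, 64-byte block): a non-final update of 20 bytes
	 * with an empty carry is parked entirely in hash_carry and -EAGAIN is
	 * returned (treated as success by the caller); a non-final update of
	 * 100 bytes first submits the leading 64 bytes to the SPU.
	 */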

	/* if we have hash carry, then prefix it to the data in this request */
	local_nbuf = rctx->hash_carry_len;
	rctx->hash_carry_len = 0;
	if (local_nbuf)
		tx_frag_num++;
	new_data_len = chunksize - local_nbuf;

	/* Count number of sg entries to be used in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip,
				       new_data_len);

	/* AES hashing keeps key size in type field, so need to copy it here */
	if (hash_parms.alg == HASH_ALG_AES)
		hash_parms.type = cipher_parms.type;
	else
		hash_parms.type = spu->spu_hash_type(rctx->total_sent);

	digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg,
					  hash_parms.type);
	hash_parms.digestsize = digestsize;

	/* update the indexes */
	rctx->total_sent += chunksize;
	/* if you sent a prebuf then that wasn't from this req->src */
	rctx->src_sent += new_data_len;

	if ((rctx->total_sent == rctx->total_todo) && rctx->is_final)
		hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg,
							   hash_parms.mode,
							   chunksize,
							   blocksize);

	/*
	 * If a non-first chunk, then include the digest returned from the
	 * previous chunk so that hw can add to it (except for AES types).
	 */
	if ((hash_parms.type == HASH_TYPE_UPDT) &&
	    (hash_parms.alg != HASH_ALG_AES)) {
		hash_parms.key_buf = rctx->incr_hash;
		hash_parms.key_len = digestsize;
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s() final: %u nbuf: %u ",
		 __func__, rctx->is_final, local_nbuf);

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	hash_parms.prebuf_len = local_nbuf;
	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN,
					      &req_opts, &cipher_parms,
					      &hash_parms, &aead_parms,
					      new_data_len);

	if (spu_hdr_len == 0) {
		pr_err("Failed to create SPU request header\n");
		return -EFAULT;
	}

	/*
	 * Determine total length of padding required. Put all padding in one
	 * buffer.
	 */
	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}
	return -EINPROGRESS;
}

/**
 * spu_hmac_outer_hash() - Request a synchronous software computation of the
 * outer hash for an HMAC request.
 * @req:	The HMAC request from the crypto API
 * @ctx:	The session context
 *
 * Return: 0 if synchronous hash operation successful
 *	   -EINVAL if the hash algo is unrecognized
 *	   any other value indicates an error
 */
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error : unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}
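
/*
 * Note: the do_shash() calls above compute the outer half of the HMAC
 * construction, i.e. H(opad || inner digest), over the inner digest already
 * placed in req->result.
 */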

/**
 * ahash_req_done() - Process a hash result from the SPU hardware.
 * @rctx:	Crypto request context
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/* byte swap the output from the UPDT function to network byte
		 * order
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump(" digest ", req->result, ctx->digestsize);

	/* if this is an HMAC, then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump(" hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}

/**
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * invokes post processing on the result.
 * @rctx:	Crypto request context
 */
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
#endif
	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out the actual digest size
	 * here.
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}

/**
 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an AEAD request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg:	mailbox message containing the receive sg
 * @req:	Crypto API AEAD request
 * @rctx:	crypto request context
 * @rx_frag_num: number of scatterlist elements required to hold the
 *		SPU response message
 * @assoc_len:	Length of associated data included in the crypto request
 * @ret_iv_len: Length of IV returned in response
 * @resp_len:	Number of bytes of response data expected to be written to
 *		dst buffer from crypto API
 * @digestsize: Length of hash digest, in bytes
 * @stat_pad_len: Number of bytes required to pad the STAT field to
 *		a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int spu_aead_rx_sg_create(struct brcm_message *mssg,
				 struct aead_request *req,
				 struct iproc_reqctx_s *rctx,
				 u8 rx_frag_num,
				 unsigned int assoc_len,
				 u32 ret_iv_len, unsigned int resp_len,
				 unsigned int digestsize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */
	u32 assoc_buf_len;
	u8 data_padlen = 0;

	if (ctx->is_rfc4543) {
		/* RFC4543: only pad after data, not after AAD */
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       assoc_len + resp_len);
		assoc_buf_len = assoc_len;
	} else {
		data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						       resp_len);
		assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode,
							assoc_len, ret_iv_len,
							rctx->is_encrypt);
	}

	if (ctx->cipher.mode == CIPHER_MODE_CCM)
		/* ICV (after data) must be in the next 32-bit word for CCM */
		data_padlen += spu->spu_wordalign_padlen(assoc_buf_len +
							 resp_len +
							 data_padlen);

	if (data_padlen)
		/* have to catch gcm pad in separate buffer */
		rx_frag_num++;

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);

	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	if (assoc_buf_len) {
		/*
		 * Don't write directly to req->dst, because SPU may pad the
		 * assoc data in the response
		 */
		memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len);
		sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len);
	}

	if (resp_len) {
		/*
		 * Copy in each dst sg entry from request, up to chunksize.
		 * dst sg catches just the data. digest caught in separate buf.
		 */
		datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
					 rctx->dst_nents, resp_len);
		if (datalen < (resp_len)) {
			pr_err("%s(): failed to copy dst sg to mbox msg. expected len %u, datalen %u",
			       __func__, resp_len, datalen);
			return -EFAULT;
		}
	}

	/* If GCM/CCM data is padded, catch padding in separate buffer */
	if (data_padlen) {
		memset(rctx->msg_buf.a.gcmpad, 0, data_padlen);
		sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen);
	}

	/* Always catch ICV in separate buffer */
	sg_set_buf(sg++, rctx->msg_buf.digest, digestsize);

	flow_log("stat_pad_len %u\n", stat_pad_len);
	if (stat_pad_len) {
		memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);
	}

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}
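
/*
 * For reference, the receive scatterlist assembled above is, in order:
 *   SPU response header | [assoc data] | response data (dst sg entries) |
 *   [GCM/CCM padding] | ICV | [STAT padding] | STAT
 */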

/**
 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a
 * SPU request message for an AEAD request. Includes SPU message headers and
 * the request data.
 * @mssg:	mailbox message containing the transmit sg
 * @rctx:	crypto request context
 * @tx_frag_num: number of scatterlist elements required to construct the
 *		SPU request message
 * @spu_hdr_len: length of SPU message header in bytes
 * @assoc:	crypto API associated data scatterlist
 * @assoc_len:	length of associated data
 * @assoc_nents: number of scatterlist entries containing assoc data
 * @aead_iv_len: length of AEAD IV, if included
 * @chunksize:	Number of bytes of request data
 * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM.
 * @pad_len:	Number of pad bytes
 * @incl_icv:	If true, write separate ICV buffer after data and
 *		any padding
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Return:
 *   0 if successful
 *   < 0 if an error
 */
static int spu_aead_tx_sg_create(struct brcm_message *mssg,
				 struct iproc_reqctx_s *rctx,
				 u8 tx_frag_num,
				 u32 spu_hdr_len,
				 struct scatterlist *assoc,
				 unsigned int assoc_len,
				 int assoc_nents,
				 unsigned int aead_iv_len,
				 unsigned int chunksize,
				 u32 aad_pad_len, u32 pad_len, bool incl_icv)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct scatterlist *assoc_sg = assoc;
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of data to write */
	u32 written;		/* Number of bytes of data written */
	u32 assoc_offset = 0;
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.src)
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + spu_hdr_len);

	if (assoc_len) {
		/* Copy in each associated data sg entry from request */
		written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset,
					 assoc_nents, assoc_len);
		if (written < assoc_len) {
			pr_err("%s(): failed to copy assoc sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (aead_iv_len)
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len);

	if (aad_pad_len) {
		memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len);
		sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len);
	}

	datalen = chunksize;
	if ((chunksize > ctx->digestsize) && incl_icv)
		datalen -= ctx->digestsize;
	if (datalen) {
		/* For aead, a single msg should consume the entire src sg */
		written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
					 rctx->src_nents, datalen);
		if (written < datalen) {
			pr_err("%s(): failed to copy src sg to mbox msg",
			       __func__);
			return -EFAULT;
		}
	}

	if (pad_len) {
		memset(rctx->msg_buf.spu_req_pad, 0, pad_len);
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);
	}

	if (incl_icv)
		sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}
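
/*
 * For reference, the transmit scatterlist assembled above is, in order:
 *   BCM header + SPU request header | assoc data | [AEAD IV] | [AAD padding] |
 *   data (src sg entries, minus the trailing ICV when incl_icv) | [padding] |
 *   [ICV] | [TX STAT]
 */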

/**
 * handle_aead_req() - Submit a SPU request message for the next chunk of the
 * current AEAD request.
 * @rctx:	Crypto request context
 *
 * Unlike other operation types, we assume the length of the request fits in
 * a single SPU request message. aead_enqueue() makes sure this is true.
 * Comments for other op types regarding threads applies here as well.
 *
 * Unlike incremental hash ops, where the SPU returns the entire hash for
 * truncated algs like sha-224, the SPU returns just the truncated hash in
 * response to aead requests. So digestsize is always ctx->digestsize here.
 *
 * Return: -EINPROGRESS: crypto request has been accepted and result will be
 *			 returned asynchronously
 *	   Any other value indicates an error
 */
static int handle_aead_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;
	unsigned int chunksize;
	unsigned int resp_len;
	u32 spu_hdr_len;
	u32 db_size;
	u32 stat_pad_len;
	u32 pad_len;
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	int assoc_nents = 0;
	bool incl_icv = false;
	unsigned int digestsize = ctx->digestsize;
	int retry_cnt = 0;

	/* number of entries in src and dst sg. Always includes SPU msg header.
	 */
	u8 rx_frag_num = 2;	/* and STATUS */
	u8 tx_frag_num = 1;

	/* doing the whole thing at once */
	chunksize = rctx->total_todo;

	flow_log("%s: chunksize %u\n", __func__, chunksize);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.is_inbound = !(rctx->is_encrypt);
	req_opts.auth_first = ctx->auth_first;
	req_opts.is_aead = true;
	req_opts.is_esp = ctx->is_esp;

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.iv_buf = rctx->msg_buf.iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;
	hash_parms.digestsize = digestsize;

	if ((ctx->auth.alg == HASH_ALG_SHA224) &&
	    (ctx->authkeylen < SHA224_DIGEST_SIZE))
		hash_parms.key_len = SHA224_DIGEST_SIZE;

	aead_parms.assoc_size = req->assoclen;
	if (ctx->is_esp && !ctx->is_rfc4543) {
		/*
		 * An 8-byte IV is included in the assoc data in the request.
		 * SPU2 expects AAD to include just the SPI and seqno. So
		 * subtract off the IV len.
		 */
		aead_parms.assoc_size -= GCM_ESP_IV_SIZE;

		if (rctx->is_encrypt) {
			aead_parms.return_iv = true;
			aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
		}
	} else {
		aead_parms.ret_iv_len = 0;
	}
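
	/*
	 * Note (RFC 4106 ESP GCM convention): GCM_ESP_IV_SIZE is the 8-byte
	 * explicit IV carried in the packet; together with the 4-byte salt
	 * (GCM_ESP_SALT_SIZE) it forms the 12-byte GCM nonce.
	 */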

	/*
	 * Count number of sg entries from the crypto API request that are to
	 * be included in this mailbox message. For dst sg, don't count space
	 * for digest. Digest gets caught in a separate buffer and copied back
	 * to dst sg when processing response.
	 */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);
	if (aead_parms.assoc_size)
		assoc_nents = spu_sg_count(rctx->assoc, 0,
					   aead_parms.assoc_size);

	mssg = &rctx->mb_mssg;

	rctx->total_sent = chunksize;
	rctx->src_sent = chunksize;
	if (spu->spu_assoc_resp_len(ctx->cipher.mode,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len,
				    rctx->is_encrypt))
		rx_frag_num++;

	aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode,
						rctx->iv_ctr_len);

	if (ctx->auth.alg == HASH_ALG_AES)
		hash_parms.type = ctx->cipher_type;

	/* General case AAD padding (CCM and RFC4543 special cases below) */
	aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
						aead_parms.assoc_size);

	/* General case data padding (CCM decrypt special case below) */
	aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							   chunksize);

	if (ctx->cipher.mode == CIPHER_MODE_CCM) {
		/*
		 * for CCM, AAD len + 2 (rather than AAD len) needs to be
		 * 128-bit aligned
		 */
		aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + 2);

		/*
		 * And when decrypting CCM, need to pad without including
		 * size of ICV which is tacked on to end of chunk
		 */
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len =
				spu->spu_gcm_ccm_pad_len(ctx->cipher.mode,
							 chunksize - digestsize);

		/* CCM also requires software to rewrite portions of IV: */
		spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen,
				       chunksize, rctx->is_encrypt,
				       ctx->is_esp);
	}
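
	/*
	 * (The "+ 2" above reflects CCM's AAD formatting, which prefixes the
	 * AAD with a 2-byte length field for the AAD sizes supported here.)
	 */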

	if (ctx->is_rfc4543) {
		/*
		 * RFC4543: data is included in AAD, so don't pad after AAD
		 * and pad data based on both AAD + data size
		 */
		aead_parms.aad_pad_len = 0;
		if (!rctx->is_encrypt)
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize -
					digestsize);
		else
			aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(
					ctx->cipher.mode,
					aead_parms.assoc_size + chunksize);

		req_opts.is_rfc4543 = true;
	}

	if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) {
		incl_icv = true;
		tx_frag_num++;
		/* Copy ICV from end of src scatterlist to digest buf */
		sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize,
				    req->assoclen + rctx->total_sent -
				    digestsize);
	}

	atomic64_add(chunksize, &iproc_priv.bytes_out);

	flow_log("%s()-sent chunksize:%u\n", __func__, chunksize);

	/* Prepend SPU header with type 3 BCM header */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN);

	spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr +
					      BCM_HDR_LEN, &req_opts,
					      &cipher_parms, &hash_parms,
					      &aead_parms, chunksize);

	/* Determine total length of padding. Put all padding in one buffer. */
	db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0,
				   chunksize, aead_parms.aad_pad_len,
				   aead_parms.data_pad_len, 0);

	stat_pad_len = spu->spu_wordalign_padlen(db_size);

	if (stat_pad_len)
		rx_frag_num++;
	pad_len = aead_parms.data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad,
				     aead_parms.data_pad_len, 0,
				     ctx->auth.alg, ctx->auth.mode,
				     rctx->total_sent, stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	dump_sg(rctx->assoc, 0, aead_parms.assoc_size);
	packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len);
	packet_log("BD:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, chunksize);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	rx_frag_num += rctx->dst_nents;
	resp_len = chunksize;

	/*
	 * Always catch ICV in separate buffer. Have to for GCM/CCM because of
	 * padding. Have to for SHA-224 and other truncated SHAs because SPU
	 * sends entire digest back.
	 */
	rx_frag_num++;

	if (((ctx->cipher.mode == CIPHER_MODE_GCM) ||
	     (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) {
		/*
		 * Input is ciphertext plus ICV, but the ICV is not included
		 * in the output.
		 */
		resp_len -= ctx->digestsize;
		if (resp_len == 0)
			/* no rx frags to catch output data */
			rx_frag_num -= rctx->dst_nents;
	}

	err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num,
				    aead_parms.assoc_size,
				    aead_parms.ret_iv_len, resp_len, digestsize,
				    stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	tx_frag_num += assoc_nents;
	if (aead_parms.aad_pad_len)
		tx_frag_num++;
	if (aead_parms.iv_len)
		tx_frag_num++;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				    rctx->assoc, aead_parms.assoc_size,
				    assoc_nents, aead_parms.iv_len, chunksize,
				    aead_parms.aad_pad_len, pad_len, incl_icv);
	if (err)
		return err;

	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	return -EINPROGRESS;
}

/**
 * handle_aead_resp() - Process a SPU response message for an AEAD request.
 * @rctx:	Crypto request context
 */
static void handle_aead_resp(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct aead_request *req = container_of(areq,
						struct aead_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 payload_len;
	unsigned int icv_offset;
	u32 result_len;

	/* See how much data was returned */
	payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr);
	flow_log("payload_len %u\n", payload_len);

	/* only count payload */
	atomic64_add(payload_len, &iproc_priv.bytes_in);

	if (req->assoclen)
		packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad,
			    req->assoclen);

	/*
	 * Copy the ICV back to the destination
	 * buffer. In decrypt case, SPU gives us back the digest, but crypto
	 * API doesn't expect ICV in dst buffer.
	 */
	result_len = req->cryptlen;
	if (rctx->is_encrypt) {
		icv_offset = req->assoclen + rctx->total_sent;
		packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize);
		flow_log("copying ICV to dst sg at offset %u\n", icv_offset);
		sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest,
				      ctx->digestsize, icv_offset);
		result_len += ctx->digestsize;
	}

	packet_log("response data: ");
	dump_sg(req->dst, req->assoclen, result_len);

	atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]);
	if (ctx->cipher.alg == CIPHER_ALG_AES) {
		if (ctx->cipher.mode == CIPHER_MODE_CCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_CCM]);
		else if (ctx->cipher.mode == CIPHER_MODE_GCM)
			atomic_inc(&iproc_priv.aead_cnt[AES_GCM]);
		else
			atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	} else {
		atomic_inc(&iproc_priv.aead_cnt[AUTHENC]);
	}
}

/**
 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request
 * @rctx:	request context
 *
 * Mailbox scatterlists are allocated for each chunk. So free them after
 * processing each chunk.
 */
static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx)
{
	/* mailbox message used to tx request */
	struct brcm_message *mssg = &rctx->mb_mssg;

	kfree(mssg->spu.src);
	kfree(mssg->spu.dst);
	memset(mssg, 0, sizeof(struct brcm_message));
}

/**
 * finish_req() - Used to invoke the complete callback from the requester when
 * a request has been handled asynchronously.
 * @rctx:	Request context
 * @err:	Indicates whether the request was successful or not
 *
 * Ensures that cleanup has been done for the request
 */
static void finish_req(struct iproc_reqctx_s *rctx, int err)
{
	struct crypto_async_request *areq = rctx->parent;

	flow_log("%s() err:%d\n\n", __func__, err);

	/* No harm done if already called */
	spu_chunk_cleanup(rctx);

	if (areq)
		areq->complete(areq, err);
}

/**
 * spu_rx_callback() - Callback from mailbox framework with a SPU response.
1680 * @cl: mailbox client structure for SPU driver 1681 * @msg: mailbox message containing SPU response 1682 */ 1683 static void spu_rx_callback(struct mbox_client *cl, void *msg) 1684 { 1685 struct spu_hw *spu = &iproc_priv.spu; 1686 struct brcm_message *mssg = msg; 1687 struct iproc_reqctx_s *rctx; 1688 struct iproc_ctx_s *ctx; 1689 struct crypto_async_request *areq; 1690 int err = 0; 1691 1692 rctx = mssg->ctx; 1693 if (unlikely(!rctx)) { 1694 /* This is fatal */ 1695 pr_err("%s(): no request context", __func__); 1696 err = -EFAULT; 1697 goto cb_finish; 1698 } 1699 areq = rctx->parent; 1700 ctx = rctx->ctx; 1701 1702 /* process the SPU status */ 1703 err = spu->spu_status_process(rctx->msg_buf.rx_stat); 1704 if (err != 0) { 1705 if (err == SPU_INVALID_ICV) 1706 atomic_inc(&iproc_priv.bad_icv); 1707 err = -EBADMSG; 1708 goto cb_finish; 1709 } 1710 1711 /* Process the SPU response message */ 1712 switch (rctx->ctx->alg->type) { 1713 case CRYPTO_ALG_TYPE_ABLKCIPHER: 1714 handle_ablkcipher_resp(rctx); 1715 break; 1716 case CRYPTO_ALG_TYPE_AHASH: 1717 handle_ahash_resp(rctx); 1718 break; 1719 case CRYPTO_ALG_TYPE_AEAD: 1720 handle_aead_resp(rctx); 1721 break; 1722 default: 1723 err = -EINVAL; 1724 goto cb_finish; 1725 } 1726 1727 /* 1728 * If this response does not complete the request, then send the next 1729 * request chunk. 1730 */ 1731 if (rctx->total_sent < rctx->total_todo) { 1732 /* Deallocate anything specific to previous chunk */ 1733 spu_chunk_cleanup(rctx); 1734 1735 switch (rctx->ctx->alg->type) { 1736 case CRYPTO_ALG_TYPE_ABLKCIPHER: 1737 err = handle_ablkcipher_req(rctx); 1738 break; 1739 case CRYPTO_ALG_TYPE_AHASH: 1740 err = handle_ahash_req(rctx); 1741 if (err == -EAGAIN) 1742 /* 1743 * we saved data in hash carry, but tell crypto 1744 * API we successfully completed request. 1745 */ 1746 err = 0; 1747 break; 1748 case CRYPTO_ALG_TYPE_AEAD: 1749 err = handle_aead_req(rctx); 1750 break; 1751 default: 1752 err = -EINVAL; 1753 } 1754 1755 if (err == -EINPROGRESS) 1756 /* Successfully submitted request for next chunk */ 1757 return; 1758 } 1759 1760 cb_finish: 1761 finish_req(rctx, err); 1762 } 1763 1764 /* ==================== Kernel Cryptographic API ==================== */ 1765 1766 /** 1767 * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request. 1768 * @req: Crypto API request 1769 * @encrypt: true if encrypting; false if decrypting 1770 * 1771 * Return: -EINPROGRESS if request accepted and result will be returned 1772 * asynchronously 1773 * < 0 if an error 1774 */ 1775 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt) 1776 { 1777 struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req); 1778 struct iproc_ctx_s *ctx = 1779 crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 1780 int err; 1781 1782 flow_log("%s() enc:%u\n", __func__, encrypt); 1783 1784 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1785 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 1786 rctx->parent = &req->base; 1787 rctx->is_encrypt = encrypt; 1788 rctx->bd_suppress = false; 1789 rctx->total_todo = req->nbytes; 1790 rctx->src_sent = 0; 1791 rctx->total_sent = 0; 1792 rctx->total_received = 0; 1793 rctx->ctx = ctx; 1794 1795 /* Initialize current position in src and dst scatterlists */ 1796 rctx->src_sg = req->src; 1797 rctx->src_nents = 0; 1798 rctx->src_skip = 0; 1799 rctx->dst_sg = req->dst; 1800 rctx->dst_nents = 0; 1801 rctx->dst_skip = 0; 1802 1803 if (ctx->cipher.mode == CIPHER_MODE_CBC || 1804 ctx->cipher.mode == CIPHER_MODE_CTR || 1805 ctx->cipher.mode == CIPHER_MODE_OFB || 1806 ctx->cipher.mode == CIPHER_MODE_XTS || 1807 ctx->cipher.mode == CIPHER_MODE_GCM || 1808 ctx->cipher.mode == CIPHER_MODE_CCM) { 1809 rctx->iv_ctr_len = 1810 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); 1811 memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len); 1812 } else { 1813 rctx->iv_ctr_len = 0; 1814 } 1815 1816 /* Choose a SPU to process this request */ 1817 rctx->chan_idx = select_channel(); 1818 err = handle_ablkcipher_req(rctx); 1819 if (err != -EINPROGRESS) 1820 /* synchronous result */ 1821 spu_chunk_cleanup(rctx); 1822 1823 return err; 1824 } 1825 1826 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1827 unsigned int keylen) 1828 { 1829 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1830 u32 tmp[DES_EXPKEY_WORDS]; 1831 1832 if (keylen == DES_KEY_SIZE) { 1833 if (des_ekey(tmp, key) == 0) { 1834 if (crypto_ablkcipher_get_flags(cipher) & 1835 CRYPTO_TFM_REQ_WEAK_KEY) { 1836 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 1837 1838 crypto_ablkcipher_set_flags(cipher, flags); 1839 return -EINVAL; 1840 } 1841 } 1842 1843 ctx->cipher_type = CIPHER_TYPE_DES; 1844 } else { 1845 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1846 return -EINVAL; 1847 } 1848 return 0; 1849 } 1850 1851 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1852 unsigned int keylen) 1853 { 1854 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1855 1856 if (keylen == (DES_KEY_SIZE * 3)) { 1857 const u32 *K = (const u32 *)key; 1858 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 1859 1860 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 1861 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) { 1862 crypto_ablkcipher_set_flags(cipher, flags); 1863 return -EINVAL; 1864 } 1865 1866 ctx->cipher_type = CIPHER_TYPE_3DES; 1867 } else { 1868 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1869 return -EINVAL; 1870 } 1871 return 0; 1872 } 1873 1874 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1875 unsigned int keylen) 1876 { 1877 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1878 1879 if (ctx->cipher.mode == CIPHER_MODE_XTS) 1880 /* XTS includes two keys of equal length */ 1881 keylen = keylen / 2; 1882 1883 switch (keylen) { 1884 case AES_KEYSIZE_128: 1885 ctx->cipher_type = CIPHER_TYPE_AES128; 1886 break; 1887 case AES_KEYSIZE_192: 1888 ctx->cipher_type = CIPHER_TYPE_AES192; 1889 break; 1890 case AES_KEYSIZE_256: 1891 ctx->cipher_type = CIPHER_TYPE_AES256; 1892 break; 1893 default: 1894 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1895 return -EINVAL; 1896 } 1897 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && 1898 ((ctx->max_payload % AES_BLOCK_SIZE) != 0)); 1899 return 0; 1900 } 1901 1902 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1903 unsigned int keylen) 1904 { 1905 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 
1906 int i; 1907 1908 ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE; 1909 1910 ctx->enckey[0] = 0x00; /* 0x00 */ 1911 ctx->enckey[1] = 0x00; /* i */ 1912 ctx->enckey[2] = 0x00; /* 0x00 */ 1913 ctx->enckey[3] = 0x00; /* j */ 1914 for (i = 0; i < ARC4_MAX_KEY_SIZE; i++) 1915 ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen]; 1916 1917 ctx->cipher_type = CIPHER_TYPE_INIT; 1918 1919 return 0; 1920 } 1921 1922 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1923 unsigned int keylen) 1924 { 1925 struct spu_hw *spu = &iproc_priv.spu; 1926 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1927 struct spu_cipher_parms cipher_parms; 1928 u32 alloc_len = 0; 1929 int err; 1930 1931 flow_log("ablkcipher_setkey() keylen: %d\n", keylen); 1932 flow_dump(" key: ", key, keylen); 1933 1934 switch (ctx->cipher.alg) { 1935 case CIPHER_ALG_DES: 1936 err = des_setkey(cipher, key, keylen); 1937 break; 1938 case CIPHER_ALG_3DES: 1939 err = threedes_setkey(cipher, key, keylen); 1940 break; 1941 case CIPHER_ALG_AES: 1942 err = aes_setkey(cipher, key, keylen); 1943 break; 1944 case CIPHER_ALG_RC4: 1945 err = rc4_setkey(cipher, key, keylen); 1946 break; 1947 default: 1948 pr_err("%s() Error: unknown cipher alg\n", __func__); 1949 err = -EINVAL; 1950 } 1951 if (err) 1952 return err; 1953 1954 /* RC4 already populated ctx->enkey */ 1955 if (ctx->cipher.alg != CIPHER_ALG_RC4) { 1956 memcpy(ctx->enckey, key, keylen); 1957 ctx->enckeylen = keylen; 1958 } 1959 /* SPU needs XTS keys in the reverse order the crypto API presents */ 1960 if ((ctx->cipher.alg == CIPHER_ALG_AES) && 1961 (ctx->cipher.mode == CIPHER_MODE_XTS)) { 1962 unsigned int xts_keylen = keylen / 2; 1963 1964 memcpy(ctx->enckey, key + xts_keylen, xts_keylen); 1965 memcpy(ctx->enckey + xts_keylen, key, xts_keylen); 1966 } 1967 1968 if (spu->spu_type == SPU_TYPE_SPUM) 1969 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN; 1970 else if (spu->spu_type == SPU_TYPE_SPU2) 1971 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN; 1972 memset(ctx->bcm_spu_req_hdr, 0, alloc_len); 1973 cipher_parms.iv_buf = NULL; 1974 cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher); 1975 flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len); 1976 1977 cipher_parms.alg = ctx->cipher.alg; 1978 cipher_parms.mode = ctx->cipher.mode; 1979 cipher_parms.type = ctx->cipher_type; 1980 cipher_parms.key_buf = ctx->enckey; 1981 cipher_parms.key_len = ctx->enckeylen; 1982 1983 /* Prepend SPU request message with BCM header */ 1984 memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); 1985 ctx->spu_req_hdr_len = 1986 spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN, 1987 &cipher_parms); 1988 1989 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 1990 ctx->enckeylen, 1991 false); 1992 1993 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]); 1994 1995 return 0; 1996 } 1997 1998 static int ablkcipher_encrypt(struct ablkcipher_request *req) 1999 { 2000 flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes); 2001 2002 return ablkcipher_enqueue(req, true); 2003 } 2004 2005 static int ablkcipher_decrypt(struct ablkcipher_request *req) 2006 { 2007 flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes); 2008 return ablkcipher_enqueue(req, false); 2009 } 2010 2011 static int ahash_enqueue(struct ahash_request *req) 2012 { 2013 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2014 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2015 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2016 int err = 0; 2017 const char 
*alg_name; 2018 2019 flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes); 2020 2021 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2022 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 2023 rctx->parent = &req->base; 2024 rctx->ctx = ctx; 2025 rctx->bd_suppress = true; 2026 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); 2027 2028 /* Initialize position in src scatterlist */ 2029 rctx->src_sg = req->src; 2030 rctx->src_skip = 0; 2031 rctx->src_nents = 0; 2032 rctx->dst_sg = NULL; 2033 rctx->dst_skip = 0; 2034 rctx->dst_nents = 0; 2035 2036 /* SPU2 hardware does not compute hash of zero length data */ 2037 if ((rctx->is_final == 1) && (rctx->total_todo == 0) && 2038 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) { 2039 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); 2040 flow_log("Doing %sfinal %s zero-len hash request in software\n", 2041 rctx->is_final ? "" : "non-", alg_name); 2042 err = do_shash((unsigned char *)alg_name, req->result, 2043 NULL, 0, NULL, 0, ctx->authkey, 2044 ctx->authkeylen); 2045 if (err < 0) 2046 flow_log("Hash request failed with error %d\n", err); 2047 return err; 2048 } 2049 /* Choose a SPU to process this request */ 2050 rctx->chan_idx = select_channel(); 2051 2052 err = handle_ahash_req(rctx); 2053 if (err != -EINPROGRESS) 2054 /* synchronous result */ 2055 spu_chunk_cleanup(rctx); 2056 2057 if (err == -EAGAIN) 2058 /* 2059 * we saved data in hash carry, but tell crypto API 2060 * we successfully completed request. 2061 */ 2062 err = 0; 2063 2064 return err; 2065 } 2066 2067 static int __ahash_init(struct ahash_request *req) 2068 { 2069 struct spu_hw *spu = &iproc_priv.spu; 2070 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2071 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2072 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2073 2074 flow_log("%s()\n", __func__); 2075 2076 /* Initialize the context */ 2077 rctx->hash_carry_len = 0; 2078 rctx->is_final = 0; 2079 2080 rctx->total_todo = 0; 2081 rctx->src_sent = 0; 2082 rctx->total_sent = 0; 2083 rctx->total_received = 0; 2084 2085 ctx->digestsize = crypto_ahash_digestsize(tfm); 2086 /* If we add a hash whose digest is larger, catch it here. */ 2087 WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE); 2088 2089 rctx->is_sw_hmac = false; 2090 2091 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0, 2092 true); 2093 2094 return 0; 2095 } 2096 2097 /** 2098 * spu_no_incr_hash() - Determine whether incremental hashing is supported. 
2099 * @ctx: Crypto session context 2100 * 2101 * SPU-2 does not support incremental hashing (we'll have to revisit and 2102 * condition based on chip revision or device tree entry if future versions do 2103 * support incremental hash) 2104 * 2105 * SPU-M also doesn't support incremental hashing of AES-XCBC 2106 * 2107 * Return: true if incremental hashing is not supported 2108 * false otherwise 2109 */ 2110 bool spu_no_incr_hash(struct iproc_ctx_s *ctx) 2111 { 2112 struct spu_hw *spu = &iproc_priv.spu; 2113 2114 if (spu->spu_type == SPU_TYPE_SPU2) 2115 return true; 2116 2117 if ((ctx->auth.alg == HASH_ALG_AES) && 2118 (ctx->auth.mode == HASH_MODE_XCBC)) 2119 return true; 2120 2121 /* Otherwise, incremental hashing is supported */ 2122 return false; 2123 } 2124 2125 static int ahash_init(struct ahash_request *req) 2126 { 2127 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2128 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2129 const char *alg_name; 2130 struct crypto_shash *hash; 2131 int ret; 2132 gfp_t gfp; 2133 2134 if (spu_no_incr_hash(ctx)) { 2135 /* 2136 * If we get an incremental hashing request and it's not 2137 * supported by the hardware, we need to handle it in software 2138 * by calling synchronous hash functions. 2139 */ 2140 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); 2141 hash = crypto_alloc_shash(alg_name, 0, 0); 2142 if (IS_ERR(hash)) { 2143 ret = PTR_ERR(hash); 2144 goto err; 2145 } 2146 2147 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2148 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 2149 ctx->shash = kmalloc(sizeof(*ctx->shash) + 2150 crypto_shash_descsize(hash), gfp); 2151 if (!ctx->shash) { 2152 ret = -ENOMEM; 2153 goto err_hash; 2154 } 2155 ctx->shash->tfm = hash; 2156 ctx->shash->flags = 0; 2157 2158 /* Set the key using data we already have from setkey */ 2159 if (ctx->authkeylen > 0) { 2160 ret = crypto_shash_setkey(hash, ctx->authkey, 2161 ctx->authkeylen); 2162 if (ret) 2163 goto err_shash; 2164 } 2165 2166 /* Initialize hash w/ this key and other params */ 2167 ret = crypto_shash_init(ctx->shash); 2168 if (ret) 2169 goto err_shash; 2170 } else { 2171 /* Otherwise call the internal function which uses SPU hw */ 2172 ret = __ahash_init(req); 2173 } 2174 2175 return ret; 2176 2177 err_shash: 2178 kfree(ctx->shash); 2179 err_hash: 2180 crypto_free_shash(hash); 2181 err: 2182 return ret; 2183 } 2184 2185 static int __ahash_update(struct ahash_request *req) 2186 { 2187 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2188 2189 flow_log("ahash_update() nbytes:%u\n", req->nbytes); 2190 2191 if (!req->nbytes) 2192 return 0; 2193 rctx->total_todo += req->nbytes; 2194 rctx->src_sent = 0; 2195 2196 return ahash_enqueue(req); 2197 } 2198 2199 static int ahash_update(struct ahash_request *req) 2200 { 2201 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2202 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2203 u8 *tmpbuf; 2204 int ret; 2205 int nents; 2206 gfp_t gfp; 2207 2208 if (spu_no_incr_hash(ctx)) { 2209 /* 2210 * If we get an incremental hashing request and it's not 2211 * supported by the hardware, we need to handle it in software 2212 * by calling synchronous hash functions. 2213 */ 2214 if (req->src) 2215 nents = sg_nents(req->src); 2216 else 2217 return -EINVAL; 2218 2219 /* Copy data from req scatterlist to tmp buffer */ 2220 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2221 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 2222 tmpbuf = kmalloc(req->nbytes, gfp); 2223 if (!tmpbuf) 2224 return -ENOMEM; 2225 2226 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) != 2227 req->nbytes) { 2228 kfree(tmpbuf); 2229 return -EINVAL; 2230 } 2231 2232 /* Call synchronous update */ 2233 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes); 2234 kfree(tmpbuf); 2235 } else { 2236 /* Otherwise call the internal function which uses SPU hw */ 2237 ret = __ahash_update(req); 2238 } 2239 2240 return ret; 2241 } 2242 2243 static int __ahash_final(struct ahash_request *req) 2244 { 2245 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2246 2247 flow_log("ahash_final() nbytes:%u\n", req->nbytes); 2248 2249 rctx->is_final = 1; 2250 2251 return ahash_enqueue(req); 2252 } 2253 2254 static int ahash_final(struct ahash_request *req) 2255 { 2256 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2257 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2258 int ret; 2259 2260 if (spu_no_incr_hash(ctx)) { 2261 /* 2262 * If we get an incremental hashing request and it's not 2263 * supported by the hardware, we need to handle it in software 2264 * by calling synchronous hash functions. 2265 */ 2266 ret = crypto_shash_final(ctx->shash, req->result); 2267 2268 /* Done with hash, can deallocate it now */ 2269 crypto_free_shash(ctx->shash->tfm); 2270 kfree(ctx->shash); 2271 2272 } else { 2273 /* Otherwise call the internal function which uses SPU hw */ 2274 ret = __ahash_final(req); 2275 } 2276 2277 return ret; 2278 } 2279 2280 static int __ahash_finup(struct ahash_request *req) 2281 { 2282 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2283 2284 flow_log("ahash_finup() nbytes:%u\n", req->nbytes); 2285 2286 rctx->total_todo += req->nbytes; 2287 rctx->src_sent = 0; 2288 rctx->is_final = 1; 2289 2290 return ahash_enqueue(req); 2291 } 2292 2293 static int ahash_finup(struct ahash_request *req) 2294 { 2295 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2296 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2297 u8 *tmpbuf; 2298 int ret; 2299 int nents; 2300 gfp_t gfp; 2301 2302 if (spu_no_incr_hash(ctx)) { 2303 /* 2304 * If we get an incremental hashing request and it's not 2305 * supported by the hardware, we need to handle it in software 2306 * by calling synchronous hash functions. 2307 */ 2308 if (req->src) { 2309 nents = sg_nents(req->src); 2310 } else { 2311 ret = -EINVAL; 2312 goto ahash_finup_exit; 2313 } 2314 2315 /* Copy data from req scatterlist to tmp buffer */ 2316 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2317 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 2318 tmpbuf = kmalloc(req->nbytes, gfp); 2319 if (!tmpbuf) { 2320 ret = -ENOMEM; 2321 goto ahash_finup_exit; 2322 } 2323 2324 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) != 2325 req->nbytes) { 2326 ret = -EINVAL; 2327 goto ahash_finup_free; 2328 } 2329 2330 /* Call synchronous update */ 2331 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes, 2332 req->result); 2333 } else { 2334 /* Otherwise call the internal function which uses SPU hw */ 2335 return __ahash_finup(req); 2336 } 2337 ahash_finup_free: 2338 kfree(tmpbuf); 2339 2340 ahash_finup_exit: 2341 /* Done with hash, can deallocate it now */ 2342 crypto_free_shash(ctx->shash->tfm); 2343 kfree(ctx->shash); 2344 return ret; 2345 } 2346 2347 static int ahash_digest(struct ahash_request *req) 2348 { 2349 int err = 0; 2350 2351 flow_log("ahash_digest() nbytes:%u\n", req->nbytes); 2352 2353 /* whole thing at once */ 2354 err = __ahash_init(req); 2355 if (!err) 2356 err = __ahash_finup(req); 2357 2358 return err; 2359 } 2360 2361 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, 2362 unsigned int keylen) 2363 { 2364 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash); 2365 2366 flow_log("%s() ahash:%p key:%p keylen:%u\n", 2367 __func__, ahash, key, keylen); 2368 flow_dump(" key: ", key, keylen); 2369 2370 if (ctx->auth.alg == HASH_ALG_AES) { 2371 switch (keylen) { 2372 case AES_KEYSIZE_128: 2373 ctx->cipher_type = CIPHER_TYPE_AES128; 2374 break; 2375 case AES_KEYSIZE_192: 2376 ctx->cipher_type = CIPHER_TYPE_AES192; 2377 break; 2378 case AES_KEYSIZE_256: 2379 ctx->cipher_type = CIPHER_TYPE_AES256; 2380 break; 2381 default: 2382 pr_err("%s() Error: Invalid key length\n", __func__); 2383 return -EINVAL; 2384 } 2385 } else { 2386 pr_err("%s() Error: unknown hash alg\n", __func__); 2387 return -EINVAL; 2388 } 2389 memcpy(ctx->authkey, key, keylen); 2390 ctx->authkeylen = keylen; 2391 2392 return 0; 2393 } 2394 2395 static int ahash_export(struct ahash_request *req, void *out) 2396 { 2397 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2398 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out; 2399 2400 spu_exp->total_todo = rctx->total_todo; 2401 spu_exp->total_sent = rctx->total_sent; 2402 spu_exp->is_sw_hmac = rctx->is_sw_hmac; 2403 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry)); 2404 spu_exp->hash_carry_len = rctx->hash_carry_len; 2405 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash)); 2406 2407 return 0; 2408 } 2409 2410 static int ahash_import(struct ahash_request *req, const void *in) 2411 { 2412 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2413 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in; 2414 2415 rctx->total_todo = spu_exp->total_todo; 2416 rctx->total_sent = spu_exp->total_sent; 2417 rctx->is_sw_hmac = spu_exp->is_sw_hmac; 2418 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry)); 2419 rctx->hash_carry_len = spu_exp->hash_carry_len; 2420 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash)); 2421 2422 return 0; 2423 } 2424 2425 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, 2426 unsigned int keylen) 2427 { 2428 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash); 2429 unsigned int blocksize = 2430 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); 2431 unsigned int digestsize = crypto_ahash_digestsize(ahash); 2432 unsigned int index; 2433 int rc; 2434 2435 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u 
digestsz:%u\n", 2436 __func__, ahash, key, keylen, blocksize, digestsize); 2437 flow_dump(" key: ", key, keylen); 2438 2439 if (keylen > blocksize) { 2440 switch (ctx->auth.alg) { 2441 case HASH_ALG_MD5: 2442 rc = do_shash("md5", ctx->authkey, key, keylen, NULL, 2443 0, NULL, 0); 2444 break; 2445 case HASH_ALG_SHA1: 2446 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL, 2447 0, NULL, 0); 2448 break; 2449 case HASH_ALG_SHA224: 2450 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL, 2451 0, NULL, 0); 2452 break; 2453 case HASH_ALG_SHA256: 2454 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL, 2455 0, NULL, 0); 2456 break; 2457 case HASH_ALG_SHA384: 2458 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL, 2459 0, NULL, 0); 2460 break; 2461 case HASH_ALG_SHA512: 2462 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL, 2463 0, NULL, 0); 2464 break; 2465 case HASH_ALG_SHA3_224: 2466 rc = do_shash("sha3-224", ctx->authkey, key, keylen, 2467 NULL, 0, NULL, 0); 2468 break; 2469 case HASH_ALG_SHA3_256: 2470 rc = do_shash("sha3-256", ctx->authkey, key, keylen, 2471 NULL, 0, NULL, 0); 2472 break; 2473 case HASH_ALG_SHA3_384: 2474 rc = do_shash("sha3-384", ctx->authkey, key, keylen, 2475 NULL, 0, NULL, 0); 2476 break; 2477 case HASH_ALG_SHA3_512: 2478 rc = do_shash("sha3-512", ctx->authkey, key, keylen, 2479 NULL, 0, NULL, 0); 2480 break; 2481 default: 2482 pr_err("%s() Error: unknown hash alg\n", __func__); 2483 return -EINVAL; 2484 } 2485 if (rc < 0) { 2486 pr_err("%s() Error %d computing shash for %s\n", 2487 __func__, rc, hash_alg_name[ctx->auth.alg]); 2488 return rc; 2489 } 2490 ctx->authkeylen = digestsize; 2491 2492 flow_log(" keylen > digestsize... hashed\n"); 2493 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen); 2494 } else { 2495 memcpy(ctx->authkey, key, keylen); 2496 ctx->authkeylen = keylen; 2497 } 2498 2499 /* 2500 * Full HMAC operation in SPUM is not verified, 2501 * So keeping the generation of IPAD, OPAD and 2502 * outer hashing in software. 
2503 */ 2504 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) { 2505 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen); 2506 memset(ctx->ipad + ctx->authkeylen, 0, 2507 blocksize - ctx->authkeylen); 2508 ctx->authkeylen = 0; 2509 memcpy(ctx->opad, ctx->ipad, blocksize); 2510 2511 for (index = 0; index < blocksize; index++) { 2512 ctx->ipad[index] ^= HMAC_IPAD_VALUE; 2513 ctx->opad[index] ^= HMAC_OPAD_VALUE; 2514 } 2515 2516 flow_dump(" ipad: ", ctx->ipad, blocksize); 2517 flow_dump(" opad: ", ctx->opad, blocksize); 2518 } 2519 ctx->digestsize = digestsize; 2520 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]); 2521 2522 return 0; 2523 } 2524 2525 static int ahash_hmac_init(struct ahash_request *req) 2526 { 2527 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2528 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2529 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2530 unsigned int blocksize = 2531 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2532 2533 flow_log("ahash_hmac_init()\n"); 2534 2535 /* init the context as a hash */ 2536 ahash_init(req); 2537 2538 if (!spu_no_incr_hash(ctx)) { 2539 /* SPU-M can do incr hashing but needs sw for outer HMAC */ 2540 rctx->is_sw_hmac = true; 2541 ctx->auth.mode = HASH_MODE_HASH; 2542 /* start with a prepended ipad */ 2543 memcpy(rctx->hash_carry, ctx->ipad, blocksize); 2544 rctx->hash_carry_len = blocksize; 2545 rctx->total_todo += blocksize; 2546 } 2547 2548 return 0; 2549 } 2550 2551 static int ahash_hmac_update(struct ahash_request *req) 2552 { 2553 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes); 2554 2555 if (!req->nbytes) 2556 return 0; 2557 2558 return ahash_update(req); 2559 } 2560 2561 static int ahash_hmac_final(struct ahash_request *req) 2562 { 2563 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes); 2564 2565 return ahash_final(req); 2566 } 2567 2568 static int ahash_hmac_finup(struct ahash_request *req) 2569 { 2570 flow_log("ahash_hmac_finupl() nbytes:%u\n", req->nbytes); 2571 2572 return ahash_finup(req); 2573 } 2574 2575 static int ahash_hmac_digest(struct ahash_request *req) 2576 { 2577 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2578 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2579 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2580 unsigned int blocksize = 2581 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2582 2583 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes); 2584 2585 /* Perform initialization and then call finup */ 2586 __ahash_init(req); 2587 2588 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) { 2589 /* 2590 * SPU2 supports full HMAC implementation in the 2591 * hardware, need not to generate IPAD, OPAD and 2592 * outer hash in software. 2593 * Only for hash key len > hash block size, SPU2 2594 * expects to perform hashing on the key, shorten 2595 * it to digest size and feed it as hash key. 
2596 */ 2597 rctx->is_sw_hmac = false; 2598 ctx->auth.mode = HASH_MODE_HMAC; 2599 } else { 2600 rctx->is_sw_hmac = true; 2601 ctx->auth.mode = HASH_MODE_HASH; 2602 /* start with a prepended ipad */ 2603 memcpy(rctx->hash_carry, ctx->ipad, blocksize); 2604 rctx->hash_carry_len = blocksize; 2605 rctx->total_todo += blocksize; 2606 } 2607 2608 return __ahash_finup(req); 2609 } 2610 2611 /* aead helpers */ 2612 2613 static int aead_need_fallback(struct aead_request *req) 2614 { 2615 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2616 struct spu_hw *spu = &iproc_priv.spu; 2617 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2618 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead); 2619 u32 payload_len; 2620 2621 /* 2622 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext 2623 * and AAD are both 0 bytes long. So use fallback in this case. 2624 */ 2625 if (((ctx->cipher.mode == CIPHER_MODE_GCM) || 2626 (ctx->cipher.mode == CIPHER_MODE_CCM)) && 2627 (req->assoclen == 0)) { 2628 if ((rctx->is_encrypt && (req->cryptlen == 0)) || 2629 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) { 2630 flow_log("AES GCM/CCM needs fallback for 0 len req\n"); 2631 return 1; 2632 } 2633 } 2634 2635 /* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */ 2636 if ((ctx->cipher.mode == CIPHER_MODE_CCM) && 2637 (spu->spu_type == SPU_TYPE_SPUM) && 2638 (ctx->digestsize != 8) && (ctx->digestsize != 12) && 2639 (ctx->digestsize != 16)) { 2640 flow_log("%s() AES CCM needs fallback for digest size %d\n", 2641 __func__, ctx->digestsize); 2642 return 1; 2643 } 2644 2645 /* 2646 * SPU-M on NSP has an issue where AES-CCM hash is not correct 2647 * when AAD size is 0 2648 */ 2649 if ((ctx->cipher.mode == CIPHER_MODE_CCM) && 2650 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) && 2651 (req->assoclen == 0)) { 2652 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n", 2653 __func__); 2654 return 1; 2655 } 2656 2657 payload_len = req->cryptlen; 2658 if (spu->spu_type == SPU_TYPE_SPUM) 2659 payload_len += req->assoclen; 2660 2661 flow_log("%s() payload len: %u\n", __func__, payload_len); 2662 2663 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) 2664 return 0; 2665 else 2666 return payload_len > ctx->max_payload; 2667 } 2668 2669 static void aead_complete(struct crypto_async_request *areq, int err) 2670 { 2671 struct aead_request *req = 2672 container_of(areq, struct aead_request, base); 2673 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2674 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2675 2676 flow_log("%s() err:%d\n", __func__, err); 2677 2678 areq->tfm = crypto_aead_tfm(aead); 2679 2680 areq->complete = rctx->old_complete; 2681 areq->data = rctx->old_data; 2682 2683 areq->complete(areq, err); 2684 } 2685 2686 static int aead_do_fallback(struct aead_request *req, bool is_encrypt) 2687 { 2688 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2689 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 2690 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2691 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 2692 int err; 2693 u32 req_flags; 2694 2695 flow_log("%s() enc:%u\n", __func__, is_encrypt); 2696 2697 if (ctx->fallback_cipher) { 2698 /* Store the cipher tfm and then use the fallback tfm */ 2699 rctx->old_tfm = tfm; 2700 aead_request_set_tfm(req, ctx->fallback_cipher); 2701 /* 2702 * Save the callback and chain ourselves in, so we can restore 2703 * the tfm 2704 */ 2705 rctx->old_complete = req->base.complete; 2706 rctx->old_data = req->base.data; 2707 req_flags = 
aead_request_flags(req); 2708 aead_request_set_callback(req, req_flags, aead_complete, req); 2709 err = is_encrypt ? crypto_aead_encrypt(req) : 2710 crypto_aead_decrypt(req); 2711 2712 if (err == 0) { 2713 /* 2714 * fallback was synchronous (did not return 2715 * -EINPROGRESS). So restore request state here. 2716 */ 2717 aead_request_set_callback(req, req_flags, 2718 rctx->old_complete, req); 2719 req->base.data = rctx->old_data; 2720 aead_request_set_tfm(req, aead); 2721 flow_log("%s() fallback completed successfully\n\n", 2722 __func__); 2723 } 2724 } else { 2725 err = -EINVAL; 2726 } 2727 2728 return err; 2729 } 2730 2731 static int aead_enqueue(struct aead_request *req, bool is_encrypt) 2732 { 2733 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2734 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2735 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead); 2736 int err; 2737 2738 flow_log("%s() enc:%u\n", __func__, is_encrypt); 2739 2740 if (req->assoclen > MAX_ASSOC_SIZE) { 2741 pr_err 2742 ("%s() Error: associated data too long. (%u > %u bytes)\n", 2743 __func__, req->assoclen, MAX_ASSOC_SIZE); 2744 return -EINVAL; 2745 } 2746 2747 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2748 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 2749 rctx->parent = &req->base; 2750 rctx->is_encrypt = is_encrypt; 2751 rctx->bd_suppress = false; 2752 rctx->total_todo = req->cryptlen; 2753 rctx->src_sent = 0; 2754 rctx->total_sent = 0; 2755 rctx->total_received = 0; 2756 rctx->is_sw_hmac = false; 2757 rctx->ctx = ctx; 2758 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); 2759 2760 /* assoc data is at start of src sg */ 2761 rctx->assoc = req->src; 2762 2763 /* 2764 * Init current position in src scatterlist to be after assoc data. 2765 * src_skip set to buffer offset where data begins. (Assoc data could 2766 * end in the middle of a buffer.) 2767 */ 2768 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg, 2769 &rctx->src_skip) < 0) { 2770 pr_err("%s() Error: Unable to find start of src data\n", 2771 __func__); 2772 return -EINVAL; 2773 } 2774 2775 rctx->src_nents = 0; 2776 rctx->dst_nents = 0; 2777 if (req->dst == req->src) { 2778 rctx->dst_sg = rctx->src_sg; 2779 rctx->dst_skip = rctx->src_skip; 2780 } else { 2781 /* 2782 * Expect req->dst to have room for assoc data followed by 2783 * output data and ICV, if encrypt. So initialize dst_sg 2784 * to point beyond assoc len offset. 
2785 */ 2786 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg, 2787 &rctx->dst_skip) < 0) { 2788 pr_err("%s() Error: Unable to find start of dst data\n", 2789 __func__); 2790 return -EINVAL; 2791 } 2792 } 2793 2794 if (ctx->cipher.mode == CIPHER_MODE_CBC || 2795 ctx->cipher.mode == CIPHER_MODE_CTR || 2796 ctx->cipher.mode == CIPHER_MODE_OFB || 2797 ctx->cipher.mode == CIPHER_MODE_XTS || 2798 ctx->cipher.mode == CIPHER_MODE_GCM) { 2799 rctx->iv_ctr_len = 2800 ctx->salt_len + 2801 crypto_aead_ivsize(crypto_aead_reqtfm(req)); 2802 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) { 2803 rctx->iv_ctr_len = CCM_AES_IV_SIZE; 2804 } else { 2805 rctx->iv_ctr_len = 0; 2806 } 2807 2808 rctx->hash_carry_len = 0; 2809 2810 flow_log(" src sg: %p\n", req->src); 2811 flow_log(" rctx->src_sg: %p, src_skip %u\n", 2812 rctx->src_sg, rctx->src_skip); 2813 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen); 2814 flow_log(" dst sg: %p\n", req->dst); 2815 flow_log(" rctx->dst_sg: %p, dst_skip %u\n", 2816 rctx->dst_sg, rctx->dst_skip); 2817 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len); 2818 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len); 2819 flow_log(" authkeylen:%u\n", ctx->authkeylen); 2820 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no"); 2821 2822 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) 2823 flow_log(" max_payload infinite"); 2824 else 2825 flow_log(" max_payload: %u\n", ctx->max_payload); 2826 2827 if (unlikely(aead_need_fallback(req))) 2828 return aead_do_fallback(req, is_encrypt); 2829 2830 /* 2831 * Do memory allocations for request after fallback check, because if we 2832 * do fallback, we won't call finish_req() to dealloc. 2833 */ 2834 if (rctx->iv_ctr_len) { 2835 if (ctx->salt_len) 2836 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset, 2837 ctx->salt, ctx->salt_len); 2838 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len, 2839 req->iv, 2840 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset); 2841 } 2842 2843 rctx->chan_idx = select_channel(); 2844 err = handle_aead_req(rctx); 2845 if (err != -EINPROGRESS) 2846 /* synchronous result */ 2847 spu_chunk_cleanup(rctx); 2848 2849 return err; 2850 } 2851 2852 static int aead_authenc_setkey(struct crypto_aead *cipher, 2853 const u8 *key, unsigned int keylen) 2854 { 2855 struct spu_hw *spu = &iproc_priv.spu; 2856 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2857 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2858 struct rtattr *rta = (void *)key; 2859 struct crypto_authenc_key_param *param; 2860 const u8 *origkey = key; 2861 const unsigned int origkeylen = keylen; 2862 2863 int ret = 0; 2864 2865 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2866 keylen); 2867 flow_dump(" key: ", key, keylen); 2868 2869 if (!RTA_OK(rta, keylen)) 2870 goto badkey; 2871 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 2872 goto badkey; 2873 if (RTA_PAYLOAD(rta) < sizeof(*param)) 2874 goto badkey; 2875 2876 param = RTA_DATA(rta); 2877 ctx->enckeylen = be32_to_cpu(param->enckeylen); 2878 2879 key += RTA_ALIGN(rta->rta_len); 2880 keylen -= RTA_ALIGN(rta->rta_len); 2881 2882 if (keylen < ctx->enckeylen) 2883 goto badkey; 2884 if (ctx->enckeylen > MAX_KEY_SIZE) 2885 goto badkey; 2886 2887 ctx->authkeylen = keylen - ctx->enckeylen; 2888 2889 if (ctx->authkeylen > MAX_KEY_SIZE) 2890 goto badkey; 2891 2892 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2893 /* May end up padding auth key. So make sure it's zeroed. 
*/ 2894 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2895 memcpy(ctx->authkey, key, ctx->authkeylen); 2896 2897 switch (ctx->alg->cipher_info.alg) { 2898 case CIPHER_ALG_DES: 2899 if (ctx->enckeylen == DES_KEY_SIZE) { 2900 u32 tmp[DES_EXPKEY_WORDS]; 2901 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2902 2903 if (des_ekey(tmp, key) == 0) { 2904 if (crypto_aead_get_flags(cipher) & 2905 CRYPTO_TFM_REQ_WEAK_KEY) { 2906 crypto_aead_set_flags(cipher, flags); 2907 return -EINVAL; 2908 } 2909 } 2910 2911 ctx->cipher_type = CIPHER_TYPE_DES; 2912 } else { 2913 goto badkey; 2914 } 2915 break; 2916 case CIPHER_ALG_3DES: 2917 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2918 const u32 *K = (const u32 *)key; 2919 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2920 2921 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 2922 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) { 2923 crypto_aead_set_flags(cipher, flags); 2924 return -EINVAL; 2925 } 2926 2927 ctx->cipher_type = CIPHER_TYPE_3DES; 2928 } else { 2929 crypto_aead_set_flags(cipher, 2930 CRYPTO_TFM_RES_BAD_KEY_LEN); 2931 return -EINVAL; 2932 } 2933 break; 2934 case CIPHER_ALG_AES: 2935 switch (ctx->enckeylen) { 2936 case AES_KEYSIZE_128: 2937 ctx->cipher_type = CIPHER_TYPE_AES128; 2938 break; 2939 case AES_KEYSIZE_192: 2940 ctx->cipher_type = CIPHER_TYPE_AES192; 2941 break; 2942 case AES_KEYSIZE_256: 2943 ctx->cipher_type = CIPHER_TYPE_AES256; 2944 break; 2945 default: 2946 goto badkey; 2947 } 2948 break; 2949 case CIPHER_ALG_RC4: 2950 ctx->cipher_type = CIPHER_TYPE_INIT; 2951 break; 2952 default: 2953 pr_err("%s() Error: Unknown cipher alg\n", __func__); 2954 return -EINVAL; 2955 } 2956 2957 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, 2958 ctx->authkeylen); 2959 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); 2960 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen); 2961 2962 /* setkey the fallback just in case we needto use it */ 2963 if (ctx->fallback_cipher) { 2964 flow_log(" running fallback setkey()\n"); 2965 2966 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2967 ctx->fallback_cipher->base.crt_flags |= 2968 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2969 ret = 2970 crypto_aead_setkey(ctx->fallback_cipher, origkey, 2971 origkeylen); 2972 if (ret) { 2973 flow_log(" fallback setkey() returned:%d\n", ret); 2974 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 2975 tfm->crt_flags |= 2976 (ctx->fallback_cipher->base.crt_flags & 2977 CRYPTO_TFM_RES_MASK); 2978 } 2979 } 2980 2981 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 2982 ctx->enckeylen, 2983 false); 2984 2985 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]); 2986 2987 return ret; 2988 2989 badkey: 2990 ctx->enckeylen = 0; 2991 ctx->authkeylen = 0; 2992 ctx->digestsize = 0; 2993 2994 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 2995 return -EINVAL; 2996 } 2997 2998 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, 2999 const u8 *key, unsigned int keylen) 3000 { 3001 struct spu_hw *spu = &iproc_priv.spu; 3002 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3003 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 3004 3005 int ret = 0; 3006 3007 flow_log("%s() keylen:%u\n", __func__, keylen); 3008 flow_dump(" key: ", key, keylen); 3009 3010 if (!ctx->is_esp) 3011 ctx->digestsize = keylen; 3012 3013 ctx->enckeylen = keylen; 3014 ctx->authkeylen = 0; 3015 memcpy(ctx->enckey, key, ctx->enckeylen); 3016 3017 switch (ctx->enckeylen) { 3018 case AES_KEYSIZE_128: 3019 ctx->cipher_type = CIPHER_TYPE_AES128; 3020 break; 3021 case AES_KEYSIZE_192: 3022 ctx->cipher_type = 
CIPHER_TYPE_AES192; 3023 break; 3024 case AES_KEYSIZE_256: 3025 ctx->cipher_type = CIPHER_TYPE_AES256; 3026 break; 3027 default: 3028 goto badkey; 3029 } 3030 3031 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, 3032 ctx->authkeylen); 3033 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); 3034 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen); 3035 3036 /* setkey the fallback just in case we need to use it */ 3037 if (ctx->fallback_cipher) { 3038 flow_log(" running fallback setkey()\n"); 3039 3040 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 3041 ctx->fallback_cipher->base.crt_flags |= 3042 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 3043 ret = crypto_aead_setkey(ctx->fallback_cipher, key, 3044 keylen + ctx->salt_len); 3045 if (ret) { 3046 flow_log(" fallback setkey() returned:%d\n", ret); 3047 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 3048 tfm->crt_flags |= 3049 (ctx->fallback_cipher->base.crt_flags & 3050 CRYPTO_TFM_RES_MASK); 3051 } 3052 } 3053 3054 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 3055 ctx->enckeylen, 3056 false); 3057 3058 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]); 3059 3060 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, 3061 ctx->authkeylen); 3062 3063 return ret; 3064 3065 badkey: 3066 ctx->enckeylen = 0; 3067 ctx->authkeylen = 0; 3068 ctx->digestsize = 0; 3069 3070 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 3071 return -EINVAL; 3072 } 3073 3074 /** 3075 * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES. 3076 * @cipher: AEAD structure 3077 * @key: Key followed by 4 bytes of salt 3078 * @keylen: Length of key plus salt, in bytes 3079 * 3080 * Extracts salt from key and stores it to be prepended to IV on each request. 3081 * Digest is always 16 bytes 3082 * 3083 * Return: Value from generic gcm setkey. 3084 */ 3085 static int aead_gcm_esp_setkey(struct crypto_aead *cipher, 3086 const u8 *key, unsigned int keylen) 3087 { 3088 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3089 3090 flow_log("%s\n", __func__); 3091 ctx->salt_len = GCM_ESP_SALT_SIZE; 3092 ctx->salt_offset = GCM_ESP_SALT_OFFSET; 3093 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); 3094 keylen -= GCM_ESP_SALT_SIZE; 3095 ctx->digestsize = GCM_ESP_DIGESTSIZE; 3096 ctx->is_esp = true; 3097 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE); 3098 3099 return aead_gcm_ccm_setkey(cipher, key, keylen); 3100 } 3101 3102 /** 3103 * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC. 3104 * cipher: AEAD structure 3105 * key: Key followed by 4 bytes of salt 3106 * keylen: Length of key plus salt, in bytes 3107 * 3108 * Extracts salt from key and stores it to be prepended to IV on each request. 3109 * Digest is always 16 bytes 3110 * 3111 * Return: Value from generic gcm setkey. 
3112 */ 3113 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, 3114 const u8 *key, unsigned int keylen) 3115 { 3116 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3117 3118 flow_log("%s\n", __func__); 3119 ctx->salt_len = GCM_ESP_SALT_SIZE; 3120 ctx->salt_offset = GCM_ESP_SALT_OFFSET; 3121 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); 3122 keylen -= GCM_ESP_SALT_SIZE; 3123 ctx->digestsize = GCM_ESP_DIGESTSIZE; 3124 ctx->is_esp = true; 3125 ctx->is_rfc4543 = true; 3126 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE); 3127 3128 return aead_gcm_ccm_setkey(cipher, key, keylen); 3129 } 3130 3131 /** 3132 * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES. 3133 * @cipher: AEAD structure 3134 * @key: Key followed by 4 bytes of salt 3135 * @keylen: Length of key plus salt, in bytes 3136 * 3137 * Extracts salt from key and stores it to be prepended to IV on each request. 3138 * Digest is always 16 bytes 3139 * 3140 * Return: Value from generic ccm setkey. 3141 */ 3142 static int aead_ccm_esp_setkey(struct crypto_aead *cipher, 3143 const u8 *key, unsigned int keylen) 3144 { 3145 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3146 3147 flow_log("%s\n", __func__); 3148 ctx->salt_len = CCM_ESP_SALT_SIZE; 3149 ctx->salt_offset = CCM_ESP_SALT_OFFSET; 3150 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); 3151 keylen -= CCM_ESP_SALT_SIZE; 3152 ctx->is_esp = true; 3153 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE); 3154 3155 return aead_gcm_ccm_setkey(cipher, key, keylen); 3156 } 3157 3158 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize) 3159 { 3160 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3161 int ret = 0; 3162 3163 flow_log("%s() authkeylen:%u authsize:%u\n", 3164 __func__, ctx->authkeylen, authsize); 3165 3166 ctx->digestsize = authsize; 3167 3168 /* setkey the fallback just in case we needto use it */ 3169 if (ctx->fallback_cipher) { 3170 flow_log(" running fallback setauth()\n"); 3171 3172 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize); 3173 if (ret) 3174 flow_log(" fallback setauth() returned:%d\n", ret); 3175 } 3176 3177 return ret; 3178 } 3179 3180 static int aead_encrypt(struct aead_request *req) 3181 { 3182 flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen, 3183 req->cryptlen); 3184 dump_sg(req->src, 0, req->cryptlen + req->assoclen); 3185 flow_log(" assoc_len:%u\n", req->assoclen); 3186 3187 return aead_enqueue(req, true); 3188 } 3189 3190 static int aead_decrypt(struct aead_request *req) 3191 { 3192 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen); 3193 dump_sg(req->src, 0, req->cryptlen + req->assoclen); 3194 flow_log(" assoc_len:%u\n", req->assoclen); 3195 3196 return aead_enqueue(req, false); 3197 } 3198 3199 /* ==================== Supported Cipher Algorithms ==================== */ 3200 3201 static struct iproc_alg_s driver_algs[] = { 3202 { 3203 .type = CRYPTO_ALG_TYPE_AEAD, 3204 .alg.aead = { 3205 .base = { 3206 .cra_name = "gcm(aes)", 3207 .cra_driver_name = "gcm-aes-iproc", 3208 .cra_blocksize = AES_BLOCK_SIZE, 3209 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3210 }, 3211 .setkey = aead_gcm_ccm_setkey, 3212 .ivsize = GCM_AES_IV_SIZE, 3213 .maxauthsize = AES_BLOCK_SIZE, 3214 }, 3215 .cipher_info = { 3216 .alg = CIPHER_ALG_AES, 3217 .mode = CIPHER_MODE_GCM, 3218 }, 3219 .auth_info = { 3220 .alg = HASH_ALG_AES, 3221 .mode = HASH_MODE_GCM, 3222 }, 3223 .auth_first = 0, 3224 }, 3225 { 3226 .type = CRYPTO_ALG_TYPE_AEAD, 3227 
.alg.aead = { 3228 .base = { 3229 .cra_name = "ccm(aes)", 3230 .cra_driver_name = "ccm-aes-iproc", 3231 .cra_blocksize = AES_BLOCK_SIZE, 3232 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3233 }, 3234 .setkey = aead_gcm_ccm_setkey, 3235 .ivsize = CCM_AES_IV_SIZE, 3236 .maxauthsize = AES_BLOCK_SIZE, 3237 }, 3238 .cipher_info = { 3239 .alg = CIPHER_ALG_AES, 3240 .mode = CIPHER_MODE_CCM, 3241 }, 3242 .auth_info = { 3243 .alg = HASH_ALG_AES, 3244 .mode = HASH_MODE_CCM, 3245 }, 3246 .auth_first = 0, 3247 }, 3248 { 3249 .type = CRYPTO_ALG_TYPE_AEAD, 3250 .alg.aead = { 3251 .base = { 3252 .cra_name = "rfc4106(gcm(aes))", 3253 .cra_driver_name = "gcm-aes-esp-iproc", 3254 .cra_blocksize = AES_BLOCK_SIZE, 3255 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3256 }, 3257 .setkey = aead_gcm_esp_setkey, 3258 .ivsize = GCM_ESP_IV_SIZE, 3259 .maxauthsize = AES_BLOCK_SIZE, 3260 }, 3261 .cipher_info = { 3262 .alg = CIPHER_ALG_AES, 3263 .mode = CIPHER_MODE_GCM, 3264 }, 3265 .auth_info = { 3266 .alg = HASH_ALG_AES, 3267 .mode = HASH_MODE_GCM, 3268 }, 3269 .auth_first = 0, 3270 }, 3271 { 3272 .type = CRYPTO_ALG_TYPE_AEAD, 3273 .alg.aead = { 3274 .base = { 3275 .cra_name = "rfc4309(ccm(aes))", 3276 .cra_driver_name = "ccm-aes-esp-iproc", 3277 .cra_blocksize = AES_BLOCK_SIZE, 3278 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3279 }, 3280 .setkey = aead_ccm_esp_setkey, 3281 .ivsize = CCM_AES_IV_SIZE, 3282 .maxauthsize = AES_BLOCK_SIZE, 3283 }, 3284 .cipher_info = { 3285 .alg = CIPHER_ALG_AES, 3286 .mode = CIPHER_MODE_CCM, 3287 }, 3288 .auth_info = { 3289 .alg = HASH_ALG_AES, 3290 .mode = HASH_MODE_CCM, 3291 }, 3292 .auth_first = 0, 3293 }, 3294 { 3295 .type = CRYPTO_ALG_TYPE_AEAD, 3296 .alg.aead = { 3297 .base = { 3298 .cra_name = "rfc4543(gcm(aes))", 3299 .cra_driver_name = "gmac-aes-esp-iproc", 3300 .cra_blocksize = AES_BLOCK_SIZE, 3301 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3302 }, 3303 .setkey = rfc4543_gcm_esp_setkey, 3304 .ivsize = GCM_ESP_IV_SIZE, 3305 .maxauthsize = AES_BLOCK_SIZE, 3306 }, 3307 .cipher_info = { 3308 .alg = CIPHER_ALG_AES, 3309 .mode = CIPHER_MODE_GCM, 3310 }, 3311 .auth_info = { 3312 .alg = HASH_ALG_AES, 3313 .mode = HASH_MODE_GCM, 3314 }, 3315 .auth_first = 0, 3316 }, 3317 { 3318 .type = CRYPTO_ALG_TYPE_AEAD, 3319 .alg.aead = { 3320 .base = { 3321 .cra_name = "authenc(hmac(md5),cbc(aes))", 3322 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc", 3323 .cra_blocksize = AES_BLOCK_SIZE, 3324 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3325 }, 3326 .setkey = aead_authenc_setkey, 3327 .ivsize = AES_BLOCK_SIZE, 3328 .maxauthsize = MD5_DIGEST_SIZE, 3329 }, 3330 .cipher_info = { 3331 .alg = CIPHER_ALG_AES, 3332 .mode = CIPHER_MODE_CBC, 3333 }, 3334 .auth_info = { 3335 .alg = HASH_ALG_MD5, 3336 .mode = HASH_MODE_HMAC, 3337 }, 3338 .auth_first = 0, 3339 }, 3340 { 3341 .type = CRYPTO_ALG_TYPE_AEAD, 3342 .alg.aead = { 3343 .base = { 3344 .cra_name = "authenc(hmac(sha1),cbc(aes))", 3345 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc", 3346 .cra_blocksize = AES_BLOCK_SIZE, 3347 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3348 }, 3349 .setkey = aead_authenc_setkey, 3350 .ivsize = AES_BLOCK_SIZE, 3351 .maxauthsize = SHA1_DIGEST_SIZE, 3352 }, 3353 .cipher_info = { 3354 .alg = CIPHER_ALG_AES, 3355 .mode = CIPHER_MODE_CBC, 3356 }, 3357 .auth_info = { 3358 .alg = HASH_ALG_SHA1, 3359 .mode = HASH_MODE_HMAC, 3360 }, 3361 .auth_first = 0, 3362 }, 3363 { 3364 .type = CRYPTO_ALG_TYPE_AEAD, 3365 .alg.aead = { 3366 .base = { 3367 .cra_name = "authenc(hmac(sha256),cbc(aes))", 3368 .cra_driver_name = 
"authenc-hmac-sha256-cbc-aes-iproc", 3369 .cra_blocksize = AES_BLOCK_SIZE, 3370 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3371 }, 3372 .setkey = aead_authenc_setkey, 3373 .ivsize = AES_BLOCK_SIZE, 3374 .maxauthsize = SHA256_DIGEST_SIZE, 3375 }, 3376 .cipher_info = { 3377 .alg = CIPHER_ALG_AES, 3378 .mode = CIPHER_MODE_CBC, 3379 }, 3380 .auth_info = { 3381 .alg = HASH_ALG_SHA256, 3382 .mode = HASH_MODE_HMAC, 3383 }, 3384 .auth_first = 0, 3385 }, 3386 { 3387 .type = CRYPTO_ALG_TYPE_AEAD, 3388 .alg.aead = { 3389 .base = { 3390 .cra_name = "authenc(hmac(md5),cbc(des))", 3391 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc", 3392 .cra_blocksize = DES_BLOCK_SIZE, 3393 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3394 }, 3395 .setkey = aead_authenc_setkey, 3396 .ivsize = DES_BLOCK_SIZE, 3397 .maxauthsize = MD5_DIGEST_SIZE, 3398 }, 3399 .cipher_info = { 3400 .alg = CIPHER_ALG_DES, 3401 .mode = CIPHER_MODE_CBC, 3402 }, 3403 .auth_info = { 3404 .alg = HASH_ALG_MD5, 3405 .mode = HASH_MODE_HMAC, 3406 }, 3407 .auth_first = 0, 3408 }, 3409 { 3410 .type = CRYPTO_ALG_TYPE_AEAD, 3411 .alg.aead = { 3412 .base = { 3413 .cra_name = "authenc(hmac(sha1),cbc(des))", 3414 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc", 3415 .cra_blocksize = DES_BLOCK_SIZE, 3416 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3417 }, 3418 .setkey = aead_authenc_setkey, 3419 .ivsize = DES_BLOCK_SIZE, 3420 .maxauthsize = SHA1_DIGEST_SIZE, 3421 }, 3422 .cipher_info = { 3423 .alg = CIPHER_ALG_DES, 3424 .mode = CIPHER_MODE_CBC, 3425 }, 3426 .auth_info = { 3427 .alg = HASH_ALG_SHA1, 3428 .mode = HASH_MODE_HMAC, 3429 }, 3430 .auth_first = 0, 3431 }, 3432 { 3433 .type = CRYPTO_ALG_TYPE_AEAD, 3434 .alg.aead = { 3435 .base = { 3436 .cra_name = "authenc(hmac(sha224),cbc(des))", 3437 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc", 3438 .cra_blocksize = DES_BLOCK_SIZE, 3439 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3440 }, 3441 .setkey = aead_authenc_setkey, 3442 .ivsize = DES_BLOCK_SIZE, 3443 .maxauthsize = SHA224_DIGEST_SIZE, 3444 }, 3445 .cipher_info = { 3446 .alg = CIPHER_ALG_DES, 3447 .mode = CIPHER_MODE_CBC, 3448 }, 3449 .auth_info = { 3450 .alg = HASH_ALG_SHA224, 3451 .mode = HASH_MODE_HMAC, 3452 }, 3453 .auth_first = 0, 3454 }, 3455 { 3456 .type = CRYPTO_ALG_TYPE_AEAD, 3457 .alg.aead = { 3458 .base = { 3459 .cra_name = "authenc(hmac(sha256),cbc(des))", 3460 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc", 3461 .cra_blocksize = DES_BLOCK_SIZE, 3462 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3463 }, 3464 .setkey = aead_authenc_setkey, 3465 .ivsize = DES_BLOCK_SIZE, 3466 .maxauthsize = SHA256_DIGEST_SIZE, 3467 }, 3468 .cipher_info = { 3469 .alg = CIPHER_ALG_DES, 3470 .mode = CIPHER_MODE_CBC, 3471 }, 3472 .auth_info = { 3473 .alg = HASH_ALG_SHA256, 3474 .mode = HASH_MODE_HMAC, 3475 }, 3476 .auth_first = 0, 3477 }, 3478 { 3479 .type = CRYPTO_ALG_TYPE_AEAD, 3480 .alg.aead = { 3481 .base = { 3482 .cra_name = "authenc(hmac(sha384),cbc(des))", 3483 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc", 3484 .cra_blocksize = DES_BLOCK_SIZE, 3485 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3486 }, 3487 .setkey = aead_authenc_setkey, 3488 .ivsize = DES_BLOCK_SIZE, 3489 .maxauthsize = SHA384_DIGEST_SIZE, 3490 }, 3491 .cipher_info = { 3492 .alg = CIPHER_ALG_DES, 3493 .mode = CIPHER_MODE_CBC, 3494 }, 3495 .auth_info = { 3496 .alg = HASH_ALG_SHA384, 3497 .mode = HASH_MODE_HMAC, 3498 }, 3499 .auth_first = 0, 3500 }, 3501 { 3502 .type = 
CRYPTO_ALG_TYPE_AEAD, 3503 .alg.aead = { 3504 .base = { 3505 .cra_name = "authenc(hmac(sha512),cbc(des))", 3506 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc", 3507 .cra_blocksize = DES_BLOCK_SIZE, 3508 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3509 }, 3510 .setkey = aead_authenc_setkey, 3511 .ivsize = DES_BLOCK_SIZE, 3512 .maxauthsize = SHA512_DIGEST_SIZE, 3513 }, 3514 .cipher_info = { 3515 .alg = CIPHER_ALG_DES, 3516 .mode = CIPHER_MODE_CBC, 3517 }, 3518 .auth_info = { 3519 .alg = HASH_ALG_SHA512, 3520 .mode = HASH_MODE_HMAC, 3521 }, 3522 .auth_first = 0, 3523 }, 3524 { 3525 .type = CRYPTO_ALG_TYPE_AEAD, 3526 .alg.aead = { 3527 .base = { 3528 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 3529 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc", 3530 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3531 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3532 }, 3533 .setkey = aead_authenc_setkey, 3534 .ivsize = DES3_EDE_BLOCK_SIZE, 3535 .maxauthsize = MD5_DIGEST_SIZE, 3536 }, 3537 .cipher_info = { 3538 .alg = CIPHER_ALG_3DES, 3539 .mode = CIPHER_MODE_CBC, 3540 }, 3541 .auth_info = { 3542 .alg = HASH_ALG_MD5, 3543 .mode = HASH_MODE_HMAC, 3544 }, 3545 .auth_first = 0, 3546 }, 3547 { 3548 .type = CRYPTO_ALG_TYPE_AEAD, 3549 .alg.aead = { 3550 .base = { 3551 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", 3552 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc", 3553 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3554 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3555 }, 3556 .setkey = aead_authenc_setkey, 3557 .ivsize = DES3_EDE_BLOCK_SIZE, 3558 .maxauthsize = SHA1_DIGEST_SIZE, 3559 }, 3560 .cipher_info = { 3561 .alg = CIPHER_ALG_3DES, 3562 .mode = CIPHER_MODE_CBC, 3563 }, 3564 .auth_info = { 3565 .alg = HASH_ALG_SHA1, 3566 .mode = HASH_MODE_HMAC, 3567 }, 3568 .auth_first = 0, 3569 }, 3570 { 3571 .type = CRYPTO_ALG_TYPE_AEAD, 3572 .alg.aead = { 3573 .base = { 3574 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", 3575 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc", 3576 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3577 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3578 }, 3579 .setkey = aead_authenc_setkey, 3580 .ivsize = DES3_EDE_BLOCK_SIZE, 3581 .maxauthsize = SHA224_DIGEST_SIZE, 3582 }, 3583 .cipher_info = { 3584 .alg = CIPHER_ALG_3DES, 3585 .mode = CIPHER_MODE_CBC, 3586 }, 3587 .auth_info = { 3588 .alg = HASH_ALG_SHA224, 3589 .mode = HASH_MODE_HMAC, 3590 }, 3591 .auth_first = 0, 3592 }, 3593 { 3594 .type = CRYPTO_ALG_TYPE_AEAD, 3595 .alg.aead = { 3596 .base = { 3597 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", 3598 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc", 3599 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3600 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3601 }, 3602 .setkey = aead_authenc_setkey, 3603 .ivsize = DES3_EDE_BLOCK_SIZE, 3604 .maxauthsize = SHA256_DIGEST_SIZE, 3605 }, 3606 .cipher_info = { 3607 .alg = CIPHER_ALG_3DES, 3608 .mode = CIPHER_MODE_CBC, 3609 }, 3610 .auth_info = { 3611 .alg = HASH_ALG_SHA256, 3612 .mode = HASH_MODE_HMAC, 3613 }, 3614 .auth_first = 0, 3615 }, 3616 { 3617 .type = CRYPTO_ALG_TYPE_AEAD, 3618 .alg.aead = { 3619 .base = { 3620 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", 3621 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc", 3622 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3623 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3624 }, 3625 .setkey = aead_authenc_setkey, 3626 .ivsize = DES3_EDE_BLOCK_SIZE, 3627 .maxauthsize = SHA384_DIGEST_SIZE, 3628 }, 3629 .cipher_info = { 
3630 .alg = CIPHER_ALG_3DES, 3631 .mode = CIPHER_MODE_CBC, 3632 }, 3633 .auth_info = { 3634 .alg = HASH_ALG_SHA384, 3635 .mode = HASH_MODE_HMAC, 3636 }, 3637 .auth_first = 0, 3638 }, 3639 { 3640 .type = CRYPTO_ALG_TYPE_AEAD, 3641 .alg.aead = { 3642 .base = { 3643 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", 3644 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc", 3645 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3646 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3647 }, 3648 .setkey = aead_authenc_setkey, 3649 .ivsize = DES3_EDE_BLOCK_SIZE, 3650 .maxauthsize = SHA512_DIGEST_SIZE, 3651 }, 3652 .cipher_info = { 3653 .alg = CIPHER_ALG_3DES, 3654 .mode = CIPHER_MODE_CBC, 3655 }, 3656 .auth_info = { 3657 .alg = HASH_ALG_SHA512, 3658 .mode = HASH_MODE_HMAC, 3659 }, 3660 .auth_first = 0, 3661 }, 3662 3663 /* ABLKCIPHER algorithms. */ 3664 { 3665 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3666 .alg.crypto = { 3667 .cra_name = "ecb(arc4)", 3668 .cra_driver_name = "ecb-arc4-iproc", 3669 .cra_blocksize = ARC4_BLOCK_SIZE, 3670 .cra_ablkcipher = { 3671 .min_keysize = ARC4_MIN_KEY_SIZE, 3672 .max_keysize = ARC4_MAX_KEY_SIZE, 3673 .ivsize = 0, 3674 } 3675 }, 3676 .cipher_info = { 3677 .alg = CIPHER_ALG_RC4, 3678 .mode = CIPHER_MODE_NONE, 3679 }, 3680 .auth_info = { 3681 .alg = HASH_ALG_NONE, 3682 .mode = HASH_MODE_NONE, 3683 }, 3684 }, 3685 { 3686 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3687 .alg.crypto = { 3688 .cra_name = "ofb(des)", 3689 .cra_driver_name = "ofb-des-iproc", 3690 .cra_blocksize = DES_BLOCK_SIZE, 3691 .cra_ablkcipher = { 3692 .min_keysize = DES_KEY_SIZE, 3693 .max_keysize = DES_KEY_SIZE, 3694 .ivsize = DES_BLOCK_SIZE, 3695 } 3696 }, 3697 .cipher_info = { 3698 .alg = CIPHER_ALG_DES, 3699 .mode = CIPHER_MODE_OFB, 3700 }, 3701 .auth_info = { 3702 .alg = HASH_ALG_NONE, 3703 .mode = HASH_MODE_NONE, 3704 }, 3705 }, 3706 { 3707 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3708 .alg.crypto = { 3709 .cra_name = "cbc(des)", 3710 .cra_driver_name = "cbc-des-iproc", 3711 .cra_blocksize = DES_BLOCK_SIZE, 3712 .cra_ablkcipher = { 3713 .min_keysize = DES_KEY_SIZE, 3714 .max_keysize = DES_KEY_SIZE, 3715 .ivsize = DES_BLOCK_SIZE, 3716 } 3717 }, 3718 .cipher_info = { 3719 .alg = CIPHER_ALG_DES, 3720 .mode = CIPHER_MODE_CBC, 3721 }, 3722 .auth_info = { 3723 .alg = HASH_ALG_NONE, 3724 .mode = HASH_MODE_NONE, 3725 }, 3726 }, 3727 { 3728 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3729 .alg.crypto = { 3730 .cra_name = "ecb(des)", 3731 .cra_driver_name = "ecb-des-iproc", 3732 .cra_blocksize = DES_BLOCK_SIZE, 3733 .cra_ablkcipher = { 3734 .min_keysize = DES_KEY_SIZE, 3735 .max_keysize = DES_KEY_SIZE, 3736 .ivsize = 0, 3737 } 3738 }, 3739 .cipher_info = { 3740 .alg = CIPHER_ALG_DES, 3741 .mode = CIPHER_MODE_ECB, 3742 }, 3743 .auth_info = { 3744 .alg = HASH_ALG_NONE, 3745 .mode = HASH_MODE_NONE, 3746 }, 3747 }, 3748 { 3749 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3750 .alg.crypto = { 3751 .cra_name = "ofb(des3_ede)", 3752 .cra_driver_name = "ofb-des3-iproc", 3753 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3754 .cra_ablkcipher = { 3755 .min_keysize = DES3_EDE_KEY_SIZE, 3756 .max_keysize = DES3_EDE_KEY_SIZE, 3757 .ivsize = DES3_EDE_BLOCK_SIZE, 3758 } 3759 }, 3760 .cipher_info = { 3761 .alg = CIPHER_ALG_3DES, 3762 .mode = CIPHER_MODE_OFB, 3763 }, 3764 .auth_info = { 3765 .alg = HASH_ALG_NONE, 3766 .mode = HASH_MODE_NONE, 3767 }, 3768 }, 3769 { 3770 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3771 .alg.crypto = { 3772 .cra_name = "cbc(des3_ede)", 3773 .cra_driver_name = "cbc-des3-iproc", 3774 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3775 
.cra_ablkcipher = { 3776 .min_keysize = DES3_EDE_KEY_SIZE, 3777 .max_keysize = DES3_EDE_KEY_SIZE, 3778 .ivsize = DES3_EDE_BLOCK_SIZE, 3779 } 3780 }, 3781 .cipher_info = { 3782 .alg = CIPHER_ALG_3DES, 3783 .mode = CIPHER_MODE_CBC, 3784 }, 3785 .auth_info = { 3786 .alg = HASH_ALG_NONE, 3787 .mode = HASH_MODE_NONE, 3788 }, 3789 }, 3790 { 3791 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3792 .alg.crypto = { 3793 .cra_name = "ecb(des3_ede)", 3794 .cra_driver_name = "ecb-des3-iproc", 3795 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3796 .cra_ablkcipher = { 3797 .min_keysize = DES3_EDE_KEY_SIZE, 3798 .max_keysize = DES3_EDE_KEY_SIZE, 3799 .ivsize = 0, 3800 } 3801 }, 3802 .cipher_info = { 3803 .alg = CIPHER_ALG_3DES, 3804 .mode = CIPHER_MODE_ECB, 3805 }, 3806 .auth_info = { 3807 .alg = HASH_ALG_NONE, 3808 .mode = HASH_MODE_NONE, 3809 }, 3810 }, 3811 { 3812 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3813 .alg.crypto = { 3814 .cra_name = "ofb(aes)", 3815 .cra_driver_name = "ofb-aes-iproc", 3816 .cra_blocksize = AES_BLOCK_SIZE, 3817 .cra_ablkcipher = { 3818 .min_keysize = AES_MIN_KEY_SIZE, 3819 .max_keysize = AES_MAX_KEY_SIZE, 3820 .ivsize = AES_BLOCK_SIZE, 3821 } 3822 }, 3823 .cipher_info = { 3824 .alg = CIPHER_ALG_AES, 3825 .mode = CIPHER_MODE_OFB, 3826 }, 3827 .auth_info = { 3828 .alg = HASH_ALG_NONE, 3829 .mode = HASH_MODE_NONE, 3830 }, 3831 }, 3832 { 3833 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3834 .alg.crypto = { 3835 .cra_name = "cbc(aes)", 3836 .cra_driver_name = "cbc-aes-iproc", 3837 .cra_blocksize = AES_BLOCK_SIZE, 3838 .cra_ablkcipher = { 3839 .min_keysize = AES_MIN_KEY_SIZE, 3840 .max_keysize = AES_MAX_KEY_SIZE, 3841 .ivsize = AES_BLOCK_SIZE, 3842 } 3843 }, 3844 .cipher_info = { 3845 .alg = CIPHER_ALG_AES, 3846 .mode = CIPHER_MODE_CBC, 3847 }, 3848 .auth_info = { 3849 .alg = HASH_ALG_NONE, 3850 .mode = HASH_MODE_NONE, 3851 }, 3852 }, 3853 { 3854 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3855 .alg.crypto = { 3856 .cra_name = "ecb(aes)", 3857 .cra_driver_name = "ecb-aes-iproc", 3858 .cra_blocksize = AES_BLOCK_SIZE, 3859 .cra_ablkcipher = { 3860 .min_keysize = AES_MIN_KEY_SIZE, 3861 .max_keysize = AES_MAX_KEY_SIZE, 3862 .ivsize = 0, 3863 } 3864 }, 3865 .cipher_info = { 3866 .alg = CIPHER_ALG_AES, 3867 .mode = CIPHER_MODE_ECB, 3868 }, 3869 .auth_info = { 3870 .alg = HASH_ALG_NONE, 3871 .mode = HASH_MODE_NONE, 3872 }, 3873 }, 3874 { 3875 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3876 .alg.crypto = { 3877 .cra_name = "ctr(aes)", 3878 .cra_driver_name = "ctr-aes-iproc", 3879 .cra_blocksize = AES_BLOCK_SIZE, 3880 .cra_ablkcipher = { 3881 /* .geniv = "chainiv", */ 3882 .min_keysize = AES_MIN_KEY_SIZE, 3883 .max_keysize = AES_MAX_KEY_SIZE, 3884 .ivsize = AES_BLOCK_SIZE, 3885 } 3886 }, 3887 .cipher_info = { 3888 .alg = CIPHER_ALG_AES, 3889 .mode = CIPHER_MODE_CTR, 3890 }, 3891 .auth_info = { 3892 .alg = HASH_ALG_NONE, 3893 .mode = HASH_MODE_NONE, 3894 }, 3895 }, 3896 { 3897 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3898 .alg.crypto = { 3899 .cra_name = "xts(aes)", 3900 .cra_driver_name = "xts-aes-iproc", 3901 .cra_blocksize = AES_BLOCK_SIZE, 3902 .cra_ablkcipher = { 3903 .min_keysize = 2 * AES_MIN_KEY_SIZE, 3904 .max_keysize = 2 * AES_MAX_KEY_SIZE, 3905 .ivsize = AES_BLOCK_SIZE, 3906 } 3907 }, 3908 .cipher_info = { 3909 .alg = CIPHER_ALG_AES, 3910 .mode = CIPHER_MODE_XTS, 3911 }, 3912 .auth_info = { 3913 .alg = HASH_ALG_NONE, 3914 .mode = HASH_MODE_NONE, 3915 }, 3916 }, 3917 3918 /* AHASH algorithms. 
*/ 3919 { 3920 .type = CRYPTO_ALG_TYPE_AHASH, 3921 .alg.hash = { 3922 .halg.digestsize = MD5_DIGEST_SIZE, 3923 .halg.base = { 3924 .cra_name = "md5", 3925 .cra_driver_name = "md5-iproc", 3926 .cra_blocksize = MD5_BLOCK_WORDS * 4, 3927 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 3928 CRYPTO_ALG_ASYNC, 3929 } 3930 }, 3931 .cipher_info = { 3932 .alg = CIPHER_ALG_NONE, 3933 .mode = CIPHER_MODE_NONE, 3934 }, 3935 .auth_info = { 3936 .alg = HASH_ALG_MD5, 3937 .mode = HASH_MODE_HASH, 3938 }, 3939 }, 3940 { 3941 .type = CRYPTO_ALG_TYPE_AHASH, 3942 .alg.hash = { 3943 .halg.digestsize = MD5_DIGEST_SIZE, 3944 .halg.base = { 3945 .cra_name = "hmac(md5)", 3946 .cra_driver_name = "hmac-md5-iproc", 3947 .cra_blocksize = MD5_BLOCK_WORDS * 4, 3948 } 3949 }, 3950 .cipher_info = { 3951 .alg = CIPHER_ALG_NONE, 3952 .mode = CIPHER_MODE_NONE, 3953 }, 3954 .auth_info = { 3955 .alg = HASH_ALG_MD5, 3956 .mode = HASH_MODE_HMAC, 3957 }, 3958 }, 3959 {.type = CRYPTO_ALG_TYPE_AHASH, 3960 .alg.hash = { 3961 .halg.digestsize = SHA1_DIGEST_SIZE, 3962 .halg.base = { 3963 .cra_name = "sha1", 3964 .cra_driver_name = "sha1-iproc", 3965 .cra_blocksize = SHA1_BLOCK_SIZE, 3966 } 3967 }, 3968 .cipher_info = { 3969 .alg = CIPHER_ALG_NONE, 3970 .mode = CIPHER_MODE_NONE, 3971 }, 3972 .auth_info = { 3973 .alg = HASH_ALG_SHA1, 3974 .mode = HASH_MODE_HASH, 3975 }, 3976 }, 3977 {.type = CRYPTO_ALG_TYPE_AHASH, 3978 .alg.hash = { 3979 .halg.digestsize = SHA1_DIGEST_SIZE, 3980 .halg.base = { 3981 .cra_name = "hmac(sha1)", 3982 .cra_driver_name = "hmac-sha1-iproc", 3983 .cra_blocksize = SHA1_BLOCK_SIZE, 3984 } 3985 }, 3986 .cipher_info = { 3987 .alg = CIPHER_ALG_NONE, 3988 .mode = CIPHER_MODE_NONE, 3989 }, 3990 .auth_info = { 3991 .alg = HASH_ALG_SHA1, 3992 .mode = HASH_MODE_HMAC, 3993 }, 3994 }, 3995 {.type = CRYPTO_ALG_TYPE_AHASH, 3996 .alg.hash = { 3997 .halg.digestsize = SHA224_DIGEST_SIZE, 3998 .halg.base = { 3999 .cra_name = "sha224", 4000 .cra_driver_name = "sha224-iproc", 4001 .cra_blocksize = SHA224_BLOCK_SIZE, 4002 } 4003 }, 4004 .cipher_info = { 4005 .alg = CIPHER_ALG_NONE, 4006 .mode = CIPHER_MODE_NONE, 4007 }, 4008 .auth_info = { 4009 .alg = HASH_ALG_SHA224, 4010 .mode = HASH_MODE_HASH, 4011 }, 4012 }, 4013 {.type = CRYPTO_ALG_TYPE_AHASH, 4014 .alg.hash = { 4015 .halg.digestsize = SHA224_DIGEST_SIZE, 4016 .halg.base = { 4017 .cra_name = "hmac(sha224)", 4018 .cra_driver_name = "hmac-sha224-iproc", 4019 .cra_blocksize = SHA224_BLOCK_SIZE, 4020 } 4021 }, 4022 .cipher_info = { 4023 .alg = CIPHER_ALG_NONE, 4024 .mode = CIPHER_MODE_NONE, 4025 }, 4026 .auth_info = { 4027 .alg = HASH_ALG_SHA224, 4028 .mode = HASH_MODE_HMAC, 4029 }, 4030 }, 4031 {.type = CRYPTO_ALG_TYPE_AHASH, 4032 .alg.hash = { 4033 .halg.digestsize = SHA256_DIGEST_SIZE, 4034 .halg.base = { 4035 .cra_name = "sha256", 4036 .cra_driver_name = "sha256-iproc", 4037 .cra_blocksize = SHA256_BLOCK_SIZE, 4038 } 4039 }, 4040 .cipher_info = { 4041 .alg = CIPHER_ALG_NONE, 4042 .mode = CIPHER_MODE_NONE, 4043 }, 4044 .auth_info = { 4045 .alg = HASH_ALG_SHA256, 4046 .mode = HASH_MODE_HASH, 4047 }, 4048 }, 4049 {.type = CRYPTO_ALG_TYPE_AHASH, 4050 .alg.hash = { 4051 .halg.digestsize = SHA256_DIGEST_SIZE, 4052 .halg.base = { 4053 .cra_name = "hmac(sha256)", 4054 .cra_driver_name = "hmac-sha256-iproc", 4055 .cra_blocksize = SHA256_BLOCK_SIZE, 4056 } 4057 }, 4058 .cipher_info = { 4059 .alg = CIPHER_ALG_NONE, 4060 .mode = CIPHER_MODE_NONE, 4061 }, 4062 .auth_info = { 4063 .alg = HASH_ALG_SHA256, 4064 .mode = HASH_MODE_HMAC, 4065 }, 4066 }, 4067 { 4068 .type = CRYPTO_ALG_TYPE_AHASH, 4069 
.alg.hash = { 4070 .halg.digestsize = SHA384_DIGEST_SIZE, 4071 .halg.base = { 4072 .cra_name = "sha384", 4073 .cra_driver_name = "sha384-iproc", 4074 .cra_blocksize = SHA384_BLOCK_SIZE, 4075 } 4076 }, 4077 .cipher_info = { 4078 .alg = CIPHER_ALG_NONE, 4079 .mode = CIPHER_MODE_NONE, 4080 }, 4081 .auth_info = { 4082 .alg = HASH_ALG_SHA384, 4083 .mode = HASH_MODE_HASH, 4084 }, 4085 }, 4086 { 4087 .type = CRYPTO_ALG_TYPE_AHASH, 4088 .alg.hash = { 4089 .halg.digestsize = SHA384_DIGEST_SIZE, 4090 .halg.base = { 4091 .cra_name = "hmac(sha384)", 4092 .cra_driver_name = "hmac-sha384-iproc", 4093 .cra_blocksize = SHA384_BLOCK_SIZE, 4094 } 4095 }, 4096 .cipher_info = { 4097 .alg = CIPHER_ALG_NONE, 4098 .mode = CIPHER_MODE_NONE, 4099 }, 4100 .auth_info = { 4101 .alg = HASH_ALG_SHA384, 4102 .mode = HASH_MODE_HMAC, 4103 }, 4104 }, 4105 { 4106 .type = CRYPTO_ALG_TYPE_AHASH, 4107 .alg.hash = { 4108 .halg.digestsize = SHA512_DIGEST_SIZE, 4109 .halg.base = { 4110 .cra_name = "sha512", 4111 .cra_driver_name = "sha512-iproc", 4112 .cra_blocksize = SHA512_BLOCK_SIZE, 4113 } 4114 }, 4115 .cipher_info = { 4116 .alg = CIPHER_ALG_NONE, 4117 .mode = CIPHER_MODE_NONE, 4118 }, 4119 .auth_info = { 4120 .alg = HASH_ALG_SHA512, 4121 .mode = HASH_MODE_HASH, 4122 }, 4123 }, 4124 { 4125 .type = CRYPTO_ALG_TYPE_AHASH, 4126 .alg.hash = { 4127 .halg.digestsize = SHA512_DIGEST_SIZE, 4128 .halg.base = { 4129 .cra_name = "hmac(sha512)", 4130 .cra_driver_name = "hmac-sha512-iproc", 4131 .cra_blocksize = SHA512_BLOCK_SIZE, 4132 } 4133 }, 4134 .cipher_info = { 4135 .alg = CIPHER_ALG_NONE, 4136 .mode = CIPHER_MODE_NONE, 4137 }, 4138 .auth_info = { 4139 .alg = HASH_ALG_SHA512, 4140 .mode = HASH_MODE_HMAC, 4141 }, 4142 }, 4143 { 4144 .type = CRYPTO_ALG_TYPE_AHASH, 4145 .alg.hash = { 4146 .halg.digestsize = SHA3_224_DIGEST_SIZE, 4147 .halg.base = { 4148 .cra_name = "sha3-224", 4149 .cra_driver_name = "sha3-224-iproc", 4150 .cra_blocksize = SHA3_224_BLOCK_SIZE, 4151 } 4152 }, 4153 .cipher_info = { 4154 .alg = CIPHER_ALG_NONE, 4155 .mode = CIPHER_MODE_NONE, 4156 }, 4157 .auth_info = { 4158 .alg = HASH_ALG_SHA3_224, 4159 .mode = HASH_MODE_HASH, 4160 }, 4161 }, 4162 { 4163 .type = CRYPTO_ALG_TYPE_AHASH, 4164 .alg.hash = { 4165 .halg.digestsize = SHA3_224_DIGEST_SIZE, 4166 .halg.base = { 4167 .cra_name = "hmac(sha3-224)", 4168 .cra_driver_name = "hmac-sha3-224-iproc", 4169 .cra_blocksize = SHA3_224_BLOCK_SIZE, 4170 } 4171 }, 4172 .cipher_info = { 4173 .alg = CIPHER_ALG_NONE, 4174 .mode = CIPHER_MODE_NONE, 4175 }, 4176 .auth_info = { 4177 .alg = HASH_ALG_SHA3_224, 4178 .mode = HASH_MODE_HMAC 4179 }, 4180 }, 4181 { 4182 .type = CRYPTO_ALG_TYPE_AHASH, 4183 .alg.hash = { 4184 .halg.digestsize = SHA3_256_DIGEST_SIZE, 4185 .halg.base = { 4186 .cra_name = "sha3-256", 4187 .cra_driver_name = "sha3-256-iproc", 4188 .cra_blocksize = SHA3_256_BLOCK_SIZE, 4189 } 4190 }, 4191 .cipher_info = { 4192 .alg = CIPHER_ALG_NONE, 4193 .mode = CIPHER_MODE_NONE, 4194 }, 4195 .auth_info = { 4196 .alg = HASH_ALG_SHA3_256, 4197 .mode = HASH_MODE_HASH, 4198 }, 4199 }, 4200 { 4201 .type = CRYPTO_ALG_TYPE_AHASH, 4202 .alg.hash = { 4203 .halg.digestsize = SHA3_256_DIGEST_SIZE, 4204 .halg.base = { 4205 .cra_name = "hmac(sha3-256)", 4206 .cra_driver_name = "hmac-sha3-256-iproc", 4207 .cra_blocksize = SHA3_256_BLOCK_SIZE, 4208 } 4209 }, 4210 .cipher_info = { 4211 .alg = CIPHER_ALG_NONE, 4212 .mode = CIPHER_MODE_NONE, 4213 }, 4214 .auth_info = { 4215 .alg = HASH_ALG_SHA3_256, 4216 .mode = HASH_MODE_HMAC, 4217 }, 4218 }, 4219 { 4220 .type = CRYPTO_ALG_TYPE_AHASH, 4221 
	 .alg.hash = {
		.halg.digestsize = SHA3_384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha3-384",
			.cra_driver_name = "sha3-384-iproc",
			/* SHA3-384 uses a 104-byte block, not the SHA3-224 block size */
			.cra_blocksize = SHA3_384_BLOCK_SIZE,
		}
	 },
	 .cipher_info = {
		.alg = CIPHER_ALG_NONE,
		.mode = CIPHER_MODE_NONE,
	 },
	 .auth_info = {
		.alg = HASH_ALG_SHA3_384,
		.mode = HASH_MODE_HASH,
	 },
	},
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
	 .alg.hash = {
		.halg.digestsize = SHA3_384_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha3-384)",
			.cra_driver_name = "hmac-sha3-384-iproc",
			.cra_blocksize = SHA3_384_BLOCK_SIZE,
		}
	 },
	 .cipher_info = {
		.alg = CIPHER_ALG_NONE,
		.mode = CIPHER_MODE_NONE,
	 },
	 .auth_info = {
		.alg = HASH_ALG_SHA3_384,
		.mode = HASH_MODE_HMAC,
	 },
	},
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
	 .alg.hash = {
		.halg.digestsize = SHA3_512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "sha3-512",
			.cra_driver_name = "sha3-512-iproc",
			.cra_blocksize = SHA3_512_BLOCK_SIZE,
		}
	 },
	 .cipher_info = {
		.alg = CIPHER_ALG_NONE,
		.mode = CIPHER_MODE_NONE,
	 },
	 .auth_info = {
		.alg = HASH_ALG_SHA3_512,
		.mode = HASH_MODE_HASH,
	 },
	},
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
	 .alg.hash = {
		.halg.digestsize = SHA3_512_DIGEST_SIZE,
		.halg.base = {
			.cra_name = "hmac(sha3-512)",
			.cra_driver_name = "hmac-sha3-512-iproc",
			.cra_blocksize = SHA3_512_BLOCK_SIZE,
		}
	 },
	 .cipher_info = {
		.alg = CIPHER_ALG_NONE,
		.mode = CIPHER_MODE_NONE,
	 },
	 .auth_info = {
		.alg = HASH_ALG_SHA3_512,
		.mode = HASH_MODE_HMAC,
	 },
	},
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
	 .alg.hash = {
		.halg.digestsize = AES_BLOCK_SIZE,
		.halg.base = {
			.cra_name = "xcbc(aes)",
			.cra_driver_name = "xcbc-aes-iproc",
			.cra_blocksize = AES_BLOCK_SIZE,
		}
	 },
	 .cipher_info = {
		.alg = CIPHER_ALG_NONE,
		.mode = CIPHER_MODE_NONE,
	 },
	 .auth_info = {
		.alg = HASH_ALG_AES,
		.mode = HASH_MODE_XCBC,
	 },
	},
	{
	 .type = CRYPTO_ALG_TYPE_AHASH,
	 .alg.hash = {
		.halg.digestsize = AES_BLOCK_SIZE,
		.halg.base = {
			.cra_name = "cmac(aes)",
			.cra_driver_name = "cmac-aes-iproc",
			.cra_blocksize = AES_BLOCK_SIZE,
		}
	 },
	 .cipher_info = {
		.alg = CIPHER_ALG_NONE,
		.mode = CIPHER_MODE_NONE,
	 },
	 .auth_info = {
		.alg = HASH_ALG_AES,
		.mode = HASH_MODE_CMAC,
	 },
	},
};

static int generic_cra_init(struct crypto_tfm *tfm,
			    struct iproc_alg_s *cipher_alg)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);

	flow_log("%s()\n", __func__);

	ctx->alg = cipher_alg;
	ctx->cipher = cipher_alg->cipher_info;
	ctx->auth = cipher_alg->auth_info;
	ctx->auth_first = cipher_alg->auth_first;
	ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg,
						    ctx->cipher.mode,
						    blocksize);
	ctx->fallback_cipher = NULL;

	ctx->enckeylen = 0;
	ctx->authkeylen = 0;

	atomic_inc(&iproc_priv.stream_count);
	atomic_inc(&iproc_priv.session_count);

	return 0;
}

static int ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct iproc_alg_s *cipher_alg;
4367 flow_log("%s()\n", __func__); 4368 4369 tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s); 4370 4371 cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto); 4372 return generic_cra_init(tfm, cipher_alg); 4373 } 4374 4375 static int ahash_cra_init(struct crypto_tfm *tfm) 4376 { 4377 int err; 4378 struct crypto_alg *alg = tfm->__crt_alg; 4379 struct iproc_alg_s *cipher_alg; 4380 4381 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s, 4382 alg.hash); 4383 4384 err = generic_cra_init(tfm, cipher_alg); 4385 flow_log("%s()\n", __func__); 4386 4387 /* 4388 * export state size has to be < 512 bytes. So don't include msg bufs 4389 * in state size. 4390 */ 4391 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 4392 sizeof(struct iproc_reqctx_s)); 4393 4394 return err; 4395 } 4396 4397 static int aead_cra_init(struct crypto_aead *aead) 4398 { 4399 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 4400 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 4401 struct crypto_alg *alg = tfm->__crt_alg; 4402 struct aead_alg *aalg = container_of(alg, struct aead_alg, base); 4403 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s, 4404 alg.aead); 4405 4406 int err = generic_cra_init(tfm, cipher_alg); 4407 4408 flow_log("%s()\n", __func__); 4409 4410 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s)); 4411 ctx->is_esp = false; 4412 ctx->salt_len = 0; 4413 ctx->salt_offset = 0; 4414 4415 /* random first IV */ 4416 get_random_bytes(ctx->iv, MAX_IV_SIZE); 4417 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE); 4418 4419 if (!err) { 4420 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { 4421 flow_log("%s() creating fallback cipher\n", __func__); 4422 4423 ctx->fallback_cipher = 4424 crypto_alloc_aead(alg->cra_name, 0, 4425 CRYPTO_ALG_ASYNC | 4426 CRYPTO_ALG_NEED_FALLBACK); 4427 if (IS_ERR(ctx->fallback_cipher)) { 4428 pr_err("%s() Error: failed to allocate fallback for %s\n", 4429 __func__, alg->cra_name); 4430 return PTR_ERR(ctx->fallback_cipher); 4431 } 4432 } 4433 } 4434 4435 return err; 4436 } 4437 4438 static void generic_cra_exit(struct crypto_tfm *tfm) 4439 { 4440 atomic_dec(&iproc_priv.session_count); 4441 } 4442 4443 static void aead_cra_exit(struct crypto_aead *aead) 4444 { 4445 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 4446 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 4447 4448 generic_cra_exit(tfm); 4449 4450 if (ctx->fallback_cipher) { 4451 crypto_free_aead(ctx->fallback_cipher); 4452 ctx->fallback_cipher = NULL; 4453 } 4454 } 4455 4456 /** 4457 * spu_functions_register() - Specify hardware-specific SPU functions based on 4458 * SPU type read from device tree. 
4459 * @dev: device structure 4460 * @spu_type: SPU hardware generation 4461 * @spu_subtype: SPU hardware version 4462 */ 4463 static void spu_functions_register(struct device *dev, 4464 enum spu_spu_type spu_type, 4465 enum spu_spu_subtype spu_subtype) 4466 { 4467 struct spu_hw *spu = &iproc_priv.spu; 4468 4469 if (spu_type == SPU_TYPE_SPUM) { 4470 dev_dbg(dev, "Registering SPUM functions"); 4471 spu->spu_dump_msg_hdr = spum_dump_msg_hdr; 4472 spu->spu_payload_length = spum_payload_length; 4473 spu->spu_response_hdr_len = spum_response_hdr_len; 4474 spu->spu_hash_pad_len = spum_hash_pad_len; 4475 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len; 4476 spu->spu_assoc_resp_len = spum_assoc_resp_len; 4477 spu->spu_aead_ivlen = spum_aead_ivlen; 4478 spu->spu_hash_type = spum_hash_type; 4479 spu->spu_digest_size = spum_digest_size; 4480 spu->spu_create_request = spum_create_request; 4481 spu->spu_cipher_req_init = spum_cipher_req_init; 4482 spu->spu_cipher_req_finish = spum_cipher_req_finish; 4483 spu->spu_request_pad = spum_request_pad; 4484 spu->spu_tx_status_len = spum_tx_status_len; 4485 spu->spu_rx_status_len = spum_rx_status_len; 4486 spu->spu_status_process = spum_status_process; 4487 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload; 4488 spu->spu_ccm_update_iv = spum_ccm_update_iv; 4489 spu->spu_wordalign_padlen = spum_wordalign_padlen; 4490 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2) 4491 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload; 4492 else 4493 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload; 4494 } else { 4495 dev_dbg(dev, "Registering SPU2 functions"); 4496 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr; 4497 spu->spu_ctx_max_payload = spu2_ctx_max_payload; 4498 spu->spu_payload_length = spu2_payload_length; 4499 spu->spu_response_hdr_len = spu2_response_hdr_len; 4500 spu->spu_hash_pad_len = spu2_hash_pad_len; 4501 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len; 4502 spu->spu_assoc_resp_len = spu2_assoc_resp_len; 4503 spu->spu_aead_ivlen = spu2_aead_ivlen; 4504 spu->spu_hash_type = spu2_hash_type; 4505 spu->spu_digest_size = spu2_digest_size; 4506 spu->spu_create_request = spu2_create_request; 4507 spu->spu_cipher_req_init = spu2_cipher_req_init; 4508 spu->spu_cipher_req_finish = spu2_cipher_req_finish; 4509 spu->spu_request_pad = spu2_request_pad; 4510 spu->spu_tx_status_len = spu2_tx_status_len; 4511 spu->spu_rx_status_len = spu2_rx_status_len; 4512 spu->spu_status_process = spu2_status_process; 4513 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload; 4514 spu->spu_ccm_update_iv = spu2_ccm_update_iv; 4515 spu->spu_wordalign_padlen = spu2_wordalign_padlen; 4516 } 4517 } 4518 4519 /** 4520 * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox 4521 * channel for the SPU being probed. 
4522 * @dev: SPU driver device structure 4523 * 4524 * Return: 0 if successful 4525 * < 0 otherwise 4526 */ 4527 static int spu_mb_init(struct device *dev) 4528 { 4529 struct mbox_client *mcl = &iproc_priv.mcl; 4530 int err, i; 4531 4532 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan, 4533 sizeof(struct mbox_chan *), GFP_KERNEL); 4534 if (!iproc_priv.mbox) 4535 return -ENOMEM; 4536 4537 mcl->dev = dev; 4538 mcl->tx_block = false; 4539 mcl->tx_tout = 0; 4540 mcl->knows_txdone = false; 4541 mcl->rx_callback = spu_rx_callback; 4542 mcl->tx_done = NULL; 4543 4544 for (i = 0; i < iproc_priv.spu.num_chan; i++) { 4545 iproc_priv.mbox[i] = mbox_request_channel(mcl, i); 4546 if (IS_ERR(iproc_priv.mbox[i])) { 4547 err = (int)PTR_ERR(iproc_priv.mbox[i]); 4548 dev_err(dev, 4549 "Mbox channel %d request failed with err %d", 4550 i, err); 4551 iproc_priv.mbox[i] = NULL; 4552 goto free_channels; 4553 } 4554 } 4555 4556 return 0; 4557 free_channels: 4558 for (i = 0; i < iproc_priv.spu.num_chan; i++) { 4559 if (iproc_priv.mbox[i]) 4560 mbox_free_channel(iproc_priv.mbox[i]); 4561 } 4562 4563 return err; 4564 } 4565 4566 static void spu_mb_release(struct platform_device *pdev) 4567 { 4568 int i; 4569 4570 for (i = 0; i < iproc_priv.spu.num_chan; i++) 4571 mbox_free_channel(iproc_priv.mbox[i]); 4572 } 4573 4574 static void spu_counters_init(void) 4575 { 4576 int i; 4577 int j; 4578 4579 atomic_set(&iproc_priv.session_count, 0); 4580 atomic_set(&iproc_priv.stream_count, 0); 4581 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan); 4582 atomic64_set(&iproc_priv.bytes_in, 0); 4583 atomic64_set(&iproc_priv.bytes_out, 0); 4584 for (i = 0; i < SPU_OP_NUM; i++) { 4585 atomic_set(&iproc_priv.op_counts[i], 0); 4586 atomic_set(&iproc_priv.setkey_cnt[i], 0); 4587 } 4588 for (i = 0; i < CIPHER_ALG_LAST; i++) 4589 for (j = 0; j < CIPHER_MODE_LAST; j++) 4590 atomic_set(&iproc_priv.cipher_cnt[i][j], 0); 4591 4592 for (i = 0; i < HASH_ALG_LAST; i++) { 4593 atomic_set(&iproc_priv.hash_cnt[i], 0); 4594 atomic_set(&iproc_priv.hmac_cnt[i], 0); 4595 } 4596 for (i = 0; i < AEAD_TYPE_LAST; i++) 4597 atomic_set(&iproc_priv.aead_cnt[i], 0); 4598 4599 atomic_set(&iproc_priv.mb_no_spc, 0); 4600 atomic_set(&iproc_priv.mb_send_fail, 0); 4601 atomic_set(&iproc_priv.bad_icv, 0); 4602 } 4603 4604 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg) 4605 { 4606 struct spu_hw *spu = &iproc_priv.spu; 4607 struct crypto_alg *crypto = &driver_alg->alg.crypto; 4608 int err; 4609 4610 /* SPU2 does not support RC4 */ 4611 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) && 4612 (spu->spu_type == SPU_TYPE_SPU2)) 4613 return 0; 4614 4615 crypto->cra_module = THIS_MODULE; 4616 crypto->cra_priority = cipher_pri; 4617 crypto->cra_alignmask = 0; 4618 crypto->cra_ctxsize = sizeof(struct iproc_ctx_s); 4619 INIT_LIST_HEAD(&crypto->cra_list); 4620 4621 crypto->cra_init = ablkcipher_cra_init; 4622 crypto->cra_exit = generic_cra_exit; 4623 crypto->cra_type = &crypto_ablkcipher_type; 4624 crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | 4625 CRYPTO_ALG_KERN_DRIVER_ONLY; 4626 4627 crypto->cra_ablkcipher.setkey = ablkcipher_setkey; 4628 crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt; 4629 crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt; 4630 4631 err = crypto_register_alg(crypto); 4632 /* Mark alg as having been registered, if successful */ 4633 if (err == 0) 4634 driver_alg->registered = true; 4635 pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name); 4636 return err; 4637 } 4638 
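/*
 * Illustrative sketch only, not part of this driver: once an algorithm from
 * the driver_algs[] table is registered, in-kernel users reach it through the
 * generic crypto API. The crypto core picks the implementation of a given
 * cra_name ("cbc(aes)", "sha256", ...) with the highest cra_priority, or a
 * caller may select this hardware explicitly by its cra_driver_name (e.g.
 * "cbc-aes-iproc"). In the example below, key, keylen, sg_src, sg_dst, nbytes
 * and iv are hypothetical caller-owned values and error handling is
 * abbreviated.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, sg_src, sg_dst, nbytes, iv);
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */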
4639 static int spu_register_ahash(struct iproc_alg_s *driver_alg) 4640 { 4641 struct spu_hw *spu = &iproc_priv.spu; 4642 struct ahash_alg *hash = &driver_alg->alg.hash; 4643 int err; 4644 4645 /* AES-XCBC is the only AES hash type currently supported on SPU-M */ 4646 if ((driver_alg->auth_info.alg == HASH_ALG_AES) && 4647 (driver_alg->auth_info.mode != HASH_MODE_XCBC) && 4648 (spu->spu_type == SPU_TYPE_SPUM)) 4649 return 0; 4650 4651 /* SHA3 algorithm variants are not registered for SPU-M or SPU2. */ 4652 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) && 4653 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2)) 4654 return 0; 4655 4656 hash->halg.base.cra_module = THIS_MODULE; 4657 hash->halg.base.cra_priority = hash_pri; 4658 hash->halg.base.cra_alignmask = 0; 4659 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); 4660 hash->halg.base.cra_init = ahash_cra_init; 4661 hash->halg.base.cra_exit = generic_cra_exit; 4662 hash->halg.base.cra_type = &crypto_ahash_type; 4663 hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; 4664 hash->halg.statesize = sizeof(struct spu_hash_export_s); 4665 4666 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { 4667 hash->setkey = ahash_setkey; 4668 hash->init = ahash_init; 4669 hash->update = ahash_update; 4670 hash->final = ahash_final; 4671 hash->finup = ahash_finup; 4672 hash->digest = ahash_digest; 4673 } else { 4674 hash->setkey = ahash_hmac_setkey; 4675 hash->init = ahash_hmac_init; 4676 hash->update = ahash_hmac_update; 4677 hash->final = ahash_hmac_final; 4678 hash->finup = ahash_hmac_finup; 4679 hash->digest = ahash_hmac_digest; 4680 } 4681 hash->export = ahash_export; 4682 hash->import = ahash_import; 4683 4684 err = crypto_register_ahash(hash); 4685 /* Mark alg as having been registered, if successful */ 4686 if (err == 0) 4687 driver_alg->registered = true; 4688 pr_debug(" registered ahash %s\n", 4689 hash->halg.base.cra_driver_name); 4690 return err; 4691 } 4692 4693 static int spu_register_aead(struct iproc_alg_s *driver_alg) 4694 { 4695 struct aead_alg *aead = &driver_alg->alg.aead; 4696 int err; 4697 4698 aead->base.cra_module = THIS_MODULE; 4699 aead->base.cra_priority = aead_pri; 4700 aead->base.cra_alignmask = 0; 4701 aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); 4702 INIT_LIST_HEAD(&aead->base.cra_list); 4703 4704 aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; 4705 /* setkey set in alg initialization */ 4706 aead->setauthsize = aead_setauthsize; 4707 aead->encrypt = aead_encrypt; 4708 aead->decrypt = aead_decrypt; 4709 aead->init = aead_cra_init; 4710 aead->exit = aead_cra_exit; 4711 4712 err = crypto_register_aead(aead); 4713 /* Mark alg as having been registered, if successful */ 4714 if (err == 0) 4715 driver_alg->registered = true; 4716 pr_debug(" registered aead %s\n", aead->base.cra_driver_name); 4717 return err; 4718 } 4719 4720 /* register crypto algorithms the device supports */ 4721 static int spu_algs_register(struct device *dev) 4722 { 4723 int i, j; 4724 int err; 4725 4726 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 4727 switch (driver_algs[i].type) { 4728 case CRYPTO_ALG_TYPE_ABLKCIPHER: 4729 err = spu_register_ablkcipher(&driver_algs[i]); 4730 break; 4731 case CRYPTO_ALG_TYPE_AHASH: 4732 err = spu_register_ahash(&driver_algs[i]); 4733 break; 4734 case CRYPTO_ALG_TYPE_AEAD: 4735 err = spu_register_aead(&driver_algs[i]); 4736 break; 4737 default: 4738 dev_err(dev, 4739 "iproc-crypto: unknown alg type: %d", 4740 driver_algs[i].type); 4741 err = -EINVAL; 4742 } 4743 4744 if 
(err) { 4745 dev_err(dev, "alg registration failed with error %d\n", 4746 err); 4747 goto err_algs; 4748 } 4749 } 4750 4751 return 0; 4752 4753 err_algs: 4754 for (j = 0; j < i; j++) { 4755 /* Skip any algorithm not registered */ 4756 if (!driver_algs[j].registered) 4757 continue; 4758 switch (driver_algs[j].type) { 4759 case CRYPTO_ALG_TYPE_ABLKCIPHER: 4760 crypto_unregister_alg(&driver_algs[j].alg.crypto); 4761 driver_algs[j].registered = false; 4762 break; 4763 case CRYPTO_ALG_TYPE_AHASH: 4764 crypto_unregister_ahash(&driver_algs[j].alg.hash); 4765 driver_algs[j].registered = false; 4766 break; 4767 case CRYPTO_ALG_TYPE_AEAD: 4768 crypto_unregister_aead(&driver_algs[j].alg.aead); 4769 driver_algs[j].registered = false; 4770 break; 4771 } 4772 } 4773 return err; 4774 } 4775 4776 /* ==================== Kernel Platform API ==================== */ 4777 4778 static struct spu_type_subtype spum_ns2_types = { 4779 SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2 4780 }; 4781 4782 static struct spu_type_subtype spum_nsp_types = { 4783 SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP 4784 }; 4785 4786 static struct spu_type_subtype spu2_types = { 4787 SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1 4788 }; 4789 4790 static struct spu_type_subtype spu2_v2_types = { 4791 SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2 4792 }; 4793 4794 static const struct of_device_id bcm_spu_dt_ids[] = { 4795 { 4796 .compatible = "brcm,spum-crypto", 4797 .data = &spum_ns2_types, 4798 }, 4799 { 4800 .compatible = "brcm,spum-nsp-crypto", 4801 .data = &spum_nsp_types, 4802 }, 4803 { 4804 .compatible = "brcm,spu2-crypto", 4805 .data = &spu2_types, 4806 }, 4807 { 4808 .compatible = "brcm,spu2-v2-crypto", 4809 .data = &spu2_v2_types, 4810 }, 4811 { /* sentinel */ } 4812 }; 4813 4814 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids); 4815 4816 static int spu_dt_read(struct platform_device *pdev) 4817 { 4818 struct device *dev = &pdev->dev; 4819 struct spu_hw *spu = &iproc_priv.spu; 4820 struct resource *spu_ctrl_regs; 4821 const struct of_device_id *match; 4822 const struct spu_type_subtype *matched_spu_type; 4823 struct device_node *dn = pdev->dev.of_node; 4824 int err, i; 4825 4826 /* Count number of mailbox channels */ 4827 spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); 4828 4829 match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev); 4830 if (!match) { 4831 dev_err(&pdev->dev, "Failed to match device\n"); 4832 return -ENODEV; 4833 } 4834 4835 matched_spu_type = match->data; 4836 4837 spu->spu_type = matched_spu_type->type; 4838 spu->spu_subtype = matched_spu_type->subtype; 4839 4840 i = 0; 4841 for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = 4842 platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { 4843 4844 spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); 4845 if (IS_ERR(spu->reg_vbase[i])) { 4846 err = PTR_ERR(spu->reg_vbase[i]); 4847 dev_err(&pdev->dev, "Failed to map registers: %d\n", 4848 err); 4849 spu->reg_vbase[i] = NULL; 4850 return err; 4851 } 4852 } 4853 spu->num_spu = i; 4854 dev_dbg(dev, "Device has %d SPUs", spu->num_spu); 4855 4856 return 0; 4857 } 4858 4859 int bcm_spu_probe(struct platform_device *pdev) 4860 { 4861 struct device *dev = &pdev->dev; 4862 struct spu_hw *spu = &iproc_priv.spu; 4863 int err = 0; 4864 4865 iproc_priv.pdev = pdev; 4866 platform_set_drvdata(iproc_priv.pdev, 4867 &iproc_priv); 4868 4869 err = spu_dt_read(pdev); 4870 if (err < 0) 4871 goto failure; 4872 4873 err = spu_mb_init(&pdev->dev); 4874 if (err < 0) 4875 goto failure; 4876 4877 if (spu->spu_type == SPU_TYPE_SPUM) 4878 
		iproc_priv.bcm_hdr_len = 8;
	else if (spu->spu_type == SPU_TYPE_SPU2)
		iproc_priv.bcm_hdr_len = 0;

	spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype);

	spu_counters_init();

	spu_setup_debugfs();

	err = spu_algs_register(dev);
	if (err < 0)
		goto fail_reg;

	return 0;

fail_reg:
	spu_free_debugfs();
	/*
	 * Mailbox channels are held only once spu_mb_init() has succeeded
	 * (it cleans up after itself on failure), so release them on this
	 * path only; the earlier failure paths have nothing to release.
	 */
	spu_mb_release(pdev);
failure:
	dev_err(dev, "%s failed with error %d.\n", __func__, err);

	return err;
}

int bcm_spu_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	char *cdn;

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		/*
		 * Not all algorithms were registered, depending on whether
		 * hardware is SPU or SPU2. So here we make sure to skip
		 * those algorithms that were not previously registered.
		 */
		if (!driver_algs[i].registered)
			continue;

		switch (driver_algs[i].type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&driver_algs[i].alg.crypto);
			dev_dbg(dev, " unregistered cipher %s\n",
				driver_algs[i].alg.crypto.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&driver_algs[i].alg.hash);
			cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name;
			dev_dbg(dev, " unregistered hash %s\n", cdn);
			driver_algs[i].registered = false;
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&driver_algs[i].alg.aead);
			dev_dbg(dev, " unregistered aead %s\n",
				driver_algs[i].alg.aead.base.cra_driver_name);
			driver_algs[i].registered = false;
			break;
		}
	}
	spu_free_debugfs();
	spu_mb_release(pdev);
	return 0;
}

/* ===== Kernel Module API ===== */

static struct platform_driver bcm_spu_pdriver = {
	.driver = {
		   .name = "brcm-spu-crypto",
		   .of_match_table = of_match_ptr(bcm_spu_dt_ids),
		   },
	.probe = bcm_spu_probe,
	.remove = bcm_spu_remove,
};
module_platform_driver(bcm_spu_pdriver);

MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>");
MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver");
MODULE_LICENSE("GPL v2");
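/*
 * Illustrative device tree sketch only (node name, unit addresses, register
 * sizes and the mailbox phandles below are hypothetical): a node matched by
 * bcm_spu_dt_ids supplies one "reg" region per SPU and one "mboxes" entry per
 * DMA channel, which is what spu_dt_read() and spu_mb_init() expect.
 *
 *	crypto@612d0000 {
 *		compatible = "brcm,spu2-v2-crypto";
 *		reg = <0x612d0000 0x1000>,
 *		      <0x612e0000 0x1000>;
 *		mboxes = <&pdc0 0>, <&pdc1 0>;
 *	};
 */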