/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/kthread.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/io.h>
#include <linux/bitops.h>

#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/sha3.h>

#include "util.h"
#include "cipher.h"
#include "spu.h"
#include "spum.h"
#include "spu2.h"

/* ================= Device Structure ================== */

struct device_private iproc_priv;

/* ==================== Parameters ===================== */

int flow_debug_logging;
module_param(flow_debug_logging, int, 0644);
MODULE_PARM_DESC(flow_debug_logging, "Enable Flow Debug Logging");

int packet_debug_logging;
module_param(packet_debug_logging, int, 0644);
MODULE_PARM_DESC(packet_debug_logging, "Enable Packet Debug Logging");

int debug_logging_sleep;
module_param(debug_logging_sleep, int, 0644);
MODULE_PARM_DESC(debug_logging_sleep, "Packet Debug Logging Sleep");

/*
 * The value of these module parameters is used to set the priority for each
 * algo type when this driver registers algos with the kernel crypto API.
 * To use a priority other than the default, set the priority in the insmod or
 * modprobe. Changing the module priority after init time has no effect.
 *
 * The default priorities are chosen to be lower (less preferred) than ARMv8 CE
 * algos, but more preferred than generic software algos.
 */
static int cipher_pri = 150;
module_param(cipher_pri, int, 0644);
MODULE_PARM_DESC(cipher_pri, "Priority for cipher algos");

static int hash_pri = 100;
module_param(hash_pri, int, 0644);
MODULE_PARM_DESC(hash_pri, "Priority for hash algos");

static int aead_pri = 150;
module_param(aead_pri, int, 0644);
MODULE_PARM_DESC(aead_pri, "Priority for AEAD algos");
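
/*
 * Illustrative only: the priorities above can be overridden at module load
 * time, for example
 *
 *	modprobe bcm_crypto_spu cipher_pri=400 hash_pri=400 aead_pri=400
 *
 * to prefer these offloaded algos over the ARMv8 CE implementations. The
 * module name shown here is an assumption; use whatever name this driver is
 * built as on your kernel.
 */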

/* A type 3 BCM header, expected to precede the SPU header for SPU-M.
 * Bits 3 and 4 in the first byte encode the channel number (the dma ringset).
 * 0x60 - ring 0
 * 0x68 - ring 1
 * 0x70 - ring 2
 * 0x78 - ring 3
 */
char BCMHEADER[] = { 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x28 };
/*
 * Some SPU hw does not use BCM header on SPU messages. So BCM_HDR_LEN
 * is set dynamically after reading SPU type from device tree.
 */
#define BCM_HDR_LEN	iproc_priv.bcm_hdr_len

/* min and max time to sleep before retrying when mbox queue is full. usec */
#define MBOX_SLEEP_MIN	800
#define MBOX_SLEEP_MAX	1000

/**
 * select_channel() - Select a SPU channel to handle a crypto request. Selects
 * channel in round robin order.
 *
 * Return: channel index
 */
static u8 select_channel(void)
{
	u8 chan_idx = atomic_inc_return(&iproc_priv.next_chan);

	return chan_idx % iproc_priv.spu.num_chan;
}
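
/*
 * Note: next_chan is a free-running atomic counter. The return value is
 * truncated to a u8 and then reduced modulo num_chan, so successive requests
 * are spread across the available mailbox channels in round-robin order. If
 * num_chan does not divide 256 evenly, the distribution skews slightly at
 * the wrap point, which is harmless for simple load spreading.
 */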

/**
 * spu_ablkcipher_rx_sg_create() - Build up the scatterlist of buffers used to
 * receive a SPU response message for an ablkcipher request. Includes buffers to
 * catch SPU message headers and the response data.
 * @mssg:		mailbox message containing the receive sg
 * @rctx:		crypto request context
 * @rx_frag_num:	number of scatterlist elements required to hold the
 *			SPU response message
 * @chunksize:		Number of bytes of response data expected
 * @stat_pad_len:	Number of bytes required to pad the STAT field to
 *			a 4-byte boundary
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ablkcipher_rx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 rx_frag_num,
			    unsigned int chunksize, u32 stat_pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of response data expected */

	mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (!mssg->spu.dst)
		return -ENOMEM;

	sg = mssg->spu.dst;
	sg_init_table(sg, rx_frag_num);
	/* Space for SPU message header */
	sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len);

	/* If XTS tweak in payload, add buffer to receive encrypted tweak */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak,
			   SPU_XTS_TWEAK_SIZE);

	/* Copy in each dst sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip,
				 rctx->dst_nents, chunksize);
	if (datalen < chunksize) {
		pr_err("%s(): failed to copy dst sg to mbox msg. chunksize %u, datalen %u",
		       __func__, chunksize, datalen);
		return -EFAULT;
	}

	if (ctx->cipher.alg == CIPHER_ALG_RC4)
		/* Add buffer to catch 260-byte SUPDT field for RC4 */
		sg_set_buf(sg++, rctx->msg_buf.c.supdt_tweak, SPU_SUPDT_LEN);

	if (stat_pad_len)
		sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len);

	memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN);
	sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len());

	return 0;
}

/**
 * spu_ablkcipher_tx_sg_create() - Build up the scatterlist of buffers used to
 * send a SPU request message for an ablkcipher request. Includes SPU message
 * headers and the request data.
 * @mssg:		mailbox message containing the transmit sg
 * @rctx:		crypto request context
 * @tx_frag_num:	number of scatterlist elements required to construct the
 *			SPU request message
 * @chunksize:		Number of bytes of request data
 * @pad_len:		Number of pad bytes
 *
 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup()
 * when the request completes, whether the request is handled successfully or
 * there is an error.
 *
 * Returns:
 *   0 if successful
 *   < 0 if an error
 */
static int
spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
			    struct iproc_reqctx_s *rctx,
			    u8 tx_frag_num, unsigned int chunksize, u32 pad_len)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct scatterlist *sg;	/* used to build sgs in mbox message */
	struct iproc_ctx_s *ctx = rctx->ctx;
	u32 datalen;		/* Number of bytes of request data written */
	u32 stat_len;

	mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist),
				rctx->gfp);
	if (unlikely(!mssg->spu.src))
		return -ENOMEM;

	sg = mssg->spu.src;
	sg_init_table(sg, tx_frag_num);

	sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr,
		   BCM_HDR_LEN + ctx->spu_req_hdr_len);

	/* if XTS tweak in payload, copy from IV (where crypto API puts it) */
	if ((ctx->cipher.mode == CIPHER_MODE_XTS) &&
	    spu->spu_xts_tweak_in_payload())
		sg_set_buf(sg++, rctx->msg_buf.iv_ctr, SPU_XTS_TWEAK_SIZE);

	/* Copy in each src sg entry from request, up to chunksize */
	datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip,
				 rctx->src_nents, chunksize);
	if (unlikely(datalen < chunksize)) {
		pr_err("%s(): failed to copy src sg to mbox msg",
		       __func__);
		return -EFAULT;
	}

	if (pad_len)
		sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len);

	stat_len = spu->spu_tx_status_len();
	if (stat_len) {
		memset(rctx->msg_buf.tx_stat, 0, stat_len);
		sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len);
	}
	return 0;
}

static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
				u8 chan_idx)
{
	int err;
	int retry_cnt = 0;
	struct device *dev = &(iproc_priv.pdev->dev);

	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
			/*
			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
			 * not in atomic context and we can wait and try again.
			 */
			retry_cnt++;
			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
			err = mbox_send_message(iproc_priv.mbox[chan_idx],
						mssg);
			atomic_inc(&iproc_priv.mb_no_spc);
		}
	}
	if (err < 0) {
		atomic_inc(&iproc_priv.mb_send_fail);
		return err;
	}

	/* Check error returned by mailbox controller */
	err = mssg->error;
	if (unlikely(err < 0))
		dev_err(dev, "message error %d", err);

	/* Signal txdone for mailbox channel */
	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
	return err;
}
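
/*
 * Worst-case blocking in mailbox_send_message(), as implemented above: a
 * sleepable caller (CRYPTO_TFM_REQ_MAY_SLEEP) retries up to SPU_MB_RETRY_MAX
 * times, sleeping MBOX_SLEEP_MIN..MBOX_SLEEP_MAX usec per retry, so it can
 * block for roughly SPU_MB_RETRY_MAX milliseconds before giving up. A
 * non-sleepable caller gets -ENOBUFS back immediately when the mailbox queue
 * is full.
 */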

/**
 * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
 * a single SPU request message, starting at the current position in the request
 * data.
 * @rctx:	Crypto request context
 *
 * This may be called on the crypto API thread, or, when a request is so large
 * it must be broken into multiple SPU messages, on the thread used to invoke
 * the response callback. When requests are broken into multiple SPU
 * messages, we assume subsequent messages depend on previous results, and
 * thus always wait for previous results before submitting the next message.
 * Because requests are submitted in lock step like this, there is no need
 * to synchronize access to request data structures.
 *
 * Return: -EINPROGRESS: request has been accepted and result will be returned
 *			 asynchronously
 *	   Any other value indicates an error
 */
static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ablkcipher_request *req =
		container_of(areq, struct ablkcipher_request, base);
	struct iproc_ctx_s *ctx = rctx->ctx;
	struct spu_cipher_parms cipher_parms;
	int err = 0;
	unsigned int chunksize = 0;	/* Num bytes of request to submit */
	int remaining = 0;	/* Bytes of request still to process */
	int chunk_start;	/* Beginning of data for current SPU msg */

	/* IV or ctr value to use in this SPU msg */
	u8 local_iv_ctr[MAX_IV_SIZE];
	u32 stat_pad_len;	/* num bytes to align status field */
	u32 pad_len;		/* total length of all padding */
	bool update_key = false;
	struct brcm_message *mssg;	/* mailbox message */

	/* number of entries in src and dst sg in mailbox message. */
	u8 rx_frag_num = 2;	/* response header and STATUS */
	u8 tx_frag_num = 1;	/* request header */

	flow_log("%s\n", __func__);

	cipher_parms.alg = ctx->cipher.alg;
	cipher_parms.mode = ctx->cipher.mode;
	cipher_parms.type = ctx->cipher_type;
	cipher_parms.key_len = ctx->enckeylen;
	cipher_parms.key_buf = ctx->enckey;
	cipher_parms.iv_buf = local_iv_ctr;
	cipher_parms.iv_len = rctx->iv_ctr_len;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;
	remaining = rctx->total_todo - chunk_start;

	/* determine the chunk we are breaking off and update the indexes */
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (remaining > ctx->max_payload))
		chunksize = ctx->max_payload;
	else
		chunksize = remaining;

	rctx->src_sent += chunksize;
	rctx->total_sent = rctx->src_sent;

	/* Count number of sg entries to be included in this request */
	rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize);
	rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize);

	if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
	    rctx->is_encrypt && chunk_start)
		/*
		 * Encrypting non-first chunk. Copy last block of
		 * previous result to IV for this chunk.
		 */
		sg_copy_part_to_buf(req->dst, rctx->msg_buf.iv_ctr,
				    rctx->iv_ctr_len,
				    chunk_start - rctx->iv_ctr_len);

	if (rctx->iv_ctr_len) {
		/* get our local copy of the iv */
		__builtin_memcpy(local_iv_ctr, rctx->msg_buf.iv_ctr,
				 rctx->iv_ctr_len);

		/* generate the next IV if possible */
		if ((ctx->cipher.mode == CIPHER_MODE_CBC) &&
		    !rctx->is_encrypt) {
			/*
			 * CBC Decrypt: next IV is the last ciphertext block in
			 * this chunk
			 */
			sg_copy_part_to_buf(req->src, rctx->msg_buf.iv_ctr,
					    rctx->iv_ctr_len,
					    rctx->src_sent - rctx->iv_ctr_len);
		} else if (ctx->cipher.mode == CIPHER_MODE_CTR) {
			/*
			 * The SPU hardware increments the counter once for
			 * each AES block of 16 bytes. So update the counter
			 * for the next chunk, if there is one. Note that for
			 * this chunk, the counter has already been copied to
			 * local_iv_ctr. We can assume a block size of 16,
			 * because we only support CTR mode for AES, not for
			 * any other cipher alg.
			 */
			add_to_ctr(rctx->msg_buf.iv_ctr, chunksize >> 4);
		}
	}
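
	/*
	 * Example of the counter arithmetic above: a 32 KiB CTR-mode chunk
	 * advances the counter by 32768 / 16 = 2048 AES blocks, so the IV
	 * for the next chunk continues exactly where the hardware left off.
	 */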

	if (ctx->cipher.alg == CIPHER_ALG_RC4) {
		rx_frag_num++;
		if (chunk_start) {
			/*
			 * for non-first RC4 chunks, use SUPDT from previous
			 * response as key for this chunk.
			 */
			cipher_parms.key_buf = rctx->msg_buf.c.supdt_tweak;
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_UPDT;
		} else if (!rctx->is_encrypt) {
			/*
			 * First RC4 chunk. For decrypt, key in pre-built msg
			 * header may have been changed if encrypt required
			 * multiple chunks. So revert the key to the
			 * ctx->enckey value.
			 */
			update_key = true;
			cipher_parms.type = CIPHER_TYPE_INIT;
		}
	}

	if (ctx->max_payload == SPU_MAX_PAYLOAD_INF)
		flow_log("max_payload infinite\n");
	else
		flow_log("max_payload %u\n", ctx->max_payload);

	flow_log("sent:%u start:%u remains:%u size:%u\n",
		 rctx->src_sent, chunk_start, remaining, chunksize);

	/* Copy SPU header template created at setkey time */
	memcpy(rctx->msg_buf.bcm_spu_req_hdr, ctx->bcm_spu_req_hdr,
	       sizeof(rctx->msg_buf.bcm_spu_req_hdr));

	/*
	 * Pass SUPDT field as key. Key field in finish() call is only used
	 * when update_key has been set above for RC4. Will be ignored in
	 * all other cases.
444 */ 445 spu->spu_cipher_req_finish(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, 446 ctx->spu_req_hdr_len, !(rctx->is_encrypt), 447 &cipher_parms, update_key, chunksize); 448 449 atomic64_add(chunksize, &iproc_priv.bytes_out); 450 451 stat_pad_len = spu->spu_wordalign_padlen(chunksize); 452 if (stat_pad_len) 453 rx_frag_num++; 454 pad_len = stat_pad_len; 455 if (pad_len) { 456 tx_frag_num++; 457 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 0, 458 0, ctx->auth.alg, ctx->auth.mode, 459 rctx->total_sent, stat_pad_len); 460 } 461 462 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, 463 ctx->spu_req_hdr_len); 464 packet_log("payload:\n"); 465 dump_sg(rctx->src_sg, rctx->src_skip, chunksize); 466 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len); 467 468 /* 469 * Build mailbox message containing SPU request msg and rx buffers 470 * to catch response message 471 */ 472 memset(mssg, 0, sizeof(*mssg)); 473 mssg->type = BRCM_MESSAGE_SPU; 474 mssg->ctx = rctx; /* Will be returned in response */ 475 476 /* Create rx scatterlist to catch result */ 477 rx_frag_num += rctx->dst_nents; 478 479 if ((ctx->cipher.mode == CIPHER_MODE_XTS) && 480 spu->spu_xts_tweak_in_payload()) 481 rx_frag_num++; /* extra sg to insert tweak */ 482 483 err = spu_ablkcipher_rx_sg_create(mssg, rctx, rx_frag_num, chunksize, 484 stat_pad_len); 485 if (err) 486 return err; 487 488 /* Create tx scatterlist containing SPU request message */ 489 tx_frag_num += rctx->src_nents; 490 if (spu->spu_tx_status_len()) 491 tx_frag_num++; 492 493 if ((ctx->cipher.mode == CIPHER_MODE_XTS) && 494 spu->spu_xts_tweak_in_payload()) 495 tx_frag_num++; /* extra sg to insert tweak */ 496 497 err = spu_ablkcipher_tx_sg_create(mssg, rctx, tx_frag_num, chunksize, 498 pad_len); 499 if (err) 500 return err; 501 502 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); 503 if (unlikely(err < 0)) 504 return err; 505 506 return -EINPROGRESS; 507 } 508 509 /** 510 * handle_ablkcipher_resp() - Process a block cipher SPU response. Updates the 511 * total received count for the request and updates global stats. 512 * @rctx: Crypto request context 513 */ 514 static void handle_ablkcipher_resp(struct iproc_reqctx_s *rctx) 515 { 516 struct spu_hw *spu = &iproc_priv.spu; 517 #ifdef DEBUG 518 struct crypto_async_request *areq = rctx->parent; 519 struct ablkcipher_request *req = ablkcipher_request_cast(areq); 520 #endif 521 struct iproc_ctx_s *ctx = rctx->ctx; 522 u32 payload_len; 523 524 /* See how much data was returned */ 525 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr); 526 527 /* 528 * In XTS mode, the first SPU_XTS_TWEAK_SIZE bytes may be the 529 * encrypted tweak ("i") value; we don't count those. 
530 */ 531 if ((ctx->cipher.mode == CIPHER_MODE_XTS) && 532 spu->spu_xts_tweak_in_payload() && 533 (payload_len >= SPU_XTS_TWEAK_SIZE)) 534 payload_len -= SPU_XTS_TWEAK_SIZE; 535 536 atomic64_add(payload_len, &iproc_priv.bytes_in); 537 538 flow_log("%s() offset: %u, bd_len: %u BD:\n", 539 __func__, rctx->total_received, payload_len); 540 541 dump_sg(req->dst, rctx->total_received, payload_len); 542 if (ctx->cipher.alg == CIPHER_ALG_RC4) 543 packet_dump(" supdt ", rctx->msg_buf.c.supdt_tweak, 544 SPU_SUPDT_LEN); 545 546 rctx->total_received += payload_len; 547 if (rctx->total_received == rctx->total_todo) { 548 atomic_inc(&iproc_priv.op_counts[SPU_OP_CIPHER]); 549 atomic_inc( 550 &iproc_priv.cipher_cnt[ctx->cipher.alg][ctx->cipher.mode]); 551 } 552 } 553 554 /** 555 * spu_ahash_rx_sg_create() - Build up the scatterlist of buffers used to 556 * receive a SPU response message for an ahash request. 557 * @mssg: mailbox message containing the receive sg 558 * @rctx: crypto request context 559 * @rx_frag_num: number of scatterlist elements required to hold the 560 * SPU response message 561 * @digestsize: length of hash digest, in bytes 562 * @stat_pad_len: Number of bytes required to pad the STAT field to 563 * a 4-byte boundary 564 * 565 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() 566 * when the request completes, whether the request is handled successfully or 567 * there is an error. 568 * 569 * Return: 570 * 0 if successful 571 * < 0 if an error 572 */ 573 static int 574 spu_ahash_rx_sg_create(struct brcm_message *mssg, 575 struct iproc_reqctx_s *rctx, 576 u8 rx_frag_num, unsigned int digestsize, 577 u32 stat_pad_len) 578 { 579 struct spu_hw *spu = &iproc_priv.spu; 580 struct scatterlist *sg; /* used to build sgs in mbox message */ 581 struct iproc_ctx_s *ctx = rctx->ctx; 582 583 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), 584 rctx->gfp); 585 if (!mssg->spu.dst) 586 return -ENOMEM; 587 588 sg = mssg->spu.dst; 589 sg_init_table(sg, rx_frag_num); 590 /* Space for SPU message header */ 591 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len); 592 593 /* Space for digest */ 594 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize); 595 596 if (stat_pad_len) 597 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len); 598 599 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN); 600 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len()); 601 return 0; 602 } 603 604 /** 605 * spu_ahash_tx_sg_create() - Build up the scatterlist of buffers used to send 606 * a SPU request message for an ahash request. Includes SPU message headers and 607 * the request data. 608 * @mssg: mailbox message containing the transmit sg 609 * @rctx: crypto request context 610 * @tx_frag_num: number of scatterlist elements required to construct the 611 * SPU request message 612 * @spu_hdr_len: length in bytes of SPU message header 613 * @hash_carry_len: Number of bytes of data carried over from previous req 614 * @new_data_len: Number of bytes of new request data 615 * @pad_len: Number of pad bytes 616 * 617 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() 618 * when the request completes, whether the request is handled successfully or 619 * there is an error. 
620 * 621 * Return: 622 * 0 if successful 623 * < 0 if an error 624 */ 625 static int 626 spu_ahash_tx_sg_create(struct brcm_message *mssg, 627 struct iproc_reqctx_s *rctx, 628 u8 tx_frag_num, 629 u32 spu_hdr_len, 630 unsigned int hash_carry_len, 631 unsigned int new_data_len, u32 pad_len) 632 { 633 struct spu_hw *spu = &iproc_priv.spu; 634 struct scatterlist *sg; /* used to build sgs in mbox message */ 635 u32 datalen; /* Number of bytes of response data expected */ 636 u32 stat_len; 637 638 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), 639 rctx->gfp); 640 if (!mssg->spu.src) 641 return -ENOMEM; 642 643 sg = mssg->spu.src; 644 sg_init_table(sg, tx_frag_num); 645 646 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr, 647 BCM_HDR_LEN + spu_hdr_len); 648 649 if (hash_carry_len) 650 sg_set_buf(sg++, rctx->hash_carry, hash_carry_len); 651 652 if (new_data_len) { 653 /* Copy in each src sg entry from request, up to chunksize */ 654 datalen = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip, 655 rctx->src_nents, new_data_len); 656 if (datalen < new_data_len) { 657 pr_err("%s(): failed to copy src sg to mbox msg", 658 __func__); 659 return -EFAULT; 660 } 661 } 662 663 if (pad_len) 664 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len); 665 666 stat_len = spu->spu_tx_status_len(); 667 if (stat_len) { 668 memset(rctx->msg_buf.tx_stat, 0, stat_len); 669 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len); 670 } 671 672 return 0; 673 } 674 675 /** 676 * handle_ahash_req() - Process an asynchronous hash request from the crypto 677 * API. 678 * @rctx: Crypto request context 679 * 680 * Builds a SPU request message embedded in a mailbox message and submits the 681 * mailbox message on a selected mailbox channel. The SPU request message is 682 * constructed as a scatterlist, including entries from the crypto API's 683 * src scatterlist to avoid copying the data to be hashed. This function is 684 * called either on the thread from the crypto API, or, in the case that the 685 * crypto API request is too large to fit in a single SPU request message, 686 * on the thread that invokes the receive callback with a response message. 687 * Because some operations require the response from one chunk before the next 688 * chunk can be submitted, we always wait for the response for the previous 689 * chunk before submitting the next chunk. Because requests are submitted in 690 * lock step like this, there is no need to synchronize access to request data 691 * structures. 
 *
 * Return:
 *   -EINPROGRESS: request has been submitted to SPU and response will be
 *		    returned asynchronously
 *   -EAGAIN:	    non-final request included a small amount of data, which
 *		    for efficiency we did not submit to the SPU, but instead
 *		    stored to be submitted to the SPU with the next part of
 *		    the request
 *   other:	    an error code
 */
static int handle_ahash_req(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ahash_tfm(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(tfm);
	struct iproc_ctx_s *ctx = rctx->ctx;

	/* number of bytes still to be hashed in this req */
	unsigned int nbytes_to_hash = 0;
	int err = 0;
	unsigned int chunksize = 0;	/* length of hash carry + new data */
	/*
	 * length of new data, not from hash carry, to be submitted in
	 * this hw request
	 */
	unsigned int new_data_len;

	unsigned int chunk_start = 0;
	u32 db_size;	/* Length of data field, incl gcm and hash padding */
	int pad_len = 0;	/* total pad len, including gcm, hash, stat padding */
	u32 data_pad_len = 0;	/* length of GCM/CCM padding */
	u32 stat_pad_len = 0;	/* length of padding to align STATUS word */
	struct brcm_message *mssg;	/* mailbox message */
	struct spu_request_opts req_opts;
	struct spu_cipher_parms cipher_parms;
	struct spu_hash_parms hash_parms;
	struct spu_aead_parms aead_parms;
	unsigned int local_nbuf;
	u32 spu_hdr_len;
	unsigned int digestsize;
	u16 rem = 0;

	/*
	 * number of entries in src and dst sg. Always includes SPU msg header.
	 * rx always includes a buffer to catch digest and STATUS.
	 */
	u8 rx_frag_num = 3;
	u8 tx_frag_num = 1;

	flow_log("total_todo %u, total_sent %u\n",
		 rctx->total_todo, rctx->total_sent);

	memset(&req_opts, 0, sizeof(req_opts));
	memset(&cipher_parms, 0, sizeof(cipher_parms));
	memset(&hash_parms, 0, sizeof(hash_parms));
	memset(&aead_parms, 0, sizeof(aead_parms));

	req_opts.bd_suppress = true;
	hash_parms.alg = ctx->auth.alg;
	hash_parms.mode = ctx->auth.mode;
	hash_parms.type = HASH_TYPE_NONE;
	hash_parms.key_buf = (u8 *)ctx->authkey;
	hash_parms.key_len = ctx->authkeylen;

	/*
	 * For hash algorithms, the assignment below looks a bit odd, but it
	 * is needed for the AES-XCBC and AES-CMAC hash algorithms to
	 * differentiate between 128, 192 and 256 bit key values. Based on the
	 * key value, the hash algorithm is selected. For example, for a
	 * 128 bit key, the hash algorithm is AES-128.
	 */
	cipher_parms.type = ctx->cipher_type;

	mssg = &rctx->mb_mssg;
	chunk_start = rctx->src_sent;

	/*
	 * Compute the amount remaining to hash. This may include data
	 * carried over from previous requests.
	 */
	nbytes_to_hash = rctx->total_todo - rctx->total_sent;
	chunksize = nbytes_to_hash;
	if ((ctx->max_payload != SPU_MAX_PAYLOAD_INF) &&
	    (chunksize > ctx->max_payload))
		chunksize = ctx->max_payload;

	/*
	 * If this is not a final request and the request data is not a
	 * multiple of a full block, then simply park the extra data and
	 * prefix it to the data for the next request.
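	 * For example, with a 64-byte block size, a 100-byte non-final update
	 * hashes only the first 64 bytes now; the remaining 36 bytes end up
	 * in hash_carry and are prefixed to the data of the next request.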
784 */ 785 if (!rctx->is_final) { 786 u8 *dest = rctx->hash_carry + rctx->hash_carry_len; 787 u16 new_len; /* len of data to add to hash carry */ 788 789 rem = chunksize % blocksize; /* remainder */ 790 if (rem) { 791 /* chunksize not a multiple of blocksize */ 792 chunksize -= rem; 793 if (chunksize == 0) { 794 /* Don't have a full block to submit to hw */ 795 new_len = rem - rctx->hash_carry_len; 796 sg_copy_part_to_buf(req->src, dest, new_len, 797 rctx->src_sent); 798 rctx->hash_carry_len = rem; 799 flow_log("Exiting with hash carry len: %u\n", 800 rctx->hash_carry_len); 801 packet_dump(" buf: ", 802 rctx->hash_carry, 803 rctx->hash_carry_len); 804 return -EAGAIN; 805 } 806 } 807 } 808 809 /* if we have hash carry, then prefix it to the data in this request */ 810 local_nbuf = rctx->hash_carry_len; 811 rctx->hash_carry_len = 0; 812 if (local_nbuf) 813 tx_frag_num++; 814 new_data_len = chunksize - local_nbuf; 815 816 /* Count number of sg entries to be used in this request */ 817 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, 818 new_data_len); 819 820 /* AES hashing keeps key size in type field, so need to copy it here */ 821 if (hash_parms.alg == HASH_ALG_AES) 822 hash_parms.type = cipher_parms.type; 823 else 824 hash_parms.type = spu->spu_hash_type(rctx->total_sent); 825 826 digestsize = spu->spu_digest_size(ctx->digestsize, ctx->auth.alg, 827 hash_parms.type); 828 hash_parms.digestsize = digestsize; 829 830 /* update the indexes */ 831 rctx->total_sent += chunksize; 832 /* if you sent a prebuf then that wasn't from this req->src */ 833 rctx->src_sent += new_data_len; 834 835 if ((rctx->total_sent == rctx->total_todo) && rctx->is_final) 836 hash_parms.pad_len = spu->spu_hash_pad_len(hash_parms.alg, 837 hash_parms.mode, 838 chunksize, 839 blocksize); 840 841 /* 842 * If a non-first chunk, then include the digest returned from the 843 * previous chunk so that hw can add to it (except for AES types). 844 */ 845 if ((hash_parms.type == HASH_TYPE_UPDT) && 846 (hash_parms.alg != HASH_ALG_AES)) { 847 hash_parms.key_buf = rctx->incr_hash; 848 hash_parms.key_len = digestsize; 849 } 850 851 atomic64_add(chunksize, &iproc_priv.bytes_out); 852 853 flow_log("%s() final: %u nbuf: %u ", 854 __func__, rctx->is_final, local_nbuf); 855 856 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) 857 flow_log("max_payload infinite\n"); 858 else 859 flow_log("max_payload %u\n", ctx->max_payload); 860 861 flow_log("chunk_start: %u chunk_size: %u\n", chunk_start, chunksize); 862 863 /* Prepend SPU header with type 3 BCM header */ 864 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); 865 866 hash_parms.prebuf_len = local_nbuf; 867 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr + 868 BCM_HDR_LEN, 869 &req_opts, &cipher_parms, 870 &hash_parms, &aead_parms, 871 new_data_len); 872 873 if (spu_hdr_len == 0) { 874 pr_err("Failed to create SPU request header\n"); 875 return -EFAULT; 876 } 877 878 /* 879 * Determine total length of padding required. Put all padding in one 880 * buffer. 
	 */
	data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, chunksize);
	db_size = spu_real_db_size(0, 0, local_nbuf, new_data_len,
				   0, 0, hash_parms.pad_len);
	if (spu->spu_tx_status_len())
		stat_pad_len = spu->spu_wordalign_padlen(db_size);
	if (stat_pad_len)
		rx_frag_num++;
	pad_len = hash_parms.pad_len + data_pad_len + stat_pad_len;
	if (pad_len) {
		tx_frag_num++;
		spu->spu_request_pad(rctx->msg_buf.spu_req_pad, data_pad_len,
				     hash_parms.pad_len, ctx->auth.alg,
				     ctx->auth.mode, rctx->total_sent,
				     stat_pad_len);
	}

	spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN,
			      spu_hdr_len);
	packet_dump(" prebuf: ", rctx->hash_carry, local_nbuf);
	flow_log("Data:\n");
	dump_sg(rctx->src_sg, rctx->src_skip, new_data_len);
	packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len);

	/*
	 * Build mailbox message containing SPU request msg and rx buffers
	 * to catch response message
	 */
	memset(mssg, 0, sizeof(*mssg));
	mssg->type = BRCM_MESSAGE_SPU;
	mssg->ctx = rctx;	/* Will be returned in response */

	/* Create rx scatterlist to catch result */
	err = spu_ahash_rx_sg_create(mssg, rctx, rx_frag_num, digestsize,
				     stat_pad_len);
	if (err)
		return err;

	/* Create tx scatterlist containing SPU request message */
	tx_frag_num += rctx->src_nents;
	if (spu->spu_tx_status_len())
		tx_frag_num++;
	err = spu_ahash_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len,
				     local_nbuf, new_data_len, pad_len);
	if (err)
		return err;

	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
	if (unlikely(err < 0))
		return err;

	return -EINPROGRESS;
}

/**
 * spu_hmac_outer_hash() - Request synchronous software compute of the outer
 * hash for an HMAC request.
 * @req:	The HMAC request from the crypto API
 * @ctx:	The session context
 *
 * Return: 0 if synchronous hash operation successful
 *	   -EINVAL if the hash algo is unrecognized
 *	   any other value indicates an error
 */
static int spu_hmac_outer_hash(struct ahash_request *req,
			       struct iproc_ctx_s *ctx)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
	int rc;

	switch (ctx->auth.alg) {
	case HASH_ALG_MD5:
		rc = do_shash("md5", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA1:
		rc = do_shash("sha1", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA224:
		rc = do_shash("sha224", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA256:
		rc = do_shash("sha256", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA384:
		rc = do_shash("sha384", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	case HASH_ALG_SHA512:
		rc = do_shash("sha512", req->result, ctx->opad, blocksize,
			      req->result, ctx->digestsize, NULL, 0);
		break;
	default:
		pr_err("%s() Error : unknown hmac type\n", __func__);
		rc = -EINVAL;
	}
	return rc;
}
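
/*
 * Note on the software-HMAC split: when rctx->is_sw_hmac is set, the digest
 * returned by the SPU is the inner hash only. spu_hmac_outer_hash() finishes
 * the HMAC on the CPU by hashing the blocksize-byte opad followed by the
 * digest already sitting in req->result, writing the final value back to
 * req->result.
 */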

/**
 * ahash_req_done() - Process a hash result from the SPU hardware.
 * @rctx:	Crypto request context
 *
 * Return: 0 if successful
 *	   < 0 if an error
 */
static int ahash_req_done(struct iproc_reqctx_s *rctx)
{
	struct spu_hw *spu = &iproc_priv.spu;
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct iproc_ctx_s *ctx = rctx->ctx;
	int err;

	memcpy(req->result, rctx->msg_buf.digest, ctx->digestsize);

	if (spu->spu_type == SPU_TYPE_SPUM) {
		/* byte swap the output from the UPDT function to network byte
		 * order
		 */
		if (ctx->auth.alg == HASH_ALG_MD5) {
			__swab32s((u32 *)req->result);
			__swab32s(((u32 *)req->result) + 1);
			__swab32s(((u32 *)req->result) + 2);
			__swab32s(((u32 *)req->result) + 3);
			__swab32s(((u32 *)req->result) + 4);
		}
	}

	flow_dump(" digest ", req->result, ctx->digestsize);

	/* if this is an HMAC then do the outer hash */
	if (rctx->is_sw_hmac) {
		err = spu_hmac_outer_hash(req, ctx);
		if (err < 0)
			return err;
		flow_dump(" hmac: ", req->result, ctx->digestsize);
	}

	if (rctx->is_sw_hmac || ctx->auth.mode == HASH_MODE_HMAC) {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HMAC]);
		atomic_inc(&iproc_priv.hmac_cnt[ctx->auth.alg]);
	} else {
		atomic_inc(&iproc_priv.op_counts[SPU_OP_HASH]);
		atomic_inc(&iproc_priv.hash_cnt[ctx->auth.alg]);
	}

	return 0;
}

/**
 * handle_ahash_resp() - Process a SPU response message for a hash request.
 * Checks if the entire crypto API request has been processed, and if so,
 * invokes post processing on the result.
 * @rctx:	Crypto request context
 */
static void handle_ahash_resp(struct iproc_reqctx_s *rctx)
{
	struct iproc_ctx_s *ctx = rctx->ctx;
#ifdef DEBUG
	struct crypto_async_request *areq = rctx->parent;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
#endif
	/*
	 * Save hash to use as input to next op if incremental. Might be copying
	 * too much, but that's easier than figuring out actual digest size here
	 */
	memcpy(rctx->incr_hash, rctx->msg_buf.digest, MAX_DIGEST_SIZE);

	flow_log("%s() blocksize:%u digestsize:%u\n",
		 __func__, blocksize, ctx->digestsize);

	atomic64_add(ctx->digestsize, &iproc_priv.bytes_in);

	if (rctx->is_final && (rctx->total_sent == rctx->total_todo))
		ahash_req_done(rctx);
}

/**
 * spu_aead_rx_sg_create() - Build up the scatterlist of buffers used to receive
 * a SPU response message for an AEAD request. Includes buffers to catch SPU
 * message headers and the response data.
1071 * @mssg: mailbox message containing the receive sg 1072 * @rctx: crypto request context 1073 * @rx_frag_num: number of scatterlist elements required to hold the 1074 * SPU response message 1075 * @assoc_len: Length of associated data included in the crypto request 1076 * @ret_iv_len: Length of IV returned in response 1077 * @resp_len: Number of bytes of response data expected to be written to 1078 * dst buffer from crypto API 1079 * @digestsize: Length of hash digest, in bytes 1080 * @stat_pad_len: Number of bytes required to pad the STAT field to 1081 * a 4-byte boundary 1082 * 1083 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() 1084 * when the request completes, whether the request is handled successfully or 1085 * there is an error. 1086 * 1087 * Returns: 1088 * 0 if successful 1089 * < 0 if an error 1090 */ 1091 static int spu_aead_rx_sg_create(struct brcm_message *mssg, 1092 struct aead_request *req, 1093 struct iproc_reqctx_s *rctx, 1094 u8 rx_frag_num, 1095 unsigned int assoc_len, 1096 u32 ret_iv_len, unsigned int resp_len, 1097 unsigned int digestsize, u32 stat_pad_len) 1098 { 1099 struct spu_hw *spu = &iproc_priv.spu; 1100 struct scatterlist *sg; /* used to build sgs in mbox message */ 1101 struct iproc_ctx_s *ctx = rctx->ctx; 1102 u32 datalen; /* Number of bytes of response data expected */ 1103 u32 assoc_buf_len; 1104 u8 data_padlen = 0; 1105 1106 if (ctx->is_rfc4543) { 1107 /* RFC4543: only pad after data, not after AAD */ 1108 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, 1109 assoc_len + resp_len); 1110 assoc_buf_len = assoc_len; 1111 } else { 1112 data_padlen = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, 1113 resp_len); 1114 assoc_buf_len = spu->spu_assoc_resp_len(ctx->cipher.mode, 1115 assoc_len, ret_iv_len, 1116 rctx->is_encrypt); 1117 } 1118 1119 if (ctx->cipher.mode == CIPHER_MODE_CCM) 1120 /* ICV (after data) must be in the next 32-bit word for CCM */ 1121 data_padlen += spu->spu_wordalign_padlen(assoc_buf_len + 1122 resp_len + 1123 data_padlen); 1124 1125 if (data_padlen) 1126 /* have to catch gcm pad in separate buffer */ 1127 rx_frag_num++; 1128 1129 mssg->spu.dst = kcalloc(rx_frag_num, sizeof(struct scatterlist), 1130 rctx->gfp); 1131 if (!mssg->spu.dst) 1132 return -ENOMEM; 1133 1134 sg = mssg->spu.dst; 1135 sg_init_table(sg, rx_frag_num); 1136 1137 /* Space for SPU message header */ 1138 sg_set_buf(sg++, rctx->msg_buf.spu_resp_hdr, ctx->spu_resp_hdr_len); 1139 1140 if (assoc_buf_len) { 1141 /* 1142 * Don't write directly to req->dst, because SPU may pad the 1143 * assoc data in the response 1144 */ 1145 memset(rctx->msg_buf.a.resp_aad, 0, assoc_buf_len); 1146 sg_set_buf(sg++, rctx->msg_buf.a.resp_aad, assoc_buf_len); 1147 } 1148 1149 if (resp_len) { 1150 /* 1151 * Copy in each dst sg entry from request, up to chunksize. 1152 * dst sg catches just the data. digest caught in separate buf. 1153 */ 1154 datalen = spu_msg_sg_add(&sg, &rctx->dst_sg, &rctx->dst_skip, 1155 rctx->dst_nents, resp_len); 1156 if (datalen < (resp_len)) { 1157 pr_err("%s(): failed to copy dst sg to mbox msg. 
expected len %u, datalen %u", 1158 __func__, resp_len, datalen); 1159 return -EFAULT; 1160 } 1161 } 1162 1163 /* If GCM/CCM data is padded, catch padding in separate buffer */ 1164 if (data_padlen) { 1165 memset(rctx->msg_buf.a.gcmpad, 0, data_padlen); 1166 sg_set_buf(sg++, rctx->msg_buf.a.gcmpad, data_padlen); 1167 } 1168 1169 /* Always catch ICV in separate buffer */ 1170 sg_set_buf(sg++, rctx->msg_buf.digest, digestsize); 1171 1172 flow_log("stat_pad_len %u\n", stat_pad_len); 1173 if (stat_pad_len) { 1174 memset(rctx->msg_buf.rx_stat_pad, 0, stat_pad_len); 1175 sg_set_buf(sg++, rctx->msg_buf.rx_stat_pad, stat_pad_len); 1176 } 1177 1178 memset(rctx->msg_buf.rx_stat, 0, SPU_RX_STATUS_LEN); 1179 sg_set_buf(sg, rctx->msg_buf.rx_stat, spu->spu_rx_status_len()); 1180 1181 return 0; 1182 } 1183 1184 /** 1185 * spu_aead_tx_sg_create() - Build up the scatterlist of buffers used to send a 1186 * SPU request message for an AEAD request. Includes SPU message headers and the 1187 * request data. 1188 * @mssg: mailbox message containing the transmit sg 1189 * @rctx: crypto request context 1190 * @tx_frag_num: number of scatterlist elements required to construct the 1191 * SPU request message 1192 * @spu_hdr_len: length of SPU message header in bytes 1193 * @assoc: crypto API associated data scatterlist 1194 * @assoc_len: length of associated data 1195 * @assoc_nents: number of scatterlist entries containing assoc data 1196 * @aead_iv_len: length of AEAD IV, if included 1197 * @chunksize: Number of bytes of request data 1198 * @aad_pad_len: Number of bytes of padding at end of AAD. For GCM/CCM. 1199 * @pad_len: Number of pad bytes 1200 * @incl_icv: If true, write separate ICV buffer after data and 1201 * any padding 1202 * 1203 * The scatterlist that gets allocated here is freed in spu_chunk_cleanup() 1204 * when the request completes, whether the request is handled successfully or 1205 * there is an error. 
1206 * 1207 * Return: 1208 * 0 if successful 1209 * < 0 if an error 1210 */ 1211 static int spu_aead_tx_sg_create(struct brcm_message *mssg, 1212 struct iproc_reqctx_s *rctx, 1213 u8 tx_frag_num, 1214 u32 spu_hdr_len, 1215 struct scatterlist *assoc, 1216 unsigned int assoc_len, 1217 int assoc_nents, 1218 unsigned int aead_iv_len, 1219 unsigned int chunksize, 1220 u32 aad_pad_len, u32 pad_len, bool incl_icv) 1221 { 1222 struct spu_hw *spu = &iproc_priv.spu; 1223 struct scatterlist *sg; /* used to build sgs in mbox message */ 1224 struct scatterlist *assoc_sg = assoc; 1225 struct iproc_ctx_s *ctx = rctx->ctx; 1226 u32 datalen; /* Number of bytes of data to write */ 1227 u32 written; /* Number of bytes of data written */ 1228 u32 assoc_offset = 0; 1229 u32 stat_len; 1230 1231 mssg->spu.src = kcalloc(tx_frag_num, sizeof(struct scatterlist), 1232 rctx->gfp); 1233 if (!mssg->spu.src) 1234 return -ENOMEM; 1235 1236 sg = mssg->spu.src; 1237 sg_init_table(sg, tx_frag_num); 1238 1239 sg_set_buf(sg++, rctx->msg_buf.bcm_spu_req_hdr, 1240 BCM_HDR_LEN + spu_hdr_len); 1241 1242 if (assoc_len) { 1243 /* Copy in each associated data sg entry from request */ 1244 written = spu_msg_sg_add(&sg, &assoc_sg, &assoc_offset, 1245 assoc_nents, assoc_len); 1246 if (written < assoc_len) { 1247 pr_err("%s(): failed to copy assoc sg to mbox msg", 1248 __func__); 1249 return -EFAULT; 1250 } 1251 } 1252 1253 if (aead_iv_len) 1254 sg_set_buf(sg++, rctx->msg_buf.iv_ctr, aead_iv_len); 1255 1256 if (aad_pad_len) { 1257 memset(rctx->msg_buf.a.req_aad_pad, 0, aad_pad_len); 1258 sg_set_buf(sg++, rctx->msg_buf.a.req_aad_pad, aad_pad_len); 1259 } 1260 1261 datalen = chunksize; 1262 if ((chunksize > ctx->digestsize) && incl_icv) 1263 datalen -= ctx->digestsize; 1264 if (datalen) { 1265 /* For aead, a single msg should consume the entire src sg */ 1266 written = spu_msg_sg_add(&sg, &rctx->src_sg, &rctx->src_skip, 1267 rctx->src_nents, datalen); 1268 if (written < datalen) { 1269 pr_err("%s(): failed to copy src sg to mbox msg", 1270 __func__); 1271 return -EFAULT; 1272 } 1273 } 1274 1275 if (pad_len) { 1276 memset(rctx->msg_buf.spu_req_pad, 0, pad_len); 1277 sg_set_buf(sg++, rctx->msg_buf.spu_req_pad, pad_len); 1278 } 1279 1280 if (incl_icv) 1281 sg_set_buf(sg++, rctx->msg_buf.digest, ctx->digestsize); 1282 1283 stat_len = spu->spu_tx_status_len(); 1284 if (stat_len) { 1285 memset(rctx->msg_buf.tx_stat, 0, stat_len); 1286 sg_set_buf(sg, rctx->msg_buf.tx_stat, stat_len); 1287 } 1288 return 0; 1289 } 1290 1291 /** 1292 * handle_aead_req() - Submit a SPU request message for the next chunk of the 1293 * current AEAD request. 1294 * @rctx: Crypto request context 1295 * 1296 * Unlike other operation types, we assume the length of the request fits in 1297 * a single SPU request message. aead_enqueue() makes sure this is true. 1298 * Comments for other op types regarding threads applies here as well. 1299 * 1300 * Unlike incremental hash ops, where the spu returns the entire hash for 1301 * truncated algs like sha-224, the SPU returns just the truncated hash in 1302 * response to aead requests. So digestsize is always ctx->digestsize here. 
1303 * 1304 * Return: -EINPROGRESS: crypto request has been accepted and result will be 1305 * returned asynchronously 1306 * Any other value indicates an error 1307 */ 1308 static int handle_aead_req(struct iproc_reqctx_s *rctx) 1309 { 1310 struct spu_hw *spu = &iproc_priv.spu; 1311 struct crypto_async_request *areq = rctx->parent; 1312 struct aead_request *req = container_of(areq, 1313 struct aead_request, base); 1314 struct iproc_ctx_s *ctx = rctx->ctx; 1315 int err; 1316 unsigned int chunksize; 1317 unsigned int resp_len; 1318 u32 spu_hdr_len; 1319 u32 db_size; 1320 u32 stat_pad_len; 1321 u32 pad_len; 1322 struct brcm_message *mssg; /* mailbox message */ 1323 struct spu_request_opts req_opts; 1324 struct spu_cipher_parms cipher_parms; 1325 struct spu_hash_parms hash_parms; 1326 struct spu_aead_parms aead_parms; 1327 int assoc_nents = 0; 1328 bool incl_icv = false; 1329 unsigned int digestsize = ctx->digestsize; 1330 1331 /* number of entries in src and dst sg. Always includes SPU msg header. 1332 */ 1333 u8 rx_frag_num = 2; /* and STATUS */ 1334 u8 tx_frag_num = 1; 1335 1336 /* doing the whole thing at once */ 1337 chunksize = rctx->total_todo; 1338 1339 flow_log("%s: chunksize %u\n", __func__, chunksize); 1340 1341 memset(&req_opts, 0, sizeof(req_opts)); 1342 memset(&hash_parms, 0, sizeof(hash_parms)); 1343 memset(&aead_parms, 0, sizeof(aead_parms)); 1344 1345 req_opts.is_inbound = !(rctx->is_encrypt); 1346 req_opts.auth_first = ctx->auth_first; 1347 req_opts.is_aead = true; 1348 req_opts.is_esp = ctx->is_esp; 1349 1350 cipher_parms.alg = ctx->cipher.alg; 1351 cipher_parms.mode = ctx->cipher.mode; 1352 cipher_parms.type = ctx->cipher_type; 1353 cipher_parms.key_buf = ctx->enckey; 1354 cipher_parms.key_len = ctx->enckeylen; 1355 cipher_parms.iv_buf = rctx->msg_buf.iv_ctr; 1356 cipher_parms.iv_len = rctx->iv_ctr_len; 1357 1358 hash_parms.alg = ctx->auth.alg; 1359 hash_parms.mode = ctx->auth.mode; 1360 hash_parms.type = HASH_TYPE_NONE; 1361 hash_parms.key_buf = (u8 *)ctx->authkey; 1362 hash_parms.key_len = ctx->authkeylen; 1363 hash_parms.digestsize = digestsize; 1364 1365 if ((ctx->auth.alg == HASH_ALG_SHA224) && 1366 (ctx->authkeylen < SHA224_DIGEST_SIZE)) 1367 hash_parms.key_len = SHA224_DIGEST_SIZE; 1368 1369 aead_parms.assoc_size = req->assoclen; 1370 if (ctx->is_esp && !ctx->is_rfc4543) { 1371 /* 1372 * 8-byte IV is included assoc data in request. SPU2 1373 * expects AAD to include just SPI and seqno. So 1374 * subtract off the IV len. 1375 */ 1376 aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE; 1377 1378 if (rctx->is_encrypt) { 1379 aead_parms.return_iv = true; 1380 aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE; 1381 aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE; 1382 } 1383 } else { 1384 aead_parms.ret_iv_len = 0; 1385 } 1386 1387 /* 1388 * Count number of sg entries from the crypto API request that are to 1389 * be included in this mailbox message. For dst sg, don't count space 1390 * for digest. Digest gets caught in a separate buffer and copied back 1391 * to dst sg when processing response. 
1392 */ 1393 rctx->src_nents = spu_sg_count(rctx->src_sg, rctx->src_skip, chunksize); 1394 rctx->dst_nents = spu_sg_count(rctx->dst_sg, rctx->dst_skip, chunksize); 1395 if (aead_parms.assoc_size) 1396 assoc_nents = spu_sg_count(rctx->assoc, 0, 1397 aead_parms.assoc_size); 1398 1399 mssg = &rctx->mb_mssg; 1400 1401 rctx->total_sent = chunksize; 1402 rctx->src_sent = chunksize; 1403 if (spu->spu_assoc_resp_len(ctx->cipher.mode, 1404 aead_parms.assoc_size, 1405 aead_parms.ret_iv_len, 1406 rctx->is_encrypt)) 1407 rx_frag_num++; 1408 1409 aead_parms.iv_len = spu->spu_aead_ivlen(ctx->cipher.mode, 1410 rctx->iv_ctr_len); 1411 1412 if (ctx->auth.alg == HASH_ALG_AES) 1413 hash_parms.type = ctx->cipher_type; 1414 1415 /* General case AAD padding (CCM and RFC4543 special cases below) */ 1416 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, 1417 aead_parms.assoc_size); 1418 1419 /* General case data padding (CCM decrypt special case below) */ 1420 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, 1421 chunksize); 1422 1423 if (ctx->cipher.mode == CIPHER_MODE_CCM) { 1424 /* 1425 * for CCM, AAD len + 2 (rather than AAD len) needs to be 1426 * 128-bit aligned 1427 */ 1428 aead_parms.aad_pad_len = spu->spu_gcm_ccm_pad_len( 1429 ctx->cipher.mode, 1430 aead_parms.assoc_size + 2); 1431 1432 /* 1433 * And when decrypting CCM, need to pad without including 1434 * size of ICV which is tacked on to end of chunk 1435 */ 1436 if (!rctx->is_encrypt) 1437 aead_parms.data_pad_len = 1438 spu->spu_gcm_ccm_pad_len(ctx->cipher.mode, 1439 chunksize - digestsize); 1440 1441 /* CCM also requires software to rewrite portions of IV: */ 1442 spu->spu_ccm_update_iv(digestsize, &cipher_parms, req->assoclen, 1443 chunksize, rctx->is_encrypt, 1444 ctx->is_esp); 1445 } 1446 1447 if (ctx->is_rfc4543) { 1448 /* 1449 * RFC4543: data is included in AAD, so don't pad after AAD 1450 * and pad data based on both AAD + data size 1451 */ 1452 aead_parms.aad_pad_len = 0; 1453 if (!rctx->is_encrypt) 1454 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len( 1455 ctx->cipher.mode, 1456 aead_parms.assoc_size + chunksize - 1457 digestsize); 1458 else 1459 aead_parms.data_pad_len = spu->spu_gcm_ccm_pad_len( 1460 ctx->cipher.mode, 1461 aead_parms.assoc_size + chunksize); 1462 1463 req_opts.is_rfc4543 = true; 1464 } 1465 1466 if (spu_req_incl_icv(ctx->cipher.mode, rctx->is_encrypt)) { 1467 incl_icv = true; 1468 tx_frag_num++; 1469 /* Copy ICV from end of src scatterlist to digest buf */ 1470 sg_copy_part_to_buf(req->src, rctx->msg_buf.digest, digestsize, 1471 req->assoclen + rctx->total_sent - 1472 digestsize); 1473 } 1474 1475 atomic64_add(chunksize, &iproc_priv.bytes_out); 1476 1477 flow_log("%s()-sent chunksize:%u\n", __func__, chunksize); 1478 1479 /* Prepend SPU header with type 3 BCM header */ 1480 memcpy(rctx->msg_buf.bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); 1481 1482 spu_hdr_len = spu->spu_create_request(rctx->msg_buf.bcm_spu_req_hdr + 1483 BCM_HDR_LEN, &req_opts, 1484 &cipher_parms, &hash_parms, 1485 &aead_parms, chunksize); 1486 1487 /* Determine total length of padding. Put all padding in one buffer. 
*/ 1488 db_size = spu_real_db_size(aead_parms.assoc_size, aead_parms.iv_len, 0, 1489 chunksize, aead_parms.aad_pad_len, 1490 aead_parms.data_pad_len, 0); 1491 1492 stat_pad_len = spu->spu_wordalign_padlen(db_size); 1493 1494 if (stat_pad_len) 1495 rx_frag_num++; 1496 pad_len = aead_parms.data_pad_len + stat_pad_len; 1497 if (pad_len) { 1498 tx_frag_num++; 1499 spu->spu_request_pad(rctx->msg_buf.spu_req_pad, 1500 aead_parms.data_pad_len, 0, 1501 ctx->auth.alg, ctx->auth.mode, 1502 rctx->total_sent, stat_pad_len); 1503 } 1504 1505 spu->spu_dump_msg_hdr(rctx->msg_buf.bcm_spu_req_hdr + BCM_HDR_LEN, 1506 spu_hdr_len); 1507 dump_sg(rctx->assoc, 0, aead_parms.assoc_size); 1508 packet_dump(" aead iv: ", rctx->msg_buf.iv_ctr, aead_parms.iv_len); 1509 packet_log("BD:\n"); 1510 dump_sg(rctx->src_sg, rctx->src_skip, chunksize); 1511 packet_dump(" pad: ", rctx->msg_buf.spu_req_pad, pad_len); 1512 1513 /* 1514 * Build mailbox message containing SPU request msg and rx buffers 1515 * to catch response message 1516 */ 1517 memset(mssg, 0, sizeof(*mssg)); 1518 mssg->type = BRCM_MESSAGE_SPU; 1519 mssg->ctx = rctx; /* Will be returned in response */ 1520 1521 /* Create rx scatterlist to catch result */ 1522 rx_frag_num += rctx->dst_nents; 1523 resp_len = chunksize; 1524 1525 /* 1526 * Always catch ICV in separate buffer. Have to for GCM/CCM because of 1527 * padding. Have to for SHA-224 and other truncated SHAs because SPU 1528 * sends entire digest back. 1529 */ 1530 rx_frag_num++; 1531 1532 if (((ctx->cipher.mode == CIPHER_MODE_GCM) || 1533 (ctx->cipher.mode == CIPHER_MODE_CCM)) && !rctx->is_encrypt) { 1534 /* 1535 * Input is ciphertxt plus ICV, but ICV not incl 1536 * in output. 1537 */ 1538 resp_len -= ctx->digestsize; 1539 if (resp_len == 0) 1540 /* no rx frags to catch output data */ 1541 rx_frag_num -= rctx->dst_nents; 1542 } 1543 1544 err = spu_aead_rx_sg_create(mssg, req, rctx, rx_frag_num, 1545 aead_parms.assoc_size, 1546 aead_parms.ret_iv_len, resp_len, digestsize, 1547 stat_pad_len); 1548 if (err) 1549 return err; 1550 1551 /* Create tx scatterlist containing SPU request message */ 1552 tx_frag_num += rctx->src_nents; 1553 tx_frag_num += assoc_nents; 1554 if (aead_parms.aad_pad_len) 1555 tx_frag_num++; 1556 if (aead_parms.iv_len) 1557 tx_frag_num++; 1558 if (spu->spu_tx_status_len()) 1559 tx_frag_num++; 1560 err = spu_aead_tx_sg_create(mssg, rctx, tx_frag_num, spu_hdr_len, 1561 rctx->assoc, aead_parms.assoc_size, 1562 assoc_nents, aead_parms.iv_len, chunksize, 1563 aead_parms.aad_pad_len, pad_len, incl_icv); 1564 if (err) 1565 return err; 1566 1567 err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx); 1568 if (unlikely(err < 0)) 1569 return err; 1570 1571 return -EINPROGRESS; 1572 } 1573 1574 /** 1575 * handle_aead_resp() - Process a SPU response message for an AEAD request. 
1576 * @rctx: Crypto request context 1577 */ 1578 static void handle_aead_resp(struct iproc_reqctx_s *rctx) 1579 { 1580 struct spu_hw *spu = &iproc_priv.spu; 1581 struct crypto_async_request *areq = rctx->parent; 1582 struct aead_request *req = container_of(areq, 1583 struct aead_request, base); 1584 struct iproc_ctx_s *ctx = rctx->ctx; 1585 u32 payload_len; 1586 unsigned int icv_offset; 1587 u32 result_len; 1588 1589 /* See how much data was returned */ 1590 payload_len = spu->spu_payload_length(rctx->msg_buf.spu_resp_hdr); 1591 flow_log("payload_len %u\n", payload_len); 1592 1593 /* only count payload */ 1594 atomic64_add(payload_len, &iproc_priv.bytes_in); 1595 1596 if (req->assoclen) 1597 packet_dump(" assoc_data ", rctx->msg_buf.a.resp_aad, 1598 req->assoclen); 1599 1600 /* 1601 * Copy the ICV back to the destination 1602 * buffer. In decrypt case, SPU gives us back the digest, but crypto 1603 * API doesn't expect ICV in dst buffer. 1604 */ 1605 result_len = req->cryptlen; 1606 if (rctx->is_encrypt) { 1607 icv_offset = req->assoclen + rctx->total_sent; 1608 packet_dump(" ICV: ", rctx->msg_buf.digest, ctx->digestsize); 1609 flow_log("copying ICV to dst sg at offset %u\n", icv_offset); 1610 sg_copy_part_from_buf(req->dst, rctx->msg_buf.digest, 1611 ctx->digestsize, icv_offset); 1612 result_len += ctx->digestsize; 1613 } 1614 1615 packet_log("response data: "); 1616 dump_sg(req->dst, req->assoclen, result_len); 1617 1618 atomic_inc(&iproc_priv.op_counts[SPU_OP_AEAD]); 1619 if (ctx->cipher.alg == CIPHER_ALG_AES) { 1620 if (ctx->cipher.mode == CIPHER_MODE_CCM) 1621 atomic_inc(&iproc_priv.aead_cnt[AES_CCM]); 1622 else if (ctx->cipher.mode == CIPHER_MODE_GCM) 1623 atomic_inc(&iproc_priv.aead_cnt[AES_GCM]); 1624 else 1625 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]); 1626 } else { 1627 atomic_inc(&iproc_priv.aead_cnt[AUTHENC]); 1628 } 1629 } 1630 1631 /** 1632 * spu_chunk_cleanup() - Do cleanup after processing one chunk of a request 1633 * @rctx: request context 1634 * 1635 * Mailbox scatterlists are allocated for each chunk. So free them after 1636 * processing each chunk. 1637 */ 1638 static void spu_chunk_cleanup(struct iproc_reqctx_s *rctx) 1639 { 1640 /* mailbox message used to tx request */ 1641 struct brcm_message *mssg = &rctx->mb_mssg; 1642 1643 kfree(mssg->spu.src); 1644 kfree(mssg->spu.dst); 1645 memset(mssg, 0, sizeof(struct brcm_message)); 1646 } 1647 1648 /** 1649 * finish_req() - Used to invoke the complete callback from the requester when 1650 * a request has been handled asynchronously. 1651 * @rctx: Request context 1652 * @err: Indicates whether the request was successful or not 1653 * 1654 * Ensures that cleanup has been done for request 1655 */ 1656 static void finish_req(struct iproc_reqctx_s *rctx, int err) 1657 { 1658 struct crypto_async_request *areq = rctx->parent; 1659 1660 flow_log("%s() err:%d\n\n", __func__, err); 1661 1662 /* No harm done if already called */ 1663 spu_chunk_cleanup(rctx); 1664 1665 if (areq) 1666 areq->complete(areq, err); 1667 } 1668 1669 /** 1670 * spu_rx_callback() - Callback from mailbox framework with a SPU response. 
1671 * @cl: mailbox client structure for SPU driver 1672 * @msg: mailbox message containing SPU response 1673 */ 1674 static void spu_rx_callback(struct mbox_client *cl, void *msg) 1675 { 1676 struct spu_hw *spu = &iproc_priv.spu; 1677 struct brcm_message *mssg = msg; 1678 struct iproc_reqctx_s *rctx; 1679 struct iproc_ctx_s *ctx; 1680 struct crypto_async_request *areq; 1681 int err = 0; 1682 1683 rctx = mssg->ctx; 1684 if (unlikely(!rctx)) { 1685 /* This is fatal */ 1686 pr_err("%s(): no request context", __func__); 1687 err = -EFAULT; 1688 goto cb_finish; 1689 } 1690 areq = rctx->parent; 1691 ctx = rctx->ctx; 1692 1693 /* process the SPU status */ 1694 err = spu->spu_status_process(rctx->msg_buf.rx_stat); 1695 if (err != 0) { 1696 if (err == SPU_INVALID_ICV) 1697 atomic_inc(&iproc_priv.bad_icv); 1698 err = -EBADMSG; 1699 goto cb_finish; 1700 } 1701 1702 /* Process the SPU response message */ 1703 switch (rctx->ctx->alg->type) { 1704 case CRYPTO_ALG_TYPE_ABLKCIPHER: 1705 handle_ablkcipher_resp(rctx); 1706 break; 1707 case CRYPTO_ALG_TYPE_AHASH: 1708 handle_ahash_resp(rctx); 1709 break; 1710 case CRYPTO_ALG_TYPE_AEAD: 1711 handle_aead_resp(rctx); 1712 break; 1713 default: 1714 err = -EINVAL; 1715 goto cb_finish; 1716 } 1717 1718 /* 1719 * If this response does not complete the request, then send the next 1720 * request chunk. 1721 */ 1722 if (rctx->total_sent < rctx->total_todo) { 1723 /* Deallocate anything specific to previous chunk */ 1724 spu_chunk_cleanup(rctx); 1725 1726 switch (rctx->ctx->alg->type) { 1727 case CRYPTO_ALG_TYPE_ABLKCIPHER: 1728 err = handle_ablkcipher_req(rctx); 1729 break; 1730 case CRYPTO_ALG_TYPE_AHASH: 1731 err = handle_ahash_req(rctx); 1732 if (err == -EAGAIN) 1733 /* 1734 * we saved data in hash carry, but tell crypto 1735 * API we successfully completed request. 1736 */ 1737 err = 0; 1738 break; 1739 case CRYPTO_ALG_TYPE_AEAD: 1740 err = handle_aead_req(rctx); 1741 break; 1742 default: 1743 err = -EINVAL; 1744 } 1745 1746 if (err == -EINPROGRESS) 1747 /* Successfully submitted request for next chunk */ 1748 return; 1749 } 1750 1751 cb_finish: 1752 finish_req(rctx, err); 1753 } 1754 1755 /* ==================== Kernel Cryptographic API ==================== */ 1756 1757 /** 1758 * ablkcipher_enqueue() - Handle ablkcipher encrypt or decrypt request. 1759 * @req: Crypto API request 1760 * @encrypt: true if encrypting; false if decrypting 1761 * 1762 * Return: -EINPROGRESS if request accepted and result will be returned 1763 * asynchronously 1764 * < 0 if an error 1765 */ 1766 static int ablkcipher_enqueue(struct ablkcipher_request *req, bool encrypt) 1767 { 1768 struct iproc_reqctx_s *rctx = ablkcipher_request_ctx(req); 1769 struct iproc_ctx_s *ctx = 1770 crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); 1771 int err; 1772 1773 flow_log("%s() enc:%u\n", __func__, encrypt); 1774 1775 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 1776 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 1777 rctx->parent = &req->base; 1778 rctx->is_encrypt = encrypt; 1779 rctx->bd_suppress = false; 1780 rctx->total_todo = req->nbytes; 1781 rctx->src_sent = 0; 1782 rctx->total_sent = 0; 1783 rctx->total_received = 0; 1784 rctx->ctx = ctx; 1785 1786 /* Initialize current position in src and dst scatterlists */ 1787 rctx->src_sg = req->src; 1788 rctx->src_nents = 0; 1789 rctx->src_skip = 0; 1790 rctx->dst_sg = req->dst; 1791 rctx->dst_nents = 0; 1792 rctx->dst_skip = 0; 1793 1794 if (ctx->cipher.mode == CIPHER_MODE_CBC || 1795 ctx->cipher.mode == CIPHER_MODE_CTR || 1796 ctx->cipher.mode == CIPHER_MODE_OFB || 1797 ctx->cipher.mode == CIPHER_MODE_XTS || 1798 ctx->cipher.mode == CIPHER_MODE_GCM || 1799 ctx->cipher.mode == CIPHER_MODE_CCM) { 1800 rctx->iv_ctr_len = 1801 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(req)); 1802 memcpy(rctx->msg_buf.iv_ctr, req->info, rctx->iv_ctr_len); 1803 } else { 1804 rctx->iv_ctr_len = 0; 1805 } 1806 1807 /* Choose a SPU to process this request */ 1808 rctx->chan_idx = select_channel(); 1809 err = handle_ablkcipher_req(rctx); 1810 if (err != -EINPROGRESS) 1811 /* synchronous result */ 1812 spu_chunk_cleanup(rctx); 1813 1814 return err; 1815 } 1816 1817 static int des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1818 unsigned int keylen) 1819 { 1820 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1821 u32 tmp[DES_EXPKEY_WORDS]; 1822 1823 if (keylen == DES_KEY_SIZE) { 1824 if (des_ekey(tmp, key) == 0) { 1825 if (crypto_ablkcipher_get_flags(cipher) & 1826 CRYPTO_TFM_REQ_WEAK_KEY) { 1827 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 1828 1829 crypto_ablkcipher_set_flags(cipher, flags); 1830 return -EINVAL; 1831 } 1832 } 1833 1834 ctx->cipher_type = CIPHER_TYPE_DES; 1835 } else { 1836 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1837 return -EINVAL; 1838 } 1839 return 0; 1840 } 1841 1842 static int threedes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1843 unsigned int keylen) 1844 { 1845 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1846 1847 if (keylen == (DES_KEY_SIZE * 3)) { 1848 const u32 *K = (const u32 *)key; 1849 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 1850 1851 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 1852 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) { 1853 crypto_ablkcipher_set_flags(cipher, flags); 1854 return -EINVAL; 1855 } 1856 1857 ctx->cipher_type = CIPHER_TYPE_3DES; 1858 } else { 1859 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1860 return -EINVAL; 1861 } 1862 return 0; 1863 } 1864 1865 static int aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1866 unsigned int keylen) 1867 { 1868 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1869 1870 if (ctx->cipher.mode == CIPHER_MODE_XTS) 1871 /* XTS includes two keys of equal length */ 1872 keylen = keylen / 2; 1873 1874 switch (keylen) { 1875 case AES_KEYSIZE_128: 1876 ctx->cipher_type = CIPHER_TYPE_AES128; 1877 break; 1878 case AES_KEYSIZE_192: 1879 ctx->cipher_type = CIPHER_TYPE_AES192; 1880 break; 1881 case AES_KEYSIZE_256: 1882 ctx->cipher_type = CIPHER_TYPE_AES256; 1883 break; 1884 default: 1885 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1886 return -EINVAL; 1887 } 1888 WARN_ON((ctx->max_payload != SPU_MAX_PAYLOAD_INF) && 1889 ((ctx->max_payload % AES_BLOCK_SIZE) != 0)); 1890 return 0; 1891 } 1892 1893 static int rc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1894 unsigned int keylen) 1895 { 1896 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 
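	/*
	 * Note: ctx->enckey is seeded below with the SPU's RC4 state layout:
	 * four state bytes (0x00, i, 0x00, j) followed by ARC4_MAX_KEY_SIZE
	 * bytes filled by repeating the key. cipher_type stays
	 * CIPHER_TYPE_INIT, so the key schedule itself is presumably run by
	 * the SPU hardware on the first request.
	 */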
1897 int i; 1898 1899 ctx->enckeylen = ARC4_MAX_KEY_SIZE + ARC4_STATE_SIZE; 1900 1901 ctx->enckey[0] = 0x00; /* 0x00 */ 1902 ctx->enckey[1] = 0x00; /* i */ 1903 ctx->enckey[2] = 0x00; /* 0x00 */ 1904 ctx->enckey[3] = 0x00; /* j */ 1905 for (i = 0; i < ARC4_MAX_KEY_SIZE; i++) 1906 ctx->enckey[i + ARC4_STATE_SIZE] = key[i % keylen]; 1907 1908 ctx->cipher_type = CIPHER_TYPE_INIT; 1909 1910 return 0; 1911 } 1912 1913 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 1914 unsigned int keylen) 1915 { 1916 struct spu_hw *spu = &iproc_priv.spu; 1917 struct iproc_ctx_s *ctx = crypto_ablkcipher_ctx(cipher); 1918 struct spu_cipher_parms cipher_parms; 1919 u32 alloc_len = 0; 1920 int err; 1921 1922 flow_log("ablkcipher_setkey() keylen: %d\n", keylen); 1923 flow_dump(" key: ", key, keylen); 1924 1925 switch (ctx->cipher.alg) { 1926 case CIPHER_ALG_DES: 1927 err = des_setkey(cipher, key, keylen); 1928 break; 1929 case CIPHER_ALG_3DES: 1930 err = threedes_setkey(cipher, key, keylen); 1931 break; 1932 case CIPHER_ALG_AES: 1933 err = aes_setkey(cipher, key, keylen); 1934 break; 1935 case CIPHER_ALG_RC4: 1936 err = rc4_setkey(cipher, key, keylen); 1937 break; 1938 default: 1939 pr_err("%s() Error: unknown cipher alg\n", __func__); 1940 err = -EINVAL; 1941 } 1942 if (err) 1943 return err; 1944 1945 /* RC4 already populated ctx->enckey */ 1946 if (ctx->cipher.alg != CIPHER_ALG_RC4) { 1947 memcpy(ctx->enckey, key, keylen); 1948 ctx->enckeylen = keylen; 1949 } 1950 /* SPU needs XTS keys in the reverse order the crypto API presents */ 1951 if ((ctx->cipher.alg == CIPHER_ALG_AES) && 1952 (ctx->cipher.mode == CIPHER_MODE_XTS)) { 1953 unsigned int xts_keylen = keylen / 2; 1954 1955 memcpy(ctx->enckey, key + xts_keylen, xts_keylen); 1956 memcpy(ctx->enckey + xts_keylen, key, xts_keylen); 1957 } 1958 1959 if (spu->spu_type == SPU_TYPE_SPUM) 1960 alloc_len = BCM_HDR_LEN + SPU_HEADER_ALLOC_LEN; 1961 else if (spu->spu_type == SPU_TYPE_SPU2) 1962 alloc_len = BCM_HDR_LEN + SPU2_HEADER_ALLOC_LEN; 1963 memset(ctx->bcm_spu_req_hdr, 0, alloc_len); 1964 cipher_parms.iv_buf = NULL; 1965 cipher_parms.iv_len = crypto_ablkcipher_ivsize(cipher); 1966 flow_log("%s: iv_len %u\n", __func__, cipher_parms.iv_len); 1967 1968 cipher_parms.alg = ctx->cipher.alg; 1969 cipher_parms.mode = ctx->cipher.mode; 1970 cipher_parms.type = ctx->cipher_type; 1971 cipher_parms.key_buf = ctx->enckey; 1972 cipher_parms.key_len = ctx->enckeylen; 1973 1974 /* Prepend SPU request message with BCM header */ 1975 memcpy(ctx->bcm_spu_req_hdr, BCMHEADER, BCM_HDR_LEN); 1976 ctx->spu_req_hdr_len = 1977 spu->spu_cipher_req_init(ctx->bcm_spu_req_hdr + BCM_HDR_LEN, 1978 &cipher_parms); 1979 1980 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 1981 ctx->enckeylen, 1982 false); 1983 1984 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_CIPHER]); 1985 1986 return 0; 1987 } 1988 1989 static int ablkcipher_encrypt(struct ablkcipher_request *req) 1990 { 1991 flow_log("ablkcipher_encrypt() nbytes:%u\n", req->nbytes); 1992 1993 return ablkcipher_enqueue(req, true); 1994 } 1995 1996 static int ablkcipher_decrypt(struct ablkcipher_request *req) 1997 { 1998 flow_log("ablkcipher_decrypt() nbytes:%u\n", req->nbytes); 1999 return ablkcipher_enqueue(req, false); 2000 } 2001 2002 static int ahash_enqueue(struct ahash_request *req) 2003 { 2004 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2005 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2006 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2007 int err = 0; 2008 const char
*alg_name; 2009 2010 flow_log("ahash_enqueue() nbytes:%u\n", req->nbytes); 2011 2012 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2013 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 2014 rctx->parent = &req->base; 2015 rctx->ctx = ctx; 2016 rctx->bd_suppress = true; 2017 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); 2018 2019 /* Initialize position in src scatterlist */ 2020 rctx->src_sg = req->src; 2021 rctx->src_skip = 0; 2022 rctx->src_nents = 0; 2023 rctx->dst_sg = NULL; 2024 rctx->dst_skip = 0; 2025 rctx->dst_nents = 0; 2026 2027 /* SPU2 hardware does not compute hash of zero length data */ 2028 if ((rctx->is_final == 1) && (rctx->total_todo == 0) && 2029 (iproc_priv.spu.spu_type == SPU_TYPE_SPU2)) { 2030 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); 2031 flow_log("Doing %sfinal %s zero-len hash request in software\n", 2032 rctx->is_final ? "" : "non-", alg_name); 2033 err = do_shash((unsigned char *)alg_name, req->result, 2034 NULL, 0, NULL, 0, ctx->authkey, 2035 ctx->authkeylen); 2036 if (err < 0) 2037 flow_log("Hash request failed with error %d\n", err); 2038 return err; 2039 } 2040 /* Choose a SPU to process this request */ 2041 rctx->chan_idx = select_channel(); 2042 2043 err = handle_ahash_req(rctx); 2044 if (err != -EINPROGRESS) 2045 /* synchronous result */ 2046 spu_chunk_cleanup(rctx); 2047 2048 if (err == -EAGAIN) 2049 /* 2050 * we saved data in hash carry, but tell crypto API 2051 * we successfully completed request. 2052 */ 2053 err = 0; 2054 2055 return err; 2056 } 2057 2058 static int __ahash_init(struct ahash_request *req) 2059 { 2060 struct spu_hw *spu = &iproc_priv.spu; 2061 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2062 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2063 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2064 2065 flow_log("%s()\n", __func__); 2066 2067 /* Initialize the context */ 2068 rctx->hash_carry_len = 0; 2069 rctx->is_final = 0; 2070 2071 rctx->total_todo = 0; 2072 rctx->src_sent = 0; 2073 rctx->total_sent = 0; 2074 rctx->total_received = 0; 2075 2076 ctx->digestsize = crypto_ahash_digestsize(tfm); 2077 /* If we add a hash whose digest is larger, catch it here. */ 2078 WARN_ON(ctx->digestsize > MAX_DIGEST_SIZE); 2079 2080 rctx->is_sw_hmac = false; 2081 2082 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 0, 2083 true); 2084 2085 return 0; 2086 } 2087 2088 /** 2089 * spu_no_incr_hash() - Determine whether incremental hashing is supported. 
2090 * @ctx: Crypto session context 2091 * 2092 * SPU-2 does not support incremental hashing (we'll have to revisit and 2093 * condition based on chip revision or device tree entry if future versions do 2094 * support incremental hash) 2095 * 2096 * SPU-M also doesn't support incremental hashing of AES-XCBC 2097 * 2098 * Return: true if incremental hashing is not supported 2099 * false otherwise 2100 */ 2101 bool spu_no_incr_hash(struct iproc_ctx_s *ctx) 2102 { 2103 struct spu_hw *spu = &iproc_priv.spu; 2104 2105 if (spu->spu_type == SPU_TYPE_SPU2) 2106 return true; 2107 2108 if ((ctx->auth.alg == HASH_ALG_AES) && 2109 (ctx->auth.mode == HASH_MODE_XCBC)) 2110 return true; 2111 2112 /* Otherwise, incremental hashing is supported */ 2113 return false; 2114 } 2115 2116 static int ahash_init(struct ahash_request *req) 2117 { 2118 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2119 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2120 const char *alg_name; 2121 struct crypto_shash *hash; 2122 int ret; 2123 gfp_t gfp; 2124 2125 if (spu_no_incr_hash(ctx)) { 2126 /* 2127 * If we get an incremental hashing request and it's not 2128 * supported by the hardware, we need to handle it in software 2129 * by calling synchronous hash functions. 2130 */ 2131 alg_name = crypto_tfm_alg_name(crypto_ahash_tfm(tfm)); 2132 hash = crypto_alloc_shash(alg_name, 0, 0); 2133 if (IS_ERR(hash)) { 2134 ret = PTR_ERR(hash); 2135 goto err; 2136 } 2137 2138 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2139 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 2140 ctx->shash = kmalloc(sizeof(*ctx->shash) + 2141 crypto_shash_descsize(hash), gfp); 2142 if (!ctx->shash) { 2143 ret = -ENOMEM; 2144 goto err_hash; 2145 } 2146 ctx->shash->tfm = hash; 2147 ctx->shash->flags = 0; 2148 2149 /* Set the key using data we already have from setkey */ 2150 if (ctx->authkeylen > 0) { 2151 ret = crypto_shash_setkey(hash, ctx->authkey, 2152 ctx->authkeylen); 2153 if (ret) 2154 goto err_shash; 2155 } 2156 2157 /* Initialize hash w/ this key and other params */ 2158 ret = crypto_shash_init(ctx->shash); 2159 if (ret) 2160 goto err_shash; 2161 } else { 2162 /* Otherwise call the internal function which uses SPU hw */ 2163 ret = __ahash_init(req); 2164 } 2165 2166 return ret; 2167 2168 err_shash: 2169 kfree(ctx->shash); 2170 err_hash: 2171 crypto_free_shash(hash); 2172 err: 2173 return ret; 2174 } 2175 2176 static int __ahash_update(struct ahash_request *req) 2177 { 2178 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2179 2180 flow_log("ahash_update() nbytes:%u\n", req->nbytes); 2181 2182 if (!req->nbytes) 2183 return 0; 2184 rctx->total_todo += req->nbytes; 2185 rctx->src_sent = 0; 2186 2187 return ahash_enqueue(req); 2188 } 2189 2190 static int ahash_update(struct ahash_request *req) 2191 { 2192 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2193 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2194 u8 *tmpbuf; 2195 int ret; 2196 int nents; 2197 gfp_t gfp; 2198 2199 if (spu_no_incr_hash(ctx)) { 2200 /* 2201 * If we get an incremental hashing request and it's not 2202 * supported by the hardware, we need to handle it in software 2203 * by calling synchronous hash functions. 2204 */ 2205 if (req->src) 2206 nents = sg_nents(req->src); 2207 else 2208 return -EINVAL; 2209 2210 /* Copy data from req scatterlist to tmp buffer */ 2211 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2212 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 2213 tmpbuf = kmalloc(req->nbytes, gfp); 2214 if (!tmpbuf) 2215 return -ENOMEM; 2216 2217 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) != 2218 req->nbytes) { 2219 kfree(tmpbuf); 2220 return -EINVAL; 2221 } 2222 2223 /* Call synchronous update */ 2224 ret = crypto_shash_update(ctx->shash, tmpbuf, req->nbytes); 2225 kfree(tmpbuf); 2226 } else { 2227 /* Otherwise call the internal function which uses SPU hw */ 2228 ret = __ahash_update(req); 2229 } 2230 2231 return ret; 2232 } 2233 2234 static int __ahash_final(struct ahash_request *req) 2235 { 2236 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2237 2238 flow_log("ahash_final() nbytes:%u\n", req->nbytes); 2239 2240 rctx->is_final = 1; 2241 2242 return ahash_enqueue(req); 2243 } 2244 2245 static int ahash_final(struct ahash_request *req) 2246 { 2247 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2248 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2249 int ret; 2250 2251 if (spu_no_incr_hash(ctx)) { 2252 /* 2253 * If we get an incremental hashing request and it's not 2254 * supported by the hardware, we need to handle it in software 2255 * by calling synchronous hash functions. 2256 */ 2257 ret = crypto_shash_final(ctx->shash, req->result); 2258 2259 /* Done with hash, can deallocate it now */ 2260 crypto_free_shash(ctx->shash->tfm); 2261 kfree(ctx->shash); 2262 2263 } else { 2264 /* Otherwise call the internal function which uses SPU hw */ 2265 ret = __ahash_final(req); 2266 } 2267 2268 return ret; 2269 } 2270 2271 static int __ahash_finup(struct ahash_request *req) 2272 { 2273 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2274 2275 flow_log("ahash_finup() nbytes:%u\n", req->nbytes); 2276 2277 rctx->total_todo += req->nbytes; 2278 rctx->src_sent = 0; 2279 rctx->is_final = 1; 2280 2281 return ahash_enqueue(req); 2282 } 2283 2284 static int ahash_finup(struct ahash_request *req) 2285 { 2286 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2287 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2288 u8 *tmpbuf; 2289 int ret; 2290 int nents; 2291 gfp_t gfp; 2292 2293 if (spu_no_incr_hash(ctx)) { 2294 /* 2295 * If we get an incremental hashing request and it's not 2296 * supported by the hardware, we need to handle it in software 2297 * by calling synchronous hash functions. 2298 */ 2299 if (req->src) { 2300 nents = sg_nents(req->src); 2301 } else { 2302 ret = -EINVAL; 2303 goto ahash_finup_exit; 2304 } 2305 2306 /* Copy data from req scatterlist to tmp buffer */ 2307 gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2308 CRYPTO_TFM_REQ_MAY_SLEEP)) ? 
GFP_KERNEL : GFP_ATOMIC; 2309 tmpbuf = kmalloc(req->nbytes, gfp); 2310 if (!tmpbuf) { 2311 ret = -ENOMEM; 2312 goto ahash_finup_exit; 2313 } 2314 2315 if (sg_copy_to_buffer(req->src, nents, tmpbuf, req->nbytes) != 2316 req->nbytes) { 2317 ret = -EINVAL; 2318 goto ahash_finup_free; 2319 } 2320 2321 /* Call synchronous update */ 2322 ret = crypto_shash_finup(ctx->shash, tmpbuf, req->nbytes, 2323 req->result); 2324 } else { 2325 /* Otherwise call the internal function which uses SPU hw */ 2326 return __ahash_finup(req); 2327 } 2328 ahash_finup_free: 2329 kfree(tmpbuf); 2330 2331 ahash_finup_exit: 2332 /* Done with hash, can deallocate it now */ 2333 crypto_free_shash(ctx->shash->tfm); 2334 kfree(ctx->shash); 2335 return ret; 2336 } 2337 2338 static int ahash_digest(struct ahash_request *req) 2339 { 2340 int err = 0; 2341 2342 flow_log("ahash_digest() nbytes:%u\n", req->nbytes); 2343 2344 /* whole thing at once */ 2345 err = __ahash_init(req); 2346 if (!err) 2347 err = __ahash_finup(req); 2348 2349 return err; 2350 } 2351 2352 static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key, 2353 unsigned int keylen) 2354 { 2355 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash); 2356 2357 flow_log("%s() ahash:%p key:%p keylen:%u\n", 2358 __func__, ahash, key, keylen); 2359 flow_dump(" key: ", key, keylen); 2360 2361 if (ctx->auth.alg == HASH_ALG_AES) { 2362 switch (keylen) { 2363 case AES_KEYSIZE_128: 2364 ctx->cipher_type = CIPHER_TYPE_AES128; 2365 break; 2366 case AES_KEYSIZE_192: 2367 ctx->cipher_type = CIPHER_TYPE_AES192; 2368 break; 2369 case AES_KEYSIZE_256: 2370 ctx->cipher_type = CIPHER_TYPE_AES256; 2371 break; 2372 default: 2373 pr_err("%s() Error: Invalid key length\n", __func__); 2374 return -EINVAL; 2375 } 2376 } else { 2377 pr_err("%s() Error: unknown hash alg\n", __func__); 2378 return -EINVAL; 2379 } 2380 memcpy(ctx->authkey, key, keylen); 2381 ctx->authkeylen = keylen; 2382 2383 return 0; 2384 } 2385 2386 static int ahash_export(struct ahash_request *req, void *out) 2387 { 2388 const struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2389 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)out; 2390 2391 spu_exp->total_todo = rctx->total_todo; 2392 spu_exp->total_sent = rctx->total_sent; 2393 spu_exp->is_sw_hmac = rctx->is_sw_hmac; 2394 memcpy(spu_exp->hash_carry, rctx->hash_carry, sizeof(rctx->hash_carry)); 2395 spu_exp->hash_carry_len = rctx->hash_carry_len; 2396 memcpy(spu_exp->incr_hash, rctx->incr_hash, sizeof(rctx->incr_hash)); 2397 2398 return 0; 2399 } 2400 2401 static int ahash_import(struct ahash_request *req, const void *in) 2402 { 2403 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2404 struct spu_hash_export_s *spu_exp = (struct spu_hash_export_s *)in; 2405 2406 rctx->total_todo = spu_exp->total_todo; 2407 rctx->total_sent = spu_exp->total_sent; 2408 rctx->is_sw_hmac = spu_exp->is_sw_hmac; 2409 memcpy(rctx->hash_carry, spu_exp->hash_carry, sizeof(rctx->hash_carry)); 2410 rctx->hash_carry_len = spu_exp->hash_carry_len; 2411 memcpy(rctx->incr_hash, spu_exp->incr_hash, sizeof(rctx->incr_hash)); 2412 2413 return 0; 2414 } 2415 2416 static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key, 2417 unsigned int keylen) 2418 { 2419 struct iproc_ctx_s *ctx = crypto_ahash_ctx(ahash); 2420 unsigned int blocksize = 2421 crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash)); 2422 unsigned int digestsize = crypto_ahash_digestsize(ahash); 2423 unsigned int index; 2424 int rc; 2425 2426 flow_log("%s() ahash:%p key:%p keylen:%u blksz:%u 
digestsz:%u\n", 2427 __func__, ahash, key, keylen, blocksize, digestsize); 2428 flow_dump(" key: ", key, keylen); 2429 2430 if (keylen > blocksize) { 2431 switch (ctx->auth.alg) { 2432 case HASH_ALG_MD5: 2433 rc = do_shash("md5", ctx->authkey, key, keylen, NULL, 2434 0, NULL, 0); 2435 break; 2436 case HASH_ALG_SHA1: 2437 rc = do_shash("sha1", ctx->authkey, key, keylen, NULL, 2438 0, NULL, 0); 2439 break; 2440 case HASH_ALG_SHA224: 2441 rc = do_shash("sha224", ctx->authkey, key, keylen, NULL, 2442 0, NULL, 0); 2443 break; 2444 case HASH_ALG_SHA256: 2445 rc = do_shash("sha256", ctx->authkey, key, keylen, NULL, 2446 0, NULL, 0); 2447 break; 2448 case HASH_ALG_SHA384: 2449 rc = do_shash("sha384", ctx->authkey, key, keylen, NULL, 2450 0, NULL, 0); 2451 break; 2452 case HASH_ALG_SHA512: 2453 rc = do_shash("sha512", ctx->authkey, key, keylen, NULL, 2454 0, NULL, 0); 2455 break; 2456 case HASH_ALG_SHA3_224: 2457 rc = do_shash("sha3-224", ctx->authkey, key, keylen, 2458 NULL, 0, NULL, 0); 2459 break; 2460 case HASH_ALG_SHA3_256: 2461 rc = do_shash("sha3-256", ctx->authkey, key, keylen, 2462 NULL, 0, NULL, 0); 2463 break; 2464 case HASH_ALG_SHA3_384: 2465 rc = do_shash("sha3-384", ctx->authkey, key, keylen, 2466 NULL, 0, NULL, 0); 2467 break; 2468 case HASH_ALG_SHA3_512: 2469 rc = do_shash("sha3-512", ctx->authkey, key, keylen, 2470 NULL, 0, NULL, 0); 2471 break; 2472 default: 2473 pr_err("%s() Error: unknown hash alg\n", __func__); 2474 return -EINVAL; 2475 } 2476 if (rc < 0) { 2477 pr_err("%s() Error %d computing shash for %s\n", 2478 __func__, rc, hash_alg_name[ctx->auth.alg]); 2479 return rc; 2480 } 2481 ctx->authkeylen = digestsize; 2482 2483 flow_log(" keylen > digestsize... hashed\n"); 2484 flow_dump(" newkey: ", ctx->authkey, ctx->authkeylen); 2485 } else { 2486 memcpy(ctx->authkey, key, keylen); 2487 ctx->authkeylen = keylen; 2488 } 2489 2490 /* 2491 * Full HMAC operation in SPUM is not verified, 2492 * So keeping the generation of IPAD, OPAD and 2493 * outer hashing in software. 
*/ 2495 if (iproc_priv.spu.spu_type == SPU_TYPE_SPUM) { 2496 memcpy(ctx->ipad, ctx->authkey, ctx->authkeylen); 2497 memset(ctx->ipad + ctx->authkeylen, 0, 2498 blocksize - ctx->authkeylen); 2499 ctx->authkeylen = 0; 2500 memcpy(ctx->opad, ctx->ipad, blocksize); 2501 2502 for (index = 0; index < blocksize; index++) { 2503 ctx->ipad[index] ^= HMAC_IPAD_VALUE; 2504 ctx->opad[index] ^= HMAC_OPAD_VALUE; 2505 } 2506 2507 flow_dump(" ipad: ", ctx->ipad, blocksize); 2508 flow_dump(" opad: ", ctx->opad, blocksize); 2509 } 2510 ctx->digestsize = digestsize; 2511 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_HMAC]); 2512 2513 return 0; 2514 } 2515 2516 static int ahash_hmac_init(struct ahash_request *req) 2517 { 2518 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2519 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2520 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2521 unsigned int blocksize = 2522 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2523 2524 flow_log("ahash_hmac_init()\n"); 2525 2526 /* init the context as a hash */ 2527 ahash_init(req); 2528 2529 if (!spu_no_incr_hash(ctx)) { 2530 /* SPU-M can do incr hashing but needs sw for outer HMAC */ 2531 rctx->is_sw_hmac = true; 2532 ctx->auth.mode = HASH_MODE_HASH; 2533 /* start with a prepended ipad */ 2534 memcpy(rctx->hash_carry, ctx->ipad, blocksize); 2535 rctx->hash_carry_len = blocksize; 2536 rctx->total_todo += blocksize; 2537 } 2538 2539 return 0; 2540 } 2541 2542 static int ahash_hmac_update(struct ahash_request *req) 2543 { 2544 flow_log("ahash_hmac_update() nbytes:%u\n", req->nbytes); 2545 2546 if (!req->nbytes) 2547 return 0; 2548 2549 return ahash_update(req); 2550 } 2551 2552 static int ahash_hmac_final(struct ahash_request *req) 2553 { 2554 flow_log("ahash_hmac_final() nbytes:%u\n", req->nbytes); 2555 2556 return ahash_final(req); 2557 } 2558 2559 static int ahash_hmac_finup(struct ahash_request *req) 2560 { 2561 flow_log("ahash_hmac_finup() nbytes:%u\n", req->nbytes); 2562 2563 return ahash_finup(req); 2564 } 2565 2566 static int ahash_hmac_digest(struct ahash_request *req) 2567 { 2568 struct iproc_reqctx_s *rctx = ahash_request_ctx(req); 2569 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 2570 struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm); 2571 unsigned int blocksize = 2572 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2573 2574 flow_log("ahash_hmac_digest() nbytes:%u\n", req->nbytes); 2575 2576 /* Perform initialization and then call finup */ 2577 __ahash_init(req); 2578 2579 if (iproc_priv.spu.spu_type == SPU_TYPE_SPU2) { 2580 /* 2581 * SPU2 supports a full HMAC implementation in 2582 * hardware, so there is no need to generate IPAD, OPAD and 2583 * the outer hash in software. 2584 * Only when the hash key is longer than the hash block size, 2585 * SPU2 expects the key to be hashed down to digest size 2586 * and fed in as the hash key.
2587 */ 2588 rctx->is_sw_hmac = false; 2589 ctx->auth.mode = HASH_MODE_HMAC; 2590 } else { 2591 rctx->is_sw_hmac = true; 2592 ctx->auth.mode = HASH_MODE_HASH; 2593 /* start with a prepended ipad */ 2594 memcpy(rctx->hash_carry, ctx->ipad, blocksize); 2595 rctx->hash_carry_len = blocksize; 2596 rctx->total_todo += blocksize; 2597 } 2598 2599 return __ahash_finup(req); 2600 } 2601 2602 /* aead helpers */ 2603 2604 static int aead_need_fallback(struct aead_request *req) 2605 { 2606 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2607 struct spu_hw *spu = &iproc_priv.spu; 2608 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2609 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead); 2610 u32 payload_len; 2611 2612 /* 2613 * SPU hardware cannot handle the AES-GCM/CCM case where plaintext 2614 * and AAD are both 0 bytes long. So use fallback in this case. 2615 */ 2616 if (((ctx->cipher.mode == CIPHER_MODE_GCM) || 2617 (ctx->cipher.mode == CIPHER_MODE_CCM)) && 2618 (req->assoclen == 0)) { 2619 if ((rctx->is_encrypt && (req->cryptlen == 0)) || 2620 (!rctx->is_encrypt && (req->cryptlen == ctx->digestsize))) { 2621 flow_log("AES GCM/CCM needs fallback for 0 len req\n"); 2622 return 1; 2623 } 2624 } 2625 2626 /* SPU-M hardware only supports CCM digest size of 8, 12, or 16 bytes */ 2627 if ((ctx->cipher.mode == CIPHER_MODE_CCM) && 2628 (spu->spu_type == SPU_TYPE_SPUM) && 2629 (ctx->digestsize != 8) && (ctx->digestsize != 12) && 2630 (ctx->digestsize != 16)) { 2631 flow_log("%s() AES CCM needs fallback for digest size %d\n", 2632 __func__, ctx->digestsize); 2633 return 1; 2634 } 2635 2636 /* 2637 * SPU-M on NSP has an issue where AES-CCM hash is not correct 2638 * when AAD size is 0 2639 */ 2640 if ((ctx->cipher.mode == CIPHER_MODE_CCM) && 2641 (spu->spu_subtype == SPU_SUBTYPE_SPUM_NSP) && 2642 (req->assoclen == 0)) { 2643 flow_log("%s() AES_CCM needs fallback for 0 len AAD on NSP\n", 2644 __func__); 2645 return 1; 2646 } 2647 2648 payload_len = req->cryptlen; 2649 if (spu->spu_type == SPU_TYPE_SPUM) 2650 payload_len += req->assoclen; 2651 2652 flow_log("%s() payload len: %u\n", __func__, payload_len); 2653 2654 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) 2655 return 0; 2656 else 2657 return payload_len > ctx->max_payload; 2658 } 2659 2660 static void aead_complete(struct crypto_async_request *areq, int err) 2661 { 2662 struct aead_request *req = 2663 container_of(areq, struct aead_request, base); 2664 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2665 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2666 2667 flow_log("%s() err:%d\n", __func__, err); 2668 2669 areq->tfm = crypto_aead_tfm(aead); 2670 2671 areq->complete = rctx->old_complete; 2672 areq->data = rctx->old_data; 2673 2674 areq->complete(areq, err); 2675 } 2676 2677 static int aead_do_fallback(struct aead_request *req, bool is_encrypt) 2678 { 2679 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2680 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 2681 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2682 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 2683 int err; 2684 u32 req_flags; 2685 2686 flow_log("%s() enc:%u\n", __func__, is_encrypt); 2687 2688 if (ctx->fallback_cipher) { 2689 /* Store the cipher tfm and then use the fallback tfm */ 2690 rctx->old_tfm = tfm; 2691 aead_request_set_tfm(req, ctx->fallback_cipher); 2692 /* 2693 * Save the callback and chain ourselves in, so we can restore 2694 * the tfm 2695 */ 2696 rctx->old_complete = req->base.complete; 2697 rctx->old_data = req->base.data; 2698 req_flags = 
aead_request_flags(req); 2699 aead_request_set_callback(req, req_flags, aead_complete, req); 2700 err = is_encrypt ? crypto_aead_encrypt(req) : 2701 crypto_aead_decrypt(req); 2702 2703 if (err == 0) { 2704 /* 2705 * fallback was synchronous (did not return 2706 * -EINPROGRESS). So restore request state here. 2707 */ 2708 aead_request_set_callback(req, req_flags, 2709 rctx->old_complete, req); 2710 req->base.data = rctx->old_data; 2711 aead_request_set_tfm(req, aead); 2712 flow_log("%s() fallback completed successfully\n\n", 2713 __func__); 2714 } 2715 } else { 2716 err = -EINVAL; 2717 } 2718 2719 return err; 2720 } 2721 2722 static int aead_enqueue(struct aead_request *req, bool is_encrypt) 2723 { 2724 struct iproc_reqctx_s *rctx = aead_request_ctx(req); 2725 struct crypto_aead *aead = crypto_aead_reqtfm(req); 2726 struct iproc_ctx_s *ctx = crypto_aead_ctx(aead); 2727 int err; 2728 2729 flow_log("%s() enc:%u\n", __func__, is_encrypt); 2730 2731 if (req->assoclen > MAX_ASSOC_SIZE) { 2732 pr_err 2733 ("%s() Error: associated data too long. (%u > %u bytes)\n", 2734 __func__, req->assoclen, MAX_ASSOC_SIZE); 2735 return -EINVAL; 2736 } 2737 2738 rctx->gfp = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | 2739 CRYPTO_TFM_REQ_MAY_SLEEP)) ? GFP_KERNEL : GFP_ATOMIC; 2740 rctx->parent = &req->base; 2741 rctx->is_encrypt = is_encrypt; 2742 rctx->bd_suppress = false; 2743 rctx->total_todo = req->cryptlen; 2744 rctx->src_sent = 0; 2745 rctx->total_sent = 0; 2746 rctx->total_received = 0; 2747 rctx->is_sw_hmac = false; 2748 rctx->ctx = ctx; 2749 memset(&rctx->mb_mssg, 0, sizeof(struct brcm_message)); 2750 2751 /* assoc data is at start of src sg */ 2752 rctx->assoc = req->src; 2753 2754 /* 2755 * Init current position in src scatterlist to be after assoc data. 2756 * src_skip set to buffer offset where data begins. (Assoc data could 2757 * end in the middle of a buffer.) 2758 */ 2759 if (spu_sg_at_offset(req->src, req->assoclen, &rctx->src_sg, 2760 &rctx->src_skip) < 0) { 2761 pr_err("%s() Error: Unable to find start of src data\n", 2762 __func__); 2763 return -EINVAL; 2764 } 2765 2766 rctx->src_nents = 0; 2767 rctx->dst_nents = 0; 2768 if (req->dst == req->src) { 2769 rctx->dst_sg = rctx->src_sg; 2770 rctx->dst_skip = rctx->src_skip; 2771 } else { 2772 /* 2773 * Expect req->dst to have room for assoc data followed by 2774 * output data and ICV, if encrypt. So initialize dst_sg 2775 * to point beyond assoc len offset. 
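		 * (On encrypt, handle_aead_resp() later copies the ICV into
		 * req->dst immediately after the assoc data and payload, so
		 * dst_sg/dst_skip must start past req->assoclen.)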
2776 */ 2777 if (spu_sg_at_offset(req->dst, req->assoclen, &rctx->dst_sg, 2778 &rctx->dst_skip) < 0) { 2779 pr_err("%s() Error: Unable to find start of dst data\n", 2780 __func__); 2781 return -EINVAL; 2782 } 2783 } 2784 2785 if (ctx->cipher.mode == CIPHER_MODE_CBC || 2786 ctx->cipher.mode == CIPHER_MODE_CTR || 2787 ctx->cipher.mode == CIPHER_MODE_OFB || 2788 ctx->cipher.mode == CIPHER_MODE_XTS || 2789 ctx->cipher.mode == CIPHER_MODE_GCM) { 2790 rctx->iv_ctr_len = 2791 ctx->salt_len + 2792 crypto_aead_ivsize(crypto_aead_reqtfm(req)); 2793 } else if (ctx->cipher.mode == CIPHER_MODE_CCM) { 2794 rctx->iv_ctr_len = CCM_AES_IV_SIZE; 2795 } else { 2796 rctx->iv_ctr_len = 0; 2797 } 2798 2799 rctx->hash_carry_len = 0; 2800 2801 flow_log(" src sg: %p\n", req->src); 2802 flow_log(" rctx->src_sg: %p, src_skip %u\n", 2803 rctx->src_sg, rctx->src_skip); 2804 flow_log(" assoc: %p, assoclen %u\n", rctx->assoc, req->assoclen); 2805 flow_log(" dst sg: %p\n", req->dst); 2806 flow_log(" rctx->dst_sg: %p, dst_skip %u\n", 2807 rctx->dst_sg, rctx->dst_skip); 2808 flow_log(" iv_ctr_len:%u\n", rctx->iv_ctr_len); 2809 flow_dump(" iv: ", req->iv, rctx->iv_ctr_len); 2810 flow_log(" authkeylen:%u\n", ctx->authkeylen); 2811 flow_log(" is_esp: %s\n", ctx->is_esp ? "yes" : "no"); 2812 2813 if (ctx->max_payload == SPU_MAX_PAYLOAD_INF) 2814 flow_log(" max_payload infinite"); 2815 else 2816 flow_log(" max_payload: %u\n", ctx->max_payload); 2817 2818 if (unlikely(aead_need_fallback(req))) 2819 return aead_do_fallback(req, is_encrypt); 2820 2821 /* 2822 * Do memory allocations for request after fallback check, because if we 2823 * do fallback, we won't call finish_req() to dealloc. 2824 */ 2825 if (rctx->iv_ctr_len) { 2826 if (ctx->salt_len) 2827 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset, 2828 ctx->salt, ctx->salt_len); 2829 memcpy(rctx->msg_buf.iv_ctr + ctx->salt_offset + ctx->salt_len, 2830 req->iv, 2831 rctx->iv_ctr_len - ctx->salt_len - ctx->salt_offset); 2832 } 2833 2834 rctx->chan_idx = select_channel(); 2835 err = handle_aead_req(rctx); 2836 if (err != -EINPROGRESS) 2837 /* synchronous result */ 2838 spu_chunk_cleanup(rctx); 2839 2840 return err; 2841 } 2842 2843 static int aead_authenc_setkey(struct crypto_aead *cipher, 2844 const u8 *key, unsigned int keylen) 2845 { 2846 struct spu_hw *spu = &iproc_priv.spu; 2847 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2848 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2849 struct rtattr *rta = (void *)key; 2850 struct crypto_authenc_key_param *param; 2851 const u8 *origkey = key; 2852 const unsigned int origkeylen = keylen; 2853 2854 int ret = 0; 2855 2856 flow_log("%s() aead:%p key:%p keylen:%u\n", __func__, cipher, key, 2857 keylen); 2858 flow_dump(" key: ", key, keylen); 2859 2860 if (!RTA_OK(rta, keylen)) 2861 goto badkey; 2862 if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) 2863 goto badkey; 2864 if (RTA_PAYLOAD(rta) < sizeof(*param)) 2865 goto badkey; 2866 2867 param = RTA_DATA(rta); 2868 ctx->enckeylen = be32_to_cpu(param->enckeylen); 2869 2870 key += RTA_ALIGN(rta->rta_len); 2871 keylen -= RTA_ALIGN(rta->rta_len); 2872 2873 if (keylen < ctx->enckeylen) 2874 goto badkey; 2875 if (ctx->enckeylen > MAX_KEY_SIZE) 2876 goto badkey; 2877 2878 ctx->authkeylen = keylen - ctx->enckeylen; 2879 2880 if (ctx->authkeylen > MAX_KEY_SIZE) 2881 goto badkey; 2882 2883 memcpy(ctx->enckey, key + ctx->authkeylen, ctx->enckeylen); 2884 /* May end up padding auth key. So make sure it's zeroed. 
*/ 2885 memset(ctx->authkey, 0, sizeof(ctx->authkey)); 2886 memcpy(ctx->authkey, key, ctx->authkeylen); 2887 2888 switch (ctx->alg->cipher_info.alg) { 2889 case CIPHER_ALG_DES: 2890 if (ctx->enckeylen == DES_KEY_SIZE) { 2891 u32 tmp[DES_EXPKEY_WORDS]; 2892 u32 flags = CRYPTO_TFM_RES_WEAK_KEY; 2893 2894 if (des_ekey(tmp, key) == 0) { 2895 if (crypto_aead_get_flags(cipher) & 2896 CRYPTO_TFM_REQ_WEAK_KEY) { 2897 crypto_aead_set_flags(cipher, flags); 2898 return -EINVAL; 2899 } 2900 } 2901 2902 ctx->cipher_type = CIPHER_TYPE_DES; 2903 } else { 2904 goto badkey; 2905 } 2906 break; 2907 case CIPHER_ALG_3DES: 2908 if (ctx->enckeylen == (DES_KEY_SIZE * 3)) { 2909 const u32 *K = (const u32 *)key; 2910 u32 flags = CRYPTO_TFM_RES_BAD_KEY_SCHED; 2911 2912 if (!((K[0] ^ K[2]) | (K[1] ^ K[3])) || 2913 !((K[2] ^ K[4]) | (K[3] ^ K[5]))) { 2914 crypto_aead_set_flags(cipher, flags); 2915 return -EINVAL; 2916 } 2917 2918 ctx->cipher_type = CIPHER_TYPE_3DES; 2919 } else { 2920 crypto_aead_set_flags(cipher, 2921 CRYPTO_TFM_RES_BAD_KEY_LEN); 2922 return -EINVAL; 2923 } 2924 break; 2925 case CIPHER_ALG_AES: 2926 switch (ctx->enckeylen) { 2927 case AES_KEYSIZE_128: 2928 ctx->cipher_type = CIPHER_TYPE_AES128; 2929 break; 2930 case AES_KEYSIZE_192: 2931 ctx->cipher_type = CIPHER_TYPE_AES192; 2932 break; 2933 case AES_KEYSIZE_256: 2934 ctx->cipher_type = CIPHER_TYPE_AES256; 2935 break; 2936 default: 2937 goto badkey; 2938 } 2939 break; 2940 case CIPHER_ALG_RC4: 2941 ctx->cipher_type = CIPHER_TYPE_INIT; 2942 break; 2943 default: 2944 pr_err("%s() Error: Unknown cipher alg\n", __func__); 2945 return -EINVAL; 2946 } 2947 2948 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, 2949 ctx->authkeylen); 2950 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); 2951 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen); 2952 2953 /* setkey the fallback just in case we need to use it */ 2954 if (ctx->fallback_cipher) { 2955 flow_log(" running fallback setkey()\n"); 2956 2957 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 2958 ctx->fallback_cipher->base.crt_flags |= 2959 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 2960 ret = 2961 crypto_aead_setkey(ctx->fallback_cipher, origkey, 2962 origkeylen); 2963 if (ret) { 2964 flow_log(" fallback setkey() returned:%d\n", ret); 2965 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 2966 tfm->crt_flags |= 2967 (ctx->fallback_cipher->base.crt_flags & 2968 CRYPTO_TFM_RES_MASK); 2969 } 2970 } 2971 2972 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 2973 ctx->enckeylen, 2974 false); 2975 2976 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]); 2977 2978 return ret; 2979 2980 badkey: 2981 ctx->enckeylen = 0; 2982 ctx->authkeylen = 0; 2983 ctx->digestsize = 0; 2984 2985 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 2986 return -EINVAL; 2987 } 2988 2989 static int aead_gcm_ccm_setkey(struct crypto_aead *cipher, 2990 const u8 *key, unsigned int keylen) 2991 { 2992 struct spu_hw *spu = &iproc_priv.spu; 2993 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 2994 struct crypto_tfm *tfm = crypto_aead_tfm(cipher); 2995 2996 int ret = 0; 2997 2998 flow_log("%s() keylen:%u\n", __func__, keylen); 2999 flow_dump(" key: ", key, keylen); 3000 3001 if (!ctx->is_esp) 3002 ctx->digestsize = keylen; 3003 3004 ctx->enckeylen = keylen; 3005 ctx->authkeylen = 0; 3006 memcpy(ctx->enckey, key, ctx->enckeylen); 3007 3008 switch (ctx->enckeylen) { 3009 case AES_KEYSIZE_128: 3010 ctx->cipher_type = CIPHER_TYPE_AES128; 3011 break; 3012 case AES_KEYSIZE_192: 3013 ctx->cipher_type =
CIPHER_TYPE_AES192; 3014 break; 3015 case AES_KEYSIZE_256: 3016 ctx->cipher_type = CIPHER_TYPE_AES256; 3017 break; 3018 default: 3019 goto badkey; 3020 } 3021 3022 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, 3023 ctx->authkeylen); 3024 flow_dump(" enc: ", ctx->enckey, ctx->enckeylen); 3025 flow_dump(" auth: ", ctx->authkey, ctx->authkeylen); 3026 3027 /* setkey the fallback just in case we need to use it */ 3028 if (ctx->fallback_cipher) { 3029 flow_log(" running fallback setkey()\n"); 3030 3031 ctx->fallback_cipher->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK; 3032 ctx->fallback_cipher->base.crt_flags |= 3033 tfm->crt_flags & CRYPTO_TFM_REQ_MASK; 3034 ret = crypto_aead_setkey(ctx->fallback_cipher, key, 3035 keylen + ctx->salt_len); 3036 if (ret) { 3037 flow_log(" fallback setkey() returned:%d\n", ret); 3038 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK; 3039 tfm->crt_flags |= 3040 (ctx->fallback_cipher->base.crt_flags & 3041 CRYPTO_TFM_RES_MASK); 3042 } 3043 } 3044 3045 ctx->spu_resp_hdr_len = spu->spu_response_hdr_len(ctx->authkeylen, 3046 ctx->enckeylen, 3047 false); 3048 3049 atomic_inc(&iproc_priv.setkey_cnt[SPU_OP_AEAD]); 3050 3051 flow_log(" enckeylen:%u authkeylen:%u\n", ctx->enckeylen, 3052 ctx->authkeylen); 3053 3054 return ret; 3055 3056 badkey: 3057 ctx->enckeylen = 0; 3058 ctx->authkeylen = 0; 3059 ctx->digestsize = 0; 3060 3061 crypto_aead_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 3062 return -EINVAL; 3063 } 3064 3065 /** 3066 * aead_gcm_esp_setkey() - setkey() operation for ESP variant of GCM AES. 3067 * @cipher: AEAD structure 3068 * @key: Key followed by 4 bytes of salt 3069 * @keylen: Length of key plus salt, in bytes 3070 * 3071 * Extracts salt from key and stores it to be prepended to IV on each request. 3072 * Digest is always 16 bytes 3073 * 3074 * Return: Value from generic gcm setkey. 3075 */ 3076 static int aead_gcm_esp_setkey(struct crypto_aead *cipher, 3077 const u8 *key, unsigned int keylen) 3078 { 3079 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3080 3081 flow_log("%s\n", __func__); 3082 ctx->salt_len = GCM_ESP_SALT_SIZE; 3083 ctx->salt_offset = GCM_ESP_SALT_OFFSET; 3084 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); 3085 keylen -= GCM_ESP_SALT_SIZE; 3086 ctx->digestsize = GCM_ESP_DIGESTSIZE; 3087 ctx->is_esp = true; 3088 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE); 3089 3090 return aead_gcm_ccm_setkey(cipher, key, keylen); 3091 } 3092 3093 /** 3094 * rfc4543_gcm_esp_setkey() - setkey operation for RFC4543 variant of GCM/GMAC. 3095 * @cipher: AEAD structure 3096 * @key: Key followed by 4 bytes of salt 3097 * @keylen: Length of key plus salt, in bytes 3098 * 3099 * Extracts salt from key and stores it to be prepended to IV on each request. 3100 * Digest is always 16 bytes 3101 * 3102 * Return: Value from generic gcm setkey.
*/ 3104 static int rfc4543_gcm_esp_setkey(struct crypto_aead *cipher, 3105 const u8 *key, unsigned int keylen) 3106 { 3107 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3108 3109 flow_log("%s\n", __func__); 3110 ctx->salt_len = GCM_ESP_SALT_SIZE; 3111 ctx->salt_offset = GCM_ESP_SALT_OFFSET; 3112 memcpy(ctx->salt, key + keylen - GCM_ESP_SALT_SIZE, GCM_ESP_SALT_SIZE); 3113 keylen -= GCM_ESP_SALT_SIZE; 3114 ctx->digestsize = GCM_ESP_DIGESTSIZE; 3115 ctx->is_esp = true; 3116 ctx->is_rfc4543 = true; 3117 flow_dump("salt: ", ctx->salt, GCM_ESP_SALT_SIZE); 3118 3119 return aead_gcm_ccm_setkey(cipher, key, keylen); 3120 } 3121 3122 /** 3123 * aead_ccm_esp_setkey() - setkey() operation for ESP variant of CCM AES. 3124 * @cipher: AEAD structure 3125 * @key: Key followed by 4 bytes of salt 3126 * @keylen: Length of key plus salt, in bytes 3127 * 3128 * Extracts salt from key and stores it to be prepended to IV on each request. 3129 * Digest is always 16 bytes 3130 * 3131 * Return: Value from generic ccm setkey. 3132 */ 3133 static int aead_ccm_esp_setkey(struct crypto_aead *cipher, 3134 const u8 *key, unsigned int keylen) 3135 { 3136 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3137 3138 flow_log("%s\n", __func__); 3139 ctx->salt_len = CCM_ESP_SALT_SIZE; 3140 ctx->salt_offset = CCM_ESP_SALT_OFFSET; 3141 memcpy(ctx->salt, key + keylen - CCM_ESP_SALT_SIZE, CCM_ESP_SALT_SIZE); 3142 keylen -= CCM_ESP_SALT_SIZE; 3143 ctx->is_esp = true; 3144 flow_dump("salt: ", ctx->salt, CCM_ESP_SALT_SIZE); 3145 3146 return aead_gcm_ccm_setkey(cipher, key, keylen); 3147 } 3148 3149 static int aead_setauthsize(struct crypto_aead *cipher, unsigned int authsize) 3150 { 3151 struct iproc_ctx_s *ctx = crypto_aead_ctx(cipher); 3152 int ret = 0; 3153 3154 flow_log("%s() authkeylen:%u authsize:%u\n", 3155 __func__, ctx->authkeylen, authsize); 3156 3157 ctx->digestsize = authsize; 3158 3159 /* setkey the fallback just in case we need to use it */ 3160 if (ctx->fallback_cipher) { 3161 flow_log(" running fallback setauth()\n"); 3162 3163 ret = crypto_aead_setauthsize(ctx->fallback_cipher, authsize); 3164 if (ret) 3165 flow_log(" fallback setauth() returned:%d\n", ret); 3166 } 3167 3168 return ret; 3169 } 3170 3171 static int aead_encrypt(struct aead_request *req) 3172 { 3173 flow_log("%s() cryptlen:%u %08x\n", __func__, req->cryptlen, 3174 req->cryptlen); 3175 dump_sg(req->src, 0, req->cryptlen + req->assoclen); 3176 flow_log(" assoc_len:%u\n", req->assoclen); 3177 3178 return aead_enqueue(req, true); 3179 } 3180 3181 static int aead_decrypt(struct aead_request *req) 3182 { 3183 flow_log("%s() cryptlen:%u\n", __func__, req->cryptlen); 3184 dump_sg(req->src, 0, req->cryptlen + req->assoclen); 3185 flow_log(" assoc_len:%u\n", req->assoclen); 3186 3187 return aead_enqueue(req, false); 3188 } 3189 3190 /* ==================== Supported Cipher Algorithms ==================== */ 3191 3192 static struct iproc_alg_s driver_algs[] = { 3193 { 3194 .type = CRYPTO_ALG_TYPE_AEAD, 3195 .alg.aead = { 3196 .base = { 3197 .cra_name = "gcm(aes)", 3198 .cra_driver_name = "gcm-aes-iproc", 3199 .cra_blocksize = AES_BLOCK_SIZE, 3200 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3201 }, 3202 .setkey = aead_gcm_ccm_setkey, 3203 .ivsize = GCM_AES_IV_SIZE, 3204 .maxauthsize = AES_BLOCK_SIZE, 3205 }, 3206 .cipher_info = { 3207 .alg = CIPHER_ALG_AES, 3208 .mode = CIPHER_MODE_GCM, 3209 }, 3210 .auth_info = { 3211 .alg = HASH_ALG_AES, 3212 .mode = HASH_MODE_GCM, 3213 }, 3214 .auth_first = 0, 3215 }, 3216 { 3217 .type = CRYPTO_ALG_TYPE_AEAD, 3218
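	 /*
	  * ccm(aes): SPU-M supports only 8-, 12- and 16-byte CCM tags;
	  * requests with other tag sizes are handed to the fallback
	  * cipher (see aead_need_fallback()).
	  */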
.alg.aead = { 3219 .base = { 3220 .cra_name = "ccm(aes)", 3221 .cra_driver_name = "ccm-aes-iproc", 3222 .cra_blocksize = AES_BLOCK_SIZE, 3223 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3224 }, 3225 .setkey = aead_gcm_ccm_setkey, 3226 .ivsize = CCM_AES_IV_SIZE, 3227 .maxauthsize = AES_BLOCK_SIZE, 3228 }, 3229 .cipher_info = { 3230 .alg = CIPHER_ALG_AES, 3231 .mode = CIPHER_MODE_CCM, 3232 }, 3233 .auth_info = { 3234 .alg = HASH_ALG_AES, 3235 .mode = HASH_MODE_CCM, 3236 }, 3237 .auth_first = 0, 3238 }, 3239 { 3240 .type = CRYPTO_ALG_TYPE_AEAD, 3241 .alg.aead = { 3242 .base = { 3243 .cra_name = "rfc4106(gcm(aes))", 3244 .cra_driver_name = "gcm-aes-esp-iproc", 3245 .cra_blocksize = AES_BLOCK_SIZE, 3246 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3247 }, 3248 .setkey = aead_gcm_esp_setkey, 3249 .ivsize = GCM_RFC4106_IV_SIZE, 3250 .maxauthsize = AES_BLOCK_SIZE, 3251 }, 3252 .cipher_info = { 3253 .alg = CIPHER_ALG_AES, 3254 .mode = CIPHER_MODE_GCM, 3255 }, 3256 .auth_info = { 3257 .alg = HASH_ALG_AES, 3258 .mode = HASH_MODE_GCM, 3259 }, 3260 .auth_first = 0, 3261 }, 3262 { 3263 .type = CRYPTO_ALG_TYPE_AEAD, 3264 .alg.aead = { 3265 .base = { 3266 .cra_name = "rfc4309(ccm(aes))", 3267 .cra_driver_name = "ccm-aes-esp-iproc", 3268 .cra_blocksize = AES_BLOCK_SIZE, 3269 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3270 }, 3271 .setkey = aead_ccm_esp_setkey, 3272 .ivsize = CCM_AES_IV_SIZE, 3273 .maxauthsize = AES_BLOCK_SIZE, 3274 }, 3275 .cipher_info = { 3276 .alg = CIPHER_ALG_AES, 3277 .mode = CIPHER_MODE_CCM, 3278 }, 3279 .auth_info = { 3280 .alg = HASH_ALG_AES, 3281 .mode = HASH_MODE_CCM, 3282 }, 3283 .auth_first = 0, 3284 }, 3285 { 3286 .type = CRYPTO_ALG_TYPE_AEAD, 3287 .alg.aead = { 3288 .base = { 3289 .cra_name = "rfc4543(gcm(aes))", 3290 .cra_driver_name = "gmac-aes-esp-iproc", 3291 .cra_blocksize = AES_BLOCK_SIZE, 3292 .cra_flags = CRYPTO_ALG_NEED_FALLBACK 3293 }, 3294 .setkey = rfc4543_gcm_esp_setkey, 3295 .ivsize = GCM_RFC4106_IV_SIZE, 3296 .maxauthsize = AES_BLOCK_SIZE, 3297 }, 3298 .cipher_info = { 3299 .alg = CIPHER_ALG_AES, 3300 .mode = CIPHER_MODE_GCM, 3301 }, 3302 .auth_info = { 3303 .alg = HASH_ALG_AES, 3304 .mode = HASH_MODE_GCM, 3305 }, 3306 .auth_first = 0, 3307 }, 3308 { 3309 .type = CRYPTO_ALG_TYPE_AEAD, 3310 .alg.aead = { 3311 .base = { 3312 .cra_name = "authenc(hmac(md5),cbc(aes))", 3313 .cra_driver_name = "authenc-hmac-md5-cbc-aes-iproc", 3314 .cra_blocksize = AES_BLOCK_SIZE, 3315 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3316 }, 3317 .setkey = aead_authenc_setkey, 3318 .ivsize = AES_BLOCK_SIZE, 3319 .maxauthsize = MD5_DIGEST_SIZE, 3320 }, 3321 .cipher_info = { 3322 .alg = CIPHER_ALG_AES, 3323 .mode = CIPHER_MODE_CBC, 3324 }, 3325 .auth_info = { 3326 .alg = HASH_ALG_MD5, 3327 .mode = HASH_MODE_HMAC, 3328 }, 3329 .auth_first = 0, 3330 }, 3331 { 3332 .type = CRYPTO_ALG_TYPE_AEAD, 3333 .alg.aead = { 3334 .base = { 3335 .cra_name = "authenc(hmac(sha1),cbc(aes))", 3336 .cra_driver_name = "authenc-hmac-sha1-cbc-aes-iproc", 3337 .cra_blocksize = AES_BLOCK_SIZE, 3338 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3339 }, 3340 .setkey = aead_authenc_setkey, 3341 .ivsize = AES_BLOCK_SIZE, 3342 .maxauthsize = SHA1_DIGEST_SIZE, 3343 }, 3344 .cipher_info = { 3345 .alg = CIPHER_ALG_AES, 3346 .mode = CIPHER_MODE_CBC, 3347 }, 3348 .auth_info = { 3349 .alg = HASH_ALG_SHA1, 3350 .mode = HASH_MODE_HMAC, 3351 }, 3352 .auth_first = 0, 3353 }, 3354 { 3355 .type = CRYPTO_ALG_TYPE_AEAD, 3356 .alg.aead = { 3357 .base = { 3358 .cra_name = "authenc(hmac(sha256),cbc(aes))", 3359 .cra_driver_name = 
"authenc-hmac-sha256-cbc-aes-iproc", 3360 .cra_blocksize = AES_BLOCK_SIZE, 3361 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3362 }, 3363 .setkey = aead_authenc_setkey, 3364 .ivsize = AES_BLOCK_SIZE, 3365 .maxauthsize = SHA256_DIGEST_SIZE, 3366 }, 3367 .cipher_info = { 3368 .alg = CIPHER_ALG_AES, 3369 .mode = CIPHER_MODE_CBC, 3370 }, 3371 .auth_info = { 3372 .alg = HASH_ALG_SHA256, 3373 .mode = HASH_MODE_HMAC, 3374 }, 3375 .auth_first = 0, 3376 }, 3377 { 3378 .type = CRYPTO_ALG_TYPE_AEAD, 3379 .alg.aead = { 3380 .base = { 3381 .cra_name = "authenc(hmac(md5),cbc(des))", 3382 .cra_driver_name = "authenc-hmac-md5-cbc-des-iproc", 3383 .cra_blocksize = DES_BLOCK_SIZE, 3384 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3385 }, 3386 .setkey = aead_authenc_setkey, 3387 .ivsize = DES_BLOCK_SIZE, 3388 .maxauthsize = MD5_DIGEST_SIZE, 3389 }, 3390 .cipher_info = { 3391 .alg = CIPHER_ALG_DES, 3392 .mode = CIPHER_MODE_CBC, 3393 }, 3394 .auth_info = { 3395 .alg = HASH_ALG_MD5, 3396 .mode = HASH_MODE_HMAC, 3397 }, 3398 .auth_first = 0, 3399 }, 3400 { 3401 .type = CRYPTO_ALG_TYPE_AEAD, 3402 .alg.aead = { 3403 .base = { 3404 .cra_name = "authenc(hmac(sha1),cbc(des))", 3405 .cra_driver_name = "authenc-hmac-sha1-cbc-des-iproc", 3406 .cra_blocksize = DES_BLOCK_SIZE, 3407 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3408 }, 3409 .setkey = aead_authenc_setkey, 3410 .ivsize = DES_BLOCK_SIZE, 3411 .maxauthsize = SHA1_DIGEST_SIZE, 3412 }, 3413 .cipher_info = { 3414 .alg = CIPHER_ALG_DES, 3415 .mode = CIPHER_MODE_CBC, 3416 }, 3417 .auth_info = { 3418 .alg = HASH_ALG_SHA1, 3419 .mode = HASH_MODE_HMAC, 3420 }, 3421 .auth_first = 0, 3422 }, 3423 { 3424 .type = CRYPTO_ALG_TYPE_AEAD, 3425 .alg.aead = { 3426 .base = { 3427 .cra_name = "authenc(hmac(sha224),cbc(des))", 3428 .cra_driver_name = "authenc-hmac-sha224-cbc-des-iproc", 3429 .cra_blocksize = DES_BLOCK_SIZE, 3430 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3431 }, 3432 .setkey = aead_authenc_setkey, 3433 .ivsize = DES_BLOCK_SIZE, 3434 .maxauthsize = SHA224_DIGEST_SIZE, 3435 }, 3436 .cipher_info = { 3437 .alg = CIPHER_ALG_DES, 3438 .mode = CIPHER_MODE_CBC, 3439 }, 3440 .auth_info = { 3441 .alg = HASH_ALG_SHA224, 3442 .mode = HASH_MODE_HMAC, 3443 }, 3444 .auth_first = 0, 3445 }, 3446 { 3447 .type = CRYPTO_ALG_TYPE_AEAD, 3448 .alg.aead = { 3449 .base = { 3450 .cra_name = "authenc(hmac(sha256),cbc(des))", 3451 .cra_driver_name = "authenc-hmac-sha256-cbc-des-iproc", 3452 .cra_blocksize = DES_BLOCK_SIZE, 3453 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3454 }, 3455 .setkey = aead_authenc_setkey, 3456 .ivsize = DES_BLOCK_SIZE, 3457 .maxauthsize = SHA256_DIGEST_SIZE, 3458 }, 3459 .cipher_info = { 3460 .alg = CIPHER_ALG_DES, 3461 .mode = CIPHER_MODE_CBC, 3462 }, 3463 .auth_info = { 3464 .alg = HASH_ALG_SHA256, 3465 .mode = HASH_MODE_HMAC, 3466 }, 3467 .auth_first = 0, 3468 }, 3469 { 3470 .type = CRYPTO_ALG_TYPE_AEAD, 3471 .alg.aead = { 3472 .base = { 3473 .cra_name = "authenc(hmac(sha384),cbc(des))", 3474 .cra_driver_name = "authenc-hmac-sha384-cbc-des-iproc", 3475 .cra_blocksize = DES_BLOCK_SIZE, 3476 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3477 }, 3478 .setkey = aead_authenc_setkey, 3479 .ivsize = DES_BLOCK_SIZE, 3480 .maxauthsize = SHA384_DIGEST_SIZE, 3481 }, 3482 .cipher_info = { 3483 .alg = CIPHER_ALG_DES, 3484 .mode = CIPHER_MODE_CBC, 3485 }, 3486 .auth_info = { 3487 .alg = HASH_ALG_SHA384, 3488 .mode = HASH_MODE_HMAC, 3489 }, 3490 .auth_first = 0, 3491 }, 3492 { 3493 .type = 
CRYPTO_ALG_TYPE_AEAD, 3494 .alg.aead = { 3495 .base = { 3496 .cra_name = "authenc(hmac(sha512),cbc(des))", 3497 .cra_driver_name = "authenc-hmac-sha512-cbc-des-iproc", 3498 .cra_blocksize = DES_BLOCK_SIZE, 3499 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3500 }, 3501 .setkey = aead_authenc_setkey, 3502 .ivsize = DES_BLOCK_SIZE, 3503 .maxauthsize = SHA512_DIGEST_SIZE, 3504 }, 3505 .cipher_info = { 3506 .alg = CIPHER_ALG_DES, 3507 .mode = CIPHER_MODE_CBC, 3508 }, 3509 .auth_info = { 3510 .alg = HASH_ALG_SHA512, 3511 .mode = HASH_MODE_HMAC, 3512 }, 3513 .auth_first = 0, 3514 }, 3515 { 3516 .type = CRYPTO_ALG_TYPE_AEAD, 3517 .alg.aead = { 3518 .base = { 3519 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 3520 .cra_driver_name = "authenc-hmac-md5-cbc-des3-iproc", 3521 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3522 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3523 }, 3524 .setkey = aead_authenc_setkey, 3525 .ivsize = DES3_EDE_BLOCK_SIZE, 3526 .maxauthsize = MD5_DIGEST_SIZE, 3527 }, 3528 .cipher_info = { 3529 .alg = CIPHER_ALG_3DES, 3530 .mode = CIPHER_MODE_CBC, 3531 }, 3532 .auth_info = { 3533 .alg = HASH_ALG_MD5, 3534 .mode = HASH_MODE_HMAC, 3535 }, 3536 .auth_first = 0, 3537 }, 3538 { 3539 .type = CRYPTO_ALG_TYPE_AEAD, 3540 .alg.aead = { 3541 .base = { 3542 .cra_name = "authenc(hmac(sha1),cbc(des3_ede))", 3543 .cra_driver_name = "authenc-hmac-sha1-cbc-des3-iproc", 3544 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3545 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3546 }, 3547 .setkey = aead_authenc_setkey, 3548 .ivsize = DES3_EDE_BLOCK_SIZE, 3549 .maxauthsize = SHA1_DIGEST_SIZE, 3550 }, 3551 .cipher_info = { 3552 .alg = CIPHER_ALG_3DES, 3553 .mode = CIPHER_MODE_CBC, 3554 }, 3555 .auth_info = { 3556 .alg = HASH_ALG_SHA1, 3557 .mode = HASH_MODE_HMAC, 3558 }, 3559 .auth_first = 0, 3560 }, 3561 { 3562 .type = CRYPTO_ALG_TYPE_AEAD, 3563 .alg.aead = { 3564 .base = { 3565 .cra_name = "authenc(hmac(sha224),cbc(des3_ede))", 3566 .cra_driver_name = "authenc-hmac-sha224-cbc-des3-iproc", 3567 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3568 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3569 }, 3570 .setkey = aead_authenc_setkey, 3571 .ivsize = DES3_EDE_BLOCK_SIZE, 3572 .maxauthsize = SHA224_DIGEST_SIZE, 3573 }, 3574 .cipher_info = { 3575 .alg = CIPHER_ALG_3DES, 3576 .mode = CIPHER_MODE_CBC, 3577 }, 3578 .auth_info = { 3579 .alg = HASH_ALG_SHA224, 3580 .mode = HASH_MODE_HMAC, 3581 }, 3582 .auth_first = 0, 3583 }, 3584 { 3585 .type = CRYPTO_ALG_TYPE_AEAD, 3586 .alg.aead = { 3587 .base = { 3588 .cra_name = "authenc(hmac(sha256),cbc(des3_ede))", 3589 .cra_driver_name = "authenc-hmac-sha256-cbc-des3-iproc", 3590 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3591 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3592 }, 3593 .setkey = aead_authenc_setkey, 3594 .ivsize = DES3_EDE_BLOCK_SIZE, 3595 .maxauthsize = SHA256_DIGEST_SIZE, 3596 }, 3597 .cipher_info = { 3598 .alg = CIPHER_ALG_3DES, 3599 .mode = CIPHER_MODE_CBC, 3600 }, 3601 .auth_info = { 3602 .alg = HASH_ALG_SHA256, 3603 .mode = HASH_MODE_HMAC, 3604 }, 3605 .auth_first = 0, 3606 }, 3607 { 3608 .type = CRYPTO_ALG_TYPE_AEAD, 3609 .alg.aead = { 3610 .base = { 3611 .cra_name = "authenc(hmac(sha384),cbc(des3_ede))", 3612 .cra_driver_name = "authenc-hmac-sha384-cbc-des3-iproc", 3613 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3614 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3615 }, 3616 .setkey = aead_authenc_setkey, 3617 .ivsize = DES3_EDE_BLOCK_SIZE, 3618 .maxauthsize = SHA384_DIGEST_SIZE, 3619 }, 3620 .cipher_info = { 
3621 .alg = CIPHER_ALG_3DES, 3622 .mode = CIPHER_MODE_CBC, 3623 }, 3624 .auth_info = { 3625 .alg = HASH_ALG_SHA384, 3626 .mode = HASH_MODE_HMAC, 3627 }, 3628 .auth_first = 0, 3629 }, 3630 { 3631 .type = CRYPTO_ALG_TYPE_AEAD, 3632 .alg.aead = { 3633 .base = { 3634 .cra_name = "authenc(hmac(sha512),cbc(des3_ede))", 3635 .cra_driver_name = "authenc-hmac-sha512-cbc-des3-iproc", 3636 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3637 .cra_flags = CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC 3638 }, 3639 .setkey = aead_authenc_setkey, 3640 .ivsize = DES3_EDE_BLOCK_SIZE, 3641 .maxauthsize = SHA512_DIGEST_SIZE, 3642 }, 3643 .cipher_info = { 3644 .alg = CIPHER_ALG_3DES, 3645 .mode = CIPHER_MODE_CBC, 3646 }, 3647 .auth_info = { 3648 .alg = HASH_ALG_SHA512, 3649 .mode = HASH_MODE_HMAC, 3650 }, 3651 .auth_first = 0, 3652 }, 3653 3654 /* ABLKCIPHER algorithms. */ 3655 { 3656 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3657 .alg.crypto = { 3658 .cra_name = "ecb(arc4)", 3659 .cra_driver_name = "ecb-arc4-iproc", 3660 .cra_blocksize = ARC4_BLOCK_SIZE, 3661 .cra_ablkcipher = { 3662 .min_keysize = ARC4_MIN_KEY_SIZE, 3663 .max_keysize = ARC4_MAX_KEY_SIZE, 3664 .ivsize = 0, 3665 } 3666 }, 3667 .cipher_info = { 3668 .alg = CIPHER_ALG_RC4, 3669 .mode = CIPHER_MODE_NONE, 3670 }, 3671 .auth_info = { 3672 .alg = HASH_ALG_NONE, 3673 .mode = HASH_MODE_NONE, 3674 }, 3675 }, 3676 { 3677 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3678 .alg.crypto = { 3679 .cra_name = "ofb(des)", 3680 .cra_driver_name = "ofb-des-iproc", 3681 .cra_blocksize = DES_BLOCK_SIZE, 3682 .cra_ablkcipher = { 3683 .min_keysize = DES_KEY_SIZE, 3684 .max_keysize = DES_KEY_SIZE, 3685 .ivsize = DES_BLOCK_SIZE, 3686 } 3687 }, 3688 .cipher_info = { 3689 .alg = CIPHER_ALG_DES, 3690 .mode = CIPHER_MODE_OFB, 3691 }, 3692 .auth_info = { 3693 .alg = HASH_ALG_NONE, 3694 .mode = HASH_MODE_NONE, 3695 }, 3696 }, 3697 { 3698 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3699 .alg.crypto = { 3700 .cra_name = "cbc(des)", 3701 .cra_driver_name = "cbc-des-iproc", 3702 .cra_blocksize = DES_BLOCK_SIZE, 3703 .cra_ablkcipher = { 3704 .min_keysize = DES_KEY_SIZE, 3705 .max_keysize = DES_KEY_SIZE, 3706 .ivsize = DES_BLOCK_SIZE, 3707 } 3708 }, 3709 .cipher_info = { 3710 .alg = CIPHER_ALG_DES, 3711 .mode = CIPHER_MODE_CBC, 3712 }, 3713 .auth_info = { 3714 .alg = HASH_ALG_NONE, 3715 .mode = HASH_MODE_NONE, 3716 }, 3717 }, 3718 { 3719 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3720 .alg.crypto = { 3721 .cra_name = "ecb(des)", 3722 .cra_driver_name = "ecb-des-iproc", 3723 .cra_blocksize = DES_BLOCK_SIZE, 3724 .cra_ablkcipher = { 3725 .min_keysize = DES_KEY_SIZE, 3726 .max_keysize = DES_KEY_SIZE, 3727 .ivsize = 0, 3728 } 3729 }, 3730 .cipher_info = { 3731 .alg = CIPHER_ALG_DES, 3732 .mode = CIPHER_MODE_ECB, 3733 }, 3734 .auth_info = { 3735 .alg = HASH_ALG_NONE, 3736 .mode = HASH_MODE_NONE, 3737 }, 3738 }, 3739 { 3740 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3741 .alg.crypto = { 3742 .cra_name = "ofb(des3_ede)", 3743 .cra_driver_name = "ofb-des3-iproc", 3744 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3745 .cra_ablkcipher = { 3746 .min_keysize = DES3_EDE_KEY_SIZE, 3747 .max_keysize = DES3_EDE_KEY_SIZE, 3748 .ivsize = DES3_EDE_BLOCK_SIZE, 3749 } 3750 }, 3751 .cipher_info = { 3752 .alg = CIPHER_ALG_3DES, 3753 .mode = CIPHER_MODE_OFB, 3754 }, 3755 .auth_info = { 3756 .alg = HASH_ALG_NONE, 3757 .mode = HASH_MODE_NONE, 3758 }, 3759 }, 3760 { 3761 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3762 .alg.crypto = { 3763 .cra_name = "cbc(des3_ede)", 3764 .cra_driver_name = "cbc-des3-iproc", 3765 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3766 
.cra_ablkcipher = { 3767 .min_keysize = DES3_EDE_KEY_SIZE, 3768 .max_keysize = DES3_EDE_KEY_SIZE, 3769 .ivsize = DES3_EDE_BLOCK_SIZE, 3770 } 3771 }, 3772 .cipher_info = { 3773 .alg = CIPHER_ALG_3DES, 3774 .mode = CIPHER_MODE_CBC, 3775 }, 3776 .auth_info = { 3777 .alg = HASH_ALG_NONE, 3778 .mode = HASH_MODE_NONE, 3779 }, 3780 }, 3781 { 3782 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3783 .alg.crypto = { 3784 .cra_name = "ecb(des3_ede)", 3785 .cra_driver_name = "ecb-des3-iproc", 3786 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 3787 .cra_ablkcipher = { 3788 .min_keysize = DES3_EDE_KEY_SIZE, 3789 .max_keysize = DES3_EDE_KEY_SIZE, 3790 .ivsize = 0, 3791 } 3792 }, 3793 .cipher_info = { 3794 .alg = CIPHER_ALG_3DES, 3795 .mode = CIPHER_MODE_ECB, 3796 }, 3797 .auth_info = { 3798 .alg = HASH_ALG_NONE, 3799 .mode = HASH_MODE_NONE, 3800 }, 3801 }, 3802 { 3803 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3804 .alg.crypto = { 3805 .cra_name = "ofb(aes)", 3806 .cra_driver_name = "ofb-aes-iproc", 3807 .cra_blocksize = AES_BLOCK_SIZE, 3808 .cra_ablkcipher = { 3809 .min_keysize = AES_MIN_KEY_SIZE, 3810 .max_keysize = AES_MAX_KEY_SIZE, 3811 .ivsize = AES_BLOCK_SIZE, 3812 } 3813 }, 3814 .cipher_info = { 3815 .alg = CIPHER_ALG_AES, 3816 .mode = CIPHER_MODE_OFB, 3817 }, 3818 .auth_info = { 3819 .alg = HASH_ALG_NONE, 3820 .mode = HASH_MODE_NONE, 3821 }, 3822 }, 3823 { 3824 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3825 .alg.crypto = { 3826 .cra_name = "cbc(aes)", 3827 .cra_driver_name = "cbc-aes-iproc", 3828 .cra_blocksize = AES_BLOCK_SIZE, 3829 .cra_ablkcipher = { 3830 .min_keysize = AES_MIN_KEY_SIZE, 3831 .max_keysize = AES_MAX_KEY_SIZE, 3832 .ivsize = AES_BLOCK_SIZE, 3833 } 3834 }, 3835 .cipher_info = { 3836 .alg = CIPHER_ALG_AES, 3837 .mode = CIPHER_MODE_CBC, 3838 }, 3839 .auth_info = { 3840 .alg = HASH_ALG_NONE, 3841 .mode = HASH_MODE_NONE, 3842 }, 3843 }, 3844 { 3845 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3846 .alg.crypto = { 3847 .cra_name = "ecb(aes)", 3848 .cra_driver_name = "ecb-aes-iproc", 3849 .cra_blocksize = AES_BLOCK_SIZE, 3850 .cra_ablkcipher = { 3851 .min_keysize = AES_MIN_KEY_SIZE, 3852 .max_keysize = AES_MAX_KEY_SIZE, 3853 .ivsize = 0, 3854 } 3855 }, 3856 .cipher_info = { 3857 .alg = CIPHER_ALG_AES, 3858 .mode = CIPHER_MODE_ECB, 3859 }, 3860 .auth_info = { 3861 .alg = HASH_ALG_NONE, 3862 .mode = HASH_MODE_NONE, 3863 }, 3864 }, 3865 { 3866 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3867 .alg.crypto = { 3868 .cra_name = "ctr(aes)", 3869 .cra_driver_name = "ctr-aes-iproc", 3870 .cra_blocksize = AES_BLOCK_SIZE, 3871 .cra_ablkcipher = { 3872 /* .geniv = "chainiv", */ 3873 .min_keysize = AES_MIN_KEY_SIZE, 3874 .max_keysize = AES_MAX_KEY_SIZE, 3875 .ivsize = AES_BLOCK_SIZE, 3876 } 3877 }, 3878 .cipher_info = { 3879 .alg = CIPHER_ALG_AES, 3880 .mode = CIPHER_MODE_CTR, 3881 }, 3882 .auth_info = { 3883 .alg = HASH_ALG_NONE, 3884 .mode = HASH_MODE_NONE, 3885 }, 3886 }, 3887 { 3888 .type = CRYPTO_ALG_TYPE_ABLKCIPHER, 3889 .alg.crypto = { 3890 .cra_name = "xts(aes)", 3891 .cra_driver_name = "xts-aes-iproc", 3892 .cra_blocksize = AES_BLOCK_SIZE, 3893 .cra_ablkcipher = { 3894 .min_keysize = 2 * AES_MIN_KEY_SIZE, 3895 .max_keysize = 2 * AES_MAX_KEY_SIZE, 3896 .ivsize = AES_BLOCK_SIZE, 3897 } 3898 }, 3899 .cipher_info = { 3900 .alg = CIPHER_ALG_AES, 3901 .mode = CIPHER_MODE_XTS, 3902 }, 3903 .auth_info = { 3904 .alg = HASH_ALG_NONE, 3905 .mode = HASH_MODE_NONE, 3906 }, 3907 }, 3908 3909 /* AHASH algorithms. 
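 * The MD5, SHA-1, SHA-2 and SHA-3 digests below are each registered as a
 * plain hash and as an HMAC variant; AES is exposed only as the XCBC and
 * CMAC MACs. cipher_info is unused (CIPHER_ALG_NONE) for these entries, and
 * the registration priority comes from the hash_pri module parameter.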
*/ 3910 { 3911 .type = CRYPTO_ALG_TYPE_AHASH, 3912 .alg.hash = { 3913 .halg.digestsize = MD5_DIGEST_SIZE, 3914 .halg.base = { 3915 .cra_name = "md5", 3916 .cra_driver_name = "md5-iproc", 3917 .cra_blocksize = MD5_BLOCK_WORDS * 4, 3918 .cra_flags = CRYPTO_ALG_TYPE_AHASH | 3919 CRYPTO_ALG_ASYNC, 3920 } 3921 }, 3922 .cipher_info = { 3923 .alg = CIPHER_ALG_NONE, 3924 .mode = CIPHER_MODE_NONE, 3925 }, 3926 .auth_info = { 3927 .alg = HASH_ALG_MD5, 3928 .mode = HASH_MODE_HASH, 3929 }, 3930 }, 3931 { 3932 .type = CRYPTO_ALG_TYPE_AHASH, 3933 .alg.hash = { 3934 .halg.digestsize = MD5_DIGEST_SIZE, 3935 .halg.base = { 3936 .cra_name = "hmac(md5)", 3937 .cra_driver_name = "hmac-md5-iproc", 3938 .cra_blocksize = MD5_BLOCK_WORDS * 4, 3939 } 3940 }, 3941 .cipher_info = { 3942 .alg = CIPHER_ALG_NONE, 3943 .mode = CIPHER_MODE_NONE, 3944 }, 3945 .auth_info = { 3946 .alg = HASH_ALG_MD5, 3947 .mode = HASH_MODE_HMAC, 3948 }, 3949 }, 3950 {.type = CRYPTO_ALG_TYPE_AHASH, 3951 .alg.hash = { 3952 .halg.digestsize = SHA1_DIGEST_SIZE, 3953 .halg.base = { 3954 .cra_name = "sha1", 3955 .cra_driver_name = "sha1-iproc", 3956 .cra_blocksize = SHA1_BLOCK_SIZE, 3957 } 3958 }, 3959 .cipher_info = { 3960 .alg = CIPHER_ALG_NONE, 3961 .mode = CIPHER_MODE_NONE, 3962 }, 3963 .auth_info = { 3964 .alg = HASH_ALG_SHA1, 3965 .mode = HASH_MODE_HASH, 3966 }, 3967 }, 3968 {.type = CRYPTO_ALG_TYPE_AHASH, 3969 .alg.hash = { 3970 .halg.digestsize = SHA1_DIGEST_SIZE, 3971 .halg.base = { 3972 .cra_name = "hmac(sha1)", 3973 .cra_driver_name = "hmac-sha1-iproc", 3974 .cra_blocksize = SHA1_BLOCK_SIZE, 3975 } 3976 }, 3977 .cipher_info = { 3978 .alg = CIPHER_ALG_NONE, 3979 .mode = CIPHER_MODE_NONE, 3980 }, 3981 .auth_info = { 3982 .alg = HASH_ALG_SHA1, 3983 .mode = HASH_MODE_HMAC, 3984 }, 3985 }, 3986 {.type = CRYPTO_ALG_TYPE_AHASH, 3987 .alg.hash = { 3988 .halg.digestsize = SHA224_DIGEST_SIZE, 3989 .halg.base = { 3990 .cra_name = "sha224", 3991 .cra_driver_name = "sha224-iproc", 3992 .cra_blocksize = SHA224_BLOCK_SIZE, 3993 } 3994 }, 3995 .cipher_info = { 3996 .alg = CIPHER_ALG_NONE, 3997 .mode = CIPHER_MODE_NONE, 3998 }, 3999 .auth_info = { 4000 .alg = HASH_ALG_SHA224, 4001 .mode = HASH_MODE_HASH, 4002 }, 4003 }, 4004 {.type = CRYPTO_ALG_TYPE_AHASH, 4005 .alg.hash = { 4006 .halg.digestsize = SHA224_DIGEST_SIZE, 4007 .halg.base = { 4008 .cra_name = "hmac(sha224)", 4009 .cra_driver_name = "hmac-sha224-iproc", 4010 .cra_blocksize = SHA224_BLOCK_SIZE, 4011 } 4012 }, 4013 .cipher_info = { 4014 .alg = CIPHER_ALG_NONE, 4015 .mode = CIPHER_MODE_NONE, 4016 }, 4017 .auth_info = { 4018 .alg = HASH_ALG_SHA224, 4019 .mode = HASH_MODE_HMAC, 4020 }, 4021 }, 4022 {.type = CRYPTO_ALG_TYPE_AHASH, 4023 .alg.hash = { 4024 .halg.digestsize = SHA256_DIGEST_SIZE, 4025 .halg.base = { 4026 .cra_name = "sha256", 4027 .cra_driver_name = "sha256-iproc", 4028 .cra_blocksize = SHA256_BLOCK_SIZE, 4029 } 4030 }, 4031 .cipher_info = { 4032 .alg = CIPHER_ALG_NONE, 4033 .mode = CIPHER_MODE_NONE, 4034 }, 4035 .auth_info = { 4036 .alg = HASH_ALG_SHA256, 4037 .mode = HASH_MODE_HASH, 4038 }, 4039 }, 4040 {.type = CRYPTO_ALG_TYPE_AHASH, 4041 .alg.hash = { 4042 .halg.digestsize = SHA256_DIGEST_SIZE, 4043 .halg.base = { 4044 .cra_name = "hmac(sha256)", 4045 .cra_driver_name = "hmac-sha256-iproc", 4046 .cra_blocksize = SHA256_BLOCK_SIZE, 4047 } 4048 }, 4049 .cipher_info = { 4050 .alg = CIPHER_ALG_NONE, 4051 .mode = CIPHER_MODE_NONE, 4052 }, 4053 .auth_info = { 4054 .alg = HASH_ALG_SHA256, 4055 .mode = HASH_MODE_HMAC, 4056 }, 4057 }, 4058 { 4059 .type = CRYPTO_ALG_TYPE_AHASH, 4060 
.alg.hash = { 4061 .halg.digestsize = SHA384_DIGEST_SIZE, 4062 .halg.base = { 4063 .cra_name = "sha384", 4064 .cra_driver_name = "sha384-iproc", 4065 .cra_blocksize = SHA384_BLOCK_SIZE, 4066 } 4067 }, 4068 .cipher_info = { 4069 .alg = CIPHER_ALG_NONE, 4070 .mode = CIPHER_MODE_NONE, 4071 }, 4072 .auth_info = { 4073 .alg = HASH_ALG_SHA384, 4074 .mode = HASH_MODE_HASH, 4075 }, 4076 }, 4077 { 4078 .type = CRYPTO_ALG_TYPE_AHASH, 4079 .alg.hash = { 4080 .halg.digestsize = SHA384_DIGEST_SIZE, 4081 .halg.base = { 4082 .cra_name = "hmac(sha384)", 4083 .cra_driver_name = "hmac-sha384-iproc", 4084 .cra_blocksize = SHA384_BLOCK_SIZE, 4085 } 4086 }, 4087 .cipher_info = { 4088 .alg = CIPHER_ALG_NONE, 4089 .mode = CIPHER_MODE_NONE, 4090 }, 4091 .auth_info = { 4092 .alg = HASH_ALG_SHA384, 4093 .mode = HASH_MODE_HMAC, 4094 }, 4095 }, 4096 { 4097 .type = CRYPTO_ALG_TYPE_AHASH, 4098 .alg.hash = { 4099 .halg.digestsize = SHA512_DIGEST_SIZE, 4100 .halg.base = { 4101 .cra_name = "sha512", 4102 .cra_driver_name = "sha512-iproc", 4103 .cra_blocksize = SHA512_BLOCK_SIZE, 4104 } 4105 }, 4106 .cipher_info = { 4107 .alg = CIPHER_ALG_NONE, 4108 .mode = CIPHER_MODE_NONE, 4109 }, 4110 .auth_info = { 4111 .alg = HASH_ALG_SHA512, 4112 .mode = HASH_MODE_HASH, 4113 }, 4114 }, 4115 { 4116 .type = CRYPTO_ALG_TYPE_AHASH, 4117 .alg.hash = { 4118 .halg.digestsize = SHA512_DIGEST_SIZE, 4119 .halg.base = { 4120 .cra_name = "hmac(sha512)", 4121 .cra_driver_name = "hmac-sha512-iproc", 4122 .cra_blocksize = SHA512_BLOCK_SIZE, 4123 } 4124 }, 4125 .cipher_info = { 4126 .alg = CIPHER_ALG_NONE, 4127 .mode = CIPHER_MODE_NONE, 4128 }, 4129 .auth_info = { 4130 .alg = HASH_ALG_SHA512, 4131 .mode = HASH_MODE_HMAC, 4132 }, 4133 }, 4134 { 4135 .type = CRYPTO_ALG_TYPE_AHASH, 4136 .alg.hash = { 4137 .halg.digestsize = SHA3_224_DIGEST_SIZE, 4138 .halg.base = { 4139 .cra_name = "sha3-224", 4140 .cra_driver_name = "sha3-224-iproc", 4141 .cra_blocksize = SHA3_224_BLOCK_SIZE, 4142 } 4143 }, 4144 .cipher_info = { 4145 .alg = CIPHER_ALG_NONE, 4146 .mode = CIPHER_MODE_NONE, 4147 }, 4148 .auth_info = { 4149 .alg = HASH_ALG_SHA3_224, 4150 .mode = HASH_MODE_HASH, 4151 }, 4152 }, 4153 { 4154 .type = CRYPTO_ALG_TYPE_AHASH, 4155 .alg.hash = { 4156 .halg.digestsize = SHA3_224_DIGEST_SIZE, 4157 .halg.base = { 4158 .cra_name = "hmac(sha3-224)", 4159 .cra_driver_name = "hmac-sha3-224-iproc", 4160 .cra_blocksize = SHA3_224_BLOCK_SIZE, 4161 } 4162 }, 4163 .cipher_info = { 4164 .alg = CIPHER_ALG_NONE, 4165 .mode = CIPHER_MODE_NONE, 4166 }, 4167 .auth_info = { 4168 .alg = HASH_ALG_SHA3_224, 4169 .mode = HASH_MODE_HMAC 4170 }, 4171 }, 4172 { 4173 .type = CRYPTO_ALG_TYPE_AHASH, 4174 .alg.hash = { 4175 .halg.digestsize = SHA3_256_DIGEST_SIZE, 4176 .halg.base = { 4177 .cra_name = "sha3-256", 4178 .cra_driver_name = "sha3-256-iproc", 4179 .cra_blocksize = SHA3_256_BLOCK_SIZE, 4180 } 4181 }, 4182 .cipher_info = { 4183 .alg = CIPHER_ALG_NONE, 4184 .mode = CIPHER_MODE_NONE, 4185 }, 4186 .auth_info = { 4187 .alg = HASH_ALG_SHA3_256, 4188 .mode = HASH_MODE_HASH, 4189 }, 4190 }, 4191 { 4192 .type = CRYPTO_ALG_TYPE_AHASH, 4193 .alg.hash = { 4194 .halg.digestsize = SHA3_256_DIGEST_SIZE, 4195 .halg.base = { 4196 .cra_name = "hmac(sha3-256)", 4197 .cra_driver_name = "hmac-sha3-256-iproc", 4198 .cra_blocksize = SHA3_256_BLOCK_SIZE, 4199 } 4200 }, 4201 .cipher_info = { 4202 .alg = CIPHER_ALG_NONE, 4203 .mode = CIPHER_MODE_NONE, 4204 }, 4205 .auth_info = { 4206 .alg = HASH_ALG_SHA3_256, 4207 .mode = HASH_MODE_HMAC, 4208 }, 4209 }, 4210 { 4211 .type = CRYPTO_ALG_TYPE_AHASH, 4212 
.alg.hash = { 4213 .halg.digestsize = SHA3_384_DIGEST_SIZE, 4214 .halg.base = { 4215 .cra_name = "sha3-384", 4216 .cra_driver_name = "sha3-384-iproc", 4217 .cra_blocksize = SHA3_384_BLOCK_SIZE, 4218 } 4219 }, 4220 .cipher_info = { 4221 .alg = CIPHER_ALG_NONE, 4222 .mode = CIPHER_MODE_NONE, 4223 }, 4224 .auth_info = { 4225 .alg = HASH_ALG_SHA3_384, 4226 .mode = HASH_MODE_HASH, 4227 }, 4228 }, 4229 { 4230 .type = CRYPTO_ALG_TYPE_AHASH, 4231 .alg.hash = { 4232 .halg.digestsize = SHA3_384_DIGEST_SIZE, 4233 .halg.base = { 4234 .cra_name = "hmac(sha3-384)", 4235 .cra_driver_name = "hmac-sha3-384-iproc", 4236 .cra_blocksize = SHA3_384_BLOCK_SIZE, 4237 } 4238 }, 4239 .cipher_info = { 4240 .alg = CIPHER_ALG_NONE, 4241 .mode = CIPHER_MODE_NONE, 4242 }, 4243 .auth_info = { 4244 .alg = HASH_ALG_SHA3_384, 4245 .mode = HASH_MODE_HMAC, 4246 }, 4247 }, 4248 { 4249 .type = CRYPTO_ALG_TYPE_AHASH, 4250 .alg.hash = { 4251 .halg.digestsize = SHA3_512_DIGEST_SIZE, 4252 .halg.base = { 4253 .cra_name = "sha3-512", 4254 .cra_driver_name = "sha3-512-iproc", 4255 .cra_blocksize = SHA3_512_BLOCK_SIZE, 4256 } 4257 }, 4258 .cipher_info = { 4259 .alg = CIPHER_ALG_NONE, 4260 .mode = CIPHER_MODE_NONE, 4261 }, 4262 .auth_info = { 4263 .alg = HASH_ALG_SHA3_512, 4264 .mode = HASH_MODE_HASH, 4265 }, 4266 }, 4267 { 4268 .type = CRYPTO_ALG_TYPE_AHASH, 4269 .alg.hash = { 4270 .halg.digestsize = SHA3_512_DIGEST_SIZE, 4271 .halg.base = { 4272 .cra_name = "hmac(sha3-512)", 4273 .cra_driver_name = "hmac-sha3-512-iproc", 4274 .cra_blocksize = SHA3_512_BLOCK_SIZE, 4275 } 4276 }, 4277 .cipher_info = { 4278 .alg = CIPHER_ALG_NONE, 4279 .mode = CIPHER_MODE_NONE, 4280 }, 4281 .auth_info = { 4282 .alg = HASH_ALG_SHA3_512, 4283 .mode = HASH_MODE_HMAC, 4284 }, 4285 }, 4286 { 4287 .type = CRYPTO_ALG_TYPE_AHASH, 4288 .alg.hash = { 4289 .halg.digestsize = AES_BLOCK_SIZE, 4290 .halg.base = { 4291 .cra_name = "xcbc(aes)", 4292 .cra_driver_name = "xcbc-aes-iproc", 4293 .cra_blocksize = AES_BLOCK_SIZE, 4294 } 4295 }, 4296 .cipher_info = { 4297 .alg = CIPHER_ALG_NONE, 4298 .mode = CIPHER_MODE_NONE, 4299 }, 4300 .auth_info = { 4301 .alg = HASH_ALG_AES, 4302 .mode = HASH_MODE_XCBC, 4303 }, 4304 }, 4305 { 4306 .type = CRYPTO_ALG_TYPE_AHASH, 4307 .alg.hash = { 4308 .halg.digestsize = AES_BLOCK_SIZE, 4309 .halg.base = { 4310 .cra_name = "cmac(aes)", 4311 .cra_driver_name = "cmac-aes-iproc", 4312 .cra_blocksize = AES_BLOCK_SIZE, 4313 } 4314 }, 4315 .cipher_info = { 4316 .alg = CIPHER_ALG_NONE, 4317 .mode = CIPHER_MODE_NONE, 4318 }, 4319 .auth_info = { 4320 .alg = HASH_ALG_AES, 4321 .mode = HASH_MODE_CMAC, 4322 }, 4323 }, 4324 }; 4325 4326 static int generic_cra_init(struct crypto_tfm *tfm, 4327 struct iproc_alg_s *cipher_alg) 4328 { 4329 struct spu_hw *spu = &iproc_priv.spu; 4330 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 4331 unsigned int blocksize = crypto_tfm_alg_blocksize(tfm); 4332 4333 flow_log("%s()\n", __func__); 4334 4335 ctx->alg = cipher_alg; 4336 ctx->cipher = cipher_alg->cipher_info; 4337 ctx->auth = cipher_alg->auth_info; 4338 ctx->auth_first = cipher_alg->auth_first; 4339 ctx->max_payload = spu->spu_ctx_max_payload(ctx->cipher.alg, 4340 ctx->cipher.mode, 4341 blocksize); 4342 ctx->fallback_cipher = NULL; 4343 4344 ctx->enckeylen = 0; 4345 ctx->authkeylen = 0; 4346 4347 atomic_inc(&iproc_priv.stream_count); 4348 atomic_inc(&iproc_priv.session_count); 4349 4350 return 0; 4351 } 4352 4353 static int ablkcipher_cra_init(struct crypto_tfm *tfm) 4354 { 4355 struct crypto_alg *alg = tfm->__crt_alg; 4356 struct iproc_alg_s *cipher_alg; 4357
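/*
 * The request context for ablkcipher, ahash and AEAD transforms is a
 * struct iproc_reqctx_s; the reqsize set below tells the crypto API how
 * much per-request memory to allocate along with each request.
 */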
4358 flow_log("%s()\n", __func__); 4359 4360 tfm->crt_ablkcipher.reqsize = sizeof(struct iproc_reqctx_s); 4361 4362 cipher_alg = container_of(alg, struct iproc_alg_s, alg.crypto); 4363 return generic_cra_init(tfm, cipher_alg); 4364 } 4365 4366 static int ahash_cra_init(struct crypto_tfm *tfm) 4367 { 4368 int err; 4369 struct crypto_alg *alg = tfm->__crt_alg; 4370 struct iproc_alg_s *cipher_alg; 4371 4372 cipher_alg = container_of(__crypto_ahash_alg(alg), struct iproc_alg_s, 4373 alg.hash); 4374 4375 err = generic_cra_init(tfm, cipher_alg); 4376 flow_log("%s()\n", __func__); 4377 4378 /* 4379 * export state size has to be < 512 bytes. So don't include msg bufs 4380 * in state size. 4381 */ 4382 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 4383 sizeof(struct iproc_reqctx_s)); 4384 4385 return err; 4386 } 4387 4388 static int aead_cra_init(struct crypto_aead *aead) 4389 { 4390 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 4391 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 4392 struct crypto_alg *alg = tfm->__crt_alg; 4393 struct aead_alg *aalg = container_of(alg, struct aead_alg, base); 4394 struct iproc_alg_s *cipher_alg = container_of(aalg, struct iproc_alg_s, 4395 alg.aead); 4396 4397 int err = generic_cra_init(tfm, cipher_alg); 4398 4399 flow_log("%s()\n", __func__); 4400 4401 crypto_aead_set_reqsize(aead, sizeof(struct iproc_reqctx_s)); 4402 ctx->is_esp = false; 4403 ctx->salt_len = 0; 4404 ctx->salt_offset = 0; 4405 4406 /* random first IV */ 4407 get_random_bytes(ctx->iv, MAX_IV_SIZE); 4408 flow_dump(" iv: ", ctx->iv, MAX_IV_SIZE); 4409 4410 if (!err) { 4411 if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) { 4412 flow_log("%s() creating fallback cipher\n", __func__); 4413 4414 ctx->fallback_cipher = 4415 crypto_alloc_aead(alg->cra_name, 0, 4416 CRYPTO_ALG_ASYNC | 4417 CRYPTO_ALG_NEED_FALLBACK); 4418 if (IS_ERR(ctx->fallback_cipher)) { 4419 pr_err("%s() Error: failed to allocate fallback for %s\n", 4420 __func__, alg->cra_name); 4421 return PTR_ERR(ctx->fallback_cipher); 4422 } 4423 } 4424 } 4425 4426 return err; 4427 } 4428 4429 static void generic_cra_exit(struct crypto_tfm *tfm) 4430 { 4431 atomic_dec(&iproc_priv.session_count); 4432 } 4433 4434 static void aead_cra_exit(struct crypto_aead *aead) 4435 { 4436 struct crypto_tfm *tfm = crypto_aead_tfm(aead); 4437 struct iproc_ctx_s *ctx = crypto_tfm_ctx(tfm); 4438 4439 generic_cra_exit(tfm); 4440 4441 if (ctx->fallback_cipher) { 4442 crypto_free_aead(ctx->fallback_cipher); 4443 ctx->fallback_cipher = NULL; 4444 } 4445 } 4446 4447 /** 4448 * spu_functions_register() - Specify hardware-specific SPU functions based on 4449 * SPU type read from device tree. 
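 * Fills in the function pointer table in iproc_priv.spu so the rest of the
 * driver can invoke hardware-specific operations without checking the SPU
 * generation at each call site.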
4450 * @dev: device structure 4451 * @spu_type: SPU hardware generation 4452 * @spu_subtype: SPU hardware version 4453 */ 4454 static void spu_functions_register(struct device *dev, 4455 enum spu_spu_type spu_type, 4456 enum spu_spu_subtype spu_subtype) 4457 { 4458 struct spu_hw *spu = &iproc_priv.spu; 4459 4460 if (spu_type == SPU_TYPE_SPUM) { 4461 dev_dbg(dev, "Registering SPUM functions"); 4462 spu->spu_dump_msg_hdr = spum_dump_msg_hdr; 4463 spu->spu_payload_length = spum_payload_length; 4464 spu->spu_response_hdr_len = spum_response_hdr_len; 4465 spu->spu_hash_pad_len = spum_hash_pad_len; 4466 spu->spu_gcm_ccm_pad_len = spum_gcm_ccm_pad_len; 4467 spu->spu_assoc_resp_len = spum_assoc_resp_len; 4468 spu->spu_aead_ivlen = spum_aead_ivlen; 4469 spu->spu_hash_type = spum_hash_type; 4470 spu->spu_digest_size = spum_digest_size; 4471 spu->spu_create_request = spum_create_request; 4472 spu->spu_cipher_req_init = spum_cipher_req_init; 4473 spu->spu_cipher_req_finish = spum_cipher_req_finish; 4474 spu->spu_request_pad = spum_request_pad; 4475 spu->spu_tx_status_len = spum_tx_status_len; 4476 spu->spu_rx_status_len = spum_rx_status_len; 4477 spu->spu_status_process = spum_status_process; 4478 spu->spu_xts_tweak_in_payload = spum_xts_tweak_in_payload; 4479 spu->spu_ccm_update_iv = spum_ccm_update_iv; 4480 spu->spu_wordalign_padlen = spum_wordalign_padlen; 4481 if (spu_subtype == SPU_SUBTYPE_SPUM_NS2) 4482 spu->spu_ctx_max_payload = spum_ns2_ctx_max_payload; 4483 else 4484 spu->spu_ctx_max_payload = spum_nsp_ctx_max_payload; 4485 } else { 4486 dev_dbg(dev, "Registering SPU2 functions"); 4487 spu->spu_dump_msg_hdr = spu2_dump_msg_hdr; 4488 spu->spu_ctx_max_payload = spu2_ctx_max_payload; 4489 spu->spu_payload_length = spu2_payload_length; 4490 spu->spu_response_hdr_len = spu2_response_hdr_len; 4491 spu->spu_hash_pad_len = spu2_hash_pad_len; 4492 spu->spu_gcm_ccm_pad_len = spu2_gcm_ccm_pad_len; 4493 spu->spu_assoc_resp_len = spu2_assoc_resp_len; 4494 spu->spu_aead_ivlen = spu2_aead_ivlen; 4495 spu->spu_hash_type = spu2_hash_type; 4496 spu->spu_digest_size = spu2_digest_size; 4497 spu->spu_create_request = spu2_create_request; 4498 spu->spu_cipher_req_init = spu2_cipher_req_init; 4499 spu->spu_cipher_req_finish = spu2_cipher_req_finish; 4500 spu->spu_request_pad = spu2_request_pad; 4501 spu->spu_tx_status_len = spu2_tx_status_len; 4502 spu->spu_rx_status_len = spu2_rx_status_len; 4503 spu->spu_status_process = spu2_status_process; 4504 spu->spu_xts_tweak_in_payload = spu2_xts_tweak_in_payload; 4505 spu->spu_ccm_update_iv = spu2_ccm_update_iv; 4506 spu->spu_wordalign_padlen = spu2_wordalign_padlen; 4507 } 4508 } 4509 4510 /** 4511 * spu_mb_init() - Initialize mailbox client. Request ownership of a mailbox 4512 * channel for the SPU being probed. 
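 * One channel is requested for each of the num_chan mailbox channels counted
 * from the device tree; responses are delivered through the client's
 * rx_callback (spu_rx_callback).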
4513 * @dev: SPU driver device structure 4514 * 4515 * Return: 0 if successful 4516 * < 0 otherwise 4517 */ 4518 static int spu_mb_init(struct device *dev) 4519 { 4520 struct mbox_client *mcl = &iproc_priv.mcl; 4521 int err, i; 4522 4523 iproc_priv.mbox = devm_kcalloc(dev, iproc_priv.spu.num_chan, 4524 sizeof(struct mbox_chan *), GFP_KERNEL); 4525 if (!iproc_priv.mbox) 4526 return -ENOMEM; 4527 4528 mcl->dev = dev; 4529 mcl->tx_block = false; 4530 mcl->tx_tout = 0; 4531 mcl->knows_txdone = true; 4532 mcl->rx_callback = spu_rx_callback; 4533 mcl->tx_done = NULL; 4534 4535 for (i = 0; i < iproc_priv.spu.num_chan; i++) { 4536 iproc_priv.mbox[i] = mbox_request_channel(mcl, i); 4537 if (IS_ERR(iproc_priv.mbox[i])) { 4538 err = (int)PTR_ERR(iproc_priv.mbox[i]); 4539 dev_err(dev, 4540 "Mbox channel %d request failed with err %d", 4541 i, err); 4542 iproc_priv.mbox[i] = NULL; 4543 goto free_channels; 4544 } 4545 } 4546 4547 return 0; 4548 free_channels: 4549 for (i = 0; i < iproc_priv.spu.num_chan; i++) { 4550 if (iproc_priv.mbox[i]) 4551 mbox_free_channel(iproc_priv.mbox[i]); 4552 } 4553 4554 return err; 4555 } 4556 4557 static void spu_mb_release(struct platform_device *pdev) 4558 { 4559 int i; 4560 4561 for (i = 0; i < iproc_priv.spu.num_chan; i++) 4562 mbox_free_channel(iproc_priv.mbox[i]); 4563 } 4564 4565 static void spu_counters_init(void) 4566 { 4567 int i; 4568 int j; 4569 4570 atomic_set(&iproc_priv.session_count, 0); 4571 atomic_set(&iproc_priv.stream_count, 0); 4572 atomic_set(&iproc_priv.next_chan, (int)iproc_priv.spu.num_chan); 4573 atomic64_set(&iproc_priv.bytes_in, 0); 4574 atomic64_set(&iproc_priv.bytes_out, 0); 4575 for (i = 0; i < SPU_OP_NUM; i++) { 4576 atomic_set(&iproc_priv.op_counts[i], 0); 4577 atomic_set(&iproc_priv.setkey_cnt[i], 0); 4578 } 4579 for (i = 0; i < CIPHER_ALG_LAST; i++) 4580 for (j = 0; j < CIPHER_MODE_LAST; j++) 4581 atomic_set(&iproc_priv.cipher_cnt[i][j], 0); 4582 4583 for (i = 0; i < HASH_ALG_LAST; i++) { 4584 atomic_set(&iproc_priv.hash_cnt[i], 0); 4585 atomic_set(&iproc_priv.hmac_cnt[i], 0); 4586 } 4587 for (i = 0; i < AEAD_TYPE_LAST; i++) 4588 atomic_set(&iproc_priv.aead_cnt[i], 0); 4589 4590 atomic_set(&iproc_priv.mb_no_spc, 0); 4591 atomic_set(&iproc_priv.mb_send_fail, 0); 4592 atomic_set(&iproc_priv.bad_icv, 0); 4593 } 4594 4595 static int spu_register_ablkcipher(struct iproc_alg_s *driver_alg) 4596 { 4597 struct spu_hw *spu = &iproc_priv.spu; 4598 struct crypto_alg *crypto = &driver_alg->alg.crypto; 4599 int err; 4600 4601 /* SPU2 does not support RC4 */ 4602 if ((driver_alg->cipher_info.alg == CIPHER_ALG_RC4) && 4603 (spu->spu_type == SPU_TYPE_SPU2)) 4604 return 0; 4605 4606 crypto->cra_module = THIS_MODULE; 4607 crypto->cra_priority = cipher_pri; 4608 crypto->cra_alignmask = 0; 4609 crypto->cra_ctxsize = sizeof(struct iproc_ctx_s); 4610 INIT_LIST_HEAD(&crypto->cra_list); 4611 4612 crypto->cra_init = ablkcipher_cra_init; 4613 crypto->cra_exit = generic_cra_exit; 4614 crypto->cra_type = &crypto_ablkcipher_type; 4615 crypto->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC | 4616 CRYPTO_ALG_KERN_DRIVER_ONLY; 4617 4618 crypto->cra_ablkcipher.setkey = ablkcipher_setkey; 4619 crypto->cra_ablkcipher.encrypt = ablkcipher_encrypt; 4620 crypto->cra_ablkcipher.decrypt = ablkcipher_decrypt; 4621 4622 err = crypto_register_alg(crypto); 4623 /* Mark alg as having been registered, if successful */ 4624 if (err == 0) 4625 driver_alg->registered = true; 4626 pr_debug(" registered ablkcipher %s\n", crypto->cra_driver_name); 4627 return err; 4628 } 4629 
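/**
 * spu_register_ahash() - Register one asynchronous hash algorithm with the
 * kernel crypto API, selecting plain-hash or HMAC operations based on the
 * algorithm's auth_info. AES hashes other than XCBC are skipped on SPU-M,
 * and SHA3 variants are registered only on SPU2 version 2 hardware.
 * @driver_alg: the driver's algorithm descriptor
 *
 * Return: 0 if registered or intentionally skipped for this hardware,
 *         error code from crypto_register_ahash() otherwise
 */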
4630 static int spu_register_ahash(struct iproc_alg_s *driver_alg) 4631 { 4632 struct spu_hw *spu = &iproc_priv.spu; 4633 struct ahash_alg *hash = &driver_alg->alg.hash; 4634 int err; 4635 4636 /* AES-XCBC is the only AES hash type currently supported on SPU-M */ 4637 if ((driver_alg->auth_info.alg == HASH_ALG_AES) && 4638 (driver_alg->auth_info.mode != HASH_MODE_XCBC) && 4639 (spu->spu_type == SPU_TYPE_SPUM)) 4640 return 0; 4641 4642 /* SHA3 algorithm variants are supported only by SPU2 version 2; skip them on other hardware. */ 4643 if ((driver_alg->auth_info.alg >= HASH_ALG_SHA3_224) && 4644 (spu->spu_subtype != SPU_SUBTYPE_SPU2_V2)) 4645 return 0; 4646 4647 hash->halg.base.cra_module = THIS_MODULE; 4648 hash->halg.base.cra_priority = hash_pri; 4649 hash->halg.base.cra_alignmask = 0; 4650 hash->halg.base.cra_ctxsize = sizeof(struct iproc_ctx_s); 4651 hash->halg.base.cra_init = ahash_cra_init; 4652 hash->halg.base.cra_exit = generic_cra_exit; 4653 hash->halg.base.cra_type = &crypto_ahash_type; 4654 hash->halg.base.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC; 4655 hash->halg.statesize = sizeof(struct spu_hash_export_s); 4656 4657 if (driver_alg->auth_info.mode != HASH_MODE_HMAC) { 4658 hash->setkey = ahash_setkey; 4659 hash->init = ahash_init; 4660 hash->update = ahash_update; 4661 hash->final = ahash_final; 4662 hash->finup = ahash_finup; 4663 hash->digest = ahash_digest; 4664 } else { 4665 hash->setkey = ahash_hmac_setkey; 4666 hash->init = ahash_hmac_init; 4667 hash->update = ahash_hmac_update; 4668 hash->final = ahash_hmac_final; 4669 hash->finup = ahash_hmac_finup; 4670 hash->digest = ahash_hmac_digest; 4671 } 4672 hash->export = ahash_export; 4673 hash->import = ahash_import; 4674 4675 err = crypto_register_ahash(hash); 4676 /* Mark alg as having been registered, if successful */ 4677 if (err == 0) 4678 driver_alg->registered = true; 4679 pr_debug(" registered ahash %s\n", 4680 hash->halg.base.cra_driver_name); 4681 return err; 4682 } 4683 4684 static int spu_register_aead(struct iproc_alg_s *driver_alg) 4685 { 4686 struct aead_alg *aead = &driver_alg->alg.aead; 4687 int err; 4688 4689 aead->base.cra_module = THIS_MODULE; 4690 aead->base.cra_priority = aead_pri; 4691 aead->base.cra_alignmask = 0; 4692 aead->base.cra_ctxsize = sizeof(struct iproc_ctx_s); 4693 INIT_LIST_HEAD(&aead->base.cra_list); 4694 4695 aead->base.cra_flags |= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC; 4696 /* setkey set in alg initialization */ 4697 aead->setauthsize = aead_setauthsize; 4698 aead->encrypt = aead_encrypt; 4699 aead->decrypt = aead_decrypt; 4700 aead->init = aead_cra_init; 4701 aead->exit = aead_cra_exit; 4702 4703 err = crypto_register_aead(aead); 4704 /* Mark alg as having been registered, if successful */ 4705 if (err == 0) 4706 driver_alg->registered = true; 4707 pr_debug(" registered aead %s\n", aead->base.cra_driver_name); 4708 return err; 4709 } 4710 4711 /* register crypto algorithms the device supports */ 4712 static int spu_algs_register(struct device *dev) 4713 { 4714 int i, j; 4715 int err; 4716 4717 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 4718 switch (driver_algs[i].type) { 4719 case CRYPTO_ALG_TYPE_ABLKCIPHER: 4720 err = spu_register_ablkcipher(&driver_algs[i]); 4721 break; 4722 case CRYPTO_ALG_TYPE_AHASH: 4723 err = spu_register_ahash(&driver_algs[i]); 4724 break; 4725 case CRYPTO_ALG_TYPE_AEAD: 4726 err = spu_register_aead(&driver_algs[i]); 4727 break; 4728 default: 4729 dev_err(dev, 4730 "iproc-crypto: unknown alg type: %d", 4731 driver_algs[i].type); 4732 err = -EINVAL; 4733 } 4734 4735 if
(err) { 4736 dev_err(dev, "alg registration failed with error %d\n", 4737 err); 4738 goto err_algs; 4739 } 4740 } 4741 4742 return 0; 4743 4744 err_algs: 4745 for (j = 0; j < i; j++) { 4746 /* Skip any algorithm not registered */ 4747 if (!driver_algs[j].registered) 4748 continue; 4749 switch (driver_algs[j].type) { 4750 case CRYPTO_ALG_TYPE_ABLKCIPHER: 4751 crypto_unregister_alg(&driver_algs[j].alg.crypto); 4752 driver_algs[j].registered = false; 4753 break; 4754 case CRYPTO_ALG_TYPE_AHASH: 4755 crypto_unregister_ahash(&driver_algs[j].alg.hash); 4756 driver_algs[j].registered = false; 4757 break; 4758 case CRYPTO_ALG_TYPE_AEAD: 4759 crypto_unregister_aead(&driver_algs[j].alg.aead); 4760 driver_algs[j].registered = false; 4761 break; 4762 } 4763 } 4764 return err; 4765 } 4766 4767 /* ==================== Kernel Platform API ==================== */ 4768 4769 static struct spu_type_subtype spum_ns2_types = { 4770 SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NS2 4771 }; 4772 4773 static struct spu_type_subtype spum_nsp_types = { 4774 SPU_TYPE_SPUM, SPU_SUBTYPE_SPUM_NSP 4775 }; 4776 4777 static struct spu_type_subtype spu2_types = { 4778 SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V1 4779 }; 4780 4781 static struct spu_type_subtype spu2_v2_types = { 4782 SPU_TYPE_SPU2, SPU_SUBTYPE_SPU2_V2 4783 }; 4784 4785 static const struct of_device_id bcm_spu_dt_ids[] = { 4786 { 4787 .compatible = "brcm,spum-crypto", 4788 .data = &spum_ns2_types, 4789 }, 4790 { 4791 .compatible = "brcm,spum-nsp-crypto", 4792 .data = &spum_nsp_types, 4793 }, 4794 { 4795 .compatible = "brcm,spu2-crypto", 4796 .data = &spu2_types, 4797 }, 4798 { 4799 .compatible = "brcm,spu2-v2-crypto", 4800 .data = &spu2_v2_types, 4801 }, 4802 { /* sentinel */ } 4803 }; 4804 4805 MODULE_DEVICE_TABLE(of, bcm_spu_dt_ids); 4806 4807 static int spu_dt_read(struct platform_device *pdev) 4808 { 4809 struct device *dev = &pdev->dev; 4810 struct spu_hw *spu = &iproc_priv.spu; 4811 struct resource *spu_ctrl_regs; 4812 const struct spu_type_subtype *matched_spu_type; 4813 struct device_node *dn = pdev->dev.of_node; 4814 int err, i; 4815 4816 /* Count number of mailbox channels */ 4817 spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells"); 4818 4819 matched_spu_type = of_device_get_match_data(dev); 4820 if (!matched_spu_type) { 4821 dev_err(&pdev->dev, "Failed to match device\n"); 4822 return -ENODEV; 4823 } 4824 4825 spu->spu_type = matched_spu_type->type; 4826 spu->spu_subtype = matched_spu_type->subtype; 4827 4828 i = 0; 4829 for (i = 0; (i < MAX_SPUS) && ((spu_ctrl_regs = 4830 platform_get_resource(pdev, IORESOURCE_MEM, i)) != NULL); i++) { 4831 4832 spu->reg_vbase[i] = devm_ioremap_resource(dev, spu_ctrl_regs); 4833 if (IS_ERR(spu->reg_vbase[i])) { 4834 err = PTR_ERR(spu->reg_vbase[i]); 4835 dev_err(&pdev->dev, "Failed to map registers: %d\n", 4836 err); 4837 spu->reg_vbase[i] = NULL; 4838 return err; 4839 } 4840 } 4841 spu->num_spu = i; 4842 dev_dbg(dev, "Device has %d SPUs", spu->num_spu); 4843 4844 return 0; 4845 } 4846 4847 int bcm_spu_probe(struct platform_device *pdev) 4848 { 4849 struct device *dev = &pdev->dev; 4850 struct spu_hw *spu = &iproc_priv.spu; 4851 int err = 0; 4852 4853 iproc_priv.pdev = pdev; 4854 platform_set_drvdata(iproc_priv.pdev, 4855 &iproc_priv); 4856 4857 err = spu_dt_read(pdev); 4858 if (err < 0) 4859 goto failure; 4860 4861 err = spu_mb_init(&pdev->dev); 4862 if (err < 0) 4863 goto failure; 4864 4865 if (spu->spu_type == SPU_TYPE_SPUM) 4866 iproc_priv.bcm_hdr_len = 8; 4867 else if (spu->spu_type == SPU_TYPE_SPU2) 4868 
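/* SPU2 request messages do not carry a BCM header */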
iproc_priv.bcm_hdr_len = 0; 4869 4870 spu_functions_register(&pdev->dev, spu->spu_type, spu->spu_subtype); 4871 4872 spu_counters_init(); 4873 4874 spu_setup_debugfs(); 4875 4876 err = spu_algs_register(dev); 4877 if (err < 0) 4878 goto fail_reg; 4879 4880 return 0; 4881 4882 fail_reg: 4883 spu_free_debugfs(); 4884 failure: 4885 spu_mb_release(pdev); 4886 dev_err(dev, "%s failed with error %d.\n", __func__, err); 4887 4888 return err; 4889 } 4890 4891 int bcm_spu_remove(struct platform_device *pdev) 4892 { 4893 int i; 4894 struct device *dev = &pdev->dev; 4895 char *cdn; 4896 4897 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 4898 /* 4899 * Not all algorithms were registered, depending on whether 4900 * hardware is SPU or SPU2. So here we make sure to skip 4901 * those algorithms that were not previously registered. 4902 */ 4903 if (!driver_algs[i].registered) 4904 continue; 4905 4906 switch (driver_algs[i].type) { 4907 case CRYPTO_ALG_TYPE_ABLKCIPHER: 4908 crypto_unregister_alg(&driver_algs[i].alg.crypto); 4909 dev_dbg(dev, " unregistered cipher %s\n", 4910 driver_algs[i].alg.crypto.cra_driver_name); 4911 driver_algs[i].registered = false; 4912 break; 4913 case CRYPTO_ALG_TYPE_AHASH: 4914 crypto_unregister_ahash(&driver_algs[i].alg.hash); 4915 cdn = driver_algs[i].alg.hash.halg.base.cra_driver_name; 4916 dev_dbg(dev, " unregistered hash %s\n", cdn); 4917 driver_algs[i].registered = false; 4918 break; 4919 case CRYPTO_ALG_TYPE_AEAD: 4920 crypto_unregister_aead(&driver_algs[i].alg.aead); 4921 dev_dbg(dev, " unregistered aead %s\n", 4922 driver_algs[i].alg.aead.base.cra_driver_name); 4923 driver_algs[i].registered = false; 4924 break; 4925 } 4926 } 4927 spu_free_debugfs(); 4928 spu_mb_release(pdev); 4929 return 0; 4930 } 4931 4932 /* ===== Kernel Module API ===== */ 4933 4934 static struct platform_driver bcm_spu_pdriver = { 4935 .driver = { 4936 .name = "brcm-spu-crypto", 4937 .of_match_table = of_match_ptr(bcm_spu_dt_ids), 4938 }, 4939 .probe = bcm_spu_probe, 4940 .remove = bcm_spu_remove, 4941 }; 4942 module_platform_driver(bcm_spu_pdriver); 4943 4944 MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>"); 4945 MODULE_DESCRIPTION("Broadcom symmetric crypto offload driver"); 4946 MODULE_LICENSE("GPL v2"); 4947