// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}
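
/*
 * On SEC1 a talitos_ptr is a 32-bit address plus a separate 16-bit len1
 * field; on SEC2/3 the extra eptr byte carries the upper four address
 * bits used for 36-bit addressing (enabled via TALITOS_CCCR_LO_EAE in
 * reset_channel() below), and j_extent holds link-table/extent flags.
 */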

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
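
/*
 * Illustrative caller pattern (sketch only; my_done() is a hypothetical
 * completion callback, not part of this driver):
 *
 *	err = talitos_submit(dev, ch, &edesc->desc, my_done, req);
 *	if (err != -EINPROGRESS)
 *		... not queued: unmap DMA and free the descriptor here ...
 *
 * -EAGAIN in particular means the channel fifo was full and the request
 * may be resubmitted later.
 */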

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if (!is_sec1)
			hdr = request->desc->hdr;
		else if (request->desc->next_desc)
			hdr = (request->desc + 1)->hdr1;
		else
			hdr = request->desc->hdr1;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
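
/*
 * Note the inverted interrupt-mask polarity: the SEC1 tasklets above
 * unmask done interrupts by clearing IMR bits (clrbits32), while the
 * SEC2+ variants below unmask them by setting IMR bits (setbits32).
 */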

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;

	return priv->chan[ch].fifo[iter].desc->hdr;
}
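
/*
 * The scan above walks the fifo from the tail looking for the entry whose
 * descriptor (or whose chained second descriptor, reached via next_desc on
 * the SEC1 ahash path) matches the address latched in CDPR, so that
 * report_eu_error() below can decode which execution unit faulted.
 */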

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
"static assignment error\n" 610 : "invalid exec unit error\n"); 611 if (v_lo & TALITOS_CCPSR_LO_EU) 612 report_eu_error(dev, ch, current_desc_hdr(dev, ch)); 613 if (!is_sec1) { 614 if (v_lo & TALITOS_CCPSR_LO_GB) 615 dev_err(dev, "gather boundary error\n"); 616 if (v_lo & TALITOS_CCPSR_LO_GRL) 617 dev_err(dev, "gather return/length error\n"); 618 if (v_lo & TALITOS_CCPSR_LO_SB) 619 dev_err(dev, "scatter boundary error\n"); 620 if (v_lo & TALITOS_CCPSR_LO_SRL) 621 dev_err(dev, "scatter return/length error\n"); 622 } 623 624 flush_channel(dev, ch, error, reset_ch); 625 626 if (reset_ch) { 627 reset_channel(dev, ch); 628 } else { 629 setbits32(priv->chan[ch].reg + TALITOS_CCCR, 630 TALITOS2_CCCR_CONT); 631 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); 632 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & 633 TALITOS2_CCCR_CONT) && --timeout) 634 cpu_relax(); 635 if (timeout == 0) { 636 dev_err(dev, "failed to restart channel %d\n", 637 ch); 638 reset_dev = 1; 639 } 640 } 641 } 642 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || 643 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { 644 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) 645 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", 646 isr, isr_lo); 647 else 648 dev_err(dev, "done overflow, internal time out, or " 649 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); 650 651 /* purge request queues */ 652 for (ch = 0; ch < priv->num_channels; ch++) 653 flush_channel(dev, ch, -EIO, 1); 654 655 /* reset and reinitialize the device */ 656 init_device(dev); 657 } 658 } 659 660 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ 661 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ 662 { \ 663 struct device *dev = data; \ 664 struct talitos_private *priv = dev_get_drvdata(dev); \ 665 u32 isr, isr_lo; \ 666 unsigned long flags; \ 667 \ 668 spin_lock_irqsave(&priv->reg_lock, flags); \ 669 isr = in_be32(priv->reg + TALITOS_ISR); \ 670 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ 671 /* Acknowledge interrupt */ \ 672 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ 673 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ 674 \ 675 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \ 676 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 677 talitos_error(dev, isr & ch_err_mask, isr_lo); \ 678 } \ 679 else { \ 680 if (likely(isr & ch_done_mask)) { \ 681 /* mask further done interrupts. */ \ 682 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 683 /* done_task will unmask done interrupts at exit */ \ 684 tasklet_schedule(&priv->done_task[tlet]); \ 685 } \ 686 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 687 } \ 688 \ 689 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? 

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
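
/*
 * Both halves of the rng fifo word are read back to back to satisfy the
 * 64-bit access requirement noted above; the first (high-word) read is
 * intentionally overwritten, so only the low 32 bits are returned.
 */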

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev);
	priv->rng.init		= talitos_rng_init;
	priv->rng.data_present	= talitos_rng_data_present;
	priv->rng.data_read	= talitos_rng_data_read;
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
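
/*
 * ctx->key now holds the authentication key immediately followed by the
 * encryption key, under a single DMA mapping; ipsec_esp() later points
 * the MDEU at the first authkeylen bytes and the cipher unit at
 * dma_key + authkeylen.
 */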

static int aead_des3_setkey(struct crypto_aead *authenc,
			    const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(authenc);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(authenc, flags);
		goto out;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
				       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
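
/*
 * Two ICV-check strategies: the swauth callback above compares the
 * received ICV with the one the hardware generated (crypto_memneq),
 * while the hwauth callback relies on the ICCR status the engine wrote
 * back into hdr_lo (TALITOS_FTR_HW_AUTH_CHECK parts only).
 */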

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}

static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}
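
/*
 * talitos_sg_map_ext() thus picks one of three encodings: a null pointer
 * for no data, a direct pointer when the mapping collapses to a single
 * segment, or a link table flagged DESC_PTR_LNKTBL_JUMP whose last entry
 * is tagged DESC_PTR_LNKTBL_RETURN. SEC1 never uses link tables here;
 * multi-segment data goes through the edesc bounce buffer instead.
 */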

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
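
/*
 * AEAD descriptor layout as built above: ptr[0] HMAC key, ptr[1]
 * associated data, ptr[2]/ptr[3] cipher iv and cipher key (their order
 * depends on the descriptor type), ptr[4] cipher in (with the ICV as
 * extent on inbound IPSEC_ESP), ptr[5] cipher out, and ptr[6] the ICV
 * or, for IPSEC_ESP, the iv out.
 */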

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len) {
		void *addr = &edesc->link_tbl[0];

		if (is_sec1 && !dst)
			addr += sizeof(struct talitos_desc);
		edesc->dma_link_tbl = dma_map_single(dev, addr,
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);
	}
	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static int ablkcipher_des_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	u32 tmp[DES_EXPKEY_WORDS];

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	return ablkcipher_setkey(cipher, key, keylen);
}

static int ablkcipher_des3_setkey(struct crypto_ablkcipher *cipher,
				  const u8 *key, unsigned int keylen)
{
	u32 flags;
	int err;

	flags = crypto_ablkcipher_get_flags(cipher);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	return ablkcipher_setkey(cipher, key, keylen);
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
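
/*
 * Encrypt and decrypt share common_nonsnoop() and differ only in the
 * header template bit: DESC_HDR_MODE0_ENCRYPT for the outbound path,
 * DESC_HDR_DIR_INBOUND for the inbound one.
 */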

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = desc + 1;

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
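
/*
 * padded_hash above is the MD-style padding of an empty message (0x80
 * followed by zeros, including a zero 64-bit length); since the block is
 * already padded in software, DESC_HDR_MODE0_MDEU_PAD is cleared so the
 * hardware does not pad it a second time.
 */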
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourselves\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				unsigned int offset,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
				   edesc->buf + sizeof(struct talitos_desc),
				   length, req_ctx->nbuf);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, offset, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		struct talitos_desc *desc2 = desc + 1;
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, offset, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
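/*
 * On SEC1, previously buffered bytes and fresh scatterlist data are not
 * described by a single descriptor, so common_nonsnoop_hash() above chains
 * two: the first (desc) consumes req_ctx->buf with DONE_NOTIFY suppressed,
 * and the second (desc2) continues over psrc, linked via desc->next_desc.
 */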
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1)
		nbytes -= req_ctx->nbuf;

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	unsigned int size;
	dma_addr_t dma;

	/* Initialize the context */
	req_ctx->buf_idx = 0;
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
		: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */

	return 0;
}

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int offset = 0;
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

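	/*
	 * Worked example (illustrative): with a 64-byte block size,
	 * 10 bytes already buffered and a 100-byte update, nbytes_to_hash
	 * is 110 and to_hash_later is 110 % 64 == 46, so 64 bytes are
	 * hashed now and 46 are carried over to the next operation.
	 */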
	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = areq->src;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/*
	 * When the tfm context has a keylen, it's an HMAC.
	 * A first or last (i.e. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
				    ahash_done);
}
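/*
 * The entry points below differ only in what they feed
 * ahash_process_req(): update() hashes areq->nbytes with last == 0,
 * final() flushes the buffered remainder with last == 1 and no new bytes,
 * finup() does both at once, and digest() is init() followed by finup().
 */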
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}
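/*
 * ahash_import() below restores everything ahash_export() saved. In both
 * helpers (as in ahash_init()), the dma_map_single()/dma_unmap_single()
 * pair around hw_context performs no transfer of its own; it is used only
 * to synchronize the CPU and device views of the context buffer.
 */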
static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
		? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
		: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct device *dev = ctx->dev;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	ctx->keylen = keysize;
	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);

	return 0;
}


struct talitos_alg_template {
	u32 type;
	u32 priority;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
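/*
 * Each template below pairs one algorithm description (crypto_alg,
 * ahash_alg or aead_alg, depending on .type) with the SEC descriptor
 * header that selects the execution unit(s) and mode(s) implementing it.
 * At probe time, hw_supports() checks the header's descriptor type and
 * EU fields against the "fsl,descriptor-types-mask" and
 * "fsl,exec-units-mask" device tree properties before registration.
 */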
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.setkey = aead_des3_setkey,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
				.setkey = ablkcipher_des_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
				.setkey = ablkcipher_des_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.setkey = ablkcipher_des3_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.setkey = ablkcipher_des3_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};
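/*
 * Note: most AEAD combinations above appear twice: as a single-pass
 * DESC_HDR_TYPE_IPSEC_ESP descriptor, and as a two-unit
 * DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU variant carrying the explicit
 * TALITOS_CRA_PRIORITY_AEAD_HSNA priority (presumably lower than the
 * default TALITOS_CRA_PRIORITY), so the single-pass form is preferred
 * where the hardware supports both.
 */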
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

static void talitos_cra_exit(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
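/*
 * Example (illustrative): the "cbc-aes-talitos" template is
 * DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | DESC_HDR_SEL0_AESU |
 * DESC_HDR_MODE0_AESU_CBC, so hw_supports() passes only when the common
 * non-snoop descriptor type bit is set in priv->desc_types and the AESU
 * bit is set in priv->exec_units; with no secondary EU selected, the
 * last clause is skipped.
 */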
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
					     ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?:
					      aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
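/*
 * SEC2+ devices may expose two interrupt lines, with channels 0/2 on the
 * first and channels 1/3 on the second. talitos_probe_irq() below
 * requests per-pair handlers when both lines map, and falls back to a
 * single shared handler (the *_4ch variants) when the device tree
 * provides only one interrupt.
 */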
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to ioremap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						  priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");