1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * talitos - Freescale Integrated Security Engine (SEC) device driver 4 * 5 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc. 6 * 7 * Scatterlist Crypto API glue code copied from files with the following: 8 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> 9 * 10 * Crypto algorithm registration code copied from hifn driver: 11 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 12 * All rights reserved. 13 */ 14 15 #include <linux/kernel.h> 16 #include <linux/module.h> 17 #include <linux/mod_devicetable.h> 18 #include <linux/device.h> 19 #include <linux/interrupt.h> 20 #include <linux/crypto.h> 21 #include <linux/hw_random.h> 22 #include <linux/of_address.h> 23 #include <linux/of_irq.h> 24 #include <linux/of_platform.h> 25 #include <linux/dma-mapping.h> 26 #include <linux/io.h> 27 #include <linux/spinlock.h> 28 #include <linux/rtnetlink.h> 29 #include <linux/slab.h> 30 31 #include <crypto/algapi.h> 32 #include <crypto/aes.h> 33 #include <crypto/internal/des.h> 34 #include <crypto/sha.h> 35 #include <crypto/md5.h> 36 #include <crypto/internal/aead.h> 37 #include <crypto/authenc.h> 38 #include <crypto/internal/skcipher.h> 39 #include <crypto/hash.h> 40 #include <crypto/internal/hash.h> 41 #include <crypto/scatterwalk.h> 42 43 #include "talitos.h" 44 45 static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr, 46 unsigned int len, bool is_sec1) 47 { 48 ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr)); 49 if (is_sec1) { 50 ptr->len1 = cpu_to_be16(len); 51 } else { 52 ptr->len = cpu_to_be16(len); 53 ptr->eptr = upper_32_bits(dma_addr); 54 } 55 } 56 57 static void copy_talitos_ptr(struct talitos_ptr *dst_ptr, 58 struct talitos_ptr *src_ptr, bool is_sec1) 59 { 60 dst_ptr->ptr = src_ptr->ptr; 61 if (is_sec1) { 62 dst_ptr->len1 = src_ptr->len1; 63 } else { 64 dst_ptr->len = src_ptr->len; 65 dst_ptr->eptr = src_ptr->eptr; 66 } 67 } 68 69 static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr, 70 bool is_sec1) 71 { 72 if (is_sec1) 73 return be16_to_cpu(ptr->len1); 74 else 75 return be16_to_cpu(ptr->len); 76 } 77 78 static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val, 79 bool is_sec1) 80 { 81 if (!is_sec1) 82 ptr->j_extent = val; 83 } 84 85 static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1) 86 { 87 if (!is_sec1) 88 ptr->j_extent |= val; 89 } 90 91 /* 92 * map virtual single (contiguous) pointer to h/w descriptor pointer 93 */ 94 static void __map_single_talitos_ptr(struct device *dev, 95 struct talitos_ptr *ptr, 96 unsigned int len, void *data, 97 enum dma_data_direction dir, 98 unsigned long attrs) 99 { 100 dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs); 101 struct talitos_private *priv = dev_get_drvdata(dev); 102 bool is_sec1 = has_ftr_sec1(priv); 103 104 to_talitos_ptr(ptr, dma_addr, len, is_sec1); 105 } 106 107 static void map_single_talitos_ptr(struct device *dev, 108 struct talitos_ptr *ptr, 109 unsigned int len, void *data, 110 enum dma_data_direction dir) 111 { 112 __map_single_talitos_ptr(dev, ptr, len, data, dir, 0); 113 } 114 115 static void map_single_talitos_ptr_nosync(struct device *dev, 116 struct talitos_ptr *ptr, 117 unsigned int len, void *data, 118 enum dma_data_direction dir) 119 { 120 __map_single_talitos_ptr(dev, ptr, len, data, dir, 121 DMA_ATTR_SKIP_CPU_SYNC); 122 } 123 124 /* 125 * unmap bus single (contiguous) h/w descriptor pointer 126 */ 127 static void unmap_single_talitos_ptr(struct device *dev, 128 struct talitos_ptr *ptr, 129 enum dma_data_direction dir) 130 { 131 struct talitos_private *priv = dev_get_drvdata(dev); 132 bool is_sec1 = has_ftr_sec1(priv); 133 134 dma_unmap_single(dev, be32_to_cpu(ptr->ptr), 135 from_talitos_ptr_len(ptr, is_sec1), dir); 136 } 137 138 static int reset_channel(struct device *dev, int ch) 139 { 140 struct talitos_private *priv = dev_get_drvdata(dev); 141 unsigned int timeout = TALITOS_TIMEOUT; 142 bool is_sec1 = has_ftr_sec1(priv); 143 144 if (is_sec1) { 145 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 146 TALITOS1_CCCR_LO_RESET); 147 148 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) & 149 TALITOS1_CCCR_LO_RESET) && --timeout) 150 cpu_relax(); 151 } else { 152 setbits32(priv->chan[ch].reg + TALITOS_CCCR, 153 TALITOS2_CCCR_RESET); 154 155 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & 156 TALITOS2_CCCR_RESET) && --timeout) 157 cpu_relax(); 158 } 159 160 if (timeout == 0) { 161 dev_err(dev, "failed to reset channel %d\n", ch); 162 return -EIO; 163 } 164 165 /* set 36-bit addressing, done writeback enable and done IRQ enable */ 166 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE | 167 TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE); 168 /* enable chaining descriptors */ 169 if (is_sec1) 170 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 171 TALITOS_CCCR_LO_NE); 172 173 /* and ICCR writeback, if available */ 174 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) 175 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 176 TALITOS_CCCR_LO_IWSE); 177 178 return 0; 179 } 180 181 static int reset_device(struct device *dev) 182 { 183 struct talitos_private *priv = dev_get_drvdata(dev); 184 unsigned int timeout = TALITOS_TIMEOUT; 185 bool is_sec1 = has_ftr_sec1(priv); 186 u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR; 187 188 setbits32(priv->reg + TALITOS_MCR, mcr); 189 190 while ((in_be32(priv->reg + TALITOS_MCR) & mcr) 191 && --timeout) 192 cpu_relax(); 193 194 if (priv->irq[1]) { 195 mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3; 196 setbits32(priv->reg + TALITOS_MCR, mcr); 197 } 198 199 if (timeout == 0) { 200 dev_err(dev, "failed to reset device\n"); 201 return -EIO; 202 } 203 204 return 0; 205 } 206 207 /* 208 * Reset and initialize the device 209 */ 210 static int init_device(struct device *dev) 211 { 212 struct talitos_private *priv = dev_get_drvdata(dev); 213 int ch, err; 214 bool is_sec1 = has_ftr_sec1(priv); 215 216 /* 217 * Master reset 218 * errata documentation: warning: certain SEC interrupts 219 * are not fully cleared by writing the MCR:SWR bit, 220 * set bit twice to completely reset 221 */ 222 err = reset_device(dev); 223 if (err) 224 return err; 225 226 err = reset_device(dev); 227 if (err) 228 return err; 229 230 /* reset channels */ 231 for (ch = 0; ch < priv->num_channels; ch++) { 232 err = reset_channel(dev, ch); 233 if (err) 234 return err; 235 } 236 237 /* enable channel done and error interrupts */ 238 if (is_sec1) { 239 clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT); 240 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); 241 /* disable parity error check in DEU (erroneous? test vect.) */ 242 setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE); 243 } else { 244 setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT); 245 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); 246 } 247 248 /* disable integrity check error interrupts (use writeback instead) */ 249 if (priv->features & TALITOS_FTR_HW_AUTH_CHECK) 250 setbits32(priv->reg_mdeu + TALITOS_EUICR_LO, 251 TALITOS_MDEUICR_LO_ICE); 252 253 return 0; 254 } 255 256 /** 257 * talitos_submit - submits a descriptor to the device for processing 258 * @dev: the SEC device to be used 259 * @ch: the SEC device channel to be used 260 * @desc: the descriptor to be processed by the device 261 * @callback: whom to call when processing is complete 262 * @context: a handle for use by caller (optional) 263 * 264 * desc must contain valid dma-mapped (bus physical) address pointers. 265 * callback must check err and feedback in descriptor header 266 * for device processing status. 267 */ 268 static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc, 269 void (*callback)(struct device *dev, 270 struct talitos_desc *desc, 271 void *context, int error), 272 void *context) 273 { 274 struct talitos_private *priv = dev_get_drvdata(dev); 275 struct talitos_request *request; 276 unsigned long flags; 277 int head; 278 bool is_sec1 = has_ftr_sec1(priv); 279 280 spin_lock_irqsave(&priv->chan[ch].head_lock, flags); 281 282 if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) { 283 /* h/w fifo is full */ 284 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); 285 return -EAGAIN; 286 } 287 288 head = priv->chan[ch].head; 289 request = &priv->chan[ch].fifo[head]; 290 291 /* map descriptor and save caller data */ 292 if (is_sec1) { 293 desc->hdr1 = desc->hdr; 294 request->dma_desc = dma_map_single(dev, &desc->hdr1, 295 TALITOS_DESC_SIZE, 296 DMA_BIDIRECTIONAL); 297 } else { 298 request->dma_desc = dma_map_single(dev, desc, 299 TALITOS_DESC_SIZE, 300 DMA_BIDIRECTIONAL); 301 } 302 request->callback = callback; 303 request->context = context; 304 305 /* increment fifo head */ 306 priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1); 307 308 smp_wmb(); 309 request->desc = desc; 310 311 /* GO! */ 312 wmb(); 313 out_be32(priv->chan[ch].reg + TALITOS_FF, 314 upper_32_bits(request->dma_desc)); 315 out_be32(priv->chan[ch].reg + TALITOS_FF_LO, 316 lower_32_bits(request->dma_desc)); 317 318 spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags); 319 320 return -EINPROGRESS; 321 } 322 323 static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1) 324 { 325 struct talitos_edesc *edesc; 326 327 if (!is_sec1) 328 return request->desc->hdr; 329 330 if (!request->desc->next_desc) 331 return request->desc->hdr1; 332 333 edesc = container_of(request->desc, struct talitos_edesc, desc); 334 335 return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1; 336 } 337 338 /* 339 * process what was done, notify callback of error if not 340 */ 341 static void flush_channel(struct device *dev, int ch, int error, int reset_ch) 342 { 343 struct talitos_private *priv = dev_get_drvdata(dev); 344 struct talitos_request *request, saved_req; 345 unsigned long flags; 346 int tail, status; 347 bool is_sec1 = has_ftr_sec1(priv); 348 349 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); 350 351 tail = priv->chan[ch].tail; 352 while (priv->chan[ch].fifo[tail].desc) { 353 __be32 hdr; 354 355 request = &priv->chan[ch].fifo[tail]; 356 357 /* descriptors with their done bits set don't get the error */ 358 rmb(); 359 hdr = get_request_hdr(request, is_sec1); 360 361 if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE) 362 status = 0; 363 else 364 if (!error) 365 break; 366 else 367 status = error; 368 369 dma_unmap_single(dev, request->dma_desc, 370 TALITOS_DESC_SIZE, 371 DMA_BIDIRECTIONAL); 372 373 /* copy entries so we can call callback outside lock */ 374 saved_req.desc = request->desc; 375 saved_req.callback = request->callback; 376 saved_req.context = request->context; 377 378 /* release request entry in fifo */ 379 smp_wmb(); 380 request->desc = NULL; 381 382 /* increment fifo tail */ 383 priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1); 384 385 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); 386 387 atomic_dec(&priv->chan[ch].submit_count); 388 389 saved_req.callback(dev, saved_req.desc, saved_req.context, 390 status); 391 /* channel may resume processing in single desc error case */ 392 if (error && !reset_ch && status == error) 393 return; 394 spin_lock_irqsave(&priv->chan[ch].tail_lock, flags); 395 tail = priv->chan[ch].tail; 396 } 397 398 spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags); 399 } 400 401 /* 402 * process completed requests for channels that have done status 403 */ 404 #define DEF_TALITOS1_DONE(name, ch_done_mask) \ 405 static void talitos1_done_##name(unsigned long data) \ 406 { \ 407 struct device *dev = (struct device *)data; \ 408 struct talitos_private *priv = dev_get_drvdata(dev); \ 409 unsigned long flags; \ 410 \ 411 if (ch_done_mask & 0x10000000) \ 412 flush_channel(dev, 0, 0, 0); \ 413 if (ch_done_mask & 0x40000000) \ 414 flush_channel(dev, 1, 0, 0); \ 415 if (ch_done_mask & 0x00010000) \ 416 flush_channel(dev, 2, 0, 0); \ 417 if (ch_done_mask & 0x00040000) \ 418 flush_channel(dev, 3, 0, 0); \ 419 \ 420 /* At this point, all completed channels have been processed */ \ 421 /* Unmask done interrupts for channels completed later on. */ \ 422 spin_lock_irqsave(&priv->reg_lock, flags); \ 423 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 424 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \ 425 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 426 } 427 428 DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE) 429 DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE) 430 431 #define DEF_TALITOS2_DONE(name, ch_done_mask) \ 432 static void talitos2_done_##name(unsigned long data) \ 433 { \ 434 struct device *dev = (struct device *)data; \ 435 struct talitos_private *priv = dev_get_drvdata(dev); \ 436 unsigned long flags; \ 437 \ 438 if (ch_done_mask & 1) \ 439 flush_channel(dev, 0, 0, 0); \ 440 if (ch_done_mask & (1 << 2)) \ 441 flush_channel(dev, 1, 0, 0); \ 442 if (ch_done_mask & (1 << 4)) \ 443 flush_channel(dev, 2, 0, 0); \ 444 if (ch_done_mask & (1 << 6)) \ 445 flush_channel(dev, 3, 0, 0); \ 446 \ 447 /* At this point, all completed channels have been processed */ \ 448 /* Unmask done interrupts for channels completed later on. */ \ 449 spin_lock_irqsave(&priv->reg_lock, flags); \ 450 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 451 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \ 452 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 453 } 454 455 DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE) 456 DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE) 457 DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE) 458 DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE) 459 460 /* 461 * locate current (offending) descriptor 462 */ 463 static u32 current_desc_hdr(struct device *dev, int ch) 464 { 465 struct talitos_private *priv = dev_get_drvdata(dev); 466 int tail, iter; 467 dma_addr_t cur_desc; 468 469 cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32; 470 cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO); 471 472 if (!cur_desc) { 473 dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n"); 474 return 0; 475 } 476 477 tail = priv->chan[ch].tail; 478 479 iter = tail; 480 while (priv->chan[ch].fifo[iter].dma_desc != cur_desc && 481 priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) { 482 iter = (iter + 1) & (priv->fifo_len - 1); 483 if (iter == tail) { 484 dev_err(dev, "couldn't locate current descriptor\n"); 485 return 0; 486 } 487 } 488 489 if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) { 490 struct talitos_edesc *edesc; 491 492 edesc = container_of(priv->chan[ch].fifo[iter].desc, 493 struct talitos_edesc, desc); 494 return ((struct talitos_desc *) 495 (edesc->buf + edesc->dma_len))->hdr; 496 } 497 498 return priv->chan[ch].fifo[iter].desc->hdr; 499 } 500 501 /* 502 * user diagnostics; report root cause of error based on execution unit status 503 */ 504 static void report_eu_error(struct device *dev, int ch, u32 desc_hdr) 505 { 506 struct talitos_private *priv = dev_get_drvdata(dev); 507 int i; 508 509 if (!desc_hdr) 510 desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF); 511 512 switch (desc_hdr & DESC_HDR_SEL0_MASK) { 513 case DESC_HDR_SEL0_AFEU: 514 dev_err(dev, "AFEUISR 0x%08x_%08x\n", 515 in_be32(priv->reg_afeu + TALITOS_EUISR), 516 in_be32(priv->reg_afeu + TALITOS_EUISR_LO)); 517 break; 518 case DESC_HDR_SEL0_DEU: 519 dev_err(dev, "DEUISR 0x%08x_%08x\n", 520 in_be32(priv->reg_deu + TALITOS_EUISR), 521 in_be32(priv->reg_deu + TALITOS_EUISR_LO)); 522 break; 523 case DESC_HDR_SEL0_MDEUA: 524 case DESC_HDR_SEL0_MDEUB: 525 dev_err(dev, "MDEUISR 0x%08x_%08x\n", 526 in_be32(priv->reg_mdeu + TALITOS_EUISR), 527 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); 528 break; 529 case DESC_HDR_SEL0_RNG: 530 dev_err(dev, "RNGUISR 0x%08x_%08x\n", 531 in_be32(priv->reg_rngu + TALITOS_ISR), 532 in_be32(priv->reg_rngu + TALITOS_ISR_LO)); 533 break; 534 case DESC_HDR_SEL0_PKEU: 535 dev_err(dev, "PKEUISR 0x%08x_%08x\n", 536 in_be32(priv->reg_pkeu + TALITOS_EUISR), 537 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); 538 break; 539 case DESC_HDR_SEL0_AESU: 540 dev_err(dev, "AESUISR 0x%08x_%08x\n", 541 in_be32(priv->reg_aesu + TALITOS_EUISR), 542 in_be32(priv->reg_aesu + TALITOS_EUISR_LO)); 543 break; 544 case DESC_HDR_SEL0_CRCU: 545 dev_err(dev, "CRCUISR 0x%08x_%08x\n", 546 in_be32(priv->reg_crcu + TALITOS_EUISR), 547 in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); 548 break; 549 case DESC_HDR_SEL0_KEU: 550 dev_err(dev, "KEUISR 0x%08x_%08x\n", 551 in_be32(priv->reg_pkeu + TALITOS_EUISR), 552 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); 553 break; 554 } 555 556 switch (desc_hdr & DESC_HDR_SEL1_MASK) { 557 case DESC_HDR_SEL1_MDEUA: 558 case DESC_HDR_SEL1_MDEUB: 559 dev_err(dev, "MDEUISR 0x%08x_%08x\n", 560 in_be32(priv->reg_mdeu + TALITOS_EUISR), 561 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); 562 break; 563 case DESC_HDR_SEL1_CRCU: 564 dev_err(dev, "CRCUISR 0x%08x_%08x\n", 565 in_be32(priv->reg_crcu + TALITOS_EUISR), 566 in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); 567 break; 568 } 569 570 for (i = 0; i < 8; i++) 571 dev_err(dev, "DESCBUF 0x%08x_%08x\n", 572 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i), 573 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i)); 574 } 575 576 /* 577 * recover from error interrupts 578 */ 579 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) 580 { 581 struct talitos_private *priv = dev_get_drvdata(dev); 582 unsigned int timeout = TALITOS_TIMEOUT; 583 int ch, error, reset_dev = 0; 584 u32 v_lo; 585 bool is_sec1 = has_ftr_sec1(priv); 586 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ 587 588 for (ch = 0; ch < priv->num_channels; ch++) { 589 /* skip channels without errors */ 590 if (is_sec1) { 591 /* bits 29, 31, 17, 19 */ 592 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6)))) 593 continue; 594 } else { 595 if (!(isr & (1 << (ch * 2 + 1)))) 596 continue; 597 } 598 599 error = -EINVAL; 600 601 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); 602 603 if (v_lo & TALITOS_CCPSR_LO_DOF) { 604 dev_err(dev, "double fetch fifo overflow error\n"); 605 error = -EAGAIN; 606 reset_ch = 1; 607 } 608 if (v_lo & TALITOS_CCPSR_LO_SOF) { 609 /* h/w dropped descriptor */ 610 dev_err(dev, "single fetch fifo overflow error\n"); 611 error = -EAGAIN; 612 } 613 if (v_lo & TALITOS_CCPSR_LO_MDTE) 614 dev_err(dev, "master data transfer error\n"); 615 if (v_lo & TALITOS_CCPSR_LO_SGDLZ) 616 dev_err(dev, is_sec1 ? "pointer not complete error\n" 617 : "s/g data length zero error\n"); 618 if (v_lo & TALITOS_CCPSR_LO_FPZ) 619 dev_err(dev, is_sec1 ? "parity error\n" 620 : "fetch pointer zero error\n"); 621 if (v_lo & TALITOS_CCPSR_LO_IDH) 622 dev_err(dev, "illegal descriptor header error\n"); 623 if (v_lo & TALITOS_CCPSR_LO_IEU) 624 dev_err(dev, is_sec1 ? "static assignment error\n" 625 : "invalid exec unit error\n"); 626 if (v_lo & TALITOS_CCPSR_LO_EU) 627 report_eu_error(dev, ch, current_desc_hdr(dev, ch)); 628 if (!is_sec1) { 629 if (v_lo & TALITOS_CCPSR_LO_GB) 630 dev_err(dev, "gather boundary error\n"); 631 if (v_lo & TALITOS_CCPSR_LO_GRL) 632 dev_err(dev, "gather return/length error\n"); 633 if (v_lo & TALITOS_CCPSR_LO_SB) 634 dev_err(dev, "scatter boundary error\n"); 635 if (v_lo & TALITOS_CCPSR_LO_SRL) 636 dev_err(dev, "scatter return/length error\n"); 637 } 638 639 flush_channel(dev, ch, error, reset_ch); 640 641 if (reset_ch) { 642 reset_channel(dev, ch); 643 } else { 644 setbits32(priv->chan[ch].reg + TALITOS_CCCR, 645 TALITOS2_CCCR_CONT); 646 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); 647 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & 648 TALITOS2_CCCR_CONT) && --timeout) 649 cpu_relax(); 650 if (timeout == 0) { 651 dev_err(dev, "failed to restart channel %d\n", 652 ch); 653 reset_dev = 1; 654 } 655 } 656 } 657 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || 658 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { 659 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) 660 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", 661 isr, isr_lo); 662 else 663 dev_err(dev, "done overflow, internal time out, or " 664 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); 665 666 /* purge request queues */ 667 for (ch = 0; ch < priv->num_channels; ch++) 668 flush_channel(dev, ch, -EIO, 1); 669 670 /* reset and reinitialize the device */ 671 init_device(dev); 672 } 673 } 674 675 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ 676 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ 677 { \ 678 struct device *dev = data; \ 679 struct talitos_private *priv = dev_get_drvdata(dev); \ 680 u32 isr, isr_lo; \ 681 unsigned long flags; \ 682 \ 683 spin_lock_irqsave(&priv->reg_lock, flags); \ 684 isr = in_be32(priv->reg + TALITOS_ISR); \ 685 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ 686 /* Acknowledge interrupt */ \ 687 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ 688 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ 689 \ 690 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \ 691 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 692 talitos_error(dev, isr & ch_err_mask, isr_lo); \ 693 } \ 694 else { \ 695 if (likely(isr & ch_done_mask)) { \ 696 /* mask further done interrupts. */ \ 697 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 698 /* done_task will unmask done interrupts at exit */ \ 699 tasklet_schedule(&priv->done_task[tlet]); \ 700 } \ 701 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 702 } \ 703 \ 704 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ 705 IRQ_NONE; \ 706 } 707 708 DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0) 709 710 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ 711 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \ 712 { \ 713 struct device *dev = data; \ 714 struct talitos_private *priv = dev_get_drvdata(dev); \ 715 u32 isr, isr_lo; \ 716 unsigned long flags; \ 717 \ 718 spin_lock_irqsave(&priv->reg_lock, flags); \ 719 isr = in_be32(priv->reg + TALITOS_ISR); \ 720 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \ 721 /* Acknowledge interrupt */ \ 722 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \ 723 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \ 724 \ 725 if (unlikely(isr & ch_err_mask || isr_lo)) { \ 726 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 727 talitos_error(dev, isr & ch_err_mask, isr_lo); \ 728 } \ 729 else { \ 730 if (likely(isr & ch_done_mask)) { \ 731 /* mask further done interrupts. */ \ 732 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \ 733 /* done_task will unmask done interrupts at exit */ \ 734 tasklet_schedule(&priv->done_task[tlet]); \ 735 } \ 736 spin_unlock_irqrestore(&priv->reg_lock, flags); \ 737 } \ 738 \ 739 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \ 740 IRQ_NONE; \ 741 } 742 743 DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0) 744 DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR, 745 0) 746 DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR, 747 1) 748 749 /* 750 * hwrng 751 */ 752 static int talitos_rng_data_present(struct hwrng *rng, int wait) 753 { 754 struct device *dev = (struct device *)rng->priv; 755 struct talitos_private *priv = dev_get_drvdata(dev); 756 u32 ofl; 757 int i; 758 759 for (i = 0; i < 20; i++) { 760 ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) & 761 TALITOS_RNGUSR_LO_OFL; 762 if (ofl || !wait) 763 break; 764 udelay(10); 765 } 766 767 return !!ofl; 768 } 769 770 static int talitos_rng_data_read(struct hwrng *rng, u32 *data) 771 { 772 struct device *dev = (struct device *)rng->priv; 773 struct talitos_private *priv = dev_get_drvdata(dev); 774 775 /* rng fifo requires 64-bit accesses */ 776 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO); 777 *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO); 778 779 return sizeof(u32); 780 } 781 782 static int talitos_rng_init(struct hwrng *rng) 783 { 784 struct device *dev = (struct device *)rng->priv; 785 struct talitos_private *priv = dev_get_drvdata(dev); 786 unsigned int timeout = TALITOS_TIMEOUT; 787 788 setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR); 789 while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO) 790 & TALITOS_RNGUSR_LO_RD) 791 && --timeout) 792 cpu_relax(); 793 if (timeout == 0) { 794 dev_err(dev, "failed to reset rng hw\n"); 795 return -ENODEV; 796 } 797 798 /* start generating */ 799 setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0); 800 801 return 0; 802 } 803 804 static int talitos_register_rng(struct device *dev) 805 { 806 struct talitos_private *priv = dev_get_drvdata(dev); 807 int err; 808 809 priv->rng.name = dev_driver_string(dev), 810 priv->rng.init = talitos_rng_init, 811 priv->rng.data_present = talitos_rng_data_present, 812 priv->rng.data_read = talitos_rng_data_read, 813 priv->rng.priv = (unsigned long)dev; 814 815 err = hwrng_register(&priv->rng); 816 if (!err) 817 priv->rng_registered = true; 818 819 return err; 820 } 821 822 static void talitos_unregister_rng(struct device *dev) 823 { 824 struct talitos_private *priv = dev_get_drvdata(dev); 825 826 if (!priv->rng_registered) 827 return; 828 829 hwrng_unregister(&priv->rng); 830 priv->rng_registered = false; 831 } 832 833 /* 834 * crypto alg 835 */ 836 #define TALITOS_CRA_PRIORITY 3000 837 /* 838 * Defines a priority for doing AEAD with descriptors type 839 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP 840 */ 841 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1) 842 #ifdef CONFIG_CRYPTO_DEV_TALITOS2 843 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE) 844 #else 845 #define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA256_BLOCK_SIZE) 846 #endif 847 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ 848 849 struct talitos_ctx { 850 struct device *dev; 851 int ch; 852 __be32 desc_hdr_template; 853 u8 key[TALITOS_MAX_KEY_SIZE]; 854 u8 iv[TALITOS_MAX_IV_LENGTH]; 855 dma_addr_t dma_key; 856 unsigned int keylen; 857 unsigned int enckeylen; 858 unsigned int authkeylen; 859 }; 860 861 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE 862 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512 863 864 struct talitos_ahash_req_ctx { 865 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; 866 unsigned int hw_context_size; 867 u8 buf[2][HASH_MAX_BLOCK_SIZE]; 868 int buf_idx; 869 unsigned int swinit; 870 unsigned int first; 871 unsigned int last; 872 unsigned int to_hash_later; 873 unsigned int nbuf; 874 struct scatterlist bufsl[2]; 875 struct scatterlist *psrc; 876 }; 877 878 struct talitos_export_state { 879 u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)]; 880 u8 buf[HASH_MAX_BLOCK_SIZE]; 881 unsigned int swinit; 882 unsigned int first; 883 unsigned int last; 884 unsigned int to_hash_later; 885 unsigned int nbuf; 886 }; 887 888 static int aead_setkey(struct crypto_aead *authenc, 889 const u8 *key, unsigned int keylen) 890 { 891 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 892 struct device *dev = ctx->dev; 893 struct crypto_authenc_keys keys; 894 895 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 896 goto badkey; 897 898 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) 899 goto badkey; 900 901 if (ctx->keylen) 902 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 903 904 memcpy(ctx->key, keys.authkey, keys.authkeylen); 905 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); 906 907 ctx->keylen = keys.authkeylen + keys.enckeylen; 908 ctx->enckeylen = keys.enckeylen; 909 ctx->authkeylen = keys.authkeylen; 910 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen, 911 DMA_TO_DEVICE); 912 913 memzero_explicit(&keys, sizeof(keys)); 914 return 0; 915 916 badkey: 917 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); 918 memzero_explicit(&keys, sizeof(keys)); 919 return -EINVAL; 920 } 921 922 static int aead_des3_setkey(struct crypto_aead *authenc, 923 const u8 *key, unsigned int keylen) 924 { 925 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 926 struct device *dev = ctx->dev; 927 struct crypto_authenc_keys keys; 928 int err; 929 930 err = crypto_authenc_extractkeys(&keys, key, keylen); 931 if (unlikely(err)) 932 goto badkey; 933 934 err = -EINVAL; 935 if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE) 936 goto badkey; 937 938 err = verify_aead_des3_key(authenc, keys.enckey, keys.enckeylen); 939 if (err) 940 goto out; 941 942 if (ctx->keylen) 943 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 944 945 memcpy(ctx->key, keys.authkey, keys.authkeylen); 946 memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen); 947 948 ctx->keylen = keys.authkeylen + keys.enckeylen; 949 ctx->enckeylen = keys.enckeylen; 950 ctx->authkeylen = keys.authkeylen; 951 ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen, 952 DMA_TO_DEVICE); 953 954 out: 955 memzero_explicit(&keys, sizeof(keys)); 956 return err; 957 958 badkey: 959 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN); 960 goto out; 961 } 962 963 static void talitos_sg_unmap(struct device *dev, 964 struct talitos_edesc *edesc, 965 struct scatterlist *src, 966 struct scatterlist *dst, 967 unsigned int len, unsigned int offset) 968 { 969 struct talitos_private *priv = dev_get_drvdata(dev); 970 bool is_sec1 = has_ftr_sec1(priv); 971 unsigned int src_nents = edesc->src_nents ? : 1; 972 unsigned int dst_nents = edesc->dst_nents ? : 1; 973 974 if (is_sec1 && dst && dst_nents > 1) { 975 dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset, 976 len, DMA_FROM_DEVICE); 977 sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len, 978 offset); 979 } 980 if (src != dst) { 981 if (src_nents == 1 || !is_sec1) 982 dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); 983 984 if (dst && (dst_nents == 1 || !is_sec1)) 985 dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); 986 } else if (src_nents == 1 || !is_sec1) { 987 dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); 988 } 989 } 990 991 static void ipsec_esp_unmap(struct device *dev, 992 struct talitos_edesc *edesc, 993 struct aead_request *areq, bool encrypt) 994 { 995 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 996 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 997 unsigned int ivsize = crypto_aead_ivsize(aead); 998 unsigned int authsize = crypto_aead_authsize(aead); 999 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); 1000 bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP; 1001 struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3]; 1002 1003 if (is_ipsec_esp) 1004 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], 1005 DMA_FROM_DEVICE); 1006 unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE); 1007 1008 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, 1009 cryptlen + authsize, areq->assoclen); 1010 1011 if (edesc->dma_len) 1012 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1013 DMA_BIDIRECTIONAL); 1014 1015 if (!is_ipsec_esp) { 1016 unsigned int dst_nents = edesc->dst_nents ? : 1; 1017 1018 sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize, 1019 areq->assoclen + cryptlen - ivsize); 1020 } 1021 } 1022 1023 /* 1024 * ipsec_esp descriptor callbacks 1025 */ 1026 static void ipsec_esp_encrypt_done(struct device *dev, 1027 struct talitos_desc *desc, void *context, 1028 int err) 1029 { 1030 struct aead_request *areq = context; 1031 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1032 unsigned int ivsize = crypto_aead_ivsize(authenc); 1033 struct talitos_edesc *edesc; 1034 1035 edesc = container_of(desc, struct talitos_edesc, desc); 1036 1037 ipsec_esp_unmap(dev, edesc, areq, true); 1038 1039 dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE); 1040 1041 kfree(edesc); 1042 1043 aead_request_complete(areq, err); 1044 } 1045 1046 static void ipsec_esp_decrypt_swauth_done(struct device *dev, 1047 struct talitos_desc *desc, 1048 void *context, int err) 1049 { 1050 struct aead_request *req = context; 1051 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1052 unsigned int authsize = crypto_aead_authsize(authenc); 1053 struct talitos_edesc *edesc; 1054 char *oicv, *icv; 1055 1056 edesc = container_of(desc, struct talitos_edesc, desc); 1057 1058 ipsec_esp_unmap(dev, edesc, req, false); 1059 1060 if (!err) { 1061 /* auth check */ 1062 oicv = edesc->buf + edesc->dma_len; 1063 icv = oicv - authsize; 1064 1065 err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0; 1066 } 1067 1068 kfree(edesc); 1069 1070 aead_request_complete(req, err); 1071 } 1072 1073 static void ipsec_esp_decrypt_hwauth_done(struct device *dev, 1074 struct talitos_desc *desc, 1075 void *context, int err) 1076 { 1077 struct aead_request *req = context; 1078 struct talitos_edesc *edesc; 1079 1080 edesc = container_of(desc, struct talitos_edesc, desc); 1081 1082 ipsec_esp_unmap(dev, edesc, req, false); 1083 1084 /* check ICV auth status */ 1085 if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) != 1086 DESC_HDR_LO_ICCR1_PASS)) 1087 err = -EBADMSG; 1088 1089 kfree(edesc); 1090 1091 aead_request_complete(req, err); 1092 } 1093 1094 /* 1095 * convert scatterlist to SEC h/w link table format 1096 * stop at cryptlen bytes 1097 */ 1098 static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count, 1099 unsigned int offset, int datalen, int elen, 1100 struct talitos_ptr *link_tbl_ptr) 1101 { 1102 int n_sg = elen ? sg_count + 1 : sg_count; 1103 int count = 0; 1104 int cryptlen = datalen + elen; 1105 1106 while (cryptlen && sg && n_sg--) { 1107 unsigned int len = sg_dma_len(sg); 1108 1109 if (offset >= len) { 1110 offset -= len; 1111 goto next; 1112 } 1113 1114 len -= offset; 1115 1116 if (len > cryptlen) 1117 len = cryptlen; 1118 1119 if (datalen > 0 && len > datalen) { 1120 to_talitos_ptr(link_tbl_ptr + count, 1121 sg_dma_address(sg) + offset, datalen, 0); 1122 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); 1123 count++; 1124 len -= datalen; 1125 offset += datalen; 1126 } 1127 to_talitos_ptr(link_tbl_ptr + count, 1128 sg_dma_address(sg) + offset, len, 0); 1129 to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0); 1130 count++; 1131 cryptlen -= len; 1132 datalen -= len; 1133 offset = 0; 1134 1135 next: 1136 sg = sg_next(sg); 1137 } 1138 1139 /* tag end of link table */ 1140 if (count > 0) 1141 to_talitos_ptr_ext_set(link_tbl_ptr + count - 1, 1142 DESC_PTR_LNKTBL_RET, 0); 1143 1144 return count; 1145 } 1146 1147 static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src, 1148 unsigned int len, struct talitos_edesc *edesc, 1149 struct talitos_ptr *ptr, int sg_count, 1150 unsigned int offset, int tbl_off, int elen, 1151 bool force) 1152 { 1153 struct talitos_private *priv = dev_get_drvdata(dev); 1154 bool is_sec1 = has_ftr_sec1(priv); 1155 1156 if (!src) { 1157 to_talitos_ptr(ptr, 0, 0, is_sec1); 1158 return 1; 1159 } 1160 to_talitos_ptr_ext_set(ptr, elen, is_sec1); 1161 if (sg_count == 1 && !force) { 1162 to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1); 1163 return sg_count; 1164 } 1165 if (is_sec1) { 1166 to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1); 1167 return sg_count; 1168 } 1169 sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen, 1170 &edesc->link_tbl[tbl_off]); 1171 if (sg_count == 1 && !force) { 1172 /* Only one segment now, so no link tbl needed*/ 1173 copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1); 1174 return sg_count; 1175 } 1176 to_talitos_ptr(ptr, edesc->dma_link_tbl + 1177 tbl_off * sizeof(struct talitos_ptr), len, is_sec1); 1178 to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1); 1179 1180 return sg_count; 1181 } 1182 1183 static int talitos_sg_map(struct device *dev, struct scatterlist *src, 1184 unsigned int len, struct talitos_edesc *edesc, 1185 struct talitos_ptr *ptr, int sg_count, 1186 unsigned int offset, int tbl_off) 1187 { 1188 return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset, 1189 tbl_off, 0, false); 1190 } 1191 1192 /* 1193 * fill in and submit ipsec_esp descriptor 1194 */ 1195 static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq, 1196 bool encrypt, 1197 void (*callback)(struct device *dev, 1198 struct talitos_desc *desc, 1199 void *context, int error)) 1200 { 1201 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 1202 unsigned int authsize = crypto_aead_authsize(aead); 1203 struct talitos_ctx *ctx = crypto_aead_ctx(aead); 1204 struct device *dev = ctx->dev; 1205 struct talitos_desc *desc = &edesc->desc; 1206 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); 1207 unsigned int ivsize = crypto_aead_ivsize(aead); 1208 int tbl_off = 0; 1209 int sg_count, ret; 1210 int elen = 0; 1211 bool sync_needed = false; 1212 struct talitos_private *priv = dev_get_drvdata(dev); 1213 bool is_sec1 = has_ftr_sec1(priv); 1214 bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP; 1215 struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3]; 1216 struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2]; 1217 dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize; 1218 1219 /* hmac key */ 1220 to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1); 1221 1222 sg_count = edesc->src_nents ?: 1; 1223 if (is_sec1 && sg_count > 1) 1224 sg_copy_to_buffer(areq->src, sg_count, edesc->buf, 1225 areq->assoclen + cryptlen); 1226 else 1227 sg_count = dma_map_sg(dev, areq->src, sg_count, 1228 (areq->src == areq->dst) ? 1229 DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1230 1231 /* hmac data */ 1232 ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc, 1233 &desc->ptr[1], sg_count, 0, tbl_off); 1234 1235 if (ret > 1) { 1236 tbl_off += ret; 1237 sync_needed = true; 1238 } 1239 1240 /* cipher iv */ 1241 to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1); 1242 1243 /* cipher key */ 1244 to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen, 1245 ctx->enckeylen, is_sec1); 1246 1247 /* 1248 * cipher in 1249 * map and adjust cipher len to aead request cryptlen. 1250 * extent is bytes of HMAC postpended to ciphertext, 1251 * typically 12 for ipsec 1252 */ 1253 if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV)) 1254 elen = authsize; 1255 1256 ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4], 1257 sg_count, areq->assoclen, tbl_off, elen, 1258 false); 1259 1260 if (ret > 1) { 1261 tbl_off += ret; 1262 sync_needed = true; 1263 } 1264 1265 /* cipher out */ 1266 if (areq->src != areq->dst) { 1267 sg_count = edesc->dst_nents ? : 1; 1268 if (!is_sec1 || sg_count == 1) 1269 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); 1270 } 1271 1272 if (is_ipsec_esp && encrypt) 1273 elen = authsize; 1274 else 1275 elen = 0; 1276 ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5], 1277 sg_count, areq->assoclen, tbl_off, elen, 1278 is_ipsec_esp && !encrypt); 1279 tbl_off += ret; 1280 1281 if (!encrypt && is_ipsec_esp) { 1282 struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off]; 1283 1284 /* Add an entry to the link table for ICV data */ 1285 to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1); 1286 to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1); 1287 1288 /* icv data follows link tables */ 1289 to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1); 1290 to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1); 1291 sync_needed = true; 1292 } else if (!encrypt) { 1293 to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1); 1294 sync_needed = true; 1295 } else if (!is_ipsec_esp) { 1296 talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6], 1297 sg_count, areq->assoclen + cryptlen, tbl_off); 1298 } 1299 1300 /* iv out */ 1301 if (is_ipsec_esp) 1302 map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 1303 DMA_FROM_DEVICE); 1304 1305 if (sync_needed) 1306 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1307 edesc->dma_len, 1308 DMA_BIDIRECTIONAL); 1309 1310 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1311 if (ret != -EINPROGRESS) { 1312 ipsec_esp_unmap(dev, edesc, areq, encrypt); 1313 kfree(edesc); 1314 } 1315 return ret; 1316 } 1317 1318 /* 1319 * allocate and map the extended descriptor 1320 */ 1321 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev, 1322 struct scatterlist *src, 1323 struct scatterlist *dst, 1324 u8 *iv, 1325 unsigned int assoclen, 1326 unsigned int cryptlen, 1327 unsigned int authsize, 1328 unsigned int ivsize, 1329 int icv_stashing, 1330 u32 cryptoflags, 1331 bool encrypt) 1332 { 1333 struct talitos_edesc *edesc; 1334 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len; 1335 dma_addr_t iv_dma = 0; 1336 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : 1337 GFP_ATOMIC; 1338 struct talitos_private *priv = dev_get_drvdata(dev); 1339 bool is_sec1 = has_ftr_sec1(priv); 1340 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN; 1341 1342 if (cryptlen + authsize > max_len) { 1343 dev_err(dev, "length exceeds h/w max limit\n"); 1344 return ERR_PTR(-EINVAL); 1345 } 1346 1347 if (!dst || dst == src) { 1348 src_len = assoclen + cryptlen + authsize; 1349 src_nents = sg_nents_for_len(src, src_len); 1350 if (src_nents < 0) { 1351 dev_err(dev, "Invalid number of src SG.\n"); 1352 return ERR_PTR(-EINVAL); 1353 } 1354 src_nents = (src_nents == 1) ? 0 : src_nents; 1355 dst_nents = dst ? src_nents : 0; 1356 dst_len = 0; 1357 } else { /* dst && dst != src*/ 1358 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize); 1359 src_nents = sg_nents_for_len(src, src_len); 1360 if (src_nents < 0) { 1361 dev_err(dev, "Invalid number of src SG.\n"); 1362 return ERR_PTR(-EINVAL); 1363 } 1364 src_nents = (src_nents == 1) ? 0 : src_nents; 1365 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0); 1366 dst_nents = sg_nents_for_len(dst, dst_len); 1367 if (dst_nents < 0) { 1368 dev_err(dev, "Invalid number of dst SG.\n"); 1369 return ERR_PTR(-EINVAL); 1370 } 1371 dst_nents = (dst_nents == 1) ? 0 : dst_nents; 1372 } 1373 1374 /* 1375 * allocate space for base edesc plus the link tables, 1376 * allowing for two separate entries for AD and generated ICV (+ 2), 1377 * and space for two sets of ICVs (stashed and generated) 1378 */ 1379 alloc_len = sizeof(struct talitos_edesc); 1380 if (src_nents || dst_nents || !encrypt) { 1381 if (is_sec1) 1382 dma_len = (src_nents ? src_len : 0) + 1383 (dst_nents ? dst_len : 0) + authsize; 1384 else 1385 dma_len = (src_nents + dst_nents + 2) * 1386 sizeof(struct talitos_ptr) + authsize; 1387 alloc_len += dma_len; 1388 } else { 1389 dma_len = 0; 1390 } 1391 alloc_len += icv_stashing ? authsize : 0; 1392 1393 /* if its a ahash, add space for a second desc next to the first one */ 1394 if (is_sec1 && !dst) 1395 alloc_len += sizeof(struct talitos_desc); 1396 alloc_len += ivsize; 1397 1398 edesc = kmalloc(alloc_len, GFP_DMA | flags); 1399 if (!edesc) 1400 return ERR_PTR(-ENOMEM); 1401 if (ivsize) { 1402 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize); 1403 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE); 1404 } 1405 memset(&edesc->desc, 0, sizeof(edesc->desc)); 1406 1407 edesc->src_nents = src_nents; 1408 edesc->dst_nents = dst_nents; 1409 edesc->iv_dma = iv_dma; 1410 edesc->dma_len = dma_len; 1411 if (dma_len) 1412 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0], 1413 edesc->dma_len, 1414 DMA_BIDIRECTIONAL); 1415 1416 return edesc; 1417 } 1418 1419 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv, 1420 int icv_stashing, bool encrypt) 1421 { 1422 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1423 unsigned int authsize = crypto_aead_authsize(authenc); 1424 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1425 unsigned int ivsize = crypto_aead_ivsize(authenc); 1426 unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize); 1427 1428 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 1429 iv, areq->assoclen, cryptlen, 1430 authsize, ivsize, icv_stashing, 1431 areq->base.flags, encrypt); 1432 } 1433 1434 static int aead_encrypt(struct aead_request *req) 1435 { 1436 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1437 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1438 struct talitos_edesc *edesc; 1439 1440 /* allocate extended descriptor */ 1441 edesc = aead_edesc_alloc(req, req->iv, 0, true); 1442 if (IS_ERR(edesc)) 1443 return PTR_ERR(edesc); 1444 1445 /* set encrypt */ 1446 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; 1447 1448 return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done); 1449 } 1450 1451 static int aead_decrypt(struct aead_request *req) 1452 { 1453 struct crypto_aead *authenc = crypto_aead_reqtfm(req); 1454 unsigned int authsize = crypto_aead_authsize(authenc); 1455 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1456 struct talitos_private *priv = dev_get_drvdata(ctx->dev); 1457 struct talitos_edesc *edesc; 1458 void *icvdata; 1459 1460 /* allocate extended descriptor */ 1461 edesc = aead_edesc_alloc(req, req->iv, 1, false); 1462 if (IS_ERR(edesc)) 1463 return PTR_ERR(edesc); 1464 1465 if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) && 1466 (priv->features & TALITOS_FTR_HW_AUTH_CHECK) && 1467 ((!edesc->src_nents && !edesc->dst_nents) || 1468 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) { 1469 1470 /* decrypt and check the ICV */ 1471 edesc->desc.hdr = ctx->desc_hdr_template | 1472 DESC_HDR_DIR_INBOUND | 1473 DESC_HDR_MODE1_MDEU_CICV; 1474 1475 /* reset integrity check result bits */ 1476 1477 return ipsec_esp(edesc, req, false, 1478 ipsec_esp_decrypt_hwauth_done); 1479 } 1480 1481 /* Have to check the ICV with software */ 1482 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; 1483 1484 /* stash incoming ICV for later cmp with ICV generated by the h/w */ 1485 icvdata = edesc->buf + edesc->dma_len; 1486 1487 sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize, 1488 req->assoclen + req->cryptlen - authsize); 1489 1490 return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done); 1491 } 1492 1493 static int skcipher_setkey(struct crypto_skcipher *cipher, 1494 const u8 *key, unsigned int keylen) 1495 { 1496 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher); 1497 struct device *dev = ctx->dev; 1498 1499 if (ctx->keylen) 1500 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 1501 1502 memcpy(&ctx->key, key, keylen); 1503 ctx->keylen = keylen; 1504 1505 ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE); 1506 1507 return 0; 1508 } 1509 1510 static int skcipher_des_setkey(struct crypto_skcipher *cipher, 1511 const u8 *key, unsigned int keylen) 1512 { 1513 return verify_skcipher_des_key(cipher, key) ?: 1514 skcipher_setkey(cipher, key, keylen); 1515 } 1516 1517 static int skcipher_des3_setkey(struct crypto_skcipher *cipher, 1518 const u8 *key, unsigned int keylen) 1519 { 1520 return verify_skcipher_des3_key(cipher, key) ?: 1521 skcipher_setkey(cipher, key, keylen); 1522 } 1523 1524 static int skcipher_aes_setkey(struct crypto_skcipher *cipher, 1525 const u8 *key, unsigned int keylen) 1526 { 1527 if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 || 1528 keylen == AES_KEYSIZE_256) 1529 return skcipher_setkey(cipher, key, keylen); 1530 1531 crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 1532 1533 return -EINVAL; 1534 } 1535 1536 static void common_nonsnoop_unmap(struct device *dev, 1537 struct talitos_edesc *edesc, 1538 struct skcipher_request *areq) 1539 { 1540 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1541 1542 talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen, 0); 1543 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); 1544 1545 if (edesc->dma_len) 1546 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1547 DMA_BIDIRECTIONAL); 1548 } 1549 1550 static void skcipher_done(struct device *dev, 1551 struct talitos_desc *desc, void *context, 1552 int err) 1553 { 1554 struct skcipher_request *areq = context; 1555 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); 1556 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher); 1557 unsigned int ivsize = crypto_skcipher_ivsize(cipher); 1558 struct talitos_edesc *edesc; 1559 1560 edesc = container_of(desc, struct talitos_edesc, desc); 1561 1562 common_nonsnoop_unmap(dev, edesc, areq); 1563 memcpy(areq->iv, ctx->iv, ivsize); 1564 1565 kfree(edesc); 1566 1567 areq->base.complete(&areq->base, err); 1568 } 1569 1570 static int common_nonsnoop(struct talitos_edesc *edesc, 1571 struct skcipher_request *areq, 1572 void (*callback) (struct device *dev, 1573 struct talitos_desc *desc, 1574 void *context, int error)) 1575 { 1576 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); 1577 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher); 1578 struct device *dev = ctx->dev; 1579 struct talitos_desc *desc = &edesc->desc; 1580 unsigned int cryptlen = areq->cryptlen; 1581 unsigned int ivsize = crypto_skcipher_ivsize(cipher); 1582 int sg_count, ret; 1583 bool sync_needed = false; 1584 struct talitos_private *priv = dev_get_drvdata(dev); 1585 bool is_sec1 = has_ftr_sec1(priv); 1586 1587 /* first DWORD empty */ 1588 1589 /* cipher iv */ 1590 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1); 1591 1592 /* cipher key */ 1593 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1); 1594 1595 sg_count = edesc->src_nents ?: 1; 1596 if (is_sec1 && sg_count > 1) 1597 sg_copy_to_buffer(areq->src, sg_count, edesc->buf, 1598 cryptlen); 1599 else 1600 sg_count = dma_map_sg(dev, areq->src, sg_count, 1601 (areq->src == areq->dst) ? 1602 DMA_BIDIRECTIONAL : DMA_TO_DEVICE); 1603 /* 1604 * cipher in 1605 */ 1606 sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc, 1607 &desc->ptr[3], sg_count, 0, 0); 1608 if (sg_count > 1) 1609 sync_needed = true; 1610 1611 /* cipher out */ 1612 if (areq->src != areq->dst) { 1613 sg_count = edesc->dst_nents ? : 1; 1614 if (!is_sec1 || sg_count == 1) 1615 dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE); 1616 } 1617 1618 ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4], 1619 sg_count, 0, (edesc->src_nents + 1)); 1620 if (ret > 1) 1621 sync_needed = true; 1622 1623 /* iv out */ 1624 map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv, 1625 DMA_FROM_DEVICE); 1626 1627 /* last DWORD empty */ 1628 1629 if (sync_needed) 1630 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1631 edesc->dma_len, DMA_BIDIRECTIONAL); 1632 1633 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1634 if (ret != -EINPROGRESS) { 1635 common_nonsnoop_unmap(dev, edesc, areq); 1636 kfree(edesc); 1637 } 1638 return ret; 1639 } 1640 1641 static struct talitos_edesc *skcipher_edesc_alloc(struct skcipher_request * 1642 areq, bool encrypt) 1643 { 1644 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); 1645 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher); 1646 unsigned int ivsize = crypto_skcipher_ivsize(cipher); 1647 1648 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst, 1649 areq->iv, 0, areq->cryptlen, 0, ivsize, 0, 1650 areq->base.flags, encrypt); 1651 } 1652 1653 static int skcipher_encrypt(struct skcipher_request *areq) 1654 { 1655 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); 1656 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher); 1657 struct talitos_edesc *edesc; 1658 unsigned int blocksize = 1659 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher)); 1660 1661 if (!areq->cryptlen) 1662 return 0; 1663 1664 if (areq->cryptlen % blocksize) 1665 return -EINVAL; 1666 1667 /* allocate extended descriptor */ 1668 edesc = skcipher_edesc_alloc(areq, true); 1669 if (IS_ERR(edesc)) 1670 return PTR_ERR(edesc); 1671 1672 /* set encrypt */ 1673 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; 1674 1675 return common_nonsnoop(edesc, areq, skcipher_done); 1676 } 1677 1678 static int skcipher_decrypt(struct skcipher_request *areq) 1679 { 1680 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); 1681 struct talitos_ctx *ctx = crypto_skcipher_ctx(cipher); 1682 struct talitos_edesc *edesc; 1683 unsigned int blocksize = 1684 crypto_tfm_alg_blocksize(crypto_skcipher_tfm(cipher)); 1685 1686 if (!areq->cryptlen) 1687 return 0; 1688 1689 if (areq->cryptlen % blocksize) 1690 return -EINVAL; 1691 1692 /* allocate extended descriptor */ 1693 edesc = skcipher_edesc_alloc(areq, false); 1694 if (IS_ERR(edesc)) 1695 return PTR_ERR(edesc); 1696 1697 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND; 1698 1699 return common_nonsnoop(edesc, areq, skcipher_done); 1700 } 1701 1702 static void common_nonsnoop_hash_unmap(struct device *dev, 1703 struct talitos_edesc *edesc, 1704 struct ahash_request *areq) 1705 { 1706 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1707 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1708 struct talitos_private *priv = dev_get_drvdata(dev); 1709 bool is_sec1 = has_ftr_sec1(priv); 1710 struct talitos_desc *desc = &edesc->desc; 1711 struct talitos_desc *desc2 = (struct talitos_desc *) 1712 (edesc->buf + edesc->dma_len); 1713 1714 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1715 if (desc->next_desc && 1716 desc->ptr[5].ptr != desc2->ptr[5].ptr) 1717 unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE); 1718 if (req_ctx->last) 1719 memcpy(areq->result, req_ctx->hw_context, 1720 crypto_ahash_digestsize(tfm)); 1721 1722 if (req_ctx->psrc) 1723 talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0); 1724 1725 /* When using hashctx-in, must unmap it. */ 1726 if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1)) 1727 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], 1728 DMA_TO_DEVICE); 1729 else if (desc->next_desc) 1730 unmap_single_talitos_ptr(dev, &desc2->ptr[1], 1731 DMA_TO_DEVICE); 1732 1733 if (is_sec1 && req_ctx->nbuf) 1734 unmap_single_talitos_ptr(dev, &desc->ptr[3], 1735 DMA_TO_DEVICE); 1736 1737 if (edesc->dma_len) 1738 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1739 DMA_BIDIRECTIONAL); 1740 1741 if (edesc->desc.next_desc) 1742 dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc), 1743 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL); 1744 } 1745 1746 static void ahash_done(struct device *dev, 1747 struct talitos_desc *desc, void *context, 1748 int err) 1749 { 1750 struct ahash_request *areq = context; 1751 struct talitos_edesc *edesc = 1752 container_of(desc, struct talitos_edesc, desc); 1753 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1754 1755 if (!req_ctx->last && req_ctx->to_hash_later) { 1756 /* Position any partial block for next update/final/finup */ 1757 req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1; 1758 req_ctx->nbuf = req_ctx->to_hash_later; 1759 } 1760 common_nonsnoop_hash_unmap(dev, edesc, areq); 1761 1762 kfree(edesc); 1763 1764 areq->base.complete(&areq->base, err); 1765 } 1766 1767 /* 1768 * SEC1 doesn't like hashing of 0 sized message, so we do the padding 1769 * ourself and submit a padded block 1770 */ 1771 static void talitos_handle_buggy_hash(struct talitos_ctx *ctx, 1772 struct talitos_edesc *edesc, 1773 struct talitos_ptr *ptr) 1774 { 1775 static u8 padded_hash[64] = { 1776 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1777 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1778 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1779 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1780 }; 1781 1782 pr_err_once("Bug in SEC1, padding ourself\n"); 1783 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD; 1784 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash), 1785 (char *)padded_hash, DMA_TO_DEVICE); 1786 } 1787 1788 static int common_nonsnoop_hash(struct talitos_edesc *edesc, 1789 struct ahash_request *areq, unsigned int length, 1790 void (*callback) (struct device *dev, 1791 struct talitos_desc *desc, 1792 void *context, int error)) 1793 { 1794 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1795 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1796 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1797 struct device *dev = ctx->dev; 1798 struct talitos_desc *desc = &edesc->desc; 1799 int ret; 1800 bool sync_needed = false; 1801 struct talitos_private *priv = dev_get_drvdata(dev); 1802 bool is_sec1 = has_ftr_sec1(priv); 1803 int sg_count; 1804 1805 /* first DWORD empty */ 1806 1807 /* hash context in */ 1808 if (!req_ctx->first || req_ctx->swinit) { 1809 map_single_talitos_ptr_nosync(dev, &desc->ptr[1], 1810 req_ctx->hw_context_size, 1811 req_ctx->hw_context, 1812 DMA_TO_DEVICE); 1813 req_ctx->swinit = 0; 1814 } 1815 /* Indicate next op is not the first. */ 1816 req_ctx->first = 0; 1817 1818 /* HMAC key */ 1819 if (ctx->keylen) 1820 to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, 1821 is_sec1); 1822 1823 if (is_sec1 && req_ctx->nbuf) 1824 length -= req_ctx->nbuf; 1825 1826 sg_count = edesc->src_nents ?: 1; 1827 if (is_sec1 && sg_count > 1) 1828 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length); 1829 else if (length) 1830 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, 1831 DMA_TO_DEVICE); 1832 /* 1833 * data in 1834 */ 1835 if (is_sec1 && req_ctx->nbuf) { 1836 map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf, 1837 req_ctx->buf[req_ctx->buf_idx], 1838 DMA_TO_DEVICE); 1839 } else { 1840 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1841 &desc->ptr[3], sg_count, 0, 0); 1842 if (sg_count > 1) 1843 sync_needed = true; 1844 } 1845 1846 /* fifth DWORD empty */ 1847 1848 /* hash/HMAC out -or- hash context out */ 1849 if (req_ctx->last) 1850 map_single_talitos_ptr(dev, &desc->ptr[5], 1851 crypto_ahash_digestsize(tfm), 1852 req_ctx->hw_context, DMA_FROM_DEVICE); 1853 else 1854 map_single_talitos_ptr_nosync(dev, &desc->ptr[5], 1855 req_ctx->hw_context_size, 1856 req_ctx->hw_context, 1857 DMA_FROM_DEVICE); 1858 1859 /* last DWORD empty */ 1860 1861 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) 1862 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); 1863 1864 if (is_sec1 && req_ctx->nbuf && length) { 1865 struct talitos_desc *desc2 = (struct talitos_desc *) 1866 (edesc->buf + edesc->dma_len); 1867 dma_addr_t next_desc; 1868 1869 memset(desc2, 0, sizeof(*desc2)); 1870 desc2->hdr = desc->hdr; 1871 desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT; 1872 desc2->hdr1 = desc2->hdr; 1873 desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD; 1874 desc->hdr |= DESC_HDR_MODE0_MDEU_CONT; 1875 desc->hdr &= ~DESC_HDR_DONE_NOTIFY; 1876 1877 if (desc->ptr[1].ptr) 1878 copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1], 1879 is_sec1); 1880 else 1881 map_single_talitos_ptr_nosync(dev, &desc2->ptr[1], 1882 req_ctx->hw_context_size, 1883 req_ctx->hw_context, 1884 DMA_TO_DEVICE); 1885 copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1); 1886 sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc, 1887 &desc2->ptr[3], sg_count, 0, 0); 1888 if (sg_count > 1) 1889 sync_needed = true; 1890 copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1); 1891 if (req_ctx->last) 1892 map_single_talitos_ptr_nosync(dev, &desc->ptr[5], 1893 req_ctx->hw_context_size, 1894 req_ctx->hw_context, 1895 DMA_FROM_DEVICE); 1896 1897 next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE, 1898 DMA_BIDIRECTIONAL); 1899 desc->next_desc = cpu_to_be32(next_desc); 1900 } 1901 1902 if (sync_needed) 1903 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1904 edesc->dma_len, DMA_BIDIRECTIONAL); 1905 1906 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1907 if (ret != -EINPROGRESS) { 1908 common_nonsnoop_hash_unmap(dev, edesc, areq); 1909 kfree(edesc); 1910 } 1911 return ret; 1912 } 1913 1914 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, 1915 unsigned int nbytes) 1916 { 1917 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1918 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1919 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1920 struct talitos_private *priv = dev_get_drvdata(ctx->dev); 1921 bool is_sec1 = has_ftr_sec1(priv); 1922 1923 if (is_sec1) 1924 nbytes -= req_ctx->nbuf; 1925 1926 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0, 1927 nbytes, 0, 0, 0, areq->base.flags, false); 1928 } 1929 1930 static int ahash_init(struct ahash_request *areq) 1931 { 1932 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1933 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1934 struct device *dev = ctx->dev; 1935 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1936 unsigned int size; 1937 dma_addr_t dma; 1938 1939 /* Initialize the context */ 1940 req_ctx->buf_idx = 0; 1941 req_ctx->nbuf = 0; 1942 req_ctx->first = 1; /* first indicates h/w must init its context */ 1943 req_ctx->swinit = 0; /* assume h/w init of context */ 1944 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 1945 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 1946 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 1947 req_ctx->hw_context_size = size; 1948 1949 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size, 1950 DMA_TO_DEVICE); 1951 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE); 1952 1953 return 0; 1954 } 1955 1956 /* 1957 * on h/w without explicit sha224 support, we initialize h/w context 1958 * manually with sha224 constants, and tell it to run sha256. 1959 */ 1960 static int ahash_init_sha224_swinit(struct ahash_request *areq) 1961 { 1962 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1963 1964 req_ctx->hw_context[0] = SHA224_H0; 1965 req_ctx->hw_context[1] = SHA224_H1; 1966 req_ctx->hw_context[2] = SHA224_H2; 1967 req_ctx->hw_context[3] = SHA224_H3; 1968 req_ctx->hw_context[4] = SHA224_H4; 1969 req_ctx->hw_context[5] = SHA224_H5; 1970 req_ctx->hw_context[6] = SHA224_H6; 1971 req_ctx->hw_context[7] = SHA224_H7; 1972 1973 /* init 64-bit count */ 1974 req_ctx->hw_context[8] = 0; 1975 req_ctx->hw_context[9] = 0; 1976 1977 ahash_init(areq); 1978 req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/ 1979 1980 return 0; 1981 } 1982 1983 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes) 1984 { 1985 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1986 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1987 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1988 struct talitos_edesc *edesc; 1989 unsigned int blocksize = 1990 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 1991 unsigned int nbytes_to_hash; 1992 unsigned int to_hash_later; 1993 unsigned int nsg; 1994 int nents; 1995 struct device *dev = ctx->dev; 1996 struct talitos_private *priv = dev_get_drvdata(dev); 1997 bool is_sec1 = has_ftr_sec1(priv); 1998 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx]; 1999 2000 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) { 2001 /* Buffer up to one whole block */ 2002 nents = sg_nents_for_len(areq->src, nbytes); 2003 if (nents < 0) { 2004 dev_err(ctx->dev, "Invalid number of src SG.\n"); 2005 return nents; 2006 } 2007 sg_copy_to_buffer(areq->src, nents, 2008 ctx_buf + req_ctx->nbuf, nbytes); 2009 req_ctx->nbuf += nbytes; 2010 return 0; 2011 } 2012 2013 /* At least (blocksize + 1) bytes are available to hash */ 2014 nbytes_to_hash = nbytes + req_ctx->nbuf; 2015 to_hash_later = nbytes_to_hash & (blocksize - 1); 2016 2017 if (req_ctx->last) 2018 to_hash_later = 0; 2019 else if (to_hash_later) 2020 /* There is a partial block. Hash the full block(s) now */ 2021 nbytes_to_hash -= to_hash_later; 2022 else { 2023 /* Keep one block buffered */ 2024 nbytes_to_hash -= blocksize; 2025 to_hash_later = blocksize; 2026 } 2027 2028 /* Chain in any previously buffered data */ 2029 if (!is_sec1 && req_ctx->nbuf) { 2030 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1; 2031 sg_init_table(req_ctx->bufsl, nsg); 2032 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf); 2033 if (nsg > 1) 2034 sg_chain(req_ctx->bufsl, 2, areq->src); 2035 req_ctx->psrc = req_ctx->bufsl; 2036 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) { 2037 int offset; 2038 2039 if (nbytes_to_hash > blocksize) 2040 offset = blocksize - req_ctx->nbuf; 2041 else 2042 offset = nbytes_to_hash - req_ctx->nbuf; 2043 nents = sg_nents_for_len(areq->src, offset); 2044 if (nents < 0) { 2045 dev_err(ctx->dev, "Invalid number of src SG.\n"); 2046 return nents; 2047 } 2048 sg_copy_to_buffer(areq->src, nents, 2049 ctx_buf + req_ctx->nbuf, offset); 2050 req_ctx->nbuf += offset; 2051 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src, 2052 offset); 2053 } else 2054 req_ctx->psrc = areq->src; 2055 2056 if (to_hash_later) { 2057 nents = sg_nents_for_len(areq->src, nbytes); 2058 if (nents < 0) { 2059 dev_err(ctx->dev, "Invalid number of src SG.\n"); 2060 return nents; 2061 } 2062 sg_pcopy_to_buffer(areq->src, nents, 2063 req_ctx->buf[(req_ctx->buf_idx + 1) & 1], 2064 to_hash_later, 2065 nbytes - to_hash_later); 2066 } 2067 req_ctx->to_hash_later = to_hash_later; 2068 2069 /* Allocate extended descriptor */ 2070 edesc = ahash_edesc_alloc(areq, nbytes_to_hash); 2071 if (IS_ERR(edesc)) 2072 return PTR_ERR(edesc); 2073 2074 edesc->desc.hdr = ctx->desc_hdr_template; 2075 2076 /* On last one, request SEC to pad; otherwise continue */ 2077 if (req_ctx->last) 2078 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD; 2079 else 2080 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT; 2081 2082 /* request SEC to INIT hash. */ 2083 if (req_ctx->first && !req_ctx->swinit) 2084 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT; 2085 2086 /* When the tfm context has a keylen, it's an HMAC. 2087 * A first or last (ie. not middle) descriptor must request HMAC. 2088 */ 2089 if (ctx->keylen && (req_ctx->first || req_ctx->last)) 2090 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; 2091 2092 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done); 2093 } 2094 2095 static int ahash_update(struct ahash_request *areq) 2096 { 2097 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2098 2099 req_ctx->last = 0; 2100 2101 return ahash_process_req(areq, areq->nbytes); 2102 } 2103 2104 static int ahash_final(struct ahash_request *areq) 2105 { 2106 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2107 2108 req_ctx->last = 1; 2109 2110 return ahash_process_req(areq, 0); 2111 } 2112 2113 static int ahash_finup(struct ahash_request *areq) 2114 { 2115 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2116 2117 req_ctx->last = 1; 2118 2119 return ahash_process_req(areq, areq->nbytes); 2120 } 2121 2122 static int ahash_digest(struct ahash_request *areq) 2123 { 2124 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2125 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 2126 2127 ahash->init(areq); 2128 req_ctx->last = 1; 2129 2130 return ahash_process_req(areq, areq->nbytes); 2131 } 2132 2133 static int ahash_export(struct ahash_request *areq, void *out) 2134 { 2135 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2136 struct talitos_export_state *export = out; 2137 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 2138 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 2139 struct device *dev = ctx->dev; 2140 dma_addr_t dma; 2141 2142 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size, 2143 DMA_FROM_DEVICE); 2144 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE); 2145 2146 memcpy(export->hw_context, req_ctx->hw_context, 2147 req_ctx->hw_context_size); 2148 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf); 2149 export->swinit = req_ctx->swinit; 2150 export->first = req_ctx->first; 2151 export->last = req_ctx->last; 2152 export->to_hash_later = req_ctx->to_hash_later; 2153 export->nbuf = req_ctx->nbuf; 2154 2155 return 0; 2156 } 2157 2158 static int ahash_import(struct ahash_request *areq, const void *in) 2159 { 2160 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2161 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 2162 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 2163 struct device *dev = ctx->dev; 2164 const struct talitos_export_state *export = in; 2165 unsigned int size; 2166 dma_addr_t dma; 2167 2168 memset(req_ctx, 0, sizeof(*req_ctx)); 2169 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 2170 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256 2171 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512; 2172 req_ctx->hw_context_size = size; 2173 memcpy(req_ctx->hw_context, export->hw_context, size); 2174 memcpy(req_ctx->buf[0], export->buf, export->nbuf); 2175 req_ctx->swinit = export->swinit; 2176 req_ctx->first = export->first; 2177 req_ctx->last = export->last; 2178 req_ctx->to_hash_later = export->to_hash_later; 2179 req_ctx->nbuf = export->nbuf; 2180 2181 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size, 2182 DMA_TO_DEVICE); 2183 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE); 2184 2185 return 0; 2186 } 2187 2188 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, 2189 u8 *hash) 2190 { 2191 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2192 2193 struct scatterlist sg[1]; 2194 struct ahash_request *req; 2195 struct crypto_wait wait; 2196 int ret; 2197 2198 crypto_init_wait(&wait); 2199 2200 req = ahash_request_alloc(tfm, GFP_KERNEL); 2201 if (!req) 2202 return -ENOMEM; 2203 2204 /* Keep tfm keylen == 0 during hash of the long key */ 2205 ctx->keylen = 0; 2206 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2207 crypto_req_done, &wait); 2208 2209 sg_init_one(&sg[0], key, keylen); 2210 2211 ahash_request_set_crypt(req, sg, hash, keylen); 2212 ret = crypto_wait_req(crypto_ahash_digest(req), &wait); 2213 2214 ahash_request_free(req); 2215 2216 return ret; 2217 } 2218 2219 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 2220 unsigned int keylen) 2221 { 2222 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2223 struct device *dev = ctx->dev; 2224 unsigned int blocksize = 2225 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2226 unsigned int digestsize = crypto_ahash_digestsize(tfm); 2227 unsigned int keysize = keylen; 2228 u8 hash[SHA512_DIGEST_SIZE]; 2229 int ret; 2230 2231 if (keylen <= blocksize) 2232 memcpy(ctx->key, key, keysize); 2233 else { 2234 /* Must get the hash of the long key */ 2235 ret = keyhash(tfm, key, keylen, hash); 2236 2237 if (ret) { 2238 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 2239 return -EINVAL; 2240 } 2241 2242 keysize = digestsize; 2243 memcpy(ctx->key, hash, digestsize); 2244 } 2245 2246 if (ctx->keylen) 2247 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 2248 2249 ctx->keylen = keysize; 2250 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE); 2251 2252 return 0; 2253 } 2254 2255 2256 struct talitos_alg_template { 2257 u32 type; 2258 u32 priority; 2259 union { 2260 struct skcipher_alg skcipher; 2261 struct ahash_alg hash; 2262 struct aead_alg aead; 2263 } alg; 2264 __be32 desc_hdr_template; 2265 }; 2266 2267 static struct talitos_alg_template driver_algs[] = { 2268 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */ 2269 { .type = CRYPTO_ALG_TYPE_AEAD, 2270 .alg.aead = { 2271 .base = { 2272 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2273 .cra_driver_name = "authenc-hmac-sha1-" 2274 "cbc-aes-talitos", 2275 .cra_blocksize = AES_BLOCK_SIZE, 2276 .cra_flags = CRYPTO_ALG_ASYNC, 2277 }, 2278 .ivsize = AES_BLOCK_SIZE, 2279 .maxauthsize = SHA1_DIGEST_SIZE, 2280 }, 2281 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2282 DESC_HDR_SEL0_AESU | 2283 DESC_HDR_MODE0_AESU_CBC | 2284 DESC_HDR_SEL1_MDEUA | 2285 DESC_HDR_MODE1_MDEU_INIT | 2286 DESC_HDR_MODE1_MDEU_PAD | 2287 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 2288 }, 2289 { .type = CRYPTO_ALG_TYPE_AEAD, 2290 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2291 .alg.aead = { 2292 .base = { 2293 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2294 .cra_driver_name = "authenc-hmac-sha1-" 2295 "cbc-aes-talitos-hsna", 2296 .cra_blocksize = AES_BLOCK_SIZE, 2297 .cra_flags = CRYPTO_ALG_ASYNC, 2298 }, 2299 .ivsize = AES_BLOCK_SIZE, 2300 .maxauthsize = SHA1_DIGEST_SIZE, 2301 }, 2302 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2303 DESC_HDR_SEL0_AESU | 2304 DESC_HDR_MODE0_AESU_CBC | 2305 DESC_HDR_SEL1_MDEUA | 2306 DESC_HDR_MODE1_MDEU_INIT | 2307 DESC_HDR_MODE1_MDEU_PAD | 2308 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 2309 }, 2310 { .type = CRYPTO_ALG_TYPE_AEAD, 2311 .alg.aead = { 2312 .base = { 2313 .cra_name = "authenc(hmac(sha1)," 2314 "cbc(des3_ede))", 2315 .cra_driver_name = "authenc-hmac-sha1-" 2316 "cbc-3des-talitos", 2317 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2318 .cra_flags = CRYPTO_ALG_ASYNC, 2319 }, 2320 .ivsize = DES3_EDE_BLOCK_SIZE, 2321 .maxauthsize = SHA1_DIGEST_SIZE, 2322 .setkey = aead_des3_setkey, 2323 }, 2324 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2325 DESC_HDR_SEL0_DEU | 2326 DESC_HDR_MODE0_DEU_CBC | 2327 DESC_HDR_MODE0_DEU_3DES | 2328 DESC_HDR_SEL1_MDEUA | 2329 DESC_HDR_MODE1_MDEU_INIT | 2330 DESC_HDR_MODE1_MDEU_PAD | 2331 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 2332 }, 2333 { .type = CRYPTO_ALG_TYPE_AEAD, 2334 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2335 .alg.aead = { 2336 .base = { 2337 .cra_name = "authenc(hmac(sha1)," 2338 "cbc(des3_ede))", 2339 .cra_driver_name = "authenc-hmac-sha1-" 2340 "cbc-3des-talitos-hsna", 2341 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2342 .cra_flags = CRYPTO_ALG_ASYNC, 2343 }, 2344 .ivsize = DES3_EDE_BLOCK_SIZE, 2345 .maxauthsize = SHA1_DIGEST_SIZE, 2346 .setkey = aead_des3_setkey, 2347 }, 2348 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2349 DESC_HDR_SEL0_DEU | 2350 DESC_HDR_MODE0_DEU_CBC | 2351 DESC_HDR_MODE0_DEU_3DES | 2352 DESC_HDR_SEL1_MDEUA | 2353 DESC_HDR_MODE1_MDEU_INIT | 2354 DESC_HDR_MODE1_MDEU_PAD | 2355 DESC_HDR_MODE1_MDEU_SHA1_HMAC, 2356 }, 2357 { .type = CRYPTO_ALG_TYPE_AEAD, 2358 .alg.aead = { 2359 .base = { 2360 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2361 .cra_driver_name = "authenc-hmac-sha224-" 2362 "cbc-aes-talitos", 2363 .cra_blocksize = AES_BLOCK_SIZE, 2364 .cra_flags = CRYPTO_ALG_ASYNC, 2365 }, 2366 .ivsize = AES_BLOCK_SIZE, 2367 .maxauthsize = SHA224_DIGEST_SIZE, 2368 }, 2369 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2370 DESC_HDR_SEL0_AESU | 2371 DESC_HDR_MODE0_AESU_CBC | 2372 DESC_HDR_SEL1_MDEUA | 2373 DESC_HDR_MODE1_MDEU_INIT | 2374 DESC_HDR_MODE1_MDEU_PAD | 2375 DESC_HDR_MODE1_MDEU_SHA224_HMAC, 2376 }, 2377 { .type = CRYPTO_ALG_TYPE_AEAD, 2378 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2379 .alg.aead = { 2380 .base = { 2381 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2382 .cra_driver_name = "authenc-hmac-sha224-" 2383 "cbc-aes-talitos-hsna", 2384 .cra_blocksize = AES_BLOCK_SIZE, 2385 .cra_flags = CRYPTO_ALG_ASYNC, 2386 }, 2387 .ivsize = AES_BLOCK_SIZE, 2388 .maxauthsize = SHA224_DIGEST_SIZE, 2389 }, 2390 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2391 DESC_HDR_SEL0_AESU | 2392 DESC_HDR_MODE0_AESU_CBC | 2393 DESC_HDR_SEL1_MDEUA | 2394 DESC_HDR_MODE1_MDEU_INIT | 2395 DESC_HDR_MODE1_MDEU_PAD | 2396 DESC_HDR_MODE1_MDEU_SHA224_HMAC, 2397 }, 2398 { .type = CRYPTO_ALG_TYPE_AEAD, 2399 .alg.aead = { 2400 .base = { 2401 .cra_name = "authenc(hmac(sha224)," 2402 "cbc(des3_ede))", 2403 .cra_driver_name = "authenc-hmac-sha224-" 2404 "cbc-3des-talitos", 2405 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2406 .cra_flags = CRYPTO_ALG_ASYNC, 2407 }, 2408 .ivsize = DES3_EDE_BLOCK_SIZE, 2409 .maxauthsize = SHA224_DIGEST_SIZE, 2410 .setkey = aead_des3_setkey, 2411 }, 2412 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2413 DESC_HDR_SEL0_DEU | 2414 DESC_HDR_MODE0_DEU_CBC | 2415 DESC_HDR_MODE0_DEU_3DES | 2416 DESC_HDR_SEL1_MDEUA | 2417 DESC_HDR_MODE1_MDEU_INIT | 2418 DESC_HDR_MODE1_MDEU_PAD | 2419 DESC_HDR_MODE1_MDEU_SHA224_HMAC, 2420 }, 2421 { .type = CRYPTO_ALG_TYPE_AEAD, 2422 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2423 .alg.aead = { 2424 .base = { 2425 .cra_name = "authenc(hmac(sha224)," 2426 "cbc(des3_ede))", 2427 .cra_driver_name = "authenc-hmac-sha224-" 2428 "cbc-3des-talitos-hsna", 2429 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2430 .cra_flags = CRYPTO_ALG_ASYNC, 2431 }, 2432 .ivsize = DES3_EDE_BLOCK_SIZE, 2433 .maxauthsize = SHA224_DIGEST_SIZE, 2434 .setkey = aead_des3_setkey, 2435 }, 2436 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2437 DESC_HDR_SEL0_DEU | 2438 DESC_HDR_MODE0_DEU_CBC | 2439 DESC_HDR_MODE0_DEU_3DES | 2440 DESC_HDR_SEL1_MDEUA | 2441 DESC_HDR_MODE1_MDEU_INIT | 2442 DESC_HDR_MODE1_MDEU_PAD | 2443 DESC_HDR_MODE1_MDEU_SHA224_HMAC, 2444 }, 2445 { .type = CRYPTO_ALG_TYPE_AEAD, 2446 .alg.aead = { 2447 .base = { 2448 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2449 .cra_driver_name = "authenc-hmac-sha256-" 2450 "cbc-aes-talitos", 2451 .cra_blocksize = AES_BLOCK_SIZE, 2452 .cra_flags = CRYPTO_ALG_ASYNC, 2453 }, 2454 .ivsize = AES_BLOCK_SIZE, 2455 .maxauthsize = SHA256_DIGEST_SIZE, 2456 }, 2457 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2458 DESC_HDR_SEL0_AESU | 2459 DESC_HDR_MODE0_AESU_CBC | 2460 DESC_HDR_SEL1_MDEUA | 2461 DESC_HDR_MODE1_MDEU_INIT | 2462 DESC_HDR_MODE1_MDEU_PAD | 2463 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2464 }, 2465 { .type = CRYPTO_ALG_TYPE_AEAD, 2466 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2467 .alg.aead = { 2468 .base = { 2469 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2470 .cra_driver_name = "authenc-hmac-sha256-" 2471 "cbc-aes-talitos-hsna", 2472 .cra_blocksize = AES_BLOCK_SIZE, 2473 .cra_flags = CRYPTO_ALG_ASYNC, 2474 }, 2475 .ivsize = AES_BLOCK_SIZE, 2476 .maxauthsize = SHA256_DIGEST_SIZE, 2477 }, 2478 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2479 DESC_HDR_SEL0_AESU | 2480 DESC_HDR_MODE0_AESU_CBC | 2481 DESC_HDR_SEL1_MDEUA | 2482 DESC_HDR_MODE1_MDEU_INIT | 2483 DESC_HDR_MODE1_MDEU_PAD | 2484 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2485 }, 2486 { .type = CRYPTO_ALG_TYPE_AEAD, 2487 .alg.aead = { 2488 .base = { 2489 .cra_name = "authenc(hmac(sha256)," 2490 "cbc(des3_ede))", 2491 .cra_driver_name = "authenc-hmac-sha256-" 2492 "cbc-3des-talitos", 2493 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2494 .cra_flags = CRYPTO_ALG_ASYNC, 2495 }, 2496 .ivsize = DES3_EDE_BLOCK_SIZE, 2497 .maxauthsize = SHA256_DIGEST_SIZE, 2498 .setkey = aead_des3_setkey, 2499 }, 2500 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2501 DESC_HDR_SEL0_DEU | 2502 DESC_HDR_MODE0_DEU_CBC | 2503 DESC_HDR_MODE0_DEU_3DES | 2504 DESC_HDR_SEL1_MDEUA | 2505 DESC_HDR_MODE1_MDEU_INIT | 2506 DESC_HDR_MODE1_MDEU_PAD | 2507 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2508 }, 2509 { .type = CRYPTO_ALG_TYPE_AEAD, 2510 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2511 .alg.aead = { 2512 .base = { 2513 .cra_name = "authenc(hmac(sha256)," 2514 "cbc(des3_ede))", 2515 .cra_driver_name = "authenc-hmac-sha256-" 2516 "cbc-3des-talitos-hsna", 2517 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2518 .cra_flags = CRYPTO_ALG_ASYNC, 2519 }, 2520 .ivsize = DES3_EDE_BLOCK_SIZE, 2521 .maxauthsize = SHA256_DIGEST_SIZE, 2522 .setkey = aead_des3_setkey, 2523 }, 2524 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2525 DESC_HDR_SEL0_DEU | 2526 DESC_HDR_MODE0_DEU_CBC | 2527 DESC_HDR_MODE0_DEU_3DES | 2528 DESC_HDR_SEL1_MDEUA | 2529 DESC_HDR_MODE1_MDEU_INIT | 2530 DESC_HDR_MODE1_MDEU_PAD | 2531 DESC_HDR_MODE1_MDEU_SHA256_HMAC, 2532 }, 2533 { .type = CRYPTO_ALG_TYPE_AEAD, 2534 .alg.aead = { 2535 .base = { 2536 .cra_name = "authenc(hmac(sha384),cbc(aes))", 2537 .cra_driver_name = "authenc-hmac-sha384-" 2538 "cbc-aes-talitos", 2539 .cra_blocksize = AES_BLOCK_SIZE, 2540 .cra_flags = CRYPTO_ALG_ASYNC, 2541 }, 2542 .ivsize = AES_BLOCK_SIZE, 2543 .maxauthsize = SHA384_DIGEST_SIZE, 2544 }, 2545 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2546 DESC_HDR_SEL0_AESU | 2547 DESC_HDR_MODE0_AESU_CBC | 2548 DESC_HDR_SEL1_MDEUB | 2549 DESC_HDR_MODE1_MDEU_INIT | 2550 DESC_HDR_MODE1_MDEU_PAD | 2551 DESC_HDR_MODE1_MDEUB_SHA384_HMAC, 2552 }, 2553 { .type = CRYPTO_ALG_TYPE_AEAD, 2554 .alg.aead = { 2555 .base = { 2556 .cra_name = "authenc(hmac(sha384)," 2557 "cbc(des3_ede))", 2558 .cra_driver_name = "authenc-hmac-sha384-" 2559 "cbc-3des-talitos", 2560 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2561 .cra_flags = CRYPTO_ALG_ASYNC, 2562 }, 2563 .ivsize = DES3_EDE_BLOCK_SIZE, 2564 .maxauthsize = SHA384_DIGEST_SIZE, 2565 .setkey = aead_des3_setkey, 2566 }, 2567 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2568 DESC_HDR_SEL0_DEU | 2569 DESC_HDR_MODE0_DEU_CBC | 2570 DESC_HDR_MODE0_DEU_3DES | 2571 DESC_HDR_SEL1_MDEUB | 2572 DESC_HDR_MODE1_MDEU_INIT | 2573 DESC_HDR_MODE1_MDEU_PAD | 2574 DESC_HDR_MODE1_MDEUB_SHA384_HMAC, 2575 }, 2576 { .type = CRYPTO_ALG_TYPE_AEAD, 2577 .alg.aead = { 2578 .base = { 2579 .cra_name = "authenc(hmac(sha512),cbc(aes))", 2580 .cra_driver_name = "authenc-hmac-sha512-" 2581 "cbc-aes-talitos", 2582 .cra_blocksize = AES_BLOCK_SIZE, 2583 .cra_flags = CRYPTO_ALG_ASYNC, 2584 }, 2585 .ivsize = AES_BLOCK_SIZE, 2586 .maxauthsize = SHA512_DIGEST_SIZE, 2587 }, 2588 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2589 DESC_HDR_SEL0_AESU | 2590 DESC_HDR_MODE0_AESU_CBC | 2591 DESC_HDR_SEL1_MDEUB | 2592 DESC_HDR_MODE1_MDEU_INIT | 2593 DESC_HDR_MODE1_MDEU_PAD | 2594 DESC_HDR_MODE1_MDEUB_SHA512_HMAC, 2595 }, 2596 { .type = CRYPTO_ALG_TYPE_AEAD, 2597 .alg.aead = { 2598 .base = { 2599 .cra_name = "authenc(hmac(sha512)," 2600 "cbc(des3_ede))", 2601 .cra_driver_name = "authenc-hmac-sha512-" 2602 "cbc-3des-talitos", 2603 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2604 .cra_flags = CRYPTO_ALG_ASYNC, 2605 }, 2606 .ivsize = DES3_EDE_BLOCK_SIZE, 2607 .maxauthsize = SHA512_DIGEST_SIZE, 2608 .setkey = aead_des3_setkey, 2609 }, 2610 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2611 DESC_HDR_SEL0_DEU | 2612 DESC_HDR_MODE0_DEU_CBC | 2613 DESC_HDR_MODE0_DEU_3DES | 2614 DESC_HDR_SEL1_MDEUB | 2615 DESC_HDR_MODE1_MDEU_INIT | 2616 DESC_HDR_MODE1_MDEU_PAD | 2617 DESC_HDR_MODE1_MDEUB_SHA512_HMAC, 2618 }, 2619 { .type = CRYPTO_ALG_TYPE_AEAD, 2620 .alg.aead = { 2621 .base = { 2622 .cra_name = "authenc(hmac(md5),cbc(aes))", 2623 .cra_driver_name = "authenc-hmac-md5-" 2624 "cbc-aes-talitos", 2625 .cra_blocksize = AES_BLOCK_SIZE, 2626 .cra_flags = CRYPTO_ALG_ASYNC, 2627 }, 2628 .ivsize = AES_BLOCK_SIZE, 2629 .maxauthsize = MD5_DIGEST_SIZE, 2630 }, 2631 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2632 DESC_HDR_SEL0_AESU | 2633 DESC_HDR_MODE0_AESU_CBC | 2634 DESC_HDR_SEL1_MDEUA | 2635 DESC_HDR_MODE1_MDEU_INIT | 2636 DESC_HDR_MODE1_MDEU_PAD | 2637 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2638 }, 2639 { .type = CRYPTO_ALG_TYPE_AEAD, 2640 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2641 .alg.aead = { 2642 .base = { 2643 .cra_name = "authenc(hmac(md5),cbc(aes))", 2644 .cra_driver_name = "authenc-hmac-md5-" 2645 "cbc-aes-talitos-hsna", 2646 .cra_blocksize = AES_BLOCK_SIZE, 2647 .cra_flags = CRYPTO_ALG_ASYNC, 2648 }, 2649 .ivsize = AES_BLOCK_SIZE, 2650 .maxauthsize = MD5_DIGEST_SIZE, 2651 }, 2652 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2653 DESC_HDR_SEL0_AESU | 2654 DESC_HDR_MODE0_AESU_CBC | 2655 DESC_HDR_SEL1_MDEUA | 2656 DESC_HDR_MODE1_MDEU_INIT | 2657 DESC_HDR_MODE1_MDEU_PAD | 2658 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2659 }, 2660 { .type = CRYPTO_ALG_TYPE_AEAD, 2661 .alg.aead = { 2662 .base = { 2663 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2664 .cra_driver_name = "authenc-hmac-md5-" 2665 "cbc-3des-talitos", 2666 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2667 .cra_flags = CRYPTO_ALG_ASYNC, 2668 }, 2669 .ivsize = DES3_EDE_BLOCK_SIZE, 2670 .maxauthsize = MD5_DIGEST_SIZE, 2671 .setkey = aead_des3_setkey, 2672 }, 2673 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP | 2674 DESC_HDR_SEL0_DEU | 2675 DESC_HDR_MODE0_DEU_CBC | 2676 DESC_HDR_MODE0_DEU_3DES | 2677 DESC_HDR_SEL1_MDEUA | 2678 DESC_HDR_MODE1_MDEU_INIT | 2679 DESC_HDR_MODE1_MDEU_PAD | 2680 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2681 }, 2682 { .type = CRYPTO_ALG_TYPE_AEAD, 2683 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA, 2684 .alg.aead = { 2685 .base = { 2686 .cra_name = "authenc(hmac(md5),cbc(des3_ede))", 2687 .cra_driver_name = "authenc-hmac-md5-" 2688 "cbc-3des-talitos-hsna", 2689 .cra_blocksize = DES3_EDE_BLOCK_SIZE, 2690 .cra_flags = CRYPTO_ALG_ASYNC, 2691 }, 2692 .ivsize = DES3_EDE_BLOCK_SIZE, 2693 .maxauthsize = MD5_DIGEST_SIZE, 2694 .setkey = aead_des3_setkey, 2695 }, 2696 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU | 2697 DESC_HDR_SEL0_DEU | 2698 DESC_HDR_MODE0_DEU_CBC | 2699 DESC_HDR_MODE0_DEU_3DES | 2700 DESC_HDR_SEL1_MDEUA | 2701 DESC_HDR_MODE1_MDEU_INIT | 2702 DESC_HDR_MODE1_MDEU_PAD | 2703 DESC_HDR_MODE1_MDEU_MD5_HMAC, 2704 }, 2705 /* SKCIPHER algorithms. */ 2706 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2707 .alg.skcipher = { 2708 .base.cra_name = "ecb(aes)", 2709 .base.cra_driver_name = "ecb-aes-talitos", 2710 .base.cra_blocksize = AES_BLOCK_SIZE, 2711 .base.cra_flags = CRYPTO_ALG_ASYNC, 2712 .min_keysize = AES_MIN_KEY_SIZE, 2713 .max_keysize = AES_MAX_KEY_SIZE, 2714 .setkey = skcipher_aes_setkey, 2715 }, 2716 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2717 DESC_HDR_SEL0_AESU, 2718 }, 2719 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2720 .alg.skcipher = { 2721 .base.cra_name = "cbc(aes)", 2722 .base.cra_driver_name = "cbc-aes-talitos", 2723 .base.cra_blocksize = AES_BLOCK_SIZE, 2724 .base.cra_flags = CRYPTO_ALG_ASYNC, 2725 .min_keysize = AES_MIN_KEY_SIZE, 2726 .max_keysize = AES_MAX_KEY_SIZE, 2727 .ivsize = AES_BLOCK_SIZE, 2728 .setkey = skcipher_aes_setkey, 2729 }, 2730 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2731 DESC_HDR_SEL0_AESU | 2732 DESC_HDR_MODE0_AESU_CBC, 2733 }, 2734 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2735 .alg.skcipher = { 2736 .base.cra_name = "ctr(aes)", 2737 .base.cra_driver_name = "ctr-aes-talitos", 2738 .base.cra_blocksize = 1, 2739 .base.cra_flags = CRYPTO_ALG_ASYNC, 2740 .min_keysize = AES_MIN_KEY_SIZE, 2741 .max_keysize = AES_MAX_KEY_SIZE, 2742 .ivsize = AES_BLOCK_SIZE, 2743 .setkey = skcipher_aes_setkey, 2744 }, 2745 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP | 2746 DESC_HDR_SEL0_AESU | 2747 DESC_HDR_MODE0_AESU_CTR, 2748 }, 2749 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2750 .alg.skcipher = { 2751 .base.cra_name = "ecb(des)", 2752 .base.cra_driver_name = "ecb-des-talitos", 2753 .base.cra_blocksize = DES_BLOCK_SIZE, 2754 .base.cra_flags = CRYPTO_ALG_ASYNC, 2755 .min_keysize = DES_KEY_SIZE, 2756 .max_keysize = DES_KEY_SIZE, 2757 .setkey = skcipher_des_setkey, 2758 }, 2759 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2760 DESC_HDR_SEL0_DEU, 2761 }, 2762 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2763 .alg.skcipher = { 2764 .base.cra_name = "cbc(des)", 2765 .base.cra_driver_name = "cbc-des-talitos", 2766 .base.cra_blocksize = DES_BLOCK_SIZE, 2767 .base.cra_flags = CRYPTO_ALG_ASYNC, 2768 .min_keysize = DES_KEY_SIZE, 2769 .max_keysize = DES_KEY_SIZE, 2770 .ivsize = DES_BLOCK_SIZE, 2771 .setkey = skcipher_des_setkey, 2772 }, 2773 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2774 DESC_HDR_SEL0_DEU | 2775 DESC_HDR_MODE0_DEU_CBC, 2776 }, 2777 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2778 .alg.skcipher = { 2779 .base.cra_name = "ecb(des3_ede)", 2780 .base.cra_driver_name = "ecb-3des-talitos", 2781 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, 2782 .base.cra_flags = CRYPTO_ALG_ASYNC, 2783 .min_keysize = DES3_EDE_KEY_SIZE, 2784 .max_keysize = DES3_EDE_KEY_SIZE, 2785 .setkey = skcipher_des3_setkey, 2786 }, 2787 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2788 DESC_HDR_SEL0_DEU | 2789 DESC_HDR_MODE0_DEU_3DES, 2790 }, 2791 { .type = CRYPTO_ALG_TYPE_SKCIPHER, 2792 .alg.skcipher = { 2793 .base.cra_name = "cbc(des3_ede)", 2794 .base.cra_driver_name = "cbc-3des-talitos", 2795 .base.cra_blocksize = DES3_EDE_BLOCK_SIZE, 2796 .base.cra_flags = CRYPTO_ALG_ASYNC, 2797 .min_keysize = DES3_EDE_KEY_SIZE, 2798 .max_keysize = DES3_EDE_KEY_SIZE, 2799 .ivsize = DES3_EDE_BLOCK_SIZE, 2800 .setkey = skcipher_des3_setkey, 2801 }, 2802 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2803 DESC_HDR_SEL0_DEU | 2804 DESC_HDR_MODE0_DEU_CBC | 2805 DESC_HDR_MODE0_DEU_3DES, 2806 }, 2807 /* AHASH algorithms. */ 2808 { .type = CRYPTO_ALG_TYPE_AHASH, 2809 .alg.hash = { 2810 .halg.digestsize = MD5_DIGEST_SIZE, 2811 .halg.statesize = sizeof(struct talitos_export_state), 2812 .halg.base = { 2813 .cra_name = "md5", 2814 .cra_driver_name = "md5-talitos", 2815 .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 2816 .cra_flags = CRYPTO_ALG_ASYNC, 2817 } 2818 }, 2819 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2820 DESC_HDR_SEL0_MDEUA | 2821 DESC_HDR_MODE0_MDEU_MD5, 2822 }, 2823 { .type = CRYPTO_ALG_TYPE_AHASH, 2824 .alg.hash = { 2825 .halg.digestsize = SHA1_DIGEST_SIZE, 2826 .halg.statesize = sizeof(struct talitos_export_state), 2827 .halg.base = { 2828 .cra_name = "sha1", 2829 .cra_driver_name = "sha1-talitos", 2830 .cra_blocksize = SHA1_BLOCK_SIZE, 2831 .cra_flags = CRYPTO_ALG_ASYNC, 2832 } 2833 }, 2834 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2835 DESC_HDR_SEL0_MDEUA | 2836 DESC_HDR_MODE0_MDEU_SHA1, 2837 }, 2838 { .type = CRYPTO_ALG_TYPE_AHASH, 2839 .alg.hash = { 2840 .halg.digestsize = SHA224_DIGEST_SIZE, 2841 .halg.statesize = sizeof(struct talitos_export_state), 2842 .halg.base = { 2843 .cra_name = "sha224", 2844 .cra_driver_name = "sha224-talitos", 2845 .cra_blocksize = SHA224_BLOCK_SIZE, 2846 .cra_flags = CRYPTO_ALG_ASYNC, 2847 } 2848 }, 2849 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2850 DESC_HDR_SEL0_MDEUA | 2851 DESC_HDR_MODE0_MDEU_SHA224, 2852 }, 2853 { .type = CRYPTO_ALG_TYPE_AHASH, 2854 .alg.hash = { 2855 .halg.digestsize = SHA256_DIGEST_SIZE, 2856 .halg.statesize = sizeof(struct talitos_export_state), 2857 .halg.base = { 2858 .cra_name = "sha256", 2859 .cra_driver_name = "sha256-talitos", 2860 .cra_blocksize = SHA256_BLOCK_SIZE, 2861 .cra_flags = CRYPTO_ALG_ASYNC, 2862 } 2863 }, 2864 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2865 DESC_HDR_SEL0_MDEUA | 2866 DESC_HDR_MODE0_MDEU_SHA256, 2867 }, 2868 { .type = CRYPTO_ALG_TYPE_AHASH, 2869 .alg.hash = { 2870 .halg.digestsize = SHA384_DIGEST_SIZE, 2871 .halg.statesize = sizeof(struct talitos_export_state), 2872 .halg.base = { 2873 .cra_name = "sha384", 2874 .cra_driver_name = "sha384-talitos", 2875 .cra_blocksize = SHA384_BLOCK_SIZE, 2876 .cra_flags = CRYPTO_ALG_ASYNC, 2877 } 2878 }, 2879 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2880 DESC_HDR_SEL0_MDEUB | 2881 DESC_HDR_MODE0_MDEUB_SHA384, 2882 }, 2883 { .type = CRYPTO_ALG_TYPE_AHASH, 2884 .alg.hash = { 2885 .halg.digestsize = SHA512_DIGEST_SIZE, 2886 .halg.statesize = sizeof(struct talitos_export_state), 2887 .halg.base = { 2888 .cra_name = "sha512", 2889 .cra_driver_name = "sha512-talitos", 2890 .cra_blocksize = SHA512_BLOCK_SIZE, 2891 .cra_flags = CRYPTO_ALG_ASYNC, 2892 } 2893 }, 2894 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2895 DESC_HDR_SEL0_MDEUB | 2896 DESC_HDR_MODE0_MDEUB_SHA512, 2897 }, 2898 { .type = CRYPTO_ALG_TYPE_AHASH, 2899 .alg.hash = { 2900 .halg.digestsize = MD5_DIGEST_SIZE, 2901 .halg.statesize = sizeof(struct talitos_export_state), 2902 .halg.base = { 2903 .cra_name = "hmac(md5)", 2904 .cra_driver_name = "hmac-md5-talitos", 2905 .cra_blocksize = MD5_HMAC_BLOCK_SIZE, 2906 .cra_flags = CRYPTO_ALG_ASYNC, 2907 } 2908 }, 2909 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2910 DESC_HDR_SEL0_MDEUA | 2911 DESC_HDR_MODE0_MDEU_MD5, 2912 }, 2913 { .type = CRYPTO_ALG_TYPE_AHASH, 2914 .alg.hash = { 2915 .halg.digestsize = SHA1_DIGEST_SIZE, 2916 .halg.statesize = sizeof(struct talitos_export_state), 2917 .halg.base = { 2918 .cra_name = "hmac(sha1)", 2919 .cra_driver_name = "hmac-sha1-talitos", 2920 .cra_blocksize = SHA1_BLOCK_SIZE, 2921 .cra_flags = CRYPTO_ALG_ASYNC, 2922 } 2923 }, 2924 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2925 DESC_HDR_SEL0_MDEUA | 2926 DESC_HDR_MODE0_MDEU_SHA1, 2927 }, 2928 { .type = CRYPTO_ALG_TYPE_AHASH, 2929 .alg.hash = { 2930 .halg.digestsize = SHA224_DIGEST_SIZE, 2931 .halg.statesize = sizeof(struct talitos_export_state), 2932 .halg.base = { 2933 .cra_name = "hmac(sha224)", 2934 .cra_driver_name = "hmac-sha224-talitos", 2935 .cra_blocksize = SHA224_BLOCK_SIZE, 2936 .cra_flags = CRYPTO_ALG_ASYNC, 2937 } 2938 }, 2939 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2940 DESC_HDR_SEL0_MDEUA | 2941 DESC_HDR_MODE0_MDEU_SHA224, 2942 }, 2943 { .type = CRYPTO_ALG_TYPE_AHASH, 2944 .alg.hash = { 2945 .halg.digestsize = SHA256_DIGEST_SIZE, 2946 .halg.statesize = sizeof(struct talitos_export_state), 2947 .halg.base = { 2948 .cra_name = "hmac(sha256)", 2949 .cra_driver_name = "hmac-sha256-talitos", 2950 .cra_blocksize = SHA256_BLOCK_SIZE, 2951 .cra_flags = CRYPTO_ALG_ASYNC, 2952 } 2953 }, 2954 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2955 DESC_HDR_SEL0_MDEUA | 2956 DESC_HDR_MODE0_MDEU_SHA256, 2957 }, 2958 { .type = CRYPTO_ALG_TYPE_AHASH, 2959 .alg.hash = { 2960 .halg.digestsize = SHA384_DIGEST_SIZE, 2961 .halg.statesize = sizeof(struct talitos_export_state), 2962 .halg.base = { 2963 .cra_name = "hmac(sha384)", 2964 .cra_driver_name = "hmac-sha384-talitos", 2965 .cra_blocksize = SHA384_BLOCK_SIZE, 2966 .cra_flags = CRYPTO_ALG_ASYNC, 2967 } 2968 }, 2969 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2970 DESC_HDR_SEL0_MDEUB | 2971 DESC_HDR_MODE0_MDEUB_SHA384, 2972 }, 2973 { .type = CRYPTO_ALG_TYPE_AHASH, 2974 .alg.hash = { 2975 .halg.digestsize = SHA512_DIGEST_SIZE, 2976 .halg.statesize = sizeof(struct talitos_export_state), 2977 .halg.base = { 2978 .cra_name = "hmac(sha512)", 2979 .cra_driver_name = "hmac-sha512-talitos", 2980 .cra_blocksize = SHA512_BLOCK_SIZE, 2981 .cra_flags = CRYPTO_ALG_ASYNC, 2982 } 2983 }, 2984 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 2985 DESC_HDR_SEL0_MDEUB | 2986 DESC_HDR_MODE0_MDEUB_SHA512, 2987 } 2988 }; 2989 2990 struct talitos_crypto_alg { 2991 struct list_head entry; 2992 struct device *dev; 2993 struct talitos_alg_template algt; 2994 }; 2995 2996 static int talitos_init_common(struct talitos_ctx *ctx, 2997 struct talitos_crypto_alg *talitos_alg) 2998 { 2999 struct talitos_private *priv; 3000 3001 /* update context with ptr to dev */ 3002 ctx->dev = talitos_alg->dev; 3003 3004 /* assign SEC channel to tfm in round-robin fashion */ 3005 priv = dev_get_drvdata(ctx->dev); 3006 ctx->ch = atomic_inc_return(&priv->last_chan) & 3007 (priv->num_channels - 1); 3008 3009 /* copy descriptor header template value */ 3010 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template; 3011 3012 /* select done notification */ 3013 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY; 3014 3015 return 0; 3016 } 3017 3018 static int talitos_cra_init_aead(struct crypto_aead *tfm) 3019 { 3020 struct aead_alg *alg = crypto_aead_alg(tfm); 3021 struct talitos_crypto_alg *talitos_alg; 3022 struct talitos_ctx *ctx = crypto_aead_ctx(tfm); 3023 3024 talitos_alg = container_of(alg, struct talitos_crypto_alg, 3025 algt.alg.aead); 3026 3027 return talitos_init_common(ctx, talitos_alg); 3028 } 3029 3030 static int talitos_cra_init_skcipher(struct crypto_skcipher *tfm) 3031 { 3032 struct skcipher_alg *alg = crypto_skcipher_alg(tfm); 3033 struct talitos_crypto_alg *talitos_alg; 3034 struct talitos_ctx *ctx = crypto_skcipher_ctx(tfm); 3035 3036 talitos_alg = container_of(alg, struct talitos_crypto_alg, 3037 algt.alg.skcipher); 3038 3039 return talitos_init_common(ctx, talitos_alg); 3040 } 3041 3042 static int talitos_cra_init_ahash(struct crypto_tfm *tfm) 3043 { 3044 struct crypto_alg *alg = tfm->__crt_alg; 3045 struct talitos_crypto_alg *talitos_alg; 3046 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 3047 3048 talitos_alg = container_of(__crypto_ahash_alg(alg), 3049 struct talitos_crypto_alg, 3050 algt.alg.hash); 3051 3052 ctx->keylen = 0; 3053 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 3054 sizeof(struct talitos_ahash_req_ctx)); 3055 3056 return talitos_init_common(ctx, talitos_alg); 3057 } 3058 3059 static void talitos_cra_exit(struct crypto_tfm *tfm) 3060 { 3061 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm); 3062 struct device *dev = ctx->dev; 3063 3064 if (ctx->keylen) 3065 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE); 3066 } 3067 3068 /* 3069 * given the alg's descriptor header template, determine whether descriptor 3070 * type and primary/secondary execution units required match the hw 3071 * capabilities description provided in the device tree node. 3072 */ 3073 static int hw_supports(struct device *dev, __be32 desc_hdr_template) 3074 { 3075 struct talitos_private *priv = dev_get_drvdata(dev); 3076 int ret; 3077 3078 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) && 3079 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units); 3080 3081 if (SECONDARY_EU(desc_hdr_template)) 3082 ret = ret && (1 << SECONDARY_EU(desc_hdr_template) 3083 & priv->exec_units); 3084 3085 return ret; 3086 } 3087 3088 static int talitos_remove(struct platform_device *ofdev) 3089 { 3090 struct device *dev = &ofdev->dev; 3091 struct talitos_private *priv = dev_get_drvdata(dev); 3092 struct talitos_crypto_alg *t_alg, *n; 3093 int i; 3094 3095 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 3096 switch (t_alg->algt.type) { 3097 case CRYPTO_ALG_TYPE_SKCIPHER: 3098 crypto_unregister_skcipher(&t_alg->algt.alg.skcipher); 3099 break; 3100 case CRYPTO_ALG_TYPE_AEAD: 3101 crypto_unregister_aead(&t_alg->algt.alg.aead); 3102 break; 3103 case CRYPTO_ALG_TYPE_AHASH: 3104 crypto_unregister_ahash(&t_alg->algt.alg.hash); 3105 break; 3106 } 3107 list_del(&t_alg->entry); 3108 } 3109 3110 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 3111 talitos_unregister_rng(dev); 3112 3113 for (i = 0; i < 2; i++) 3114 if (priv->irq[i]) { 3115 free_irq(priv->irq[i], dev); 3116 irq_dispose_mapping(priv->irq[i]); 3117 } 3118 3119 tasklet_kill(&priv->done_task[0]); 3120 if (priv->irq[1]) 3121 tasklet_kill(&priv->done_task[1]); 3122 3123 return 0; 3124 } 3125 3126 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, 3127 struct talitos_alg_template 3128 *template) 3129 { 3130 struct talitos_private *priv = dev_get_drvdata(dev); 3131 struct talitos_crypto_alg *t_alg; 3132 struct crypto_alg *alg; 3133 3134 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg), 3135 GFP_KERNEL); 3136 if (!t_alg) 3137 return ERR_PTR(-ENOMEM); 3138 3139 t_alg->algt = *template; 3140 3141 switch (t_alg->algt.type) { 3142 case CRYPTO_ALG_TYPE_SKCIPHER: 3143 alg = &t_alg->algt.alg.skcipher.base; 3144 alg->cra_exit = talitos_cra_exit; 3145 t_alg->algt.alg.skcipher.init = talitos_cra_init_skcipher; 3146 t_alg->algt.alg.skcipher.setkey = 3147 t_alg->algt.alg.skcipher.setkey ?: skcipher_setkey; 3148 t_alg->algt.alg.skcipher.encrypt = skcipher_encrypt; 3149 t_alg->algt.alg.skcipher.decrypt = skcipher_decrypt; 3150 break; 3151 case CRYPTO_ALG_TYPE_AEAD: 3152 alg = &t_alg->algt.alg.aead.base; 3153 alg->cra_exit = talitos_cra_exit; 3154 t_alg->algt.alg.aead.init = talitos_cra_init_aead; 3155 t_alg->algt.alg.aead.setkey = t_alg->algt.alg.aead.setkey ?: 3156 aead_setkey; 3157 t_alg->algt.alg.aead.encrypt = aead_encrypt; 3158 t_alg->algt.alg.aead.decrypt = aead_decrypt; 3159 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && 3160 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) { 3161 devm_kfree(dev, t_alg); 3162 return ERR_PTR(-ENOTSUPP); 3163 } 3164 break; 3165 case CRYPTO_ALG_TYPE_AHASH: 3166 alg = &t_alg->algt.alg.hash.halg.base; 3167 alg->cra_init = talitos_cra_init_ahash; 3168 alg->cra_exit = talitos_cra_exit; 3169 t_alg->algt.alg.hash.init = ahash_init; 3170 t_alg->algt.alg.hash.update = ahash_update; 3171 t_alg->algt.alg.hash.final = ahash_final; 3172 t_alg->algt.alg.hash.finup = ahash_finup; 3173 t_alg->algt.alg.hash.digest = ahash_digest; 3174 if (!strncmp(alg->cra_name, "hmac", 4)) 3175 t_alg->algt.alg.hash.setkey = ahash_setkey; 3176 t_alg->algt.alg.hash.import = ahash_import; 3177 t_alg->algt.alg.hash.export = ahash_export; 3178 3179 if (!(priv->features & TALITOS_FTR_HMAC_OK) && 3180 !strncmp(alg->cra_name, "hmac", 4)) { 3181 devm_kfree(dev, t_alg); 3182 return ERR_PTR(-ENOTSUPP); 3183 } 3184 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && 3185 (!strcmp(alg->cra_name, "sha224") || 3186 !strcmp(alg->cra_name, "hmac(sha224)"))) { 3187 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit; 3188 t_alg->algt.desc_hdr_template = 3189 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU | 3190 DESC_HDR_SEL0_MDEUA | 3191 DESC_HDR_MODE0_MDEU_SHA256; 3192 } 3193 break; 3194 default: 3195 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type); 3196 devm_kfree(dev, t_alg); 3197 return ERR_PTR(-EINVAL); 3198 } 3199 3200 alg->cra_module = THIS_MODULE; 3201 if (t_alg->algt.priority) 3202 alg->cra_priority = t_alg->algt.priority; 3203 else 3204 alg->cra_priority = TALITOS_CRA_PRIORITY; 3205 if (has_ftr_sec1(priv)) 3206 alg->cra_alignmask = 3; 3207 else 3208 alg->cra_alignmask = 0; 3209 alg->cra_ctxsize = sizeof(struct talitos_ctx); 3210 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; 3211 3212 t_alg->dev = dev; 3213 3214 return t_alg; 3215 } 3216 3217 static int talitos_probe_irq(struct platform_device *ofdev) 3218 { 3219 struct device *dev = &ofdev->dev; 3220 struct device_node *np = ofdev->dev.of_node; 3221 struct talitos_private *priv = dev_get_drvdata(dev); 3222 int err; 3223 bool is_sec1 = has_ftr_sec1(priv); 3224 3225 priv->irq[0] = irq_of_parse_and_map(np, 0); 3226 if (!priv->irq[0]) { 3227 dev_err(dev, "failed to map irq\n"); 3228 return -EINVAL; 3229 } 3230 if (is_sec1) { 3231 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0, 3232 dev_driver_string(dev), dev); 3233 goto primary_out; 3234 } 3235 3236 priv->irq[1] = irq_of_parse_and_map(np, 1); 3237 3238 /* get the primary irq line */ 3239 if (!priv->irq[1]) { 3240 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0, 3241 dev_driver_string(dev), dev); 3242 goto primary_out; 3243 } 3244 3245 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0, 3246 dev_driver_string(dev), dev); 3247 if (err) 3248 goto primary_out; 3249 3250 /* get the secondary irq line */ 3251 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0, 3252 dev_driver_string(dev), dev); 3253 if (err) { 3254 dev_err(dev, "failed to request secondary irq\n"); 3255 irq_dispose_mapping(priv->irq[1]); 3256 priv->irq[1] = 0; 3257 } 3258 3259 return err; 3260 3261 primary_out: 3262 if (err) { 3263 dev_err(dev, "failed to request primary irq\n"); 3264 irq_dispose_mapping(priv->irq[0]); 3265 priv->irq[0] = 0; 3266 } 3267 3268 return err; 3269 } 3270 3271 static int talitos_probe(struct platform_device *ofdev) 3272 { 3273 struct device *dev = &ofdev->dev; 3274 struct device_node *np = ofdev->dev.of_node; 3275 struct talitos_private *priv; 3276 int i, err; 3277 int stride; 3278 struct resource *res; 3279 3280 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL); 3281 if (!priv) 3282 return -ENOMEM; 3283 3284 INIT_LIST_HEAD(&priv->alg_list); 3285 3286 dev_set_drvdata(dev, priv); 3287 3288 priv->ofdev = ofdev; 3289 3290 spin_lock_init(&priv->reg_lock); 3291 3292 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); 3293 if (!res) 3294 return -ENXIO; 3295 priv->reg = devm_ioremap(dev, res->start, resource_size(res)); 3296 if (!priv->reg) { 3297 dev_err(dev, "failed to of_iomap\n"); 3298 err = -ENOMEM; 3299 goto err_out; 3300 } 3301 3302 /* get SEC version capabilities from device tree */ 3303 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels); 3304 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len); 3305 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units); 3306 of_property_read_u32(np, "fsl,descriptor-types-mask", 3307 &priv->desc_types); 3308 3309 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len || 3310 !priv->exec_units || !priv->desc_types) { 3311 dev_err(dev, "invalid property data in device tree node\n"); 3312 err = -EINVAL; 3313 goto err_out; 3314 } 3315 3316 if (of_device_is_compatible(np, "fsl,sec3.0")) 3317 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; 3318 3319 if (of_device_is_compatible(np, "fsl,sec2.1")) 3320 priv->features |= TALITOS_FTR_HW_AUTH_CHECK | 3321 TALITOS_FTR_SHA224_HWINIT | 3322 TALITOS_FTR_HMAC_OK; 3323 3324 if (of_device_is_compatible(np, "fsl,sec1.0")) 3325 priv->features |= TALITOS_FTR_SEC1; 3326 3327 if (of_device_is_compatible(np, "fsl,sec1.2")) { 3328 priv->reg_deu = priv->reg + TALITOS12_DEU; 3329 priv->reg_aesu = priv->reg + TALITOS12_AESU; 3330 priv->reg_mdeu = priv->reg + TALITOS12_MDEU; 3331 stride = TALITOS1_CH_STRIDE; 3332 } else if (of_device_is_compatible(np, "fsl,sec1.0")) { 3333 priv->reg_deu = priv->reg + TALITOS10_DEU; 3334 priv->reg_aesu = priv->reg + TALITOS10_AESU; 3335 priv->reg_mdeu = priv->reg + TALITOS10_MDEU; 3336 priv->reg_afeu = priv->reg + TALITOS10_AFEU; 3337 priv->reg_rngu = priv->reg + TALITOS10_RNGU; 3338 priv->reg_pkeu = priv->reg + TALITOS10_PKEU; 3339 stride = TALITOS1_CH_STRIDE; 3340 } else { 3341 priv->reg_deu = priv->reg + TALITOS2_DEU; 3342 priv->reg_aesu = priv->reg + TALITOS2_AESU; 3343 priv->reg_mdeu = priv->reg + TALITOS2_MDEU; 3344 priv->reg_afeu = priv->reg + TALITOS2_AFEU; 3345 priv->reg_rngu = priv->reg + TALITOS2_RNGU; 3346 priv->reg_pkeu = priv->reg + TALITOS2_PKEU; 3347 priv->reg_keu = priv->reg + TALITOS2_KEU; 3348 priv->reg_crcu = priv->reg + TALITOS2_CRCU; 3349 stride = TALITOS2_CH_STRIDE; 3350 } 3351 3352 err = talitos_probe_irq(ofdev); 3353 if (err) 3354 goto err_out; 3355 3356 if (has_ftr_sec1(priv)) { 3357 if (priv->num_channels == 1) 3358 tasklet_init(&priv->done_task[0], talitos1_done_ch0, 3359 (unsigned long)dev); 3360 else 3361 tasklet_init(&priv->done_task[0], talitos1_done_4ch, 3362 (unsigned long)dev); 3363 } else { 3364 if (priv->irq[1]) { 3365 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, 3366 (unsigned long)dev); 3367 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, 3368 (unsigned long)dev); 3369 } else if (priv->num_channels == 1) { 3370 tasklet_init(&priv->done_task[0], talitos2_done_ch0, 3371 (unsigned long)dev); 3372 } else { 3373 tasklet_init(&priv->done_task[0], talitos2_done_4ch, 3374 (unsigned long)dev); 3375 } 3376 } 3377 3378 priv->chan = devm_kcalloc(dev, 3379 priv->num_channels, 3380 sizeof(struct talitos_channel), 3381 GFP_KERNEL); 3382 if (!priv->chan) { 3383 dev_err(dev, "failed to allocate channel management space\n"); 3384 err = -ENOMEM; 3385 goto err_out; 3386 } 3387 3388 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); 3389 3390 for (i = 0; i < priv->num_channels; i++) { 3391 priv->chan[i].reg = priv->reg + stride * (i + 1); 3392 if (!priv->irq[1] || !(i & 1)) 3393 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; 3394 3395 spin_lock_init(&priv->chan[i].head_lock); 3396 spin_lock_init(&priv->chan[i].tail_lock); 3397 3398 priv->chan[i].fifo = devm_kcalloc(dev, 3399 priv->fifo_len, 3400 sizeof(struct talitos_request), 3401 GFP_KERNEL); 3402 if (!priv->chan[i].fifo) { 3403 dev_err(dev, "failed to allocate request fifo %d\n", i); 3404 err = -ENOMEM; 3405 goto err_out; 3406 } 3407 3408 atomic_set(&priv->chan[i].submit_count, 3409 -(priv->chfifo_len - 1)); 3410 } 3411 3412 dma_set_mask(dev, DMA_BIT_MASK(36)); 3413 3414 /* reset and initialize the h/w */ 3415 err = init_device(dev); 3416 if (err) { 3417 dev_err(dev, "failed to initialize device\n"); 3418 goto err_out; 3419 } 3420 3421 /* register the RNG, if available */ 3422 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) { 3423 err = talitos_register_rng(dev); 3424 if (err) { 3425 dev_err(dev, "failed to register hwrng: %d\n", err); 3426 goto err_out; 3427 } else 3428 dev_info(dev, "hwrng\n"); 3429 } 3430 3431 /* register crypto algorithms the device supports */ 3432 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 3433 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { 3434 struct talitos_crypto_alg *t_alg; 3435 struct crypto_alg *alg = NULL; 3436 3437 t_alg = talitos_alg_alloc(dev, &driver_algs[i]); 3438 if (IS_ERR(t_alg)) { 3439 err = PTR_ERR(t_alg); 3440 if (err == -ENOTSUPP) 3441 continue; 3442 goto err_out; 3443 } 3444 3445 switch (t_alg->algt.type) { 3446 case CRYPTO_ALG_TYPE_SKCIPHER: 3447 err = crypto_register_skcipher( 3448 &t_alg->algt.alg.skcipher); 3449 alg = &t_alg->algt.alg.skcipher.base; 3450 break; 3451 3452 case CRYPTO_ALG_TYPE_AEAD: 3453 err = crypto_register_aead( 3454 &t_alg->algt.alg.aead); 3455 alg = &t_alg->algt.alg.aead.base; 3456 break; 3457 3458 case CRYPTO_ALG_TYPE_AHASH: 3459 err = crypto_register_ahash( 3460 &t_alg->algt.alg.hash); 3461 alg = &t_alg->algt.alg.hash.halg.base; 3462 break; 3463 } 3464 if (err) { 3465 dev_err(dev, "%s alg registration failed\n", 3466 alg->cra_driver_name); 3467 devm_kfree(dev, t_alg); 3468 } else 3469 list_add_tail(&t_alg->entry, &priv->alg_list); 3470 } 3471 } 3472 if (!list_empty(&priv->alg_list)) 3473 dev_info(dev, "%s algorithms registered in /proc/crypto\n", 3474 (char *)of_get_property(np, "compatible", NULL)); 3475 3476 return 0; 3477 3478 err_out: 3479 talitos_remove(ofdev); 3480 3481 return err; 3482 } 3483 3484 static const struct of_device_id talitos_match[] = { 3485 #ifdef CONFIG_CRYPTO_DEV_TALITOS1 3486 { 3487 .compatible = "fsl,sec1.0", 3488 }, 3489 #endif 3490 #ifdef CONFIG_CRYPTO_DEV_TALITOS2 3491 { 3492 .compatible = "fsl,sec2.0", 3493 }, 3494 #endif 3495 {}, 3496 }; 3497 MODULE_DEVICE_TABLE(of, talitos_match); 3498 3499 static struct platform_driver talitos_driver = { 3500 .driver = { 3501 .name = "talitos", 3502 .of_match_table = talitos_match, 3503 }, 3504 .probe = talitos_probe, 3505 .remove = talitos_remove, 3506 }; 3507 3508 module_platform_driver(talitos_driver); 3509 3510 MODULE_LICENSE("GPL"); 3511 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>"); 3512 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver"); 3513