/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}
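
/*
 * Usage sketch (illustrative): the helpers above hide the pointer layout
 * difference between SEC1 and SEC2+, so mapping e.g. a key into a
 * descriptor pointer slot is a single call:
 *
 *	map_single_talitos_ptr(dev, &desc->ptr[0], keylen, key,
 *			       DMA_TO_DEVICE);
 *
 * the 36-bit extended address (eptr) is only filled in on SEC2+.
 */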

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
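
/*
 * Usage sketch (illustrative; my_done_cb, my_ctx and unwind are
 * hypothetical names, not part of this driver): callers treat
 * -EINPROGRESS as success and get the final status through the callback:
 *
 *	ret = talitos_submit(dev, ch, desc, my_done_cb, my_ctx);
 *	if (ret != -EINPROGRESS)
 *		unwind();	(-EAGAIN means the channel fifo was full)
 *
 * my_done_cb() must then check the err argument and the DESC_HDR_DONE
 * feedback bits in the descriptor header, as the kernel-doc above notes.
 */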

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
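
/*
 * Descriptive note: SEC1 and SEC2+ use opposite IMR polarity, so the
 * SEC1 tasklet above re-enables (unmasks) done interrupts with
 * clrbits32() while the SEC2+ variant below does the same with
 * setbits32(); the interrupt handlers further down do the inverse to
 * mask them.
 */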

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY	3000
#define TALITOS_MAX_KEY_SIZE	96
#define TALITOS_MAX_IV_LENGTH	16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
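
/*
 * Descriptive note: after aead_setkey(), ctx->key holds both keys back
 * to back:
 *
 *	| authkey (authkeylen bytes) | enckey (enckeylen bytes) |
 *
 * ipsec_esp() later points desc->ptr[0] at the start of this buffer and
 * desc->ptr[3] at &ctx->key[authkeylen], so no per-request key copy is
 * needed.
 */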

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
		}
	} else
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
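
/*
 * Descriptive note: on SEC2+ the dma-mapped area of the edesc is laid
 * out as link_tbl[0 .. src_nents + dst_nents + 1] for gather/scatter
 * entries followed by two ICV slots (stashed and generated), which is
 * why the callbacks locate an out-of-line ICV at
 * &edesc->link_tbl[edesc->src_nents + edesc->dst_nents + 2].
 */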

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
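
/*
 * Illustrative example: a three-segment scatterlist converted by
 * sg_to_link_tbl_offset() becomes
 *
 *	{ addr0, len0, j_extent = 0 }
 *	{ addr1, len1, j_extent = 0 }
 *	{ addr2, len2, j_extent = DESC_PTR_LNKTBL_RETURN }
 *
 * with the RETURN tag marking the final entry of the table for the
 * hardware's link-table walker.
 */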

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
			sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents +
				2) * sizeof(struct talitos_ptr) +
			       authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
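
/*
 * Layout of the seven-pointer ipsec_esp descriptor built above
 * (descriptive summary of the code, not new behavior):
 *
 *	ptr[0] hmac key			ptr[4] cipher in (+ ICV extent)
 *	ptr[1] hmac/assoc data		ptr[5] cipher out (+ ICV extent)
 *	ptr[2] cipher iv		ptr[6] iv out
 *	ptr[3] cipher key
 */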

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
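
/*
 * Descriptive note: SEC1 has no scatter/gather engine, so the two
 * map_sg_* helpers below fall back to bouncing multi-segment
 * scatterlists through the contiguous edesc->buf area (input data in
 * the first half, output data at offset len), while SEC2+ builds h/w
 * link tables instead.
 */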

int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed */
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					    sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
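
/*
 * Descriptive note: common_nonsnoop() below builds the ablkcipher
 * descriptor as
 *
 *	ptr[0] empty		ptr[3] cipher in	ptr[6] empty
 *	ptr[1] iv in		ptr[4] cipher out
 *	ptr[2] cipher key	ptr[5] iv out
 */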

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourselves\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
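
/*
 * Descriptive note: common_nonsnoop_hash() below builds the hash
 * descriptor as
 *
 *	ptr[0] empty		ptr[3] data in		ptr[6] empty
 *	ptr[1] context in	ptr[4] empty
 *	ptr[2] HMAC key		ptr[5] digest or context out
 *
 * ptr[1] carries the running hw_context between update steps; on the
 * last step, ptr[5] receives the final digest instead of the context.
 */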

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}
/*
 * On h/w without explicit sha224 support, we initialize the h/w context
 * manually with the sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	/* prevent h/w from initializing the context with sha256 values */
	req_ctx->swinit = 1;

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/*
	 * When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
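/*
 * Worked example of the split above (hedged, assuming a 64-byte block):
 * with 10 bytes already buffered, an update of 100 bytes gives
 * nbytes_to_hash = 110 and to_hash_later = 110 & 63 = 46, so one full
 * 64-byte block is hashed now and 46 bytes are copied to bufnext for the
 * next update/final/finup.  When the running total is an exact multiple
 * of the block size, one whole block is still held back, presumably
 * because it may turn out to be the final block, which must only be
 * submitted with the request flagged "last" (i.e. with the pad bit set).
 */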
1948 */ 1949 if (ctx->keylen && (req_ctx->first || req_ctx->last)) 1950 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; 1951 1952 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, 1953 ahash_done); 1954 } 1955 1956 static int ahash_update(struct ahash_request *areq) 1957 { 1958 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1959 1960 req_ctx->last = 0; 1961 1962 return ahash_process_req(areq, areq->nbytes); 1963 } 1964 1965 static int ahash_final(struct ahash_request *areq) 1966 { 1967 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1968 1969 req_ctx->last = 1; 1970 1971 return ahash_process_req(areq, 0); 1972 } 1973 1974 static int ahash_finup(struct ahash_request *areq) 1975 { 1976 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1977 1978 req_ctx->last = 1; 1979 1980 return ahash_process_req(areq, areq->nbytes); 1981 } 1982 1983 static int ahash_digest(struct ahash_request *areq) 1984 { 1985 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1986 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 1987 1988 ahash->init(areq); 1989 req_ctx->last = 1; 1990 1991 return ahash_process_req(areq, areq->nbytes); 1992 } 1993 1994 static int ahash_export(struct ahash_request *areq, void *out) 1995 { 1996 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1997 struct talitos_export_state *export = out; 1998 1999 memcpy(export->hw_context, req_ctx->hw_context, 2000 req_ctx->hw_context_size); 2001 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf); 2002 export->swinit = req_ctx->swinit; 2003 export->first = req_ctx->first; 2004 export->last = req_ctx->last; 2005 export->to_hash_later = req_ctx->to_hash_later; 2006 export->nbuf = req_ctx->nbuf; 2007 2008 return 0; 2009 } 2010 2011 static int ahash_import(struct ahash_request *areq, const void *in) 2012 { 2013 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2014 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 2015 const struct talitos_export_state *export = in; 2016 2017 memset(req_ctx, 0, sizeof(*req_ctx)); 2018 req_ctx->hw_context_size = 2019 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE) 2020 ? 
static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	const struct talitos_export_state *export = in;

	memset(req_ctx, 0, sizeof(*req_ctx));
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	memcpy(req_ctx->hw_context, export->hw_context,
	       req_ctx->hw_context_size);
	memcpy(req_ctx->buf, export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	return 0;
}

struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key,
		   unsigned int keylen, u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}

static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);
		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}

struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
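/*
 * Each entry below couples a crypto API algorithm with the descriptor
 * header template that selects the h/w path for it.  As a hedged reading
 * of the first AEAD entry: DESC_HDR_TYPE_IPSEC_ESP picks the single-pass
 * ipsec_esp descriptor, DESC_HDR_SEL0_AESU | DESC_HDR_MODE0_AESU_CBC run
 * AES-CBC on the primary execution unit, and DESC_HDR_SEL1_MDEUA together
 * with the MDEU INIT/PAD/SHA1_HMAC mode bits run HMAC-SHA1 on the
 * secondary execution unit.
 */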
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
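/*
 * Channel assignment in talitos_init_common() below is round-robin:
 * atomic_inc_return() advances a per-device counter and the mask wraps it,
 * which relies on num_channels being a power of 2 (talitos_probe()
 * validates this with is_power_of_2()).  E.g. with 4 channels, successive
 * tfm initializations map to channels 1, 2, 3, 0, 1, ...
 */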
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
2776 */ 2777 static int hw_supports(struct device *dev, __be32 desc_hdr_template) 2778 { 2779 struct talitos_private *priv = dev_get_drvdata(dev); 2780 int ret; 2781 2782 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) && 2783 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units); 2784 2785 if (SECONDARY_EU(desc_hdr_template)) 2786 ret = ret && (1 << SECONDARY_EU(desc_hdr_template) 2787 & priv->exec_units); 2788 2789 return ret; 2790 } 2791 2792 static int talitos_remove(struct platform_device *ofdev) 2793 { 2794 struct device *dev = &ofdev->dev; 2795 struct talitos_private *priv = dev_get_drvdata(dev); 2796 struct talitos_crypto_alg *t_alg, *n; 2797 int i; 2798 2799 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 2800 switch (t_alg->algt.type) { 2801 case CRYPTO_ALG_TYPE_ABLKCIPHER: 2802 break; 2803 case CRYPTO_ALG_TYPE_AEAD: 2804 crypto_unregister_aead(&t_alg->algt.alg.aead); 2805 case CRYPTO_ALG_TYPE_AHASH: 2806 crypto_unregister_ahash(&t_alg->algt.alg.hash); 2807 break; 2808 } 2809 list_del(&t_alg->entry); 2810 kfree(t_alg); 2811 } 2812 2813 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 2814 talitos_unregister_rng(dev); 2815 2816 for (i = 0; priv->chan && i < priv->num_channels; i++) 2817 kfree(priv->chan[i].fifo); 2818 2819 kfree(priv->chan); 2820 2821 for (i = 0; i < 2; i++) 2822 if (priv->irq[i]) { 2823 free_irq(priv->irq[i], dev); 2824 irq_dispose_mapping(priv->irq[i]); 2825 } 2826 2827 tasklet_kill(&priv->done_task[0]); 2828 if (priv->irq[1]) 2829 tasklet_kill(&priv->done_task[1]); 2830 2831 iounmap(priv->reg); 2832 2833 kfree(priv); 2834 2835 return 0; 2836 } 2837 2838 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, 2839 struct talitos_alg_template 2840 *template) 2841 { 2842 struct talitos_private *priv = dev_get_drvdata(dev); 2843 struct talitos_crypto_alg *t_alg; 2844 struct crypto_alg *alg; 2845 2846 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL); 2847 if (!t_alg) 2848 return ERR_PTR(-ENOMEM); 2849 2850 t_alg->algt = *template; 2851 2852 switch (t_alg->algt.type) { 2853 case CRYPTO_ALG_TYPE_ABLKCIPHER: 2854 alg = &t_alg->algt.alg.crypto; 2855 alg->cra_init = talitos_cra_init; 2856 alg->cra_type = &crypto_ablkcipher_type; 2857 alg->cra_ablkcipher.setkey = ablkcipher_setkey; 2858 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; 2859 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt; 2860 alg->cra_ablkcipher.geniv = "eseqiv"; 2861 break; 2862 case CRYPTO_ALG_TYPE_AEAD: 2863 alg = &t_alg->algt.alg.aead.base; 2864 t_alg->algt.alg.aead.init = talitos_cra_init_aead; 2865 t_alg->algt.alg.aead.setkey = aead_setkey; 2866 t_alg->algt.alg.aead.encrypt = aead_encrypt; 2867 t_alg->algt.alg.aead.decrypt = aead_decrypt; 2868 break; 2869 case CRYPTO_ALG_TYPE_AHASH: 2870 alg = &t_alg->algt.alg.hash.halg.base; 2871 alg->cra_init = talitos_cra_init_ahash; 2872 alg->cra_type = &crypto_ahash_type; 2873 t_alg->algt.alg.hash.init = ahash_init; 2874 t_alg->algt.alg.hash.update = ahash_update; 2875 t_alg->algt.alg.hash.final = ahash_final; 2876 t_alg->algt.alg.hash.finup = ahash_finup; 2877 t_alg->algt.alg.hash.digest = ahash_digest; 2878 t_alg->algt.alg.hash.setkey = ahash_setkey; 2879 t_alg->algt.alg.hash.import = ahash_import; 2880 t_alg->algt.alg.hash.export = ahash_export; 2881 2882 if (!(priv->features & TALITOS_FTR_HMAC_OK) && 2883 !strncmp(alg->cra_name, "hmac", 4)) { 2884 kfree(t_alg); 2885 return ERR_PTR(-ENOTSUPP); 2886 } 2887 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && 2888 (!strcmp(alg->cra_name, "sha224") || 2889 
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}
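	/*
	 * For reference, a typical node this parsing matches looks roughly
	 * like the following (hedged example -- the reg, interrupt and mask
	 * values differ per SoC; consult the board dts files for real ones):
	 *
	 *	crypto@30000 {
	 *		compatible = "fsl,sec2.0";
	 *		reg = <0x30000 0x10000>;
	 *		interrupts = <11 0x8>;
	 *		fsl,num-channels = <4>;
	 *		fsl,channel-fifo-len = <24>;
	 *		fsl,exec-units-mask = <0x7e>;
	 *		fsl,descriptor-types-mask = <0x01010ebf>;
	 *	};
	 */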
	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}
	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");