1 /* 2 * talitos - Freescale Integrated Security Engine (SEC) device driver 3 * 4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc. 5 * 6 * Scatterlist Crypto API glue code copied from files with the following: 7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au> 8 * 9 * Crypto algorithm registration code copied from hifn driver: 10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru> 11 * All rights reserved. 12 * 13 * This program is free software; you can redistribute it and/or modify 14 * it under the terms of the GNU General Public License as published by 15 * the Free Software Foundation; either version 2 of the License, or 16 * (at your option) any later version. 17 * 18 * This program is distributed in the hope that it will be useful, 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 21 * GNU General Public License for more details. 22 * 23 * You should have received a copy of the GNU General Public License 24 * along with this program; if not, write to the Free Software 25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 26 */ 27 28 #include <linux/kernel.h> 29 #include <linux/module.h> 30 #include <linux/mod_devicetable.h> 31 #include <linux/device.h> 32 #include <linux/interrupt.h> 33 #include <linux/crypto.h> 34 #include <linux/hw_random.h> 35 #include <linux/of_address.h> 36 #include <linux/of_irq.h> 37 #include <linux/of_platform.h> 38 #include <linux/dma-mapping.h> 39 #include <linux/io.h> 40 #include <linux/spinlock.h> 41 #include <linux/rtnetlink.h> 42 #include <linux/slab.h> 43 44 #include <crypto/algapi.h> 45 #include <crypto/aes.h> 46 #include <crypto/des.h> 47 #include <crypto/sha.h> 48 #include <crypto/md5.h> 49 #include <crypto/internal/aead.h> 50 #include <crypto/authenc.h> 51 #include <crypto/skcipher.h> 52 #include <crypto/hash.h> 53 #include <crypto/internal/hash.h> 54 #include <crypto/scatterwalk.h> 
#include "talitos.h"

/*
 * Write a DMA bus address into a h/w descriptor pointer field.
 * SEC2+ carries the upper (36-bit extended) address bits in eptr;
 * SEC1 descriptors have no eptr field.
 */
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

/* store a buffer length in the pointer field matching the h/w layout */
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

/* read back the length previously stored by to_talitos_ptr_len() */
static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

/* SEC1 pointer fields have no j_extent, so only SEC2+ clears it */
static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 *
 * NOTE(review): only the low 32 address bits (ptr->ptr) are passed to
 * dma_unmap_single(); assumes the eptr extension bits are not needed
 * for unmapping on these platforms — confirm against the DMA API usage.
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

/* reset one channel, then re-enable done writeback and done IRQ */
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		/* reset bit is self-clearing; poll until h/w clears it */
		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

/* issue a full software reset (MCR:SWR) and wait for completion */
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ?
		  TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	/* SWR is self-clearing once the reset has completed */
	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		/* second IRQ line present: route channels 1/3 to it */
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		/* SEC1 descriptor starts at hdr1 (hdr is s/w only) */
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	/* publish ->desc only after the entry is fully populated */
	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on.
	 */								\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	/* CDPR holds the bus address of the descriptor being executed */
	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	/* walk the s/w fifo until we find the matching request */
	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case
DESC_HDR_SEL0_MDEUB: 487 dev_err(dev, "MDEUISR 0x%08x_%08x\n", 488 in_be32(priv->reg_mdeu + TALITOS_EUISR), 489 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); 490 break; 491 case DESC_HDR_SEL0_RNG: 492 dev_err(dev, "RNGUISR 0x%08x_%08x\n", 493 in_be32(priv->reg_rngu + TALITOS_ISR), 494 in_be32(priv->reg_rngu + TALITOS_ISR_LO)); 495 break; 496 case DESC_HDR_SEL0_PKEU: 497 dev_err(dev, "PKEUISR 0x%08x_%08x\n", 498 in_be32(priv->reg_pkeu + TALITOS_EUISR), 499 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); 500 break; 501 case DESC_HDR_SEL0_AESU: 502 dev_err(dev, "AESUISR 0x%08x_%08x\n", 503 in_be32(priv->reg_aesu + TALITOS_EUISR), 504 in_be32(priv->reg_aesu + TALITOS_EUISR_LO)); 505 break; 506 case DESC_HDR_SEL0_CRCU: 507 dev_err(dev, "CRCUISR 0x%08x_%08x\n", 508 in_be32(priv->reg_crcu + TALITOS_EUISR), 509 in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); 510 break; 511 case DESC_HDR_SEL0_KEU: 512 dev_err(dev, "KEUISR 0x%08x_%08x\n", 513 in_be32(priv->reg_pkeu + TALITOS_EUISR), 514 in_be32(priv->reg_pkeu + TALITOS_EUISR_LO)); 515 break; 516 } 517 518 switch (desc_hdr & DESC_HDR_SEL1_MASK) { 519 case DESC_HDR_SEL1_MDEUA: 520 case DESC_HDR_SEL1_MDEUB: 521 dev_err(dev, "MDEUISR 0x%08x_%08x\n", 522 in_be32(priv->reg_mdeu + TALITOS_EUISR), 523 in_be32(priv->reg_mdeu + TALITOS_EUISR_LO)); 524 break; 525 case DESC_HDR_SEL1_CRCU: 526 dev_err(dev, "CRCUISR 0x%08x_%08x\n", 527 in_be32(priv->reg_crcu + TALITOS_EUISR), 528 in_be32(priv->reg_crcu + TALITOS_EUISR_LO)); 529 break; 530 } 531 532 for (i = 0; i < 8; i++) 533 dev_err(dev, "DESCBUF 0x%08x_%08x\n", 534 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i), 535 in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i)); 536 } 537 538 /* 539 * recover from error interrupts 540 */ 541 static void talitos_error(struct device *dev, u32 isr, u32 isr_lo) 542 { 543 struct talitos_private *priv = dev_get_drvdata(dev); 544 unsigned int timeout = TALITOS_TIMEOUT; 545 int ch, error, reset_dev = 0; 546 u32 v_lo; 547 bool is_sec1 = 
has_ftr_sec1(priv); 548 int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */ 549 550 for (ch = 0; ch < priv->num_channels; ch++) { 551 /* skip channels without errors */ 552 if (is_sec1) { 553 /* bits 29, 31, 17, 19 */ 554 if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6)))) 555 continue; 556 } else { 557 if (!(isr & (1 << (ch * 2 + 1)))) 558 continue; 559 } 560 561 error = -EINVAL; 562 563 v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO); 564 565 if (v_lo & TALITOS_CCPSR_LO_DOF) { 566 dev_err(dev, "double fetch fifo overflow error\n"); 567 error = -EAGAIN; 568 reset_ch = 1; 569 } 570 if (v_lo & TALITOS_CCPSR_LO_SOF) { 571 /* h/w dropped descriptor */ 572 dev_err(dev, "single fetch fifo overflow error\n"); 573 error = -EAGAIN; 574 } 575 if (v_lo & TALITOS_CCPSR_LO_MDTE) 576 dev_err(dev, "master data transfer error\n"); 577 if (v_lo & TALITOS_CCPSR_LO_SGDLZ) 578 dev_err(dev, is_sec1 ? "pointeur not complete error\n" 579 : "s/g data length zero error\n"); 580 if (v_lo & TALITOS_CCPSR_LO_FPZ) 581 dev_err(dev, is_sec1 ? "parity error\n" 582 : "fetch pointer zero error\n"); 583 if (v_lo & TALITOS_CCPSR_LO_IDH) 584 dev_err(dev, "illegal descriptor header error\n"); 585 if (v_lo & TALITOS_CCPSR_LO_IEU) 586 dev_err(dev, is_sec1 ? 
"static assignment error\n" 587 : "invalid exec unit error\n"); 588 if (v_lo & TALITOS_CCPSR_LO_EU) 589 report_eu_error(dev, ch, current_desc_hdr(dev, ch)); 590 if (!is_sec1) { 591 if (v_lo & TALITOS_CCPSR_LO_GB) 592 dev_err(dev, "gather boundary error\n"); 593 if (v_lo & TALITOS_CCPSR_LO_GRL) 594 dev_err(dev, "gather return/length error\n"); 595 if (v_lo & TALITOS_CCPSR_LO_SB) 596 dev_err(dev, "scatter boundary error\n"); 597 if (v_lo & TALITOS_CCPSR_LO_SRL) 598 dev_err(dev, "scatter return/length error\n"); 599 } 600 601 flush_channel(dev, ch, error, reset_ch); 602 603 if (reset_ch) { 604 reset_channel(dev, ch); 605 } else { 606 setbits32(priv->chan[ch].reg + TALITOS_CCCR, 607 TALITOS2_CCCR_CONT); 608 setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0); 609 while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & 610 TALITOS2_CCCR_CONT) && --timeout) 611 cpu_relax(); 612 if (timeout == 0) { 613 dev_err(dev, "failed to restart channel %d\n", 614 ch); 615 reset_dev = 1; 616 } 617 } 618 } 619 if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) || 620 (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) { 621 if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR)) 622 dev_err(dev, "TEA error: ISR 0x%08x_%08x\n", 623 isr, isr_lo); 624 else 625 dev_err(dev, "done overflow, internal time out, or " 626 "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo); 627 628 /* purge request queues */ 629 for (ch = 0; ch < priv->num_channels; ch++) 630 flush_channel(dev, ch, -EIO, 1); 631 632 /* reset and reinitialize the device */ 633 init_device(dev); 634 } 635 } 636 637 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \ 638 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \ 639 { \ 640 struct device *dev = data; \ 641 struct talitos_private *priv = dev_get_drvdata(dev); \ 642 u32 isr, isr_lo; \ 643 unsigned long flags; \ 644 \ 645 spin_lock_irqsave(&priv->reg_lock, flags); \ 646 isr = in_be32(priv->reg + TALITOS_ISR); \ 647 isr_lo = in_be32(priv->reg + 
			 TALITOS_ISR_LO);				\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE;	\
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	/* poll RNGU output FIFO level, up to 20 x 10us when waiting */
	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	/* NOTE(review): first read is overwritten; only the low word is
	 * returned to the hwrng core — the double read satisfies the
	 * 64-bit access requirement stated above.
	 */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	/* reset the RNGU and wait for the "reset done" status bit */
	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu +
		  TALITOS_EUDSR_LO, 0);

	return 0;
}

/* hook this device's RNGU into the kernel hw_random framework */
static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* note: comma operators below match the original source */
	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

/* per-transform state shared by the aead/ablkcipher/ahash paths */
struct talitos_ctx {
	struct device *dev;		/* SEC device servicing this tfm */
	int ch;				/* channel assigned to this tfm */
	__be32 desc_hdr_template;	/* pre-built descriptor header */
	u8 key[TALITOS_MAX_KEY_SIZE];	/* auth key followed by enc key */
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;		/* total length of key[] in use */
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;		/* ICV length for AEAD */
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

/* per-request state for the ahash path */
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];	/* partial block being accumulated */
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;		/* context initialized in s/w */
	unsigned int first;		/* first hash descriptor of req */
	unsigned int last;		/* final hash descriptor of req */
	unsigned int to_hash_later;
	unsigned int nbuf;		/* bytes currently held in buf[] */
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

/*
 * Split an authenc() key blob into auth + enc parts and store both,
 * concatenated, in ctx->key.
 */
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum
					       dma_data_direction dir,
			  bool chained)
{
	/* chained scatterlists must be mapped one entry at a time */
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

/* undo the entry-at-a-time mapping done for chained scatterlists */
static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

/* unmap src/dst, picking direction based on whether they alias */
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

/* unmap everything ipsec_esp() mapped for one AEAD request */
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

/* decrypt completion when the ICV must be compared in software */
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ?
						      : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

/* decrypt completion when the h/w performed the ICV comparison */
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (sg && n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg), 0);
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	/* cryptlen is <= 0 here: back out entries wholly past the limit */
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	link_tbl_ptr->len = cpu_to_be16(be16_to_cpu(link_tbl_ptr->len)
					+ cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback)
				     (struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	/*
	 * hmac data: assoc data followed by the IV.  If assoc is
	 * multi-segment (or not contiguous with the IV), point ptr[1] at a
	 * link table built past the src/dst tables; otherwise point at the
	 * flat buffer directly.
	 */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma, 0);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc), 0);
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize,
				   DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
	} else {
		sg_link_tbl_len = cryptlen;

		/* hw-checked ICV mode also reads the ICV from src */
		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl, 0);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src), 0);
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* on submit failure, we own the edesc again and must clean up */
	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 *
 * Walks at most nbytes of the list; also reports via *chained whether the
 * list uses chaining (a zero-length entry following a non-last entry).
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0 && sg) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist
						 *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		/* convention throughout: nents == 0 means "single segment" */
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		/* non-contiguous assoc+iv forces a link table (+1 for IV) */
		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		/* undo the assoc/iv mappings done above */
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

/* wrap talitos_edesc_alloc() with the parameters of an AEAD request */
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->assoc, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   ctx->authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

/* AEAD encrypt entry point: single-pass encrypt + HMAC generate */
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, 0, ipsec_esp_encrypt_done);
}

/*
 * AEAD decrypt entry point: use the hardware ICV check when the device
 * supports it for this request shape, otherwise stash the incoming ICV
 * and compare in software on completion.
 */
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int authsize = ctx->authsize;
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	/* cryptlen covers ciphertext only, not the trailing ICV */
	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
	else
icvdata = &edesc->link_tbl[0]; 1453 1454 sg = sg_last(req->src, edesc->src_nents ? : 1); 1455 1456 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - ctx->authsize, 1457 ctx->authsize); 1458 1459 return ipsec_esp(edesc, req, 0, ipsec_esp_decrypt_swauth_done); 1460 } 1461 1462 static int aead_givencrypt(struct aead_givcrypt_request *req) 1463 { 1464 struct aead_request *areq = &req->areq; 1465 struct crypto_aead *authenc = crypto_aead_reqtfm(areq); 1466 struct talitos_ctx *ctx = crypto_aead_ctx(authenc); 1467 struct talitos_edesc *edesc; 1468 1469 /* allocate extended descriptor */ 1470 edesc = aead_edesc_alloc(areq, req->giv, 0, true); 1471 if (IS_ERR(edesc)) 1472 return PTR_ERR(edesc); 1473 1474 /* set encrypt */ 1475 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT; 1476 1477 memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); 1478 /* avoid consecutive packets going out with same IV */ 1479 *(__be64 *)req->giv ^= cpu_to_be64(req->seq); 1480 1481 return ipsec_esp(edesc, areq, req->seq, ipsec_esp_encrypt_done); 1482 } 1483 1484 static int ablkcipher_setkey(struct crypto_ablkcipher *cipher, 1485 const u8 *key, unsigned int keylen) 1486 { 1487 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1488 1489 memcpy(&ctx->key, key, keylen); 1490 ctx->keylen = keylen; 1491 1492 return 0; 1493 } 1494 1495 static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src, 1496 struct scatterlist *dst, unsigned int len, 1497 struct talitos_edesc *edesc) 1498 { 1499 struct talitos_private *priv = dev_get_drvdata(dev); 1500 bool is_sec1 = has_ftr_sec1(priv); 1501 1502 if (is_sec1) { 1503 if (!edesc->src_nents) { 1504 dma_unmap_sg(dev, src, 1, 1505 dst != src ? DMA_TO_DEVICE 1506 : DMA_BIDIRECTIONAL); 1507 } 1508 if (dst && edesc->dst_nents) { 1509 dma_sync_single_for_device(dev, 1510 edesc->dma_link_tbl + len, 1511 len, DMA_FROM_DEVICE); 1512 sg_copy_from_buffer(dst, edesc->dst_nents ? 
: 1, 1513 edesc->buf + len, len); 1514 } else if (dst && dst != src) { 1515 dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE); 1516 } 1517 } else { 1518 talitos_sg_unmap(dev, edesc, src, dst); 1519 } 1520 } 1521 1522 static void common_nonsnoop_unmap(struct device *dev, 1523 struct talitos_edesc *edesc, 1524 struct ablkcipher_request *areq) 1525 { 1526 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE); 1527 1528 unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc); 1529 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE); 1530 unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE); 1531 1532 if (edesc->dma_len) 1533 dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len, 1534 DMA_BIDIRECTIONAL); 1535 } 1536 1537 static void ablkcipher_done(struct device *dev, 1538 struct talitos_desc *desc, void *context, 1539 int err) 1540 { 1541 struct ablkcipher_request *areq = context; 1542 struct talitos_edesc *edesc; 1543 1544 edesc = container_of(desc, struct talitos_edesc, desc); 1545 1546 common_nonsnoop_unmap(dev, edesc, areq); 1547 1548 kfree(edesc); 1549 1550 areq->base.complete(&areq->base, err); 1551 } 1552 1553 int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src, 1554 unsigned int len, struct talitos_edesc *edesc, 1555 enum dma_data_direction dir, struct talitos_ptr *ptr) 1556 { 1557 int sg_count; 1558 struct talitos_private *priv = dev_get_drvdata(dev); 1559 bool is_sec1 = has_ftr_sec1(priv); 1560 1561 to_talitos_ptr_len(ptr, len, is_sec1); 1562 1563 if (is_sec1) { 1564 sg_count = edesc->src_nents ? 
: 1; 1565 1566 if (sg_count == 1) { 1567 dma_map_sg(dev, src, 1, dir); 1568 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); 1569 } else { 1570 sg_copy_to_buffer(src, sg_count, edesc->buf, len); 1571 to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1); 1572 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1573 len, DMA_TO_DEVICE); 1574 } 1575 } else { 1576 to_talitos_ptr_extent_clear(ptr, is_sec1); 1577 1578 sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1, dir, 1579 edesc->src_chained); 1580 1581 if (sg_count == 1) { 1582 to_talitos_ptr(ptr, sg_dma_address(src), is_sec1); 1583 } else { 1584 sg_count = sg_to_link_tbl(src, sg_count, len, 1585 &edesc->link_tbl[0]); 1586 if (sg_count > 1) { 1587 to_talitos_ptr(ptr, edesc->dma_link_tbl, 0); 1588 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; 1589 dma_sync_single_for_device(dev, 1590 edesc->dma_link_tbl, 1591 edesc->dma_len, 1592 DMA_BIDIRECTIONAL); 1593 } else { 1594 /* Only one segment now, so no link tbl needed*/ 1595 to_talitos_ptr(ptr, sg_dma_address(src), 1596 is_sec1); 1597 } 1598 } 1599 } 1600 return sg_count; 1601 } 1602 1603 void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst, 1604 unsigned int len, struct talitos_edesc *edesc, 1605 enum dma_data_direction dir, 1606 struct talitos_ptr *ptr, int sg_count) 1607 { 1608 struct talitos_private *priv = dev_get_drvdata(dev); 1609 bool is_sec1 = has_ftr_sec1(priv); 1610 1611 if (dir != DMA_NONE) 1612 sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? 
: 1, 1613 dir, edesc->dst_chained); 1614 1615 to_talitos_ptr_len(ptr, len, is_sec1); 1616 1617 if (is_sec1) { 1618 if (sg_count == 1) { 1619 if (dir != DMA_NONE) 1620 dma_map_sg(dev, dst, 1, dir); 1621 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); 1622 } else { 1623 to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1); 1624 dma_sync_single_for_device(dev, 1625 edesc->dma_link_tbl + len, 1626 len, DMA_FROM_DEVICE); 1627 } 1628 } else { 1629 to_talitos_ptr_extent_clear(ptr, is_sec1); 1630 1631 if (sg_count == 1) { 1632 to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1); 1633 } else { 1634 struct talitos_ptr *link_tbl_ptr = 1635 &edesc->link_tbl[edesc->src_nents + 1]; 1636 1637 to_talitos_ptr(ptr, edesc->dma_link_tbl + 1638 (edesc->src_nents + 1) * 1639 sizeof(struct talitos_ptr), 0); 1640 ptr->j_extent |= DESC_PTR_LNKTBL_JUMP; 1641 sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr); 1642 dma_sync_single_for_device(dev, edesc->dma_link_tbl, 1643 edesc->dma_len, 1644 DMA_BIDIRECTIONAL); 1645 } 1646 } 1647 } 1648 1649 static int common_nonsnoop(struct talitos_edesc *edesc, 1650 struct ablkcipher_request *areq, 1651 void (*callback) (struct device *dev, 1652 struct talitos_desc *desc, 1653 void *context, int error)) 1654 { 1655 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq); 1656 struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher); 1657 struct device *dev = ctx->dev; 1658 struct talitos_desc *desc = &edesc->desc; 1659 unsigned int cryptlen = areq->nbytes; 1660 unsigned int ivsize = crypto_ablkcipher_ivsize(cipher); 1661 int sg_count, ret; 1662 struct talitos_private *priv = dev_get_drvdata(dev); 1663 bool is_sec1 = has_ftr_sec1(priv); 1664 1665 /* first DWORD empty */ 1666 desc->ptr[0] = zero_entry; 1667 1668 /* cipher iv */ 1669 to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1); 1670 to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1); 1671 to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1); 1672 1673 /* cipher key */ 1674 
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out (DMA_NONE: dst already mapped when src == dst) */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	/* on submit failure, we own the edesc again and must clean up */
	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/* wrap talitos_edesc_alloc() with the parameters of an ablkcipher request */
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, NULL, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

/* ablkcipher encrypt entry point */
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

/* ablkcipher decrypt entry point */
static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

/* unmap everything an ahash descriptor mapped */
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}

/* ahash completion callback: carry partial block forward, then complete */
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);
1794 kfree(edesc); 1795 1796 areq->base.complete(&areq->base, err); 1797 } 1798 1799 /* 1800 * SEC1 doesn't like hashing of 0 sized message, so we do the padding 1801 * ourself and submit a padded block 1802 */ 1803 void talitos_handle_buggy_hash(struct talitos_ctx *ctx, 1804 struct talitos_edesc *edesc, 1805 struct talitos_ptr *ptr) 1806 { 1807 static u8 padded_hash[64] = { 1808 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1809 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1810 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1811 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1812 }; 1813 1814 pr_err_once("Bug in SEC1, padding ourself\n"); 1815 edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD; 1816 map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash), 1817 (char *)padded_hash, DMA_TO_DEVICE); 1818 } 1819 1820 static int common_nonsnoop_hash(struct talitos_edesc *edesc, 1821 struct ahash_request *areq, unsigned int length, 1822 void (*callback) (struct device *dev, 1823 struct talitos_desc *desc, 1824 void *context, int error)) 1825 { 1826 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1827 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1828 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1829 struct device *dev = ctx->dev; 1830 struct talitos_desc *desc = &edesc->desc; 1831 int ret; 1832 struct talitos_private *priv = dev_get_drvdata(dev); 1833 bool is_sec1 = has_ftr_sec1(priv); 1834 1835 /* first DWORD empty */ 1836 desc->ptr[0] = zero_entry; 1837 1838 /* hash context in */ 1839 if (!req_ctx->first || req_ctx->swinit) { 1840 map_single_talitos_ptr(dev, &desc->ptr[1], 1841 req_ctx->hw_context_size, 1842 (char *)req_ctx->hw_context, 1843 DMA_TO_DEVICE); 1844 req_ctx->swinit = 0; 1845 } else { 1846 desc->ptr[1] = zero_entry; 1847 /* Indicate next op is not the first. 
*/ 1848 req_ctx->first = 0; 1849 } 1850 1851 /* HMAC key */ 1852 if (ctx->keylen) 1853 map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen, 1854 (char *)&ctx->key, DMA_TO_DEVICE); 1855 else 1856 desc->ptr[2] = zero_entry; 1857 1858 /* 1859 * data in 1860 */ 1861 map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc, 1862 DMA_TO_DEVICE, &desc->ptr[3]); 1863 1864 /* fifth DWORD empty */ 1865 desc->ptr[4] = zero_entry; 1866 1867 /* hash/HMAC out -or- hash context out */ 1868 if (req_ctx->last) 1869 map_single_talitos_ptr(dev, &desc->ptr[5], 1870 crypto_ahash_digestsize(tfm), 1871 areq->result, DMA_FROM_DEVICE); 1872 else 1873 map_single_talitos_ptr(dev, &desc->ptr[5], 1874 req_ctx->hw_context_size, 1875 req_ctx->hw_context, DMA_FROM_DEVICE); 1876 1877 /* last DWORD empty */ 1878 desc->ptr[6] = zero_entry; 1879 1880 if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0) 1881 talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]); 1882 1883 ret = talitos_submit(dev, ctx->ch, desc, callback, areq); 1884 if (ret != -EINPROGRESS) { 1885 common_nonsnoop_hash_unmap(dev, edesc, areq); 1886 kfree(edesc); 1887 } 1888 return ret; 1889 } 1890 1891 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq, 1892 unsigned int nbytes) 1893 { 1894 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1895 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm); 1896 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1897 1898 return talitos_edesc_alloc(ctx->dev, NULL, req_ctx->psrc, NULL, NULL, 0, 1899 nbytes, 0, 0, 0, areq->base.flags, false); 1900 } 1901 1902 static int ahash_init(struct ahash_request *areq) 1903 { 1904 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); 1905 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1906 1907 /* Initialize the context */ 1908 req_ctx->nbuf = 0; 1909 req_ctx->first = 1; /* first indicates h/w must init its context */ 1910 req_ctx->swinit = 0; /* assume h/w init of context */ 1911 
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

/*
 * common update/final/finup worker: buffer sub-block input, keep a
 * partial (or one whole) block for later unless this is the last op,
 * then build and submit a hash descriptor for the full blocks.
 */
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	/* stash the tail for the next update; restored in ahash_done() */
	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}

/* crypto_ahash .update entry point */
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

/* crypto_ahash .final entry point */
static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

/* crypto_ahash .finup entry point */
static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

/* crypto_ahash .digest entry point: init + one-shot finup */
static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

/* completion rendezvous for the synchronous keyhash() helper */
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	/* ignore intermediate backlog notification; wait for final result */
	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

/*
 * synchronously digest a too-long HMAC key down to digestsize bytes
 * using this very tfm.
 */
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/*
	   Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		/*
		 * NOTE(review): an interruptible wait returns with the
		 * request still in flight if a signal arrives — confirm
		 * whether a non-interruptible wait is required here.
		 */
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}

/*
 * HMAC setkey: keys up to one block are used as-is; longer keys are
 * first digested down to digestsize bytes per the HMAC specification.
 */
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
		crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}


/* one registration template: crypto_alg or ahash_alg plus the SEC
 * descriptor header that implements it
 */
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.
These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha1-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA1_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	/* sha384/sha512 run on the MDEU-B execution unit */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

/* Per-registered-algorithm bookkeeping: a template copy bound to a device. */
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

/* Common tfm init: bind the context to its device, channel and header. */
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	/* hash algs embed crypto_alg inside ahash_alg->halg.base */
	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg =
container_of(alg, struct talitos_crypto_alg,
					     algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	/* num_channels is verified to be a power of 2 at probe time,
	 * so masking wraps the counter cleanly */
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

/* AEAD tfm init: common init plus a random salt for the first IV. */
static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

/* AHASH tfm init: common init plus per-request context sizing. */
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* no key until ahash_setkey() is called (plain hashes never set one) */
	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	/* both the descriptor type and the primary EU must be supported */
	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	/* the secondary EU (e.g. MDEU in IPSEC_ESP) is optional */
	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

/*
 * Tear everything down in reverse probe order.  Also called from the
 * talitos_probe() error path, so every step must tolerate partially
 * initialized state (kfree(NULL), !priv->irq[i], etc.).
 */
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	/* unregister and free every algorithm we managed to register */
	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}

/*
 * Allocate a talitos_crypto_alg for one template entry and fill in the
 * crypto API callbacks appropriate for its algorithm type.
 */
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		/* for hashes, the registered crypto_alg lives in halg.base */
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		/* hmac variants need hardware HMAC support */
		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		/*
		 * Hardware without sha224 h/w init: seed the state in
		 * software and run the sha256 mode on the MDEU-A unit.
		 */
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

alg->cra_module = THIS_MODULE; 2820 alg->cra_priority = TALITOS_CRA_PRIORITY; 2821 alg->cra_alignmask = 0; 2822 alg->cra_ctxsize = sizeof(struct talitos_ctx); 2823 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY; 2824 2825 t_alg->dev = dev; 2826 2827 return t_alg; 2828 } 2829 2830 static int talitos_probe_irq(struct platform_device *ofdev) 2831 { 2832 struct device *dev = &ofdev->dev; 2833 struct device_node *np = ofdev->dev.of_node; 2834 struct talitos_private *priv = dev_get_drvdata(dev); 2835 int err; 2836 bool is_sec1 = has_ftr_sec1(priv); 2837 2838 priv->irq[0] = irq_of_parse_and_map(np, 0); 2839 if (!priv->irq[0]) { 2840 dev_err(dev, "failed to map irq\n"); 2841 return -EINVAL; 2842 } 2843 if (is_sec1) { 2844 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0, 2845 dev_driver_string(dev), dev); 2846 goto primary_out; 2847 } 2848 2849 priv->irq[1] = irq_of_parse_and_map(np, 1); 2850 2851 /* get the primary irq line */ 2852 if (!priv->irq[1]) { 2853 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0, 2854 dev_driver_string(dev), dev); 2855 goto primary_out; 2856 } 2857 2858 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0, 2859 dev_driver_string(dev), dev); 2860 if (err) 2861 goto primary_out; 2862 2863 /* get the secondary irq line */ 2864 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0, 2865 dev_driver_string(dev), dev); 2866 if (err) { 2867 dev_err(dev, "failed to request secondary irq\n"); 2868 irq_dispose_mapping(priv->irq[1]); 2869 priv->irq[1] = 0; 2870 } 2871 2872 return err; 2873 2874 primary_out: 2875 if (err) { 2876 dev_err(dev, "failed to request primary irq\n"); 2877 irq_dispose_mapping(priv->irq[0]); 2878 priv->irq[0] = 0; 2879 } 2880 2881 return err; 2882 } 2883 2884 static int talitos_probe(struct platform_device *ofdev) 2885 { 2886 struct device *dev = &ofdev->dev; 2887 struct device_node *np = ofdev->dev.of_node; 2888 struct talitos_private *priv; 2889 const unsigned int *prop; 2890 int i, err; 2891 int 
stride; 2892 2893 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL); 2894 if (!priv) 2895 return -ENOMEM; 2896 2897 INIT_LIST_HEAD(&priv->alg_list); 2898 2899 dev_set_drvdata(dev, priv); 2900 2901 priv->ofdev = ofdev; 2902 2903 spin_lock_init(&priv->reg_lock); 2904 2905 priv->reg = of_iomap(np, 0); 2906 if (!priv->reg) { 2907 dev_err(dev, "failed to of_iomap\n"); 2908 err = -ENOMEM; 2909 goto err_out; 2910 } 2911 2912 /* get SEC version capabilities from device tree */ 2913 prop = of_get_property(np, "fsl,num-channels", NULL); 2914 if (prop) 2915 priv->num_channels = *prop; 2916 2917 prop = of_get_property(np, "fsl,channel-fifo-len", NULL); 2918 if (prop) 2919 priv->chfifo_len = *prop; 2920 2921 prop = of_get_property(np, "fsl,exec-units-mask", NULL); 2922 if (prop) 2923 priv->exec_units = *prop; 2924 2925 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL); 2926 if (prop) 2927 priv->desc_types = *prop; 2928 2929 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len || 2930 !priv->exec_units || !priv->desc_types) { 2931 dev_err(dev, "invalid property data in device tree node\n"); 2932 err = -EINVAL; 2933 goto err_out; 2934 } 2935 2936 if (of_device_is_compatible(np, "fsl,sec3.0")) 2937 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT; 2938 2939 if (of_device_is_compatible(np, "fsl,sec2.1")) 2940 priv->features |= TALITOS_FTR_HW_AUTH_CHECK | 2941 TALITOS_FTR_SHA224_HWINIT | 2942 TALITOS_FTR_HMAC_OK; 2943 2944 if (of_device_is_compatible(np, "fsl,sec1.0")) 2945 priv->features |= TALITOS_FTR_SEC1; 2946 2947 if (of_device_is_compatible(np, "fsl,sec1.2")) { 2948 priv->reg_deu = priv->reg + TALITOS12_DEU; 2949 priv->reg_aesu = priv->reg + TALITOS12_AESU; 2950 priv->reg_mdeu = priv->reg + TALITOS12_MDEU; 2951 stride = TALITOS1_CH_STRIDE; 2952 } else if (of_device_is_compatible(np, "fsl,sec1.0")) { 2953 priv->reg_deu = priv->reg + TALITOS10_DEU; 2954 priv->reg_aesu = priv->reg + TALITOS10_AESU; 2955 priv->reg_mdeu = priv->reg + 
TALITOS10_MDEU; 2956 priv->reg_afeu = priv->reg + TALITOS10_AFEU; 2957 priv->reg_rngu = priv->reg + TALITOS10_RNGU; 2958 priv->reg_pkeu = priv->reg + TALITOS10_PKEU; 2959 stride = TALITOS1_CH_STRIDE; 2960 } else { 2961 priv->reg_deu = priv->reg + TALITOS2_DEU; 2962 priv->reg_aesu = priv->reg + TALITOS2_AESU; 2963 priv->reg_mdeu = priv->reg + TALITOS2_MDEU; 2964 priv->reg_afeu = priv->reg + TALITOS2_AFEU; 2965 priv->reg_rngu = priv->reg + TALITOS2_RNGU; 2966 priv->reg_pkeu = priv->reg + TALITOS2_PKEU; 2967 priv->reg_keu = priv->reg + TALITOS2_KEU; 2968 priv->reg_crcu = priv->reg + TALITOS2_CRCU; 2969 stride = TALITOS2_CH_STRIDE; 2970 } 2971 2972 err = talitos_probe_irq(ofdev); 2973 if (err) 2974 goto err_out; 2975 2976 if (of_device_is_compatible(np, "fsl,sec1.0")) { 2977 tasklet_init(&priv->done_task[0], talitos1_done_4ch, 2978 (unsigned long)dev); 2979 } else { 2980 if (!priv->irq[1]) { 2981 tasklet_init(&priv->done_task[0], talitos2_done_4ch, 2982 (unsigned long)dev); 2983 } else { 2984 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2, 2985 (unsigned long)dev); 2986 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3, 2987 (unsigned long)dev); 2988 } 2989 } 2990 2991 priv->chan = kzalloc(sizeof(struct talitos_channel) * 2992 priv->num_channels, GFP_KERNEL); 2993 if (!priv->chan) { 2994 dev_err(dev, "failed to allocate channel management space\n"); 2995 err = -ENOMEM; 2996 goto err_out; 2997 } 2998 2999 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len); 3000 3001 for (i = 0; i < priv->num_channels; i++) { 3002 priv->chan[i].reg = priv->reg + stride * (i + 1); 3003 if (!priv->irq[1] || !(i & 1)) 3004 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET; 3005 3006 spin_lock_init(&priv->chan[i].head_lock); 3007 spin_lock_init(&priv->chan[i].tail_lock); 3008 3009 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) * 3010 priv->fifo_len, GFP_KERNEL); 3011 if (!priv->chan[i].fifo) { 3012 dev_err(dev, "failed to allocate request fifo %d\n", i); 3013 err = 
-ENOMEM; 3014 goto err_out; 3015 } 3016 3017 atomic_set(&priv->chan[i].submit_count, 3018 -(priv->chfifo_len - 1)); 3019 } 3020 3021 dma_set_mask(dev, DMA_BIT_MASK(36)); 3022 3023 /* reset and initialize the h/w */ 3024 err = init_device(dev); 3025 if (err) { 3026 dev_err(dev, "failed to initialize device\n"); 3027 goto err_out; 3028 } 3029 3030 /* register the RNG, if available */ 3031 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) { 3032 err = talitos_register_rng(dev); 3033 if (err) { 3034 dev_err(dev, "failed to register hwrng: %d\n", err); 3035 goto err_out; 3036 } else 3037 dev_info(dev, "hwrng\n"); 3038 } 3039 3040 /* register crypto algorithms the device supports */ 3041 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { 3042 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) { 3043 struct talitos_crypto_alg *t_alg; 3044 char *name = NULL; 3045 3046 t_alg = talitos_alg_alloc(dev, &driver_algs[i]); 3047 if (IS_ERR(t_alg)) { 3048 err = PTR_ERR(t_alg); 3049 if (err == -ENOTSUPP) 3050 continue; 3051 goto err_out; 3052 } 3053 3054 switch (t_alg->algt.type) { 3055 case CRYPTO_ALG_TYPE_ABLKCIPHER: 3056 case CRYPTO_ALG_TYPE_AEAD: 3057 err = crypto_register_alg( 3058 &t_alg->algt.alg.crypto); 3059 name = t_alg->algt.alg.crypto.cra_driver_name; 3060 break; 3061 case CRYPTO_ALG_TYPE_AHASH: 3062 err = crypto_register_ahash( 3063 &t_alg->algt.alg.hash); 3064 name = 3065 t_alg->algt.alg.hash.halg.base.cra_driver_name; 3066 break; 3067 } 3068 if (err) { 3069 dev_err(dev, "%s alg registration failed\n", 3070 name); 3071 kfree(t_alg); 3072 } else 3073 list_add_tail(&t_alg->entry, &priv->alg_list); 3074 } 3075 } 3076 if (!list_empty(&priv->alg_list)) 3077 dev_info(dev, "%s algorithms registered in /proc/crypto\n", 3078 (char *)of_get_property(np, "compatible", NULL)); 3079 3080 return 0; 3081 3082 err_out: 3083 talitos_remove(ofdev); 3084 3085 return err; 3086 } 3087 3088 static const struct of_device_id talitos_match[] = { 3089 #ifdef CONFIG_CRYPTO_DEV_TALITOS1 3090 { 3091 
.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

/* Platform driver glue: bind to any SEC node matched above. */
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");