/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

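/*
 * Note on pointer formats: on SEC2+ a bus address may be up to 36 bits
 * wide, and to_talitos_ptr() above stores it as an eptr:ptr pair.  As an
 * illustrative example (values invented for this comment), the address
 * 0x9_2345_6780 would be stored as eptr = 0x9 and
 * ptr = cpu_to_be32(0x23456780).  SEC1 descriptors have no eptr field,
 * so only the low 32 bits are kept there.
 */
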
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);

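/*
 * Typical caller pattern for talitos_submit() (sketch only; error
 * handling and pointer mapping elided, and "my_done" is a made-up
 * callback name for illustration):
 *
 *	edesc->desc.hdr = ...;		// select EUs and mode
 *	// fill desc.ptr[] with dma-mapped pointers, then:
 *	ret = talitos_submit(dev, ch, &edesc->desc, my_done, req);
 *	if (ret != -EINPROGRESS) {
 *		// unmap and free; request never reached the h/w fifo
 *	}
 *
 * -EINPROGRESS is the only "success" return; completion status is
 * delivered through the callback's err argument.
 */
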
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

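/*
 * Note the inverted interrupt-mask polarity between variants: SEC1 masks
 * a done interrupt by *setting* its IMR bit, so the tasklet above
 * unmasks with clrbits32(); SEC2/3 (below) enable by setting, so the
 * matching tasklet unmasks with setbits32().
 */
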
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

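/*
 * buf/bufnext double-buffering note: ahash_process_req() below always
 * holds back at least one block of data in @buf (unless this is the
 * final update), so that the last descriptor submitted to the SEC can
 * request hardware padding; @bufnext stages the partial block carried
 * over to the next update while the current one is still in flight.
 */
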
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool src_chained;
	bool dst_chained;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};

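/*
 * Rough memory layout of an edesc as allocated by talitos_edesc_alloc()
 * (SEC2+ case; sizes taken from that function, diagram added here for
 * orientation only):
 *
 *	+------------------------------+
 *	| struct talitos_desc desc     |
 *	+------------------------------+ <- dma_link_tbl points here
 *	| link_tbl[src_nents +         |
 *	|          dst_nents + 2]      |
 *	+------------------------------+
 *	| stashed ICV | generated ICV  |
 *	+------------------------------+
 *
 * On SEC1 the same trailing area is instead used as a contiguous bounce
 * buffer (@buf) for the input and output data.
 */
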
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

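/*
 * icv_ool ("ICV out-of-line") means the hardware wrote the ICV into the
 * scratch area after the link tables rather than directly into the
 * destination scatterlist; the callbacks above and below then memcpy it
 * to (encrypt) or compare it against (decrypt) the last bytes of dst.
 */
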
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = memcmp(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}

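/*
 * Worked example for sg_to_link_tbl_offset() (invented numbers, for
 * illustration only): given three dma-mapped segments of 1500, 1500 and
 * 100 bytes, offset = 16 and cryptlen = 3000, the resulting link table
 * is
 *
 *	entry 0: addr = seg0 + 16, len = 1484
 *	entry 1: addr = seg1,      len = 1500
 *	entry 2: addr = seg2,      len = 16, j_extent = RETURN
 *
 * i.e. the first @offset bytes are skipped, at most @cryptlen bytes are
 * described, and DESC_PTR_LNKTBL_RETURN tags the final entry.
 */
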
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ?: 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
					 sg_link_tbl_len,
					 &edesc->link_tbl[tbl_off])) > 1) {
		tbl_off += ret;
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
	} else
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	edesc->icv_ool = false;

	if (sg_count > 1 &&
	    (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) >
	    1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (edesc->src_nents + edesc->dst_nents +
				2) * sizeof(struct talitos_ptr) +
			       authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

/*
 * derive number of elements in scatterlist
 */
static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	*chained = false;
	while (nbytes > 0 && sg) {
		sg_nents++;
		nbytes -= sg->length;
		if (!sg_is_last(sg) && (sg + 1)->length == 0)
			*chained = true;
		sg = sg_next(sg);
	}

	return sg_nents;
}

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	bool src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_count(src, assoclen + cryptlen + authsize,
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, assoclen + cryptlen +
					  (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, assoclen + cryptlen +
					  (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

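/*
 * For decryption, two ICV-checking strategies exist (see below): on
 * hardware with TALITOS_FTR_HW_AUTH_CHECK the descriptor requests an
 * in-line compare (DESC_HDR_MODE1_MDEU_CICV) and the result is read
 * from hdr_lo in ipsec_esp_decrypt_hwauth_done(); otherwise the
 * incoming ICV is stashed first and memcmp()ed against the computed
 * one in ipsec_esp_decrypt_swauth_done().
 */
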
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

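/*
 * SEC1 has no gather/scatter link tables, so for multi-segment
 * scatterlists the helpers below fall back to a contiguous bounce
 * buffer: input data is sg_copy_to_buffer()ed into edesc->buf before
 * submission, and unmap_sg_talitos_ptr() above copies the second half
 * of that buffer back out to the destination scatterlist afterwards.
 */
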
static int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
				 unsigned int len, struct talitos_edesc *edesc,
				 enum dma_data_direction dir,
				 struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = talitos_map_sg(dev, src, edesc->src_nents ? : 1,
					  dir, edesc->src_chained);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}

static void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
				   unsigned int len,
				   struct talitos_edesc *edesc,
				   enum dma_data_direction dir,
				   struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = talitos_map_sg(dev, dst, edesc->dst_nents ? : 1,
					  dir, edesc->dst_chained);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					    sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}

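/*
 * Descriptor pointer usage for the ablkcipher path (as filled in by
 * common_nonsnoop() below):
 *
 *	ptr[0] - unused (zero_entry)
 *	ptr[1] - cipher IV in
 *	ptr[2] - cipher key
 *	ptr[3] - data in
 *	ptr[4] - data out
 *	ptr[5] - IV out
 *	ptr[6] - unused (zero_entry)
 */
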
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

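/*
 * Sketch of how a kernel caller would exercise this path through the
 * crypto API of this era (illustrative only; error handling omitted,
 * my_complete/my_ctx are placeholder names):
 *
 *	struct crypto_ablkcipher *tfm =
 *		crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	struct ablkcipher_request *req =
 *		ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_ablkcipher_setkey(tfm, key, keylen);
 *	ablkcipher_request_set_callback(req, 0, my_complete, my_ctx);
 *	ablkcipher_request_set_crypt(req, src_sg, dst_sg, nbytes, iv);
 *	crypto_ablkcipher_encrypt(req);	// may return -EINPROGRESS
 *
 * The crypto core routes the request to ablkcipher_encrypt() above when
 * this driver's implementation wins priority selection.
 */
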
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

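/*
 * The 64-byte padded_hash block above is the Merkle-Damgard padding of
 * an empty message for the 512-bit-block digests (MD5, SHA-1, SHA-256):
 * a single 0x80 byte followed by zeros, including a zero length field.
 * Since the padding is supplied by software, DESC_HDR_MODE0_MDEU_PAD is
 * cleared so the MDEU does not pad a second time.
 */
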
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

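/*
 * The first/swinit/last request flags map onto MDEU header bits in
 * ahash_process_req()/common_nonsnoop_hash(): a first descriptor
 * without swinit sets DESC_HDR_MODE0_MDEU_INIT (hardware loads the
 * digest's initial values), a non-last one sets _CONT (continue from
 * the saved hw_context), and the last one sets _PAD so the hardware
 * finalizes the message.
 */
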
/*
 * on h/w without explicit sha224 support, we initialize the h/w
 * context manually with the sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w init of context with sha256 values */

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}

static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	bool chained;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		sg_copy_to_buffer(areq->src,
				  sg_count(areq->src, nbytes, &chained),
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else {
		req_ctx->psrc = areq->src;
	}

	if (to_hash_later) {
		int nents = sg_count(areq->src, nbytes, &chained);

		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;
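	/*
	 * Example of the split above, assuming blocksize 64: an update
	 * with nbuf = 10 bytes buffered and nbytes = 150 new bytes gives
	 * nbytes_to_hash = 160, to_hash_later = 160 & 63 = 32, so 128
	 * bytes go to the h/w now and 32 are staged in bufnext; an exact
	 * multiple (to_hash_later == 0) instead holds one full block
	 * back, since a non-final submission must leave data for 'last'.
	 */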
1965 */ 1966 if (ctx->keylen && (req_ctx->first || req_ctx->last)) 1967 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC; 1968 1969 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, 1970 ahash_done); 1971 } 1972 1973 static int ahash_update(struct ahash_request *areq) 1974 { 1975 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1976 1977 req_ctx->last = 0; 1978 1979 return ahash_process_req(areq, areq->nbytes); 1980 } 1981 1982 static int ahash_final(struct ahash_request *areq) 1983 { 1984 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1985 1986 req_ctx->last = 1; 1987 1988 return ahash_process_req(areq, 0); 1989 } 1990 1991 static int ahash_finup(struct ahash_request *areq) 1992 { 1993 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 1994 1995 req_ctx->last = 1; 1996 1997 return ahash_process_req(areq, areq->nbytes); 1998 } 1999 2000 static int ahash_digest(struct ahash_request *areq) 2001 { 2002 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); 2003 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); 2004 2005 ahash->init(areq); 2006 req_ctx->last = 1; 2007 2008 return ahash_process_req(areq, areq->nbytes); 2009 } 2010 2011 struct keyhash_result { 2012 struct completion completion; 2013 int err; 2014 }; 2015 2016 static void keyhash_complete(struct crypto_async_request *req, int err) 2017 { 2018 struct keyhash_result *res = req->data; 2019 2020 if (err == -EINPROGRESS) 2021 return; 2022 2023 res->err = err; 2024 complete(&res->completion); 2025 } 2026 2027 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen, 2028 u8 *hash) 2029 { 2030 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2031 2032 struct scatterlist sg[1]; 2033 struct ahash_request *req; 2034 struct keyhash_result hresult; 2035 int ret; 2036 2037 init_completion(&hresult.completion); 2038 2039 req = ahash_request_alloc(tfm, GFP_KERNEL); 2040 if (!req) 2041 return -ENOMEM; 2042 2043 /* Keep tfm keylen == 0 during hash of the long key */ 2044 ctx->keylen = 0; 2045 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, 2046 keyhash_complete, &hresult); 2047 2048 sg_init_one(&sg[0], key, keylen); 2049 2050 ahash_request_set_crypt(req, sg, hash, keylen); 2051 ret = crypto_ahash_digest(req); 2052 switch (ret) { 2053 case 0: 2054 break; 2055 case -EINPROGRESS: 2056 case -EBUSY: 2057 ret = wait_for_completion_interruptible( 2058 &hresult.completion); 2059 if (!ret) 2060 ret = hresult.err; 2061 break; 2062 default: 2063 break; 2064 } 2065 ahash_request_free(req); 2066 2067 return ret; 2068 } 2069 2070 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key, 2071 unsigned int keylen) 2072 { 2073 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); 2074 unsigned int blocksize = 2075 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2076 unsigned int digestsize = crypto_ahash_digestsize(tfm); 2077 unsigned int keysize = keylen; 2078 u8 hash[SHA512_DIGEST_SIZE]; 2079 int ret; 2080 2081 if (keylen <= blocksize) 2082 memcpy(ctx->key, key, keysize); 2083 else { 2084 /* Must get the hash of the long key */ 2085 ret = keyhash(tfm, key, keylen, hash); 2086 2087 if (ret) { 2088 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 2089 return -EINVAL; 2090 } 2091 2092 keysize = digestsize; 2093 memcpy(ctx->key, hash, digestsize); 2094 } 2095 2096 ctx->keylen = keysize; 2097 2098 return 0; 2099 } 2100 2101 2102 struct talitos_alg_template { 2103 u32 type; 2104 union { 2105 struct crypto_alg crypto; 2106 
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	}
};

struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
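/*
 * Common tfm init: recover the owning talitos_crypto_alg from the
 * generic crypto_alg (ahash algs must be looked up through their
 * ahash_alg wrapper), bind the tfm to our device, pick a channel, and
 * seed the per-tfm descriptor header from the template.
 */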
2645 */ 2646 static int hw_supports(struct device *dev, __be32 desc_hdr_template) 2647 { 2648 struct talitos_private *priv = dev_get_drvdata(dev); 2649 int ret; 2650 2651 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) && 2652 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units); 2653 2654 if (SECONDARY_EU(desc_hdr_template)) 2655 ret = ret && (1 << SECONDARY_EU(desc_hdr_template) 2656 & priv->exec_units); 2657 2658 return ret; 2659 } 2660 2661 static int talitos_remove(struct platform_device *ofdev) 2662 { 2663 struct device *dev = &ofdev->dev; 2664 struct talitos_private *priv = dev_get_drvdata(dev); 2665 struct talitos_crypto_alg *t_alg, *n; 2666 int i; 2667 2668 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) { 2669 switch (t_alg->algt.type) { 2670 case CRYPTO_ALG_TYPE_ABLKCIPHER: 2671 break; 2672 case CRYPTO_ALG_TYPE_AEAD: 2673 crypto_unregister_aead(&t_alg->algt.alg.aead); 2674 case CRYPTO_ALG_TYPE_AHASH: 2675 crypto_unregister_ahash(&t_alg->algt.alg.hash); 2676 break; 2677 } 2678 list_del(&t_alg->entry); 2679 kfree(t_alg); 2680 } 2681 2682 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) 2683 talitos_unregister_rng(dev); 2684 2685 for (i = 0; priv->chan && i < priv->num_channels; i++) 2686 kfree(priv->chan[i].fifo); 2687 2688 kfree(priv->chan); 2689 2690 for (i = 0; i < 2; i++) 2691 if (priv->irq[i]) { 2692 free_irq(priv->irq[i], dev); 2693 irq_dispose_mapping(priv->irq[i]); 2694 } 2695 2696 tasklet_kill(&priv->done_task[0]); 2697 if (priv->irq[1]) 2698 tasklet_kill(&priv->done_task[1]); 2699 2700 iounmap(priv->reg); 2701 2702 kfree(priv); 2703 2704 return 0; 2705 } 2706 2707 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev, 2708 struct talitos_alg_template 2709 *template) 2710 { 2711 struct talitos_private *priv = dev_get_drvdata(dev); 2712 struct talitos_crypto_alg *t_alg; 2713 struct crypto_alg *alg; 2714 2715 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL); 2716 if (!t_alg) 2717 return ERR_PTR(-ENOMEM); 2718 2719 t_alg->algt = *template; 2720 2721 switch (t_alg->algt.type) { 2722 case CRYPTO_ALG_TYPE_ABLKCIPHER: 2723 alg = &t_alg->algt.alg.crypto; 2724 alg->cra_init = talitos_cra_init; 2725 alg->cra_type = &crypto_ablkcipher_type; 2726 alg->cra_ablkcipher.setkey = ablkcipher_setkey; 2727 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt; 2728 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt; 2729 alg->cra_ablkcipher.geniv = "eseqiv"; 2730 break; 2731 case CRYPTO_ALG_TYPE_AEAD: 2732 alg = &t_alg->algt.alg.aead.base; 2733 t_alg->algt.alg.aead.init = talitos_cra_init_aead; 2734 t_alg->algt.alg.aead.setkey = aead_setkey; 2735 t_alg->algt.alg.aead.encrypt = aead_encrypt; 2736 t_alg->algt.alg.aead.decrypt = aead_decrypt; 2737 break; 2738 case CRYPTO_ALG_TYPE_AHASH: 2739 alg = &t_alg->algt.alg.hash.halg.base; 2740 alg->cra_init = talitos_cra_init_ahash; 2741 alg->cra_type = &crypto_ahash_type; 2742 t_alg->algt.alg.hash.init = ahash_init; 2743 t_alg->algt.alg.hash.update = ahash_update; 2744 t_alg->algt.alg.hash.final = ahash_final; 2745 t_alg->algt.alg.hash.finup = ahash_finup; 2746 t_alg->algt.alg.hash.digest = ahash_digest; 2747 t_alg->algt.alg.hash.setkey = ahash_setkey; 2748 2749 if (!(priv->features & TALITOS_FTR_HMAC_OK) && 2750 !strncmp(alg->cra_name, "hmac", 4)) { 2751 kfree(t_alg); 2752 return ERR_PTR(-ENOTSUPP); 2753 } 2754 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) && 2755 (!strcmp(alg->cra_name, "sha224") || 2756 !strcmp(alg->cra_name, "hmac(sha224)"))) { 2757 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit; 
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
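/*
 * talitos_probe() below reads the SEC capabilities from the device
 * tree.  An illustrative (not authoritative) node carrying the four
 * required properties might look like:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.1", "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <11 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 *
 * num-channels must be a power of two, and all four properties must
 * be present, or probe fails with -EINVAL.
 */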
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kcalloc(priv->num_channels,
			     sizeof(struct talitos_channel), GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kcalloc(priv->fifo_len,
					     sizeof(struct talitos_request),
					     GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}
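	/*
	 * Note on the registration loop below: a template whose
	 * allocation fails with -ENOTSUPP (e.g. HMAC when
	 * TALITOS_FTR_HMAC_OK is absent) is silently skipped, while any
	 * other failure unwinds the whole probe through
	 * err_out -> talitos_remove().
	 */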
	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");