1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Cryptographic API. 4 * 5 * Support for ATMEL AES HW acceleration. 6 * 7 * Copyright (c) 2012 Eukréa Electromatique - ATMEL 8 * Author: Nicolas Royer <nicolas@eukrea.com> 9 * 10 * Some ideas are from omap-aes.c driver. 11 */ 12 13 14 #include <linux/kernel.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/err.h> 18 #include <linux/clk.h> 19 #include <linux/io.h> 20 #include <linux/hw_random.h> 21 #include <linux/platform_device.h> 22 23 #include <linux/device.h> 24 #include <linux/init.h> 25 #include <linux/errno.h> 26 #include <linux/interrupt.h> 27 #include <linux/irq.h> 28 #include <linux/scatterlist.h> 29 #include <linux/dma-mapping.h> 30 #include <linux/of_device.h> 31 #include <linux/delay.h> 32 #include <linux/crypto.h> 33 #include <crypto/scatterwalk.h> 34 #include <crypto/algapi.h> 35 #include <crypto/aes.h> 36 #include <crypto/gcm.h> 37 #include <crypto/xts.h> 38 #include <crypto/internal/aead.h> 39 #include <crypto/internal/skcipher.h> 40 #include <linux/platform_data/crypto-atmel.h> 41 #include "atmel-aes-regs.h" 42 #include "atmel-authenc.h" 43 44 #define ATMEL_AES_PRIORITY 300 45 46 #define ATMEL_AES_BUFFER_ORDER 2 47 #define ATMEL_AES_BUFFER_SIZE (PAGE_SIZE << ATMEL_AES_BUFFER_ORDER) 48 49 #define CFB8_BLOCK_SIZE 1 50 #define CFB16_BLOCK_SIZE 2 51 #define CFB32_BLOCK_SIZE 4 52 #define CFB64_BLOCK_SIZE 8 53 54 #define SIZE_IN_WORDS(x) ((x) >> 2) 55 56 /* AES flags */ 57 /* Reserve bits [18:16] [14:12] [1:0] for mode (same as for AES_MR) */ 58 #define AES_FLAGS_ENCRYPT AES_MR_CYPHER_ENC 59 #define AES_FLAGS_GTAGEN AES_MR_GTAGEN 60 #define AES_FLAGS_OPMODE_MASK (AES_MR_OPMOD_MASK | AES_MR_CFBS_MASK) 61 #define AES_FLAGS_ECB AES_MR_OPMOD_ECB 62 #define AES_FLAGS_CBC AES_MR_OPMOD_CBC 63 #define AES_FLAGS_OFB AES_MR_OPMOD_OFB 64 #define AES_FLAGS_CFB128 (AES_MR_OPMOD_CFB | AES_MR_CFBS_128b) 65 #define AES_FLAGS_CFB64 (AES_MR_OPMOD_CFB | AES_MR_CFBS_64b) 66 #define AES_FLAGS_CFB32 (AES_MR_OPMOD_CFB | AES_MR_CFBS_32b) 67 #define AES_FLAGS_CFB16 (AES_MR_OPMOD_CFB | AES_MR_CFBS_16b) 68 #define AES_FLAGS_CFB8 (AES_MR_OPMOD_CFB | AES_MR_CFBS_8b) 69 #define AES_FLAGS_CTR AES_MR_OPMOD_CTR 70 #define AES_FLAGS_GCM AES_MR_OPMOD_GCM 71 #define AES_FLAGS_XTS AES_MR_OPMOD_XTS 72 73 #define AES_FLAGS_MODE_MASK (AES_FLAGS_OPMODE_MASK | \ 74 AES_FLAGS_ENCRYPT | \ 75 AES_FLAGS_GTAGEN) 76 77 #define AES_FLAGS_BUSY BIT(3) 78 #define AES_FLAGS_DUMP_REG BIT(4) 79 #define AES_FLAGS_OWN_SHA BIT(5) 80 81 #define AES_FLAGS_PERSISTENT AES_FLAGS_BUSY 82 83 #define ATMEL_AES_QUEUE_LENGTH 50 84 85 #define ATMEL_AES_DMA_THRESHOLD 256 86 87 88 struct atmel_aes_caps { 89 bool has_dualbuff; 90 bool has_cfb64; 91 bool has_gcm; 92 bool has_xts; 93 bool has_authenc; 94 u32 max_burst_size; 95 }; 96 97 struct atmel_aes_dev; 98 99 100 typedef int (*atmel_aes_fn_t)(struct atmel_aes_dev *); 101 102 103 struct atmel_aes_base_ctx { 104 struct atmel_aes_dev *dd; 105 atmel_aes_fn_t start; 106 int keylen; 107 u32 key[AES_KEYSIZE_256 / sizeof(u32)]; 108 u16 block_size; 109 bool is_aead; 110 }; 111 112 struct atmel_aes_ctx { 113 struct atmel_aes_base_ctx base; 114 }; 115 116 struct atmel_aes_ctr_ctx { 117 struct atmel_aes_base_ctx base; 118 119 __be32 iv[AES_BLOCK_SIZE / sizeof(u32)]; 120 size_t offset; 121 struct scatterlist src[2]; 122 struct scatterlist dst[2]; 123 u32 blocks; 124 }; 125 126 struct atmel_aes_gcm_ctx { 127 struct atmel_aes_base_ctx base; 128 129 struct scatterlist src[2]; 130 struct scatterlist dst[2]; 131 132 __be32 j0[AES_BLOCK_SIZE / sizeof(u32)]; 133 u32 tag[AES_BLOCK_SIZE / sizeof(u32)]; 134 __be32 ghash[AES_BLOCK_SIZE / sizeof(u32)]; 135 size_t textlen; 136 137 const __be32 *ghash_in; 138 __be32 *ghash_out; 139 atmel_aes_fn_t ghash_resume; 140 }; 141 142 struct atmel_aes_xts_ctx { 143 struct atmel_aes_base_ctx base; 144 145 u32 key2[AES_KEYSIZE_256 / sizeof(u32)]; 146 }; 147 148 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 149 struct atmel_aes_authenc_ctx { 150 struct atmel_aes_base_ctx base; 151 struct atmel_sha_authenc_ctx *auth; 152 }; 153 #endif 154 155 struct atmel_aes_reqctx { 156 unsigned long mode; 157 u8 lastc[AES_BLOCK_SIZE]; 158 }; 159 160 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 161 struct atmel_aes_authenc_reqctx { 162 struct atmel_aes_reqctx base; 163 164 struct scatterlist src[2]; 165 struct scatterlist dst[2]; 166 size_t textlen; 167 u32 digest[SHA512_DIGEST_SIZE / sizeof(u32)]; 168 169 /* auth_req MUST be place last. */ 170 struct ahash_request auth_req; 171 }; 172 #endif 173 174 struct atmel_aes_dma { 175 struct dma_chan *chan; 176 struct scatterlist *sg; 177 int nents; 178 unsigned int remainder; 179 unsigned int sg_len; 180 }; 181 182 struct atmel_aes_dev { 183 struct list_head list; 184 unsigned long phys_base; 185 void __iomem *io_base; 186 187 struct crypto_async_request *areq; 188 struct atmel_aes_base_ctx *ctx; 189 190 bool is_async; 191 atmel_aes_fn_t resume; 192 atmel_aes_fn_t cpu_transfer_complete; 193 194 struct device *dev; 195 struct clk *iclk; 196 int irq; 197 198 unsigned long flags; 199 200 spinlock_t lock; 201 struct crypto_queue queue; 202 203 struct tasklet_struct done_task; 204 struct tasklet_struct queue_task; 205 206 size_t total; 207 size_t datalen; 208 u32 *data; 209 210 struct atmel_aes_dma src; 211 struct atmel_aes_dma dst; 212 213 size_t buflen; 214 void *buf; 215 struct scatterlist aligned_sg; 216 struct scatterlist *real_dst; 217 218 struct atmel_aes_caps caps; 219 220 u32 hw_version; 221 }; 222 223 struct atmel_aes_drv { 224 struct list_head dev_list; 225 spinlock_t lock; 226 }; 227 228 static struct atmel_aes_drv atmel_aes = { 229 .dev_list = LIST_HEAD_INIT(atmel_aes.dev_list), 230 .lock = __SPIN_LOCK_UNLOCKED(atmel_aes.lock), 231 }; 232 233 #ifdef VERBOSE_DEBUG 234 static const char *atmel_aes_reg_name(u32 offset, char *tmp, size_t sz) 235 { 236 switch (offset) { 237 case AES_CR: 238 return "CR"; 239 240 case AES_MR: 241 return "MR"; 242 243 case AES_ISR: 244 return "ISR"; 245 246 case AES_IMR: 247 return "IMR"; 248 249 case AES_IER: 250 return "IER"; 251 252 case AES_IDR: 253 return "IDR"; 254 255 case AES_KEYWR(0): 256 case AES_KEYWR(1): 257 case AES_KEYWR(2): 258 case AES_KEYWR(3): 259 case AES_KEYWR(4): 260 case AES_KEYWR(5): 261 case AES_KEYWR(6): 262 case AES_KEYWR(7): 263 snprintf(tmp, sz, "KEYWR[%u]", (offset - AES_KEYWR(0)) >> 2); 264 break; 265 266 case AES_IDATAR(0): 267 case AES_IDATAR(1): 268 case AES_IDATAR(2): 269 case AES_IDATAR(3): 270 snprintf(tmp, sz, "IDATAR[%u]", (offset - AES_IDATAR(0)) >> 2); 271 break; 272 273 case AES_ODATAR(0): 274 case AES_ODATAR(1): 275 case AES_ODATAR(2): 276 case AES_ODATAR(3): 277 snprintf(tmp, sz, "ODATAR[%u]", (offset - AES_ODATAR(0)) >> 2); 278 break; 279 280 case AES_IVR(0): 281 case AES_IVR(1): 282 case AES_IVR(2): 283 case AES_IVR(3): 284 snprintf(tmp, sz, "IVR[%u]", (offset - AES_IVR(0)) >> 2); 285 break; 286 287 case AES_AADLENR: 288 return "AADLENR"; 289 290 case AES_CLENR: 291 return "CLENR"; 292 293 case AES_GHASHR(0): 294 case AES_GHASHR(1): 295 case AES_GHASHR(2): 296 case AES_GHASHR(3): 297 snprintf(tmp, sz, "GHASHR[%u]", (offset - AES_GHASHR(0)) >> 2); 298 break; 299 300 case AES_TAGR(0): 301 case AES_TAGR(1): 302 case AES_TAGR(2): 303 case AES_TAGR(3): 304 snprintf(tmp, sz, "TAGR[%u]", (offset - AES_TAGR(0)) >> 2); 305 break; 306 307 case AES_CTRR: 308 return "CTRR"; 309 310 case AES_GCMHR(0): 311 case AES_GCMHR(1): 312 case AES_GCMHR(2): 313 case AES_GCMHR(3): 314 snprintf(tmp, sz, "GCMHR[%u]", (offset - AES_GCMHR(0)) >> 2); 315 break; 316 317 case AES_EMR: 318 return "EMR"; 319 320 case AES_TWR(0): 321 case AES_TWR(1): 322 case AES_TWR(2): 323 case AES_TWR(3): 324 snprintf(tmp, sz, "TWR[%u]", (offset - AES_TWR(0)) >> 2); 325 break; 326 327 case AES_ALPHAR(0): 328 case AES_ALPHAR(1): 329 case AES_ALPHAR(2): 330 case AES_ALPHAR(3): 331 snprintf(tmp, sz, "ALPHAR[%u]", (offset - AES_ALPHAR(0)) >> 2); 332 break; 333 334 default: 335 snprintf(tmp, sz, "0x%02x", offset); 336 break; 337 } 338 339 return tmp; 340 } 341 #endif /* VERBOSE_DEBUG */ 342 343 /* Shared functions */ 344 345 static inline u32 atmel_aes_read(struct atmel_aes_dev *dd, u32 offset) 346 { 347 u32 value = readl_relaxed(dd->io_base + offset); 348 349 #ifdef VERBOSE_DEBUG 350 if (dd->flags & AES_FLAGS_DUMP_REG) { 351 char tmp[16]; 352 353 dev_vdbg(dd->dev, "read 0x%08x from %s\n", value, 354 atmel_aes_reg_name(offset, tmp, sizeof(tmp))); 355 } 356 #endif /* VERBOSE_DEBUG */ 357 358 return value; 359 } 360 361 static inline void atmel_aes_write(struct atmel_aes_dev *dd, 362 u32 offset, u32 value) 363 { 364 #ifdef VERBOSE_DEBUG 365 if (dd->flags & AES_FLAGS_DUMP_REG) { 366 char tmp[16]; 367 368 dev_vdbg(dd->dev, "write 0x%08x into %s\n", value, 369 atmel_aes_reg_name(offset, tmp, sizeof(tmp))); 370 } 371 #endif /* VERBOSE_DEBUG */ 372 373 writel_relaxed(value, dd->io_base + offset); 374 } 375 376 static void atmel_aes_read_n(struct atmel_aes_dev *dd, u32 offset, 377 u32 *value, int count) 378 { 379 for (; count--; value++, offset += 4) 380 *value = atmel_aes_read(dd, offset); 381 } 382 383 static void atmel_aes_write_n(struct atmel_aes_dev *dd, u32 offset, 384 const u32 *value, int count) 385 { 386 for (; count--; value++, offset += 4) 387 atmel_aes_write(dd, offset, *value); 388 } 389 390 static inline void atmel_aes_read_block(struct atmel_aes_dev *dd, u32 offset, 391 void *value) 392 { 393 atmel_aes_read_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); 394 } 395 396 static inline void atmel_aes_write_block(struct atmel_aes_dev *dd, u32 offset, 397 const void *value) 398 { 399 atmel_aes_write_n(dd, offset, value, SIZE_IN_WORDS(AES_BLOCK_SIZE)); 400 } 401 402 static inline int atmel_aes_wait_for_data_ready(struct atmel_aes_dev *dd, 403 atmel_aes_fn_t resume) 404 { 405 u32 isr = atmel_aes_read(dd, AES_ISR); 406 407 if (unlikely(isr & AES_INT_DATARDY)) 408 return resume(dd); 409 410 dd->resume = resume; 411 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); 412 return -EINPROGRESS; 413 } 414 415 static inline size_t atmel_aes_padlen(size_t len, size_t block_size) 416 { 417 len &= block_size - 1; 418 return len ? block_size - len : 0; 419 } 420 421 static struct atmel_aes_dev *atmel_aes_find_dev(struct atmel_aes_base_ctx *ctx) 422 { 423 struct atmel_aes_dev *aes_dd = NULL; 424 struct atmel_aes_dev *tmp; 425 426 spin_lock_bh(&atmel_aes.lock); 427 if (!ctx->dd) { 428 list_for_each_entry(tmp, &atmel_aes.dev_list, list) { 429 aes_dd = tmp; 430 break; 431 } 432 ctx->dd = aes_dd; 433 } else { 434 aes_dd = ctx->dd; 435 } 436 437 spin_unlock_bh(&atmel_aes.lock); 438 439 return aes_dd; 440 } 441 442 static int atmel_aes_hw_init(struct atmel_aes_dev *dd) 443 { 444 int err; 445 446 err = clk_enable(dd->iclk); 447 if (err) 448 return err; 449 450 atmel_aes_write(dd, AES_CR, AES_CR_SWRST); 451 atmel_aes_write(dd, AES_MR, 0xE << AES_MR_CKEY_OFFSET); 452 453 return 0; 454 } 455 456 static inline unsigned int atmel_aes_get_version(struct atmel_aes_dev *dd) 457 { 458 return atmel_aes_read(dd, AES_HW_VERSION) & 0x00000fff; 459 } 460 461 static int atmel_aes_hw_version_init(struct atmel_aes_dev *dd) 462 { 463 int err; 464 465 err = atmel_aes_hw_init(dd); 466 if (err) 467 return err; 468 469 dd->hw_version = atmel_aes_get_version(dd); 470 471 dev_info(dd->dev, "version: 0x%x\n", dd->hw_version); 472 473 clk_disable(dd->iclk); 474 return 0; 475 } 476 477 static inline void atmel_aes_set_mode(struct atmel_aes_dev *dd, 478 const struct atmel_aes_reqctx *rctx) 479 { 480 /* Clear all but persistent flags and set request flags. */ 481 dd->flags = (dd->flags & AES_FLAGS_PERSISTENT) | rctx->mode; 482 } 483 484 static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd) 485 { 486 return (dd->flags & AES_FLAGS_ENCRYPT); 487 } 488 489 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 490 static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err); 491 #endif 492 493 static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd) 494 { 495 struct skcipher_request *req = skcipher_request_cast(dd->areq); 496 struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req); 497 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 498 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 499 500 if (req->cryptlen < ivsize) 501 return; 502 503 if (rctx->mode & AES_FLAGS_ENCRYPT) { 504 scatterwalk_map_and_copy(req->iv, req->dst, 505 req->cryptlen - ivsize, ivsize, 0); 506 } else { 507 if (req->src == req->dst) 508 memcpy(req->iv, rctx->lastc, ivsize); 509 else 510 scatterwalk_map_and_copy(req->iv, req->src, 511 req->cryptlen - ivsize, 512 ivsize, 0); 513 } 514 } 515 516 static inline struct atmel_aes_ctr_ctx * 517 atmel_aes_ctr_ctx_cast(struct atmel_aes_base_ctx *ctx) 518 { 519 return container_of(ctx, struct atmel_aes_ctr_ctx, base); 520 } 521 522 static void atmel_aes_ctr_update_req_iv(struct atmel_aes_dev *dd) 523 { 524 struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); 525 struct skcipher_request *req = skcipher_request_cast(dd->areq); 526 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 527 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 528 int i; 529 530 /* 531 * The CTR transfer works in fragments of data of maximum 1 MByte 532 * because of the 16 bit CTR counter embedded in the IP. When reaching 533 * here, ctx->blocks contains the number of blocks of the last fragment 534 * processed, there is no need to explicit cast it to u16. 535 */ 536 for (i = 0; i < ctx->blocks; i++) 537 crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE); 538 539 memcpy(req->iv, ctx->iv, ivsize); 540 } 541 542 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err) 543 { 544 struct skcipher_request *req = skcipher_request_cast(dd->areq); 545 struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req); 546 547 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 548 if (dd->ctx->is_aead) 549 atmel_aes_authenc_complete(dd, err); 550 #endif 551 552 clk_disable(dd->iclk); 553 dd->flags &= ~AES_FLAGS_BUSY; 554 555 if (!err && !dd->ctx->is_aead && 556 (rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB) { 557 if ((rctx->mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_CTR) 558 atmel_aes_set_iv_as_last_ciphertext_block(dd); 559 else 560 atmel_aes_ctr_update_req_iv(dd); 561 } 562 563 if (dd->is_async) 564 dd->areq->complete(dd->areq, err); 565 566 tasklet_schedule(&dd->queue_task); 567 568 return err; 569 } 570 571 static void atmel_aes_write_ctrl_key(struct atmel_aes_dev *dd, bool use_dma, 572 const __be32 *iv, const u32 *key, int keylen) 573 { 574 u32 valmr = 0; 575 576 /* MR register must be set before IV registers */ 577 if (keylen == AES_KEYSIZE_128) 578 valmr |= AES_MR_KEYSIZE_128; 579 else if (keylen == AES_KEYSIZE_192) 580 valmr |= AES_MR_KEYSIZE_192; 581 else 582 valmr |= AES_MR_KEYSIZE_256; 583 584 valmr |= dd->flags & AES_FLAGS_MODE_MASK; 585 586 if (use_dma) { 587 valmr |= AES_MR_SMOD_IDATAR0; 588 if (dd->caps.has_dualbuff) 589 valmr |= AES_MR_DUALBUFF; 590 } else { 591 valmr |= AES_MR_SMOD_AUTO; 592 } 593 594 atmel_aes_write(dd, AES_MR, valmr); 595 596 atmel_aes_write_n(dd, AES_KEYWR(0), key, SIZE_IN_WORDS(keylen)); 597 598 if (iv && (valmr & AES_MR_OPMOD_MASK) != AES_MR_OPMOD_ECB) 599 atmel_aes_write_block(dd, AES_IVR(0), iv); 600 } 601 602 static inline void atmel_aes_write_ctrl(struct atmel_aes_dev *dd, bool use_dma, 603 const __be32 *iv) 604 605 { 606 atmel_aes_write_ctrl_key(dd, use_dma, iv, 607 dd->ctx->key, dd->ctx->keylen); 608 } 609 610 /* CPU transfer */ 611 612 static int atmel_aes_cpu_transfer(struct atmel_aes_dev *dd) 613 { 614 int err = 0; 615 u32 isr; 616 617 for (;;) { 618 atmel_aes_read_block(dd, AES_ODATAR(0), dd->data); 619 dd->data += 4; 620 dd->datalen -= AES_BLOCK_SIZE; 621 622 if (dd->datalen < AES_BLOCK_SIZE) 623 break; 624 625 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); 626 627 isr = atmel_aes_read(dd, AES_ISR); 628 if (!(isr & AES_INT_DATARDY)) { 629 dd->resume = atmel_aes_cpu_transfer; 630 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); 631 return -EINPROGRESS; 632 } 633 } 634 635 if (!sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), 636 dd->buf, dd->total)) 637 err = -EINVAL; 638 639 if (err) 640 return atmel_aes_complete(dd, err); 641 642 return dd->cpu_transfer_complete(dd); 643 } 644 645 static int atmel_aes_cpu_start(struct atmel_aes_dev *dd, 646 struct scatterlist *src, 647 struct scatterlist *dst, 648 size_t len, 649 atmel_aes_fn_t resume) 650 { 651 size_t padlen = atmel_aes_padlen(len, AES_BLOCK_SIZE); 652 653 if (unlikely(len == 0)) 654 return -EINVAL; 655 656 sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); 657 658 dd->total = len; 659 dd->real_dst = dst; 660 dd->cpu_transfer_complete = resume; 661 dd->datalen = len + padlen; 662 dd->data = (u32 *)dd->buf; 663 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); 664 return atmel_aes_wait_for_data_ready(dd, atmel_aes_cpu_transfer); 665 } 666 667 668 /* DMA transfer */ 669 670 static void atmel_aes_dma_callback(void *data); 671 672 static bool atmel_aes_check_aligned(struct atmel_aes_dev *dd, 673 struct scatterlist *sg, 674 size_t len, 675 struct atmel_aes_dma *dma) 676 { 677 int nents; 678 679 if (!IS_ALIGNED(len, dd->ctx->block_size)) 680 return false; 681 682 for (nents = 0; sg; sg = sg_next(sg), ++nents) { 683 if (!IS_ALIGNED(sg->offset, sizeof(u32))) 684 return false; 685 686 if (len <= sg->length) { 687 if (!IS_ALIGNED(len, dd->ctx->block_size)) 688 return false; 689 690 dma->nents = nents+1; 691 dma->remainder = sg->length - len; 692 sg->length = len; 693 return true; 694 } 695 696 if (!IS_ALIGNED(sg->length, dd->ctx->block_size)) 697 return false; 698 699 len -= sg->length; 700 } 701 702 return false; 703 } 704 705 static inline void atmel_aes_restore_sg(const struct atmel_aes_dma *dma) 706 { 707 struct scatterlist *sg = dma->sg; 708 int nents = dma->nents; 709 710 if (!dma->remainder) 711 return; 712 713 while (--nents > 0 && sg) 714 sg = sg_next(sg); 715 716 if (!sg) 717 return; 718 719 sg->length += dma->remainder; 720 } 721 722 static int atmel_aes_map(struct atmel_aes_dev *dd, 723 struct scatterlist *src, 724 struct scatterlist *dst, 725 size_t len) 726 { 727 bool src_aligned, dst_aligned; 728 size_t padlen; 729 730 dd->total = len; 731 dd->src.sg = src; 732 dd->dst.sg = dst; 733 dd->real_dst = dst; 734 735 src_aligned = atmel_aes_check_aligned(dd, src, len, &dd->src); 736 if (src == dst) 737 dst_aligned = src_aligned; 738 else 739 dst_aligned = atmel_aes_check_aligned(dd, dst, len, &dd->dst); 740 if (!src_aligned || !dst_aligned) { 741 padlen = atmel_aes_padlen(len, dd->ctx->block_size); 742 743 if (dd->buflen < len + padlen) 744 return -ENOMEM; 745 746 if (!src_aligned) { 747 sg_copy_to_buffer(src, sg_nents(src), dd->buf, len); 748 dd->src.sg = &dd->aligned_sg; 749 dd->src.nents = 1; 750 dd->src.remainder = 0; 751 } 752 753 if (!dst_aligned) { 754 dd->dst.sg = &dd->aligned_sg; 755 dd->dst.nents = 1; 756 dd->dst.remainder = 0; 757 } 758 759 sg_init_table(&dd->aligned_sg, 1); 760 sg_set_buf(&dd->aligned_sg, dd->buf, len + padlen); 761 } 762 763 if (dd->src.sg == dd->dst.sg) { 764 dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, 765 DMA_BIDIRECTIONAL); 766 dd->dst.sg_len = dd->src.sg_len; 767 if (!dd->src.sg_len) 768 return -EFAULT; 769 } else { 770 dd->src.sg_len = dma_map_sg(dd->dev, dd->src.sg, dd->src.nents, 771 DMA_TO_DEVICE); 772 if (!dd->src.sg_len) 773 return -EFAULT; 774 775 dd->dst.sg_len = dma_map_sg(dd->dev, dd->dst.sg, dd->dst.nents, 776 DMA_FROM_DEVICE); 777 if (!dd->dst.sg_len) { 778 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, 779 DMA_TO_DEVICE); 780 return -EFAULT; 781 } 782 } 783 784 return 0; 785 } 786 787 static void atmel_aes_unmap(struct atmel_aes_dev *dd) 788 { 789 if (dd->src.sg == dd->dst.sg) { 790 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, 791 DMA_BIDIRECTIONAL); 792 793 if (dd->src.sg != &dd->aligned_sg) 794 atmel_aes_restore_sg(&dd->src); 795 } else { 796 dma_unmap_sg(dd->dev, dd->dst.sg, dd->dst.nents, 797 DMA_FROM_DEVICE); 798 799 if (dd->dst.sg != &dd->aligned_sg) 800 atmel_aes_restore_sg(&dd->dst); 801 802 dma_unmap_sg(dd->dev, dd->src.sg, dd->src.nents, 803 DMA_TO_DEVICE); 804 805 if (dd->src.sg != &dd->aligned_sg) 806 atmel_aes_restore_sg(&dd->src); 807 } 808 809 if (dd->dst.sg == &dd->aligned_sg) 810 sg_copy_from_buffer(dd->real_dst, sg_nents(dd->real_dst), 811 dd->buf, dd->total); 812 } 813 814 static int atmel_aes_dma_transfer_start(struct atmel_aes_dev *dd, 815 enum dma_slave_buswidth addr_width, 816 enum dma_transfer_direction dir, 817 u32 maxburst) 818 { 819 struct dma_async_tx_descriptor *desc; 820 struct dma_slave_config config; 821 dma_async_tx_callback callback; 822 struct atmel_aes_dma *dma; 823 int err; 824 825 memset(&config, 0, sizeof(config)); 826 config.src_addr_width = addr_width; 827 config.dst_addr_width = addr_width; 828 config.src_maxburst = maxburst; 829 config.dst_maxburst = maxburst; 830 831 switch (dir) { 832 case DMA_MEM_TO_DEV: 833 dma = &dd->src; 834 callback = NULL; 835 config.dst_addr = dd->phys_base + AES_IDATAR(0); 836 break; 837 838 case DMA_DEV_TO_MEM: 839 dma = &dd->dst; 840 callback = atmel_aes_dma_callback; 841 config.src_addr = dd->phys_base + AES_ODATAR(0); 842 break; 843 844 default: 845 return -EINVAL; 846 } 847 848 err = dmaengine_slave_config(dma->chan, &config); 849 if (err) 850 return err; 851 852 desc = dmaengine_prep_slave_sg(dma->chan, dma->sg, dma->sg_len, dir, 853 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 854 if (!desc) 855 return -ENOMEM; 856 857 desc->callback = callback; 858 desc->callback_param = dd; 859 dmaengine_submit(desc); 860 dma_async_issue_pending(dma->chan); 861 862 return 0; 863 } 864 865 static int atmel_aes_dma_start(struct atmel_aes_dev *dd, 866 struct scatterlist *src, 867 struct scatterlist *dst, 868 size_t len, 869 atmel_aes_fn_t resume) 870 { 871 enum dma_slave_buswidth addr_width; 872 u32 maxburst; 873 int err; 874 875 switch (dd->ctx->block_size) { 876 case CFB8_BLOCK_SIZE: 877 addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 878 maxburst = 1; 879 break; 880 881 case CFB16_BLOCK_SIZE: 882 addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 883 maxburst = 1; 884 break; 885 886 case CFB32_BLOCK_SIZE: 887 case CFB64_BLOCK_SIZE: 888 addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 889 maxburst = 1; 890 break; 891 892 case AES_BLOCK_SIZE: 893 addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 894 maxburst = dd->caps.max_burst_size; 895 break; 896 897 default: 898 err = -EINVAL; 899 goto exit; 900 } 901 902 err = atmel_aes_map(dd, src, dst, len); 903 if (err) 904 goto exit; 905 906 dd->resume = resume; 907 908 /* Set output DMA transfer first */ 909 err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_DEV_TO_MEM, 910 maxburst); 911 if (err) 912 goto unmap; 913 914 /* Then set input DMA transfer */ 915 err = atmel_aes_dma_transfer_start(dd, addr_width, DMA_MEM_TO_DEV, 916 maxburst); 917 if (err) 918 goto output_transfer_stop; 919 920 return -EINPROGRESS; 921 922 output_transfer_stop: 923 dmaengine_terminate_sync(dd->dst.chan); 924 unmap: 925 atmel_aes_unmap(dd); 926 exit: 927 return atmel_aes_complete(dd, err); 928 } 929 930 static void atmel_aes_dma_callback(void *data) 931 { 932 struct atmel_aes_dev *dd = data; 933 934 atmel_aes_unmap(dd); 935 dd->is_async = true; 936 (void)dd->resume(dd); 937 } 938 939 static int atmel_aes_handle_queue(struct atmel_aes_dev *dd, 940 struct crypto_async_request *new_areq) 941 { 942 struct crypto_async_request *areq, *backlog; 943 struct atmel_aes_base_ctx *ctx; 944 unsigned long flags; 945 bool start_async; 946 int err, ret = 0; 947 948 spin_lock_irqsave(&dd->lock, flags); 949 if (new_areq) 950 ret = crypto_enqueue_request(&dd->queue, new_areq); 951 if (dd->flags & AES_FLAGS_BUSY) { 952 spin_unlock_irqrestore(&dd->lock, flags); 953 return ret; 954 } 955 backlog = crypto_get_backlog(&dd->queue); 956 areq = crypto_dequeue_request(&dd->queue); 957 if (areq) 958 dd->flags |= AES_FLAGS_BUSY; 959 spin_unlock_irqrestore(&dd->lock, flags); 960 961 if (!areq) 962 return ret; 963 964 if (backlog) 965 backlog->complete(backlog, -EINPROGRESS); 966 967 ctx = crypto_tfm_ctx(areq->tfm); 968 969 dd->areq = areq; 970 dd->ctx = ctx; 971 start_async = (areq != new_areq); 972 dd->is_async = start_async; 973 974 /* WARNING: ctx->start() MAY change dd->is_async. */ 975 err = ctx->start(dd); 976 return (start_async) ? ret : err; 977 } 978 979 980 /* AES async block ciphers */ 981 982 static int atmel_aes_transfer_complete(struct atmel_aes_dev *dd) 983 { 984 return atmel_aes_complete(dd, 0); 985 } 986 987 static int atmel_aes_start(struct atmel_aes_dev *dd) 988 { 989 struct skcipher_request *req = skcipher_request_cast(dd->areq); 990 struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req); 991 bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD || 992 dd->ctx->block_size != AES_BLOCK_SIZE); 993 int err; 994 995 atmel_aes_set_mode(dd, rctx); 996 997 err = atmel_aes_hw_init(dd); 998 if (err) 999 return atmel_aes_complete(dd, err); 1000 1001 atmel_aes_write_ctrl(dd, use_dma, (void *)req->iv); 1002 if (use_dma) 1003 return atmel_aes_dma_start(dd, req->src, req->dst, 1004 req->cryptlen, 1005 atmel_aes_transfer_complete); 1006 1007 return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen, 1008 atmel_aes_transfer_complete); 1009 } 1010 1011 static int atmel_aes_ctr_transfer(struct atmel_aes_dev *dd) 1012 { 1013 struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); 1014 struct skcipher_request *req = skcipher_request_cast(dd->areq); 1015 struct scatterlist *src, *dst; 1016 size_t datalen; 1017 u32 ctr; 1018 u16 start, end; 1019 bool use_dma, fragmented = false; 1020 1021 /* Check for transfer completion. */ 1022 ctx->offset += dd->total; 1023 if (ctx->offset >= req->cryptlen) 1024 return atmel_aes_transfer_complete(dd); 1025 1026 /* Compute data length. */ 1027 datalen = req->cryptlen - ctx->offset; 1028 ctx->blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); 1029 ctr = be32_to_cpu(ctx->iv[3]); 1030 1031 /* Check 16bit counter overflow. */ 1032 start = ctr & 0xffff; 1033 end = start + ctx->blocks - 1; 1034 1035 if (ctx->blocks >> 16 || end < start) { 1036 ctr |= 0xffff; 1037 datalen = AES_BLOCK_SIZE * (0x10000 - start); 1038 fragmented = true; 1039 } 1040 1041 use_dma = (datalen >= ATMEL_AES_DMA_THRESHOLD); 1042 1043 /* Jump to offset. */ 1044 src = scatterwalk_ffwd(ctx->src, req->src, ctx->offset); 1045 dst = ((req->src == req->dst) ? src : 1046 scatterwalk_ffwd(ctx->dst, req->dst, ctx->offset)); 1047 1048 /* Configure hardware. */ 1049 atmel_aes_write_ctrl(dd, use_dma, ctx->iv); 1050 if (unlikely(fragmented)) { 1051 /* 1052 * Increment the counter manually to cope with the hardware 1053 * counter overflow. 1054 */ 1055 ctx->iv[3] = cpu_to_be32(ctr); 1056 crypto_inc((u8 *)ctx->iv, AES_BLOCK_SIZE); 1057 } 1058 1059 if (use_dma) 1060 return atmel_aes_dma_start(dd, src, dst, datalen, 1061 atmel_aes_ctr_transfer); 1062 1063 return atmel_aes_cpu_start(dd, src, dst, datalen, 1064 atmel_aes_ctr_transfer); 1065 } 1066 1067 static int atmel_aes_ctr_start(struct atmel_aes_dev *dd) 1068 { 1069 struct atmel_aes_ctr_ctx *ctx = atmel_aes_ctr_ctx_cast(dd->ctx); 1070 struct skcipher_request *req = skcipher_request_cast(dd->areq); 1071 struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req); 1072 int err; 1073 1074 atmel_aes_set_mode(dd, rctx); 1075 1076 err = atmel_aes_hw_init(dd); 1077 if (err) 1078 return atmel_aes_complete(dd, err); 1079 1080 memcpy(ctx->iv, req->iv, AES_BLOCK_SIZE); 1081 ctx->offset = 0; 1082 dd->total = 0; 1083 return atmel_aes_ctr_transfer(dd); 1084 } 1085 1086 static int atmel_aes_crypt(struct skcipher_request *req, unsigned long mode) 1087 { 1088 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); 1089 struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher); 1090 struct atmel_aes_reqctx *rctx; 1091 struct atmel_aes_dev *dd; 1092 1093 switch (mode & AES_FLAGS_OPMODE_MASK) { 1094 case AES_FLAGS_CFB8: 1095 ctx->block_size = CFB8_BLOCK_SIZE; 1096 break; 1097 1098 case AES_FLAGS_CFB16: 1099 ctx->block_size = CFB16_BLOCK_SIZE; 1100 break; 1101 1102 case AES_FLAGS_CFB32: 1103 ctx->block_size = CFB32_BLOCK_SIZE; 1104 break; 1105 1106 case AES_FLAGS_CFB64: 1107 ctx->block_size = CFB64_BLOCK_SIZE; 1108 break; 1109 1110 default: 1111 ctx->block_size = AES_BLOCK_SIZE; 1112 break; 1113 } 1114 ctx->is_aead = false; 1115 1116 dd = atmel_aes_find_dev(ctx); 1117 if (!dd) 1118 return -ENODEV; 1119 1120 rctx = skcipher_request_ctx(req); 1121 rctx->mode = mode; 1122 1123 if ((mode & AES_FLAGS_OPMODE_MASK) != AES_FLAGS_ECB && 1124 !(mode & AES_FLAGS_ENCRYPT) && req->src == req->dst) { 1125 unsigned int ivsize = crypto_skcipher_ivsize(skcipher); 1126 1127 if (req->cryptlen >= ivsize) 1128 scatterwalk_map_and_copy(rctx->lastc, req->src, 1129 req->cryptlen - ivsize, 1130 ivsize, 0); 1131 } 1132 1133 return atmel_aes_handle_queue(dd, &req->base); 1134 } 1135 1136 static int atmel_aes_setkey(struct crypto_skcipher *tfm, const u8 *key, 1137 unsigned int keylen) 1138 { 1139 struct atmel_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm); 1140 1141 if (keylen != AES_KEYSIZE_128 && 1142 keylen != AES_KEYSIZE_192 && 1143 keylen != AES_KEYSIZE_256) 1144 return -EINVAL; 1145 1146 memcpy(ctx->key, key, keylen); 1147 ctx->keylen = keylen; 1148 1149 return 0; 1150 } 1151 1152 static int atmel_aes_ecb_encrypt(struct skcipher_request *req) 1153 { 1154 return atmel_aes_crypt(req, AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); 1155 } 1156 1157 static int atmel_aes_ecb_decrypt(struct skcipher_request *req) 1158 { 1159 return atmel_aes_crypt(req, AES_FLAGS_ECB); 1160 } 1161 1162 static int atmel_aes_cbc_encrypt(struct skcipher_request *req) 1163 { 1164 return atmel_aes_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); 1165 } 1166 1167 static int atmel_aes_cbc_decrypt(struct skcipher_request *req) 1168 { 1169 return atmel_aes_crypt(req, AES_FLAGS_CBC); 1170 } 1171 1172 static int atmel_aes_ofb_encrypt(struct skcipher_request *req) 1173 { 1174 return atmel_aes_crypt(req, AES_FLAGS_OFB | AES_FLAGS_ENCRYPT); 1175 } 1176 1177 static int atmel_aes_ofb_decrypt(struct skcipher_request *req) 1178 { 1179 return atmel_aes_crypt(req, AES_FLAGS_OFB); 1180 } 1181 1182 static int atmel_aes_cfb_encrypt(struct skcipher_request *req) 1183 { 1184 return atmel_aes_crypt(req, AES_FLAGS_CFB128 | AES_FLAGS_ENCRYPT); 1185 } 1186 1187 static int atmel_aes_cfb_decrypt(struct skcipher_request *req) 1188 { 1189 return atmel_aes_crypt(req, AES_FLAGS_CFB128); 1190 } 1191 1192 static int atmel_aes_cfb64_encrypt(struct skcipher_request *req) 1193 { 1194 return atmel_aes_crypt(req, AES_FLAGS_CFB64 | AES_FLAGS_ENCRYPT); 1195 } 1196 1197 static int atmel_aes_cfb64_decrypt(struct skcipher_request *req) 1198 { 1199 return atmel_aes_crypt(req, AES_FLAGS_CFB64); 1200 } 1201 1202 static int atmel_aes_cfb32_encrypt(struct skcipher_request *req) 1203 { 1204 return atmel_aes_crypt(req, AES_FLAGS_CFB32 | AES_FLAGS_ENCRYPT); 1205 } 1206 1207 static int atmel_aes_cfb32_decrypt(struct skcipher_request *req) 1208 { 1209 return atmel_aes_crypt(req, AES_FLAGS_CFB32); 1210 } 1211 1212 static int atmel_aes_cfb16_encrypt(struct skcipher_request *req) 1213 { 1214 return atmel_aes_crypt(req, AES_FLAGS_CFB16 | AES_FLAGS_ENCRYPT); 1215 } 1216 1217 static int atmel_aes_cfb16_decrypt(struct skcipher_request *req) 1218 { 1219 return atmel_aes_crypt(req, AES_FLAGS_CFB16); 1220 } 1221 1222 static int atmel_aes_cfb8_encrypt(struct skcipher_request *req) 1223 { 1224 return atmel_aes_crypt(req, AES_FLAGS_CFB8 | AES_FLAGS_ENCRYPT); 1225 } 1226 1227 static int atmel_aes_cfb8_decrypt(struct skcipher_request *req) 1228 { 1229 return atmel_aes_crypt(req, AES_FLAGS_CFB8); 1230 } 1231 1232 static int atmel_aes_ctr_encrypt(struct skcipher_request *req) 1233 { 1234 return atmel_aes_crypt(req, AES_FLAGS_CTR | AES_FLAGS_ENCRYPT); 1235 } 1236 1237 static int atmel_aes_ctr_decrypt(struct skcipher_request *req) 1238 { 1239 return atmel_aes_crypt(req, AES_FLAGS_CTR); 1240 } 1241 1242 static int atmel_aes_init_tfm(struct crypto_skcipher *tfm) 1243 { 1244 struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 1245 1246 crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); 1247 ctx->base.start = atmel_aes_start; 1248 1249 return 0; 1250 } 1251 1252 static int atmel_aes_ctr_init_tfm(struct crypto_skcipher *tfm) 1253 { 1254 struct atmel_aes_ctx *ctx = crypto_skcipher_ctx(tfm); 1255 1256 crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); 1257 ctx->base.start = atmel_aes_ctr_start; 1258 1259 return 0; 1260 } 1261 1262 static struct skcipher_alg aes_algs[] = { 1263 { 1264 .base.cra_name = "ecb(aes)", 1265 .base.cra_driver_name = "atmel-ecb-aes", 1266 .base.cra_blocksize = AES_BLOCK_SIZE, 1267 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1268 1269 .init = atmel_aes_init_tfm, 1270 .min_keysize = AES_MIN_KEY_SIZE, 1271 .max_keysize = AES_MAX_KEY_SIZE, 1272 .setkey = atmel_aes_setkey, 1273 .encrypt = atmel_aes_ecb_encrypt, 1274 .decrypt = atmel_aes_ecb_decrypt, 1275 }, 1276 { 1277 .base.cra_name = "cbc(aes)", 1278 .base.cra_driver_name = "atmel-cbc-aes", 1279 .base.cra_blocksize = AES_BLOCK_SIZE, 1280 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1281 1282 .init = atmel_aes_init_tfm, 1283 .min_keysize = AES_MIN_KEY_SIZE, 1284 .max_keysize = AES_MAX_KEY_SIZE, 1285 .setkey = atmel_aes_setkey, 1286 .encrypt = atmel_aes_cbc_encrypt, 1287 .decrypt = atmel_aes_cbc_decrypt, 1288 .ivsize = AES_BLOCK_SIZE, 1289 }, 1290 { 1291 .base.cra_name = "ofb(aes)", 1292 .base.cra_driver_name = "atmel-ofb-aes", 1293 .base.cra_blocksize = AES_BLOCK_SIZE, 1294 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1295 1296 .init = atmel_aes_init_tfm, 1297 .min_keysize = AES_MIN_KEY_SIZE, 1298 .max_keysize = AES_MAX_KEY_SIZE, 1299 .setkey = atmel_aes_setkey, 1300 .encrypt = atmel_aes_ofb_encrypt, 1301 .decrypt = atmel_aes_ofb_decrypt, 1302 .ivsize = AES_BLOCK_SIZE, 1303 }, 1304 { 1305 .base.cra_name = "cfb(aes)", 1306 .base.cra_driver_name = "atmel-cfb-aes", 1307 .base.cra_blocksize = AES_BLOCK_SIZE, 1308 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1309 1310 .init = atmel_aes_init_tfm, 1311 .min_keysize = AES_MIN_KEY_SIZE, 1312 .max_keysize = AES_MAX_KEY_SIZE, 1313 .setkey = atmel_aes_setkey, 1314 .encrypt = atmel_aes_cfb_encrypt, 1315 .decrypt = atmel_aes_cfb_decrypt, 1316 .ivsize = AES_BLOCK_SIZE, 1317 }, 1318 { 1319 .base.cra_name = "cfb32(aes)", 1320 .base.cra_driver_name = "atmel-cfb32-aes", 1321 .base.cra_blocksize = CFB32_BLOCK_SIZE, 1322 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1323 1324 .init = atmel_aes_init_tfm, 1325 .min_keysize = AES_MIN_KEY_SIZE, 1326 .max_keysize = AES_MAX_KEY_SIZE, 1327 .setkey = atmel_aes_setkey, 1328 .encrypt = atmel_aes_cfb32_encrypt, 1329 .decrypt = atmel_aes_cfb32_decrypt, 1330 .ivsize = AES_BLOCK_SIZE, 1331 }, 1332 { 1333 .base.cra_name = "cfb16(aes)", 1334 .base.cra_driver_name = "atmel-cfb16-aes", 1335 .base.cra_blocksize = CFB16_BLOCK_SIZE, 1336 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1337 1338 .init = atmel_aes_init_tfm, 1339 .min_keysize = AES_MIN_KEY_SIZE, 1340 .max_keysize = AES_MAX_KEY_SIZE, 1341 .setkey = atmel_aes_setkey, 1342 .encrypt = atmel_aes_cfb16_encrypt, 1343 .decrypt = atmel_aes_cfb16_decrypt, 1344 .ivsize = AES_BLOCK_SIZE, 1345 }, 1346 { 1347 .base.cra_name = "cfb8(aes)", 1348 .base.cra_driver_name = "atmel-cfb8-aes", 1349 .base.cra_blocksize = CFB8_BLOCK_SIZE, 1350 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1351 1352 .init = atmel_aes_init_tfm, 1353 .min_keysize = AES_MIN_KEY_SIZE, 1354 .max_keysize = AES_MAX_KEY_SIZE, 1355 .setkey = atmel_aes_setkey, 1356 .encrypt = atmel_aes_cfb8_encrypt, 1357 .decrypt = atmel_aes_cfb8_decrypt, 1358 .ivsize = AES_BLOCK_SIZE, 1359 }, 1360 { 1361 .base.cra_name = "ctr(aes)", 1362 .base.cra_driver_name = "atmel-ctr-aes", 1363 .base.cra_blocksize = 1, 1364 .base.cra_ctxsize = sizeof(struct atmel_aes_ctr_ctx), 1365 1366 .init = atmel_aes_ctr_init_tfm, 1367 .min_keysize = AES_MIN_KEY_SIZE, 1368 .max_keysize = AES_MAX_KEY_SIZE, 1369 .setkey = atmel_aes_setkey, 1370 .encrypt = atmel_aes_ctr_encrypt, 1371 .decrypt = atmel_aes_ctr_decrypt, 1372 .ivsize = AES_BLOCK_SIZE, 1373 }, 1374 }; 1375 1376 static struct skcipher_alg aes_cfb64_alg = { 1377 .base.cra_name = "cfb64(aes)", 1378 .base.cra_driver_name = "atmel-cfb64-aes", 1379 .base.cra_blocksize = CFB64_BLOCK_SIZE, 1380 .base.cra_ctxsize = sizeof(struct atmel_aes_ctx), 1381 1382 .init = atmel_aes_init_tfm, 1383 .min_keysize = AES_MIN_KEY_SIZE, 1384 .max_keysize = AES_MAX_KEY_SIZE, 1385 .setkey = atmel_aes_setkey, 1386 .encrypt = atmel_aes_cfb64_encrypt, 1387 .decrypt = atmel_aes_cfb64_decrypt, 1388 .ivsize = AES_BLOCK_SIZE, 1389 }; 1390 1391 1392 /* gcm aead functions */ 1393 1394 static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, 1395 const u32 *data, size_t datalen, 1396 const __be32 *ghash_in, __be32 *ghash_out, 1397 atmel_aes_fn_t resume); 1398 static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd); 1399 static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd); 1400 1401 static int atmel_aes_gcm_start(struct atmel_aes_dev *dd); 1402 static int atmel_aes_gcm_process(struct atmel_aes_dev *dd); 1403 static int atmel_aes_gcm_length(struct atmel_aes_dev *dd); 1404 static int atmel_aes_gcm_data(struct atmel_aes_dev *dd); 1405 static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd); 1406 static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd); 1407 static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd); 1408 1409 static inline struct atmel_aes_gcm_ctx * 1410 atmel_aes_gcm_ctx_cast(struct atmel_aes_base_ctx *ctx) 1411 { 1412 return container_of(ctx, struct atmel_aes_gcm_ctx, base); 1413 } 1414 1415 static int atmel_aes_gcm_ghash(struct atmel_aes_dev *dd, 1416 const u32 *data, size_t datalen, 1417 const __be32 *ghash_in, __be32 *ghash_out, 1418 atmel_aes_fn_t resume) 1419 { 1420 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1421 1422 dd->data = (u32 *)data; 1423 dd->datalen = datalen; 1424 ctx->ghash_in = ghash_in; 1425 ctx->ghash_out = ghash_out; 1426 ctx->ghash_resume = resume; 1427 1428 atmel_aes_write_ctrl(dd, false, NULL); 1429 return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_ghash_init); 1430 } 1431 1432 static int atmel_aes_gcm_ghash_init(struct atmel_aes_dev *dd) 1433 { 1434 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1435 1436 /* Set the data length. */ 1437 atmel_aes_write(dd, AES_AADLENR, dd->total); 1438 atmel_aes_write(dd, AES_CLENR, 0); 1439 1440 /* If needed, overwrite the GCM Intermediate Hash Word Registers */ 1441 if (ctx->ghash_in) 1442 atmel_aes_write_block(dd, AES_GHASHR(0), ctx->ghash_in); 1443 1444 return atmel_aes_gcm_ghash_finalize(dd); 1445 } 1446 1447 static int atmel_aes_gcm_ghash_finalize(struct atmel_aes_dev *dd) 1448 { 1449 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1450 u32 isr; 1451 1452 /* Write data into the Input Data Registers. */ 1453 while (dd->datalen > 0) { 1454 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); 1455 dd->data += 4; 1456 dd->datalen -= AES_BLOCK_SIZE; 1457 1458 isr = atmel_aes_read(dd, AES_ISR); 1459 if (!(isr & AES_INT_DATARDY)) { 1460 dd->resume = atmel_aes_gcm_ghash_finalize; 1461 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); 1462 return -EINPROGRESS; 1463 } 1464 } 1465 1466 /* Read the computed hash from GHASHRx. */ 1467 atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash_out); 1468 1469 return ctx->ghash_resume(dd); 1470 } 1471 1472 1473 static int atmel_aes_gcm_start(struct atmel_aes_dev *dd) 1474 { 1475 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1476 struct aead_request *req = aead_request_cast(dd->areq); 1477 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1478 struct atmel_aes_reqctx *rctx = aead_request_ctx(req); 1479 size_t ivsize = crypto_aead_ivsize(tfm); 1480 size_t datalen, padlen; 1481 const void *iv = req->iv; 1482 u8 *data = dd->buf; 1483 int err; 1484 1485 atmel_aes_set_mode(dd, rctx); 1486 1487 err = atmel_aes_hw_init(dd); 1488 if (err) 1489 return atmel_aes_complete(dd, err); 1490 1491 if (likely(ivsize == GCM_AES_IV_SIZE)) { 1492 memcpy(ctx->j0, iv, ivsize); 1493 ctx->j0[3] = cpu_to_be32(1); 1494 return atmel_aes_gcm_process(dd); 1495 } 1496 1497 padlen = atmel_aes_padlen(ivsize, AES_BLOCK_SIZE); 1498 datalen = ivsize + padlen + AES_BLOCK_SIZE; 1499 if (datalen > dd->buflen) 1500 return atmel_aes_complete(dd, -EINVAL); 1501 1502 memcpy(data, iv, ivsize); 1503 memset(data + ivsize, 0, padlen + sizeof(u64)); 1504 ((__be64 *)(data + datalen))[-1] = cpu_to_be64(ivsize * 8); 1505 1506 return atmel_aes_gcm_ghash(dd, (const u32 *)data, datalen, 1507 NULL, ctx->j0, atmel_aes_gcm_process); 1508 } 1509 1510 static int atmel_aes_gcm_process(struct atmel_aes_dev *dd) 1511 { 1512 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1513 struct aead_request *req = aead_request_cast(dd->areq); 1514 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1515 bool enc = atmel_aes_is_encrypt(dd); 1516 u32 authsize; 1517 1518 /* Compute text length. */ 1519 authsize = crypto_aead_authsize(tfm); 1520 ctx->textlen = req->cryptlen - (enc ? 0 : authsize); 1521 1522 /* 1523 * According to tcrypt test suite, the GCM Automatic Tag Generation 1524 * fails when both the message and its associated data are empty. 1525 */ 1526 if (likely(req->assoclen != 0 || ctx->textlen != 0)) 1527 dd->flags |= AES_FLAGS_GTAGEN; 1528 1529 atmel_aes_write_ctrl(dd, false, NULL); 1530 return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_length); 1531 } 1532 1533 static int atmel_aes_gcm_length(struct atmel_aes_dev *dd) 1534 { 1535 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1536 struct aead_request *req = aead_request_cast(dd->areq); 1537 __be32 j0_lsw, *j0 = ctx->j0; 1538 size_t padlen; 1539 1540 /* Write incr32(J0) into IV. */ 1541 j0_lsw = j0[3]; 1542 j0[3] = cpu_to_be32(be32_to_cpu(j0[3]) + 1); 1543 atmel_aes_write_block(dd, AES_IVR(0), j0); 1544 j0[3] = j0_lsw; 1545 1546 /* Set aad and text lengths. */ 1547 atmel_aes_write(dd, AES_AADLENR, req->assoclen); 1548 atmel_aes_write(dd, AES_CLENR, ctx->textlen); 1549 1550 /* Check whether AAD are present. */ 1551 if (unlikely(req->assoclen == 0)) { 1552 dd->datalen = 0; 1553 return atmel_aes_gcm_data(dd); 1554 } 1555 1556 /* Copy assoc data and add padding. */ 1557 padlen = atmel_aes_padlen(req->assoclen, AES_BLOCK_SIZE); 1558 if (unlikely(req->assoclen + padlen > dd->buflen)) 1559 return atmel_aes_complete(dd, -EINVAL); 1560 sg_copy_to_buffer(req->src, sg_nents(req->src), dd->buf, req->assoclen); 1561 1562 /* Write assoc data into the Input Data register. */ 1563 dd->data = (u32 *)dd->buf; 1564 dd->datalen = req->assoclen + padlen; 1565 return atmel_aes_gcm_data(dd); 1566 } 1567 1568 static int atmel_aes_gcm_data(struct atmel_aes_dev *dd) 1569 { 1570 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1571 struct aead_request *req = aead_request_cast(dd->areq); 1572 bool use_dma = (ctx->textlen >= ATMEL_AES_DMA_THRESHOLD); 1573 struct scatterlist *src, *dst; 1574 u32 isr, mr; 1575 1576 /* Write AAD first. */ 1577 while (dd->datalen > 0) { 1578 atmel_aes_write_block(dd, AES_IDATAR(0), dd->data); 1579 dd->data += 4; 1580 dd->datalen -= AES_BLOCK_SIZE; 1581 1582 isr = atmel_aes_read(dd, AES_ISR); 1583 if (!(isr & AES_INT_DATARDY)) { 1584 dd->resume = atmel_aes_gcm_data; 1585 atmel_aes_write(dd, AES_IER, AES_INT_DATARDY); 1586 return -EINPROGRESS; 1587 } 1588 } 1589 1590 /* GMAC only. */ 1591 if (unlikely(ctx->textlen == 0)) 1592 return atmel_aes_gcm_tag_init(dd); 1593 1594 /* Prepare src and dst scatter lists to transfer cipher/plain texts */ 1595 src = scatterwalk_ffwd(ctx->src, req->src, req->assoclen); 1596 dst = ((req->src == req->dst) ? src : 1597 scatterwalk_ffwd(ctx->dst, req->dst, req->assoclen)); 1598 1599 if (use_dma) { 1600 /* Update the Mode Register for DMA transfers. */ 1601 mr = atmel_aes_read(dd, AES_MR); 1602 mr &= ~(AES_MR_SMOD_MASK | AES_MR_DUALBUFF); 1603 mr |= AES_MR_SMOD_IDATAR0; 1604 if (dd->caps.has_dualbuff) 1605 mr |= AES_MR_DUALBUFF; 1606 atmel_aes_write(dd, AES_MR, mr); 1607 1608 return atmel_aes_dma_start(dd, src, dst, ctx->textlen, 1609 atmel_aes_gcm_tag_init); 1610 } 1611 1612 return atmel_aes_cpu_start(dd, src, dst, ctx->textlen, 1613 atmel_aes_gcm_tag_init); 1614 } 1615 1616 static int atmel_aes_gcm_tag_init(struct atmel_aes_dev *dd) 1617 { 1618 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1619 struct aead_request *req = aead_request_cast(dd->areq); 1620 __be64 *data = dd->buf; 1621 1622 if (likely(dd->flags & AES_FLAGS_GTAGEN)) { 1623 if (!(atmel_aes_read(dd, AES_ISR) & AES_INT_TAGRDY)) { 1624 dd->resume = atmel_aes_gcm_tag_init; 1625 atmel_aes_write(dd, AES_IER, AES_INT_TAGRDY); 1626 return -EINPROGRESS; 1627 } 1628 1629 return atmel_aes_gcm_finalize(dd); 1630 } 1631 1632 /* Read the GCM Intermediate Hash Word Registers. */ 1633 atmel_aes_read_block(dd, AES_GHASHR(0), ctx->ghash); 1634 1635 data[0] = cpu_to_be64(req->assoclen * 8); 1636 data[1] = cpu_to_be64(ctx->textlen * 8); 1637 1638 return atmel_aes_gcm_ghash(dd, (const u32 *)data, AES_BLOCK_SIZE, 1639 ctx->ghash, ctx->ghash, atmel_aes_gcm_tag); 1640 } 1641 1642 static int atmel_aes_gcm_tag(struct atmel_aes_dev *dd) 1643 { 1644 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1645 unsigned long flags; 1646 1647 /* 1648 * Change mode to CTR to complete the tag generation. 1649 * Use J0 as Initialization Vector. 1650 */ 1651 flags = dd->flags; 1652 dd->flags &= ~(AES_FLAGS_OPMODE_MASK | AES_FLAGS_GTAGEN); 1653 dd->flags |= AES_FLAGS_CTR; 1654 atmel_aes_write_ctrl(dd, false, ctx->j0); 1655 dd->flags = flags; 1656 1657 atmel_aes_write_block(dd, AES_IDATAR(0), ctx->ghash); 1658 return atmel_aes_wait_for_data_ready(dd, atmel_aes_gcm_finalize); 1659 } 1660 1661 static int atmel_aes_gcm_finalize(struct atmel_aes_dev *dd) 1662 { 1663 struct atmel_aes_gcm_ctx *ctx = atmel_aes_gcm_ctx_cast(dd->ctx); 1664 struct aead_request *req = aead_request_cast(dd->areq); 1665 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1666 bool enc = atmel_aes_is_encrypt(dd); 1667 u32 offset, authsize, itag[4], *otag = ctx->tag; 1668 int err; 1669 1670 /* Read the computed tag. */ 1671 if (likely(dd->flags & AES_FLAGS_GTAGEN)) 1672 atmel_aes_read_block(dd, AES_TAGR(0), ctx->tag); 1673 else 1674 atmel_aes_read_block(dd, AES_ODATAR(0), ctx->tag); 1675 1676 offset = req->assoclen + ctx->textlen; 1677 authsize = crypto_aead_authsize(tfm); 1678 if (enc) { 1679 scatterwalk_map_and_copy(otag, req->dst, offset, authsize, 1); 1680 err = 0; 1681 } else { 1682 scatterwalk_map_and_copy(itag, req->src, offset, authsize, 0); 1683 err = crypto_memneq(itag, otag, authsize) ? -EBADMSG : 0; 1684 } 1685 1686 return atmel_aes_complete(dd, err); 1687 } 1688 1689 static int atmel_aes_gcm_crypt(struct aead_request *req, 1690 unsigned long mode) 1691 { 1692 struct atmel_aes_base_ctx *ctx; 1693 struct atmel_aes_reqctx *rctx; 1694 struct atmel_aes_dev *dd; 1695 1696 ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); 1697 ctx->block_size = AES_BLOCK_SIZE; 1698 ctx->is_aead = true; 1699 1700 dd = atmel_aes_find_dev(ctx); 1701 if (!dd) 1702 return -ENODEV; 1703 1704 rctx = aead_request_ctx(req); 1705 rctx->mode = AES_FLAGS_GCM | mode; 1706 1707 return atmel_aes_handle_queue(dd, &req->base); 1708 } 1709 1710 static int atmel_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key, 1711 unsigned int keylen) 1712 { 1713 struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm); 1714 1715 if (keylen != AES_KEYSIZE_256 && 1716 keylen != AES_KEYSIZE_192 && 1717 keylen != AES_KEYSIZE_128) 1718 return -EINVAL; 1719 1720 memcpy(ctx->key, key, keylen); 1721 ctx->keylen = keylen; 1722 1723 return 0; 1724 } 1725 1726 static int atmel_aes_gcm_setauthsize(struct crypto_aead *tfm, 1727 unsigned int authsize) 1728 { 1729 return crypto_gcm_check_authsize(authsize); 1730 } 1731 1732 static int atmel_aes_gcm_encrypt(struct aead_request *req) 1733 { 1734 return atmel_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT); 1735 } 1736 1737 static int atmel_aes_gcm_decrypt(struct aead_request *req) 1738 { 1739 return atmel_aes_gcm_crypt(req, 0); 1740 } 1741 1742 static int atmel_aes_gcm_init(struct crypto_aead *tfm) 1743 { 1744 struct atmel_aes_gcm_ctx *ctx = crypto_aead_ctx(tfm); 1745 1746 crypto_aead_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); 1747 ctx->base.start = atmel_aes_gcm_start; 1748 1749 return 0; 1750 } 1751 1752 static struct aead_alg aes_gcm_alg = { 1753 .setkey = atmel_aes_gcm_setkey, 1754 .setauthsize = atmel_aes_gcm_setauthsize, 1755 .encrypt = atmel_aes_gcm_encrypt, 1756 .decrypt = atmel_aes_gcm_decrypt, 1757 .init = atmel_aes_gcm_init, 1758 .ivsize = GCM_AES_IV_SIZE, 1759 .maxauthsize = AES_BLOCK_SIZE, 1760 1761 .base = { 1762 .cra_name = "gcm(aes)", 1763 .cra_driver_name = "atmel-gcm-aes", 1764 .cra_blocksize = 1, 1765 .cra_ctxsize = sizeof(struct atmel_aes_gcm_ctx), 1766 }, 1767 }; 1768 1769 1770 /* xts functions */ 1771 1772 static inline struct atmel_aes_xts_ctx * 1773 atmel_aes_xts_ctx_cast(struct atmel_aes_base_ctx *ctx) 1774 { 1775 return container_of(ctx, struct atmel_aes_xts_ctx, base); 1776 } 1777 1778 static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd); 1779 1780 static int atmel_aes_xts_start(struct atmel_aes_dev *dd) 1781 { 1782 struct atmel_aes_xts_ctx *ctx = atmel_aes_xts_ctx_cast(dd->ctx); 1783 struct skcipher_request *req = skcipher_request_cast(dd->areq); 1784 struct atmel_aes_reqctx *rctx = skcipher_request_ctx(req); 1785 unsigned long flags; 1786 int err; 1787 1788 atmel_aes_set_mode(dd, rctx); 1789 1790 err = atmel_aes_hw_init(dd); 1791 if (err) 1792 return atmel_aes_complete(dd, err); 1793 1794 /* Compute the tweak value from req->iv with ecb(aes). */ 1795 flags = dd->flags; 1796 dd->flags &= ~AES_FLAGS_MODE_MASK; 1797 dd->flags |= (AES_FLAGS_ECB | AES_FLAGS_ENCRYPT); 1798 atmel_aes_write_ctrl_key(dd, false, NULL, 1799 ctx->key2, ctx->base.keylen); 1800 dd->flags = flags; 1801 1802 atmel_aes_write_block(dd, AES_IDATAR(0), req->iv); 1803 return atmel_aes_wait_for_data_ready(dd, atmel_aes_xts_process_data); 1804 } 1805 1806 static int atmel_aes_xts_process_data(struct atmel_aes_dev *dd) 1807 { 1808 struct skcipher_request *req = skcipher_request_cast(dd->areq); 1809 bool use_dma = (req->cryptlen >= ATMEL_AES_DMA_THRESHOLD); 1810 u32 tweak[AES_BLOCK_SIZE / sizeof(u32)]; 1811 static const __le32 one[AES_BLOCK_SIZE / sizeof(u32)] = {cpu_to_le32(1), }; 1812 u8 *tweak_bytes = (u8 *)tweak; 1813 int i; 1814 1815 /* Read the computed ciphered tweak value. */ 1816 atmel_aes_read_block(dd, AES_ODATAR(0), tweak); 1817 /* 1818 * Hardware quirk: 1819 * the order of the ciphered tweak bytes need to be reversed before 1820 * writing them into the ODATARx registers. 1821 */ 1822 for (i = 0; i < AES_BLOCK_SIZE/2; ++i) { 1823 u8 tmp = tweak_bytes[AES_BLOCK_SIZE - 1 - i]; 1824 1825 tweak_bytes[AES_BLOCK_SIZE - 1 - i] = tweak_bytes[i]; 1826 tweak_bytes[i] = tmp; 1827 } 1828 1829 /* Process the data. */ 1830 atmel_aes_write_ctrl(dd, use_dma, NULL); 1831 atmel_aes_write_block(dd, AES_TWR(0), tweak); 1832 atmel_aes_write_block(dd, AES_ALPHAR(0), one); 1833 if (use_dma) 1834 return atmel_aes_dma_start(dd, req->src, req->dst, 1835 req->cryptlen, 1836 atmel_aes_transfer_complete); 1837 1838 return atmel_aes_cpu_start(dd, req->src, req->dst, req->cryptlen, 1839 atmel_aes_transfer_complete); 1840 } 1841 1842 static int atmel_aes_xts_setkey(struct crypto_skcipher *tfm, const u8 *key, 1843 unsigned int keylen) 1844 { 1845 struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 1846 int err; 1847 1848 err = xts_check_key(crypto_skcipher_tfm(tfm), key, keylen); 1849 if (err) 1850 return err; 1851 1852 memcpy(ctx->base.key, key, keylen/2); 1853 memcpy(ctx->key2, key + keylen/2, keylen/2); 1854 ctx->base.keylen = keylen/2; 1855 1856 return 0; 1857 } 1858 1859 static int atmel_aes_xts_encrypt(struct skcipher_request *req) 1860 { 1861 return atmel_aes_crypt(req, AES_FLAGS_XTS | AES_FLAGS_ENCRYPT); 1862 } 1863 1864 static int atmel_aes_xts_decrypt(struct skcipher_request *req) 1865 { 1866 return atmel_aes_crypt(req, AES_FLAGS_XTS); 1867 } 1868 1869 static int atmel_aes_xts_init_tfm(struct crypto_skcipher *tfm) 1870 { 1871 struct atmel_aes_xts_ctx *ctx = crypto_skcipher_ctx(tfm); 1872 1873 crypto_skcipher_set_reqsize(tfm, sizeof(struct atmel_aes_reqctx)); 1874 ctx->base.start = atmel_aes_xts_start; 1875 1876 return 0; 1877 } 1878 1879 static struct skcipher_alg aes_xts_alg = { 1880 .base.cra_name = "xts(aes)", 1881 .base.cra_driver_name = "atmel-xts-aes", 1882 .base.cra_blocksize = AES_BLOCK_SIZE, 1883 .base.cra_ctxsize = sizeof(struct atmel_aes_xts_ctx), 1884 1885 .min_keysize = 2 * AES_MIN_KEY_SIZE, 1886 .max_keysize = 2 * AES_MAX_KEY_SIZE, 1887 .ivsize = AES_BLOCK_SIZE, 1888 .setkey = atmel_aes_xts_setkey, 1889 .encrypt = atmel_aes_xts_encrypt, 1890 .decrypt = atmel_aes_xts_decrypt, 1891 .init = atmel_aes_xts_init_tfm, 1892 }; 1893 1894 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 1895 /* authenc aead functions */ 1896 1897 static int atmel_aes_authenc_start(struct atmel_aes_dev *dd); 1898 static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err, 1899 bool is_async); 1900 static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err, 1901 bool is_async); 1902 static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd); 1903 static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err, 1904 bool is_async); 1905 1906 static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err) 1907 { 1908 struct aead_request *req = aead_request_cast(dd->areq); 1909 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 1910 1911 if (err && (dd->flags & AES_FLAGS_OWN_SHA)) 1912 atmel_sha_authenc_abort(&rctx->auth_req); 1913 dd->flags &= ~AES_FLAGS_OWN_SHA; 1914 } 1915 1916 static int atmel_aes_authenc_start(struct atmel_aes_dev *dd) 1917 { 1918 struct aead_request *req = aead_request_cast(dd->areq); 1919 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 1920 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 1921 struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); 1922 int err; 1923 1924 atmel_aes_set_mode(dd, &rctx->base); 1925 1926 err = atmel_aes_hw_init(dd); 1927 if (err) 1928 return atmel_aes_complete(dd, err); 1929 1930 return atmel_sha_authenc_schedule(&rctx->auth_req, ctx->auth, 1931 atmel_aes_authenc_init, dd); 1932 } 1933 1934 static int atmel_aes_authenc_init(struct atmel_aes_dev *dd, int err, 1935 bool is_async) 1936 { 1937 struct aead_request *req = aead_request_cast(dd->areq); 1938 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 1939 1940 if (is_async) 1941 dd->is_async = true; 1942 if (err) 1943 return atmel_aes_complete(dd, err); 1944 1945 /* If here, we've got the ownership of the SHA device. */ 1946 dd->flags |= AES_FLAGS_OWN_SHA; 1947 1948 /* Configure the SHA device. */ 1949 return atmel_sha_authenc_init(&rctx->auth_req, 1950 req->src, req->assoclen, 1951 rctx->textlen, 1952 atmel_aes_authenc_transfer, dd); 1953 } 1954 1955 static int atmel_aes_authenc_transfer(struct atmel_aes_dev *dd, int err, 1956 bool is_async) 1957 { 1958 struct aead_request *req = aead_request_cast(dd->areq); 1959 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 1960 bool enc = atmel_aes_is_encrypt(dd); 1961 struct scatterlist *src, *dst; 1962 __be32 iv[AES_BLOCK_SIZE / sizeof(u32)]; 1963 u32 emr; 1964 1965 if (is_async) 1966 dd->is_async = true; 1967 if (err) 1968 return atmel_aes_complete(dd, err); 1969 1970 /* Prepare src and dst scatter-lists to transfer cipher/plain texts. */ 1971 src = scatterwalk_ffwd(rctx->src, req->src, req->assoclen); 1972 dst = src; 1973 1974 if (req->src != req->dst) 1975 dst = scatterwalk_ffwd(rctx->dst, req->dst, req->assoclen); 1976 1977 /* Configure the AES device. */ 1978 memcpy(iv, req->iv, sizeof(iv)); 1979 1980 /* 1981 * Here we always set the 2nd parameter of atmel_aes_write_ctrl() to 1982 * 'true' even if the data transfer is actually performed by the CPU (so 1983 * not by the DMA) because we must force the AES_MR_SMOD bitfield to the 1984 * value AES_MR_SMOD_IDATAR0. Indeed, both AES_MR_SMOD and SHA_MR_SMOD 1985 * must be set to *_MR_SMOD_IDATAR0. 1986 */ 1987 atmel_aes_write_ctrl(dd, true, iv); 1988 emr = AES_EMR_PLIPEN; 1989 if (!enc) 1990 emr |= AES_EMR_PLIPD; 1991 atmel_aes_write(dd, AES_EMR, emr); 1992 1993 /* Transfer data. */ 1994 return atmel_aes_dma_start(dd, src, dst, rctx->textlen, 1995 atmel_aes_authenc_digest); 1996 } 1997 1998 static int atmel_aes_authenc_digest(struct atmel_aes_dev *dd) 1999 { 2000 struct aead_request *req = aead_request_cast(dd->areq); 2001 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 2002 2003 /* atmel_sha_authenc_final() releases the SHA device. */ 2004 dd->flags &= ~AES_FLAGS_OWN_SHA; 2005 return atmel_sha_authenc_final(&rctx->auth_req, 2006 rctx->digest, sizeof(rctx->digest), 2007 atmel_aes_authenc_final, dd); 2008 } 2009 2010 static int atmel_aes_authenc_final(struct atmel_aes_dev *dd, int err, 2011 bool is_async) 2012 { 2013 struct aead_request *req = aead_request_cast(dd->areq); 2014 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 2015 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2016 bool enc = atmel_aes_is_encrypt(dd); 2017 u32 idigest[SHA512_DIGEST_SIZE / sizeof(u32)], *odigest = rctx->digest; 2018 u32 offs, authsize; 2019 2020 if (is_async) 2021 dd->is_async = true; 2022 if (err) 2023 goto complete; 2024 2025 offs = req->assoclen + rctx->textlen; 2026 authsize = crypto_aead_authsize(tfm); 2027 if (enc) { 2028 scatterwalk_map_and_copy(odigest, req->dst, offs, authsize, 1); 2029 } else { 2030 scatterwalk_map_and_copy(idigest, req->src, offs, authsize, 0); 2031 if (crypto_memneq(idigest, odigest, authsize)) 2032 err = -EBADMSG; 2033 } 2034 2035 complete: 2036 return atmel_aes_complete(dd, err); 2037 } 2038 2039 static int atmel_aes_authenc_setkey(struct crypto_aead *tfm, const u8 *key, 2040 unsigned int keylen) 2041 { 2042 struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); 2043 struct crypto_authenc_keys keys; 2044 int err; 2045 2046 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) 2047 goto badkey; 2048 2049 if (keys.enckeylen > sizeof(ctx->base.key)) 2050 goto badkey; 2051 2052 /* Save auth key. */ 2053 err = atmel_sha_authenc_setkey(ctx->auth, 2054 keys.authkey, keys.authkeylen, 2055 crypto_aead_get_flags(tfm)); 2056 if (err) { 2057 memzero_explicit(&keys, sizeof(keys)); 2058 return err; 2059 } 2060 2061 /* Save enc key. */ 2062 ctx->base.keylen = keys.enckeylen; 2063 memcpy(ctx->base.key, keys.enckey, keys.enckeylen); 2064 2065 memzero_explicit(&keys, sizeof(keys)); 2066 return 0; 2067 2068 badkey: 2069 memzero_explicit(&keys, sizeof(keys)); 2070 return -EINVAL; 2071 } 2072 2073 static int atmel_aes_authenc_init_tfm(struct crypto_aead *tfm, 2074 unsigned long auth_mode) 2075 { 2076 struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); 2077 unsigned int auth_reqsize = atmel_sha_authenc_get_reqsize(); 2078 2079 ctx->auth = atmel_sha_authenc_spawn(auth_mode); 2080 if (IS_ERR(ctx->auth)) 2081 return PTR_ERR(ctx->auth); 2082 2083 crypto_aead_set_reqsize(tfm, (sizeof(struct atmel_aes_authenc_reqctx) + 2084 auth_reqsize)); 2085 ctx->base.start = atmel_aes_authenc_start; 2086 2087 return 0; 2088 } 2089 2090 static int atmel_aes_authenc_hmac_sha1_init_tfm(struct crypto_aead *tfm) 2091 { 2092 return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA1); 2093 } 2094 2095 static int atmel_aes_authenc_hmac_sha224_init_tfm(struct crypto_aead *tfm) 2096 { 2097 return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA224); 2098 } 2099 2100 static int atmel_aes_authenc_hmac_sha256_init_tfm(struct crypto_aead *tfm) 2101 { 2102 return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA256); 2103 } 2104 2105 static int atmel_aes_authenc_hmac_sha384_init_tfm(struct crypto_aead *tfm) 2106 { 2107 return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA384); 2108 } 2109 2110 static int atmel_aes_authenc_hmac_sha512_init_tfm(struct crypto_aead *tfm) 2111 { 2112 return atmel_aes_authenc_init_tfm(tfm, SHA_FLAGS_HMAC_SHA512); 2113 } 2114 2115 static void atmel_aes_authenc_exit_tfm(struct crypto_aead *tfm) 2116 { 2117 struct atmel_aes_authenc_ctx *ctx = crypto_aead_ctx(tfm); 2118 2119 atmel_sha_authenc_free(ctx->auth); 2120 } 2121 2122 static int atmel_aes_authenc_crypt(struct aead_request *req, 2123 unsigned long mode) 2124 { 2125 struct atmel_aes_authenc_reqctx *rctx = aead_request_ctx(req); 2126 struct crypto_aead *tfm = crypto_aead_reqtfm(req); 2127 struct atmel_aes_base_ctx *ctx = crypto_aead_ctx(tfm); 2128 u32 authsize = crypto_aead_authsize(tfm); 2129 bool enc = (mode & AES_FLAGS_ENCRYPT); 2130 struct atmel_aes_dev *dd; 2131 2132 /* Compute text length. */ 2133 if (!enc && req->cryptlen < authsize) 2134 return -EINVAL; 2135 rctx->textlen = req->cryptlen - (enc ? 0 : authsize); 2136 2137 /* 2138 * Currently, empty messages are not supported yet: 2139 * the SHA auto-padding can be used only on non-empty messages. 2140 * Hence a special case needs to be implemented for empty message. 2141 */ 2142 if (!rctx->textlen && !req->assoclen) 2143 return -EINVAL; 2144 2145 rctx->base.mode = mode; 2146 ctx->block_size = AES_BLOCK_SIZE; 2147 ctx->is_aead = true; 2148 2149 dd = atmel_aes_find_dev(ctx); 2150 if (!dd) 2151 return -ENODEV; 2152 2153 return atmel_aes_handle_queue(dd, &req->base); 2154 } 2155 2156 static int atmel_aes_authenc_cbc_aes_encrypt(struct aead_request *req) 2157 { 2158 return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC | AES_FLAGS_ENCRYPT); 2159 } 2160 2161 static int atmel_aes_authenc_cbc_aes_decrypt(struct aead_request *req) 2162 { 2163 return atmel_aes_authenc_crypt(req, AES_FLAGS_CBC); 2164 } 2165 2166 static struct aead_alg aes_authenc_algs[] = { 2167 { 2168 .setkey = atmel_aes_authenc_setkey, 2169 .encrypt = atmel_aes_authenc_cbc_aes_encrypt, 2170 .decrypt = atmel_aes_authenc_cbc_aes_decrypt, 2171 .init = atmel_aes_authenc_hmac_sha1_init_tfm, 2172 .exit = atmel_aes_authenc_exit_tfm, 2173 .ivsize = AES_BLOCK_SIZE, 2174 .maxauthsize = SHA1_DIGEST_SIZE, 2175 2176 .base = { 2177 .cra_name = "authenc(hmac(sha1),cbc(aes))", 2178 .cra_driver_name = "atmel-authenc-hmac-sha1-cbc-aes", 2179 .cra_blocksize = AES_BLOCK_SIZE, 2180 .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), 2181 }, 2182 }, 2183 { 2184 .setkey = atmel_aes_authenc_setkey, 2185 .encrypt = atmel_aes_authenc_cbc_aes_encrypt, 2186 .decrypt = atmel_aes_authenc_cbc_aes_decrypt, 2187 .init = atmel_aes_authenc_hmac_sha224_init_tfm, 2188 .exit = atmel_aes_authenc_exit_tfm, 2189 .ivsize = AES_BLOCK_SIZE, 2190 .maxauthsize = SHA224_DIGEST_SIZE, 2191 2192 .base = { 2193 .cra_name = "authenc(hmac(sha224),cbc(aes))", 2194 .cra_driver_name = "atmel-authenc-hmac-sha224-cbc-aes", 2195 .cra_blocksize = AES_BLOCK_SIZE, 2196 .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), 2197 }, 2198 }, 2199 { 2200 .setkey = atmel_aes_authenc_setkey, 2201 .encrypt = atmel_aes_authenc_cbc_aes_encrypt, 2202 .decrypt = atmel_aes_authenc_cbc_aes_decrypt, 2203 .init = atmel_aes_authenc_hmac_sha256_init_tfm, 2204 .exit = atmel_aes_authenc_exit_tfm, 2205 .ivsize = AES_BLOCK_SIZE, 2206 .maxauthsize = SHA256_DIGEST_SIZE, 2207 2208 .base = { 2209 .cra_name = "authenc(hmac(sha256),cbc(aes))", 2210 .cra_driver_name = "atmel-authenc-hmac-sha256-cbc-aes", 2211 .cra_blocksize = AES_BLOCK_SIZE, 2212 .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), 2213 }, 2214 }, 2215 { 2216 .setkey = atmel_aes_authenc_setkey, 2217 .encrypt = atmel_aes_authenc_cbc_aes_encrypt, 2218 .decrypt = atmel_aes_authenc_cbc_aes_decrypt, 2219 .init = atmel_aes_authenc_hmac_sha384_init_tfm, 2220 .exit = atmel_aes_authenc_exit_tfm, 2221 .ivsize = AES_BLOCK_SIZE, 2222 .maxauthsize = SHA384_DIGEST_SIZE, 2223 2224 .base = { 2225 .cra_name = "authenc(hmac(sha384),cbc(aes))", 2226 .cra_driver_name = "atmel-authenc-hmac-sha384-cbc-aes", 2227 .cra_blocksize = AES_BLOCK_SIZE, 2228 .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), 2229 }, 2230 }, 2231 { 2232 .setkey = atmel_aes_authenc_setkey, 2233 .encrypt = atmel_aes_authenc_cbc_aes_encrypt, 2234 .decrypt = atmel_aes_authenc_cbc_aes_decrypt, 2235 .init = atmel_aes_authenc_hmac_sha512_init_tfm, 2236 .exit = atmel_aes_authenc_exit_tfm, 2237 .ivsize = AES_BLOCK_SIZE, 2238 .maxauthsize = SHA512_DIGEST_SIZE, 2239 2240 .base = { 2241 .cra_name = "authenc(hmac(sha512),cbc(aes))", 2242 .cra_driver_name = "atmel-authenc-hmac-sha512-cbc-aes", 2243 .cra_blocksize = AES_BLOCK_SIZE, 2244 .cra_ctxsize = sizeof(struct atmel_aes_authenc_ctx), 2245 }, 2246 }, 2247 }; 2248 #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ 2249 2250 /* Probe functions */ 2251 2252 static int atmel_aes_buff_init(struct atmel_aes_dev *dd) 2253 { 2254 dd->buf = (void *)__get_free_pages(GFP_KERNEL, ATMEL_AES_BUFFER_ORDER); 2255 dd->buflen = ATMEL_AES_BUFFER_SIZE; 2256 dd->buflen &= ~(AES_BLOCK_SIZE - 1); 2257 2258 if (!dd->buf) { 2259 dev_err(dd->dev, "unable to alloc pages.\n"); 2260 return -ENOMEM; 2261 } 2262 2263 return 0; 2264 } 2265 2266 static void atmel_aes_buff_cleanup(struct atmel_aes_dev *dd) 2267 { 2268 free_page((unsigned long)dd->buf); 2269 } 2270 2271 static int atmel_aes_dma_init(struct atmel_aes_dev *dd) 2272 { 2273 int ret; 2274 2275 /* Try to grab 2 DMA channels */ 2276 dd->src.chan = dma_request_chan(dd->dev, "tx"); 2277 if (IS_ERR(dd->src.chan)) { 2278 ret = PTR_ERR(dd->src.chan); 2279 goto err_dma_in; 2280 } 2281 2282 dd->dst.chan = dma_request_chan(dd->dev, "rx"); 2283 if (IS_ERR(dd->dst.chan)) { 2284 ret = PTR_ERR(dd->dst.chan); 2285 goto err_dma_out; 2286 } 2287 2288 return 0; 2289 2290 err_dma_out: 2291 dma_release_channel(dd->src.chan); 2292 err_dma_in: 2293 dev_err(dd->dev, "no DMA channel available\n"); 2294 return ret; 2295 } 2296 2297 static void atmel_aes_dma_cleanup(struct atmel_aes_dev *dd) 2298 { 2299 dma_release_channel(dd->dst.chan); 2300 dma_release_channel(dd->src.chan); 2301 } 2302 2303 static void atmel_aes_queue_task(unsigned long data) 2304 { 2305 struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; 2306 2307 atmel_aes_handle_queue(dd, NULL); 2308 } 2309 2310 static void atmel_aes_done_task(unsigned long data) 2311 { 2312 struct atmel_aes_dev *dd = (struct atmel_aes_dev *)data; 2313 2314 dd->is_async = true; 2315 (void)dd->resume(dd); 2316 } 2317 2318 static irqreturn_t atmel_aes_irq(int irq, void *dev_id) 2319 { 2320 struct atmel_aes_dev *aes_dd = dev_id; 2321 u32 reg; 2322 2323 reg = atmel_aes_read(aes_dd, AES_ISR); 2324 if (reg & atmel_aes_read(aes_dd, AES_IMR)) { 2325 atmel_aes_write(aes_dd, AES_IDR, reg); 2326 if (AES_FLAGS_BUSY & aes_dd->flags) 2327 tasklet_schedule(&aes_dd->done_task); 2328 else 2329 dev_warn(aes_dd->dev, "AES interrupt when no active requests.\n"); 2330 return IRQ_HANDLED; 2331 } 2332 2333 return IRQ_NONE; 2334 } 2335 2336 static void atmel_aes_unregister_algs(struct atmel_aes_dev *dd) 2337 { 2338 int i; 2339 2340 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 2341 if (dd->caps.has_authenc) 2342 for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) 2343 crypto_unregister_aead(&aes_authenc_algs[i]); 2344 #endif 2345 2346 if (dd->caps.has_xts) 2347 crypto_unregister_skcipher(&aes_xts_alg); 2348 2349 if (dd->caps.has_gcm) 2350 crypto_unregister_aead(&aes_gcm_alg); 2351 2352 if (dd->caps.has_cfb64) 2353 crypto_unregister_skcipher(&aes_cfb64_alg); 2354 2355 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) 2356 crypto_unregister_skcipher(&aes_algs[i]); 2357 } 2358 2359 static void atmel_aes_crypto_alg_init(struct crypto_alg *alg) 2360 { 2361 alg->cra_flags = CRYPTO_ALG_ASYNC; 2362 alg->cra_alignmask = 0xf; 2363 alg->cra_priority = ATMEL_AES_PRIORITY; 2364 alg->cra_module = THIS_MODULE; 2365 } 2366 2367 static int atmel_aes_register_algs(struct atmel_aes_dev *dd) 2368 { 2369 int err, i, j; 2370 2371 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { 2372 atmel_aes_crypto_alg_init(&aes_algs[i].base); 2373 2374 err = crypto_register_skcipher(&aes_algs[i]); 2375 if (err) 2376 goto err_aes_algs; 2377 } 2378 2379 if (dd->caps.has_cfb64) { 2380 atmel_aes_crypto_alg_init(&aes_cfb64_alg.base); 2381 2382 err = crypto_register_skcipher(&aes_cfb64_alg); 2383 if (err) 2384 goto err_aes_cfb64_alg; 2385 } 2386 2387 if (dd->caps.has_gcm) { 2388 atmel_aes_crypto_alg_init(&aes_gcm_alg.base); 2389 2390 err = crypto_register_aead(&aes_gcm_alg); 2391 if (err) 2392 goto err_aes_gcm_alg; 2393 } 2394 2395 if (dd->caps.has_xts) { 2396 atmel_aes_crypto_alg_init(&aes_xts_alg.base); 2397 2398 err = crypto_register_skcipher(&aes_xts_alg); 2399 if (err) 2400 goto err_aes_xts_alg; 2401 } 2402 2403 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 2404 if (dd->caps.has_authenc) { 2405 for (i = 0; i < ARRAY_SIZE(aes_authenc_algs); i++) { 2406 atmel_aes_crypto_alg_init(&aes_authenc_algs[i].base); 2407 2408 err = crypto_register_aead(&aes_authenc_algs[i]); 2409 if (err) 2410 goto err_aes_authenc_alg; 2411 } 2412 } 2413 #endif 2414 2415 return 0; 2416 2417 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 2418 /* i = ARRAY_SIZE(aes_authenc_algs); */ 2419 err_aes_authenc_alg: 2420 for (j = 0; j < i; j++) 2421 crypto_unregister_aead(&aes_authenc_algs[j]); 2422 crypto_unregister_skcipher(&aes_xts_alg); 2423 #endif 2424 err_aes_xts_alg: 2425 crypto_unregister_aead(&aes_gcm_alg); 2426 err_aes_gcm_alg: 2427 crypto_unregister_skcipher(&aes_cfb64_alg); 2428 err_aes_cfb64_alg: 2429 i = ARRAY_SIZE(aes_algs); 2430 err_aes_algs: 2431 for (j = 0; j < i; j++) 2432 crypto_unregister_skcipher(&aes_algs[j]); 2433 2434 return err; 2435 } 2436 2437 static void atmel_aes_get_cap(struct atmel_aes_dev *dd) 2438 { 2439 dd->caps.has_dualbuff = 0; 2440 dd->caps.has_cfb64 = 0; 2441 dd->caps.has_gcm = 0; 2442 dd->caps.has_xts = 0; 2443 dd->caps.has_authenc = 0; 2444 dd->caps.max_burst_size = 1; 2445 2446 /* keep only major version number */ 2447 switch (dd->hw_version & 0xff0) { 2448 case 0x500: 2449 dd->caps.has_dualbuff = 1; 2450 dd->caps.has_cfb64 = 1; 2451 dd->caps.has_gcm = 1; 2452 dd->caps.has_xts = 1; 2453 dd->caps.has_authenc = 1; 2454 dd->caps.max_burst_size = 4; 2455 break; 2456 case 0x200: 2457 dd->caps.has_dualbuff = 1; 2458 dd->caps.has_cfb64 = 1; 2459 dd->caps.has_gcm = 1; 2460 dd->caps.max_burst_size = 4; 2461 break; 2462 case 0x130: 2463 dd->caps.has_dualbuff = 1; 2464 dd->caps.has_cfb64 = 1; 2465 dd->caps.max_burst_size = 4; 2466 break; 2467 case 0x120: 2468 break; 2469 default: 2470 dev_warn(dd->dev, 2471 "Unmanaged aes version, set minimum capabilities\n"); 2472 break; 2473 } 2474 } 2475 2476 #if defined(CONFIG_OF) 2477 static const struct of_device_id atmel_aes_dt_ids[] = { 2478 { .compatible = "atmel,at91sam9g46-aes" }, 2479 { /* sentinel */ } 2480 }; 2481 MODULE_DEVICE_TABLE(of, atmel_aes_dt_ids); 2482 2483 static struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) 2484 { 2485 struct device_node *np = pdev->dev.of_node; 2486 struct crypto_platform_data *pdata; 2487 2488 if (!np) { 2489 dev_err(&pdev->dev, "device node not found\n"); 2490 return ERR_PTR(-EINVAL); 2491 } 2492 2493 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 2494 if (!pdata) 2495 return ERR_PTR(-ENOMEM); 2496 2497 return pdata; 2498 } 2499 #else 2500 static inline struct crypto_platform_data *atmel_aes_of_init(struct platform_device *pdev) 2501 { 2502 return ERR_PTR(-EINVAL); 2503 } 2504 #endif 2505 2506 static int atmel_aes_probe(struct platform_device *pdev) 2507 { 2508 struct atmel_aes_dev *aes_dd; 2509 struct crypto_platform_data *pdata; 2510 struct device *dev = &pdev->dev; 2511 struct resource *aes_res; 2512 int err; 2513 2514 pdata = pdev->dev.platform_data; 2515 if (!pdata) { 2516 pdata = atmel_aes_of_init(pdev); 2517 if (IS_ERR(pdata)) 2518 return PTR_ERR(pdata); 2519 } 2520 2521 aes_dd = devm_kzalloc(&pdev->dev, sizeof(*aes_dd), GFP_KERNEL); 2522 if (!aes_dd) 2523 return -ENOMEM; 2524 2525 aes_dd->dev = dev; 2526 2527 platform_set_drvdata(pdev, aes_dd); 2528 2529 INIT_LIST_HEAD(&aes_dd->list); 2530 spin_lock_init(&aes_dd->lock); 2531 2532 tasklet_init(&aes_dd->done_task, atmel_aes_done_task, 2533 (unsigned long)aes_dd); 2534 tasklet_init(&aes_dd->queue_task, atmel_aes_queue_task, 2535 (unsigned long)aes_dd); 2536 2537 crypto_init_queue(&aes_dd->queue, ATMEL_AES_QUEUE_LENGTH); 2538 2539 /* Get the base address */ 2540 aes_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2541 if (!aes_res) { 2542 dev_err(dev, "no MEM resource info\n"); 2543 err = -ENODEV; 2544 goto err_tasklet_kill; 2545 } 2546 aes_dd->phys_base = aes_res->start; 2547 2548 /* Get the IRQ */ 2549 aes_dd->irq = platform_get_irq(pdev, 0); 2550 if (aes_dd->irq < 0) { 2551 err = aes_dd->irq; 2552 goto err_tasklet_kill; 2553 } 2554 2555 err = devm_request_irq(&pdev->dev, aes_dd->irq, atmel_aes_irq, 2556 IRQF_SHARED, "atmel-aes", aes_dd); 2557 if (err) { 2558 dev_err(dev, "unable to request aes irq.\n"); 2559 goto err_tasklet_kill; 2560 } 2561 2562 /* Initializing the clock */ 2563 aes_dd->iclk = devm_clk_get(&pdev->dev, "aes_clk"); 2564 if (IS_ERR(aes_dd->iclk)) { 2565 dev_err(dev, "clock initialization failed.\n"); 2566 err = PTR_ERR(aes_dd->iclk); 2567 goto err_tasklet_kill; 2568 } 2569 2570 aes_dd->io_base = devm_ioremap_resource(&pdev->dev, aes_res); 2571 if (IS_ERR(aes_dd->io_base)) { 2572 dev_err(dev, "can't ioremap\n"); 2573 err = PTR_ERR(aes_dd->io_base); 2574 goto err_tasklet_kill; 2575 } 2576 2577 err = clk_prepare(aes_dd->iclk); 2578 if (err) 2579 goto err_tasklet_kill; 2580 2581 err = atmel_aes_hw_version_init(aes_dd); 2582 if (err) 2583 goto err_iclk_unprepare; 2584 2585 atmel_aes_get_cap(aes_dd); 2586 2587 #if IS_ENABLED(CONFIG_CRYPTO_DEV_ATMEL_AUTHENC) 2588 if (aes_dd->caps.has_authenc && !atmel_sha_authenc_is_ready()) { 2589 err = -EPROBE_DEFER; 2590 goto err_iclk_unprepare; 2591 } 2592 #endif 2593 2594 err = atmel_aes_buff_init(aes_dd); 2595 if (err) 2596 goto err_iclk_unprepare; 2597 2598 err = atmel_aes_dma_init(aes_dd); 2599 if (err) 2600 goto err_buff_cleanup; 2601 2602 spin_lock(&atmel_aes.lock); 2603 list_add_tail(&aes_dd->list, &atmel_aes.dev_list); 2604 spin_unlock(&atmel_aes.lock); 2605 2606 err = atmel_aes_register_algs(aes_dd); 2607 if (err) 2608 goto err_algs; 2609 2610 dev_info(dev, "Atmel AES - Using %s, %s for DMA transfers\n", 2611 dma_chan_name(aes_dd->src.chan), 2612 dma_chan_name(aes_dd->dst.chan)); 2613 2614 return 0; 2615 2616 err_algs: 2617 spin_lock(&atmel_aes.lock); 2618 list_del(&aes_dd->list); 2619 spin_unlock(&atmel_aes.lock); 2620 atmel_aes_dma_cleanup(aes_dd); 2621 err_buff_cleanup: 2622 atmel_aes_buff_cleanup(aes_dd); 2623 err_iclk_unprepare: 2624 clk_unprepare(aes_dd->iclk); 2625 err_tasklet_kill: 2626 tasklet_kill(&aes_dd->done_task); 2627 tasklet_kill(&aes_dd->queue_task); 2628 2629 return err; 2630 } 2631 2632 static int atmel_aes_remove(struct platform_device *pdev) 2633 { 2634 struct atmel_aes_dev *aes_dd; 2635 2636 aes_dd = platform_get_drvdata(pdev); 2637 if (!aes_dd) 2638 return -ENODEV; 2639 spin_lock(&atmel_aes.lock); 2640 list_del(&aes_dd->list); 2641 spin_unlock(&atmel_aes.lock); 2642 2643 atmel_aes_unregister_algs(aes_dd); 2644 2645 tasklet_kill(&aes_dd->done_task); 2646 tasklet_kill(&aes_dd->queue_task); 2647 2648 atmel_aes_dma_cleanup(aes_dd); 2649 atmel_aes_buff_cleanup(aes_dd); 2650 2651 clk_unprepare(aes_dd->iclk); 2652 2653 return 0; 2654 } 2655 2656 static struct platform_driver atmel_aes_driver = { 2657 .probe = atmel_aes_probe, 2658 .remove = atmel_aes_remove, 2659 .driver = { 2660 .name = "atmel_aes", 2661 .of_match_table = of_match_ptr(atmel_aes_dt_ids), 2662 }, 2663 }; 2664 2665 module_platform_driver(atmel_aes_driver); 2666 2667 MODULE_DESCRIPTION("Atmel AES hw acceleration support."); 2668 MODULE_LICENSE("GPL v2"); 2669 MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); 2670