1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Driver for ARTPEC-6 crypto block using the kernel asynchronous crypto api. 4 * 5 * Copyright (C) 2014-2017 Axis Communications AB 6 */ 7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 8 9 #include <linux/bitfield.h> 10 #include <linux/crypto.h> 11 #include <linux/debugfs.h> 12 #include <linux/delay.h> 13 #include <linux/dma-mapping.h> 14 #include <linux/fault-inject.h> 15 #include <linux/init.h> 16 #include <linux/interrupt.h> 17 #include <linux/kernel.h> 18 #include <linux/list.h> 19 #include <linux/module.h> 20 #include <linux/of.h> 21 #include <linux/platform_device.h> 22 #include <linux/scatterlist.h> 23 #include <linux/slab.h> 24 25 #include <crypto/aes.h> 26 #include <crypto/gcm.h> 27 #include <crypto/internal/aead.h> 28 #include <crypto/internal/hash.h> 29 #include <crypto/internal/skcipher.h> 30 #include <crypto/scatterwalk.h> 31 #include <crypto/sha.h> 32 #include <crypto/xts.h> 33 34 /* Max length of a line in all cache levels for Artpec SoCs. */ 35 #define ARTPEC_CACHE_LINE_MAX 32 36 37 #define PDMA_OUT_CFG 0x0000 38 #define PDMA_OUT_BUF_CFG 0x0004 39 #define PDMA_OUT_CMD 0x0008 40 #define PDMA_OUT_DESCRQ_PUSH 0x0010 41 #define PDMA_OUT_DESCRQ_STAT 0x0014 42 43 #define A6_PDMA_IN_CFG 0x0028 44 #define A6_PDMA_IN_BUF_CFG 0x002c 45 #define A6_PDMA_IN_CMD 0x0030 46 #define A6_PDMA_IN_STATQ_PUSH 0x0038 47 #define A6_PDMA_IN_DESCRQ_PUSH 0x0044 48 #define A6_PDMA_IN_DESCRQ_STAT 0x0048 49 #define A6_PDMA_INTR_MASK 0x0068 50 #define A6_PDMA_ACK_INTR 0x006c 51 #define A6_PDMA_MASKED_INTR 0x0074 52 53 #define A7_PDMA_IN_CFG 0x002c 54 #define A7_PDMA_IN_BUF_CFG 0x0030 55 #define A7_PDMA_IN_CMD 0x0034 56 #define A7_PDMA_IN_STATQ_PUSH 0x003c 57 #define A7_PDMA_IN_DESCRQ_PUSH 0x0048 58 #define A7_PDMA_IN_DESCRQ_STAT 0x004C 59 #define A7_PDMA_INTR_MASK 0x006c 60 #define A7_PDMA_ACK_INTR 0x0070 61 #define A7_PDMA_MASKED_INTR 0x0078 62 63 #define PDMA_OUT_CFG_EN BIT(0) 64 65 #define PDMA_OUT_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) 66 #define PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) 67 68 #define PDMA_OUT_CMD_START BIT(0) 69 #define A6_PDMA_OUT_CMD_STOP BIT(3) 70 #define A7_PDMA_OUT_CMD_STOP BIT(2) 71 72 #define PDMA_OUT_DESCRQ_PUSH_LEN GENMASK(5, 0) 73 #define PDMA_OUT_DESCRQ_PUSH_ADDR GENMASK(31, 6) 74 75 #define PDMA_OUT_DESCRQ_STAT_LEVEL GENMASK(3, 0) 76 #define PDMA_OUT_DESCRQ_STAT_SIZE GENMASK(7, 4) 77 78 #define PDMA_IN_CFG_EN BIT(0) 79 80 #define PDMA_IN_BUF_CFG_DATA_BUF_SIZE GENMASK(4, 0) 81 #define PDMA_IN_BUF_CFG_DESCR_BUF_SIZE GENMASK(9, 5) 82 #define PDMA_IN_BUF_CFG_STAT_BUF_SIZE GENMASK(14, 10) 83 84 #define PDMA_IN_CMD_START BIT(0) 85 #define A6_PDMA_IN_CMD_FLUSH_STAT BIT(2) 86 #define A6_PDMA_IN_CMD_STOP BIT(3) 87 #define A7_PDMA_IN_CMD_FLUSH_STAT BIT(1) 88 #define A7_PDMA_IN_CMD_STOP BIT(2) 89 90 #define PDMA_IN_STATQ_PUSH_LEN GENMASK(5, 0) 91 #define PDMA_IN_STATQ_PUSH_ADDR GENMASK(31, 6) 92 93 #define PDMA_IN_DESCRQ_PUSH_LEN GENMASK(5, 0) 94 #define PDMA_IN_DESCRQ_PUSH_ADDR GENMASK(31, 6) 95 96 #define PDMA_IN_DESCRQ_STAT_LEVEL GENMASK(3, 0) 97 #define PDMA_IN_DESCRQ_STAT_SIZE GENMASK(7, 4) 98 99 #define A6_PDMA_INTR_MASK_IN_DATA BIT(2) 100 #define A6_PDMA_INTR_MASK_IN_EOP BIT(3) 101 #define A6_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(4) 102 103 #define A7_PDMA_INTR_MASK_IN_DATA BIT(3) 104 #define A7_PDMA_INTR_MASK_IN_EOP BIT(4) 105 #define A7_PDMA_INTR_MASK_IN_EOP_FLUSH BIT(5) 106 107 #define A6_CRY_MD_OPER GENMASK(19, 16) 108 109 #define A6_CRY_MD_HASH_SEL_CTX GENMASK(21, 20) 110 #define A6_CRY_MD_HASH_HMAC_FIN BIT(23) 111 112 #define A6_CRY_MD_CIPHER_LEN GENMASK(21, 20) 113 #define A6_CRY_MD_CIPHER_DECR BIT(22) 114 #define A6_CRY_MD_CIPHER_TWEAK BIT(23) 115 #define A6_CRY_MD_CIPHER_DSEQ BIT(24) 116 117 #define A7_CRY_MD_OPER GENMASK(11, 8) 118 119 #define A7_CRY_MD_HASH_SEL_CTX GENMASK(13, 12) 120 #define A7_CRY_MD_HASH_HMAC_FIN BIT(15) 121 122 #define A7_CRY_MD_CIPHER_LEN GENMASK(13, 12) 123 #define A7_CRY_MD_CIPHER_DECR BIT(14) 124 #define A7_CRY_MD_CIPHER_TWEAK BIT(15) 125 #define A7_CRY_MD_CIPHER_DSEQ BIT(16) 126 127 /* DMA metadata constants */ 128 #define regk_crypto_aes_cbc 0x00000002 129 #define regk_crypto_aes_ctr 0x00000003 130 #define regk_crypto_aes_ecb 0x00000001 131 #define regk_crypto_aes_gcm 0x00000004 132 #define regk_crypto_aes_xts 0x00000005 133 #define regk_crypto_cache 0x00000002 134 #define a6_regk_crypto_dlkey 0x0000000a 135 #define a7_regk_crypto_dlkey 0x0000000e 136 #define regk_crypto_ext 0x00000001 137 #define regk_crypto_hmac_sha1 0x00000007 138 #define regk_crypto_hmac_sha256 0x00000009 139 #define regk_crypto_init 0x00000000 140 #define regk_crypto_key_128 0x00000000 141 #define regk_crypto_key_192 0x00000001 142 #define regk_crypto_key_256 0x00000002 143 #define regk_crypto_null 0x00000000 144 #define regk_crypto_sha1 0x00000006 145 #define regk_crypto_sha256 0x00000008 146 147 /* DMA descriptor structures */ 148 struct pdma_descr_ctrl { 149 unsigned char short_descr : 1; 150 unsigned char pad1 : 1; 151 unsigned char eop : 1; 152 unsigned char intr : 1; 153 unsigned char short_len : 3; 154 unsigned char pad2 : 1; 155 } __packed; 156 157 struct pdma_data_descr { 158 unsigned int len : 24; 159 unsigned int buf : 32; 160 } __packed; 161 162 struct pdma_short_descr { 163 unsigned char data[7]; 164 } __packed; 165 166 struct pdma_descr { 167 struct pdma_descr_ctrl ctrl; 168 union { 169 struct pdma_data_descr data; 170 struct pdma_short_descr shrt; 171 }; 172 }; 173 174 struct pdma_stat_descr { 175 unsigned char pad1 : 1; 176 unsigned char pad2 : 1; 177 unsigned char eop : 1; 178 unsigned char pad3 : 5; 179 unsigned int len : 24; 180 }; 181 182 /* Each descriptor array can hold max 64 entries */ 183 #define PDMA_DESCR_COUNT 64 184 185 #define MODULE_NAME "Artpec-6 CA" 186 187 /* Hash modes (including HMAC variants) */ 188 #define ARTPEC6_CRYPTO_HASH_SHA1 1 189 #define ARTPEC6_CRYPTO_HASH_SHA256 2 190 191 /* Crypto modes */ 192 #define ARTPEC6_CRYPTO_CIPHER_AES_ECB 1 193 #define ARTPEC6_CRYPTO_CIPHER_AES_CBC 2 194 #define ARTPEC6_CRYPTO_CIPHER_AES_CTR 3 195 #define ARTPEC6_CRYPTO_CIPHER_AES_XTS 5 196 197 /* The PDMA is a DMA-engine tightly coupled with a ciphering engine. 198 * It operates on a descriptor array with up to 64 descriptor entries. 199 * The arrays must be 64 byte aligned in memory. 200 * 201 * The ciphering unit has no registers and is completely controlled by 202 * a 4-byte metadata that is inserted at the beginning of each dma packet. 203 * 204 * A dma packet is a sequence of descriptors terminated by setting the .eop 205 * field in the final descriptor of the packet. 206 * 207 * Multiple packets are used for providing context data, key data and 208 * the plain/ciphertext. 209 * 210 * PDMA Descriptors (Array) 211 * +------+------+------+~~+-------+------+---- 212 * | 0 | 1 | 2 |~~| 11 EOP| 12 | .... 213 * +--+---+--+---+----+-+~~+-------+----+-+---- 214 * | | | | | 215 * | | | | | 216 * __|__ +-------++-------++-------+ +----+ 217 * | MD | |Payload||Payload||Payload| | MD | 218 * +-----+ +-------++-------++-------+ +----+ 219 */ 220 221 struct artpec6_crypto_bounce_buffer { 222 struct list_head list; 223 size_t length; 224 struct scatterlist *sg; 225 size_t offset; 226 /* buf is aligned to ARTPEC_CACHE_LINE_MAX and 227 * holds up to ARTPEC_CACHE_LINE_MAX bytes data. 228 */ 229 void *buf; 230 }; 231 232 struct artpec6_crypto_dma_map { 233 dma_addr_t dma_addr; 234 size_t size; 235 enum dma_data_direction dir; 236 }; 237 238 struct artpec6_crypto_dma_descriptors { 239 struct pdma_descr out[PDMA_DESCR_COUNT] __aligned(64); 240 struct pdma_descr in[PDMA_DESCR_COUNT] __aligned(64); 241 u32 stat[PDMA_DESCR_COUNT] __aligned(64); 242 struct list_head bounce_buffers; 243 /* Enough maps for all out/in buffers, and all three descr. arrays */ 244 struct artpec6_crypto_dma_map maps[PDMA_DESCR_COUNT * 2 + 2]; 245 dma_addr_t out_dma_addr; 246 dma_addr_t in_dma_addr; 247 dma_addr_t stat_dma_addr; 248 size_t out_cnt; 249 size_t in_cnt; 250 size_t map_count; 251 }; 252 253 enum artpec6_crypto_variant { 254 ARTPEC6_CRYPTO, 255 ARTPEC7_CRYPTO, 256 }; 257 258 struct artpec6_crypto { 259 void __iomem *base; 260 spinlock_t queue_lock; 261 struct list_head queue; /* waiting for pdma fifo space */ 262 struct list_head pending; /* submitted to pdma fifo */ 263 struct tasklet_struct task; 264 struct kmem_cache *dma_cache; 265 int pending_count; 266 struct timer_list timer; 267 enum artpec6_crypto_variant variant; 268 void *pad_buffer; /* cache-aligned block padding buffer */ 269 void *zero_buffer; 270 }; 271 272 enum artpec6_crypto_hash_flags { 273 HASH_FLAG_INIT_CTX = 2, 274 HASH_FLAG_UPDATE = 4, 275 HASH_FLAG_FINALIZE = 8, 276 HASH_FLAG_HMAC = 16, 277 HASH_FLAG_UPDATE_KEY = 32, 278 }; 279 280 struct artpec6_crypto_req_common { 281 struct list_head list; 282 struct list_head complete_in_progress; 283 struct artpec6_crypto_dma_descriptors *dma; 284 struct crypto_async_request *req; 285 void (*complete)(struct crypto_async_request *req); 286 gfp_t gfp_flags; 287 }; 288 289 struct artpec6_hash_request_context { 290 char partial_buffer[SHA256_BLOCK_SIZE]; 291 char partial_buffer_out[SHA256_BLOCK_SIZE]; 292 char key_buffer[SHA256_BLOCK_SIZE]; 293 char pad_buffer[SHA256_BLOCK_SIZE + 32]; 294 unsigned char digeststate[SHA256_DIGEST_SIZE]; 295 size_t partial_bytes; 296 u64 digcnt; 297 u32 key_md; 298 u32 hash_md; 299 enum artpec6_crypto_hash_flags hash_flags; 300 struct artpec6_crypto_req_common common; 301 }; 302 303 struct artpec6_hash_export_state { 304 char partial_buffer[SHA256_BLOCK_SIZE]; 305 unsigned char digeststate[SHA256_DIGEST_SIZE]; 306 size_t partial_bytes; 307 u64 digcnt; 308 int oper; 309 unsigned int hash_flags; 310 }; 311 312 struct artpec6_hashalg_context { 313 char hmac_key[SHA256_BLOCK_SIZE]; 314 size_t hmac_key_length; 315 struct crypto_shash *child_hash; 316 }; 317 318 struct artpec6_crypto_request_context { 319 u32 cipher_md; 320 bool decrypt; 321 struct artpec6_crypto_req_common common; 322 }; 323 324 struct artpec6_cryptotfm_context { 325 unsigned char aes_key[2*AES_MAX_KEY_SIZE]; 326 size_t key_length; 327 u32 key_md; 328 int crypto_type; 329 struct crypto_sync_skcipher *fallback; 330 }; 331 332 struct artpec6_crypto_aead_hw_ctx { 333 __be64 aad_length_bits; 334 __be64 text_length_bits; 335 __u8 J0[AES_BLOCK_SIZE]; 336 }; 337 338 struct artpec6_crypto_aead_req_ctx { 339 struct artpec6_crypto_aead_hw_ctx hw_ctx; 340 u32 cipher_md; 341 bool decrypt; 342 struct artpec6_crypto_req_common common; 343 __u8 decryption_tag[AES_BLOCK_SIZE] ____cacheline_aligned; 344 }; 345 346 /* The crypto framework makes it hard to avoid this global. */ 347 static struct device *artpec6_crypto_dev; 348 349 #ifdef CONFIG_FAULT_INJECTION 350 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); 351 static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); 352 #endif 353 354 enum { 355 ARTPEC6_CRYPTO_PREPARE_HASH_NO_START, 356 ARTPEC6_CRYPTO_PREPARE_HASH_START, 357 }; 358 359 static int artpec6_crypto_prepare_aead(struct aead_request *areq); 360 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq); 361 static int artpec6_crypto_prepare_hash(struct ahash_request *areq); 362 363 static void 364 artpec6_crypto_complete_crypto(struct crypto_async_request *req); 365 static void 366 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req); 367 static void 368 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req); 369 static void 370 artpec6_crypto_complete_aead(struct crypto_async_request *req); 371 static void 372 artpec6_crypto_complete_hash(struct crypto_async_request *req); 373 374 static int 375 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common); 376 377 static void 378 artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common); 379 380 struct artpec6_crypto_walk { 381 struct scatterlist *sg; 382 size_t offset; 383 }; 384 385 static void artpec6_crypto_walk_init(struct artpec6_crypto_walk *awalk, 386 struct scatterlist *sg) 387 { 388 awalk->sg = sg; 389 awalk->offset = 0; 390 } 391 392 static size_t artpec6_crypto_walk_advance(struct artpec6_crypto_walk *awalk, 393 size_t nbytes) 394 { 395 while (nbytes && awalk->sg) { 396 size_t piece; 397 398 WARN_ON(awalk->offset > awalk->sg->length); 399 400 piece = min(nbytes, (size_t)awalk->sg->length - awalk->offset); 401 nbytes -= piece; 402 awalk->offset += piece; 403 if (awalk->offset == awalk->sg->length) { 404 awalk->sg = sg_next(awalk->sg); 405 awalk->offset = 0; 406 } 407 408 } 409 410 return nbytes; 411 } 412 413 static size_t 414 artpec6_crypto_walk_chunklen(const struct artpec6_crypto_walk *awalk) 415 { 416 WARN_ON(awalk->sg->length == awalk->offset); 417 418 return awalk->sg->length - awalk->offset; 419 } 420 421 static dma_addr_t 422 artpec6_crypto_walk_chunk_phys(const struct artpec6_crypto_walk *awalk) 423 { 424 return sg_phys(awalk->sg) + awalk->offset; 425 } 426 427 static void 428 artpec6_crypto_copy_bounce_buffers(struct artpec6_crypto_req_common *common) 429 { 430 struct artpec6_crypto_dma_descriptors *dma = common->dma; 431 struct artpec6_crypto_bounce_buffer *b; 432 struct artpec6_crypto_bounce_buffer *next; 433 434 list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { 435 pr_debug("bounce entry %p: %zu bytes @ %zu from %p\n", 436 b, b->length, b->offset, b->buf); 437 sg_pcopy_from_buffer(b->sg, 438 1, 439 b->buf, 440 b->length, 441 b->offset); 442 443 list_del(&b->list); 444 kfree(b); 445 } 446 } 447 448 static inline bool artpec6_crypto_busy(void) 449 { 450 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 451 int fifo_count = ac->pending_count; 452 453 return fifo_count > 6; 454 } 455 456 static int artpec6_crypto_submit(struct artpec6_crypto_req_common *req) 457 { 458 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 459 int ret = -EBUSY; 460 461 spin_lock_bh(&ac->queue_lock); 462 463 if (!artpec6_crypto_busy()) { 464 list_add_tail(&req->list, &ac->pending); 465 artpec6_crypto_start_dma(req); 466 ret = -EINPROGRESS; 467 } else if (req->req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) { 468 list_add_tail(&req->list, &ac->queue); 469 } else { 470 artpec6_crypto_common_destroy(req); 471 } 472 473 spin_unlock_bh(&ac->queue_lock); 474 475 return ret; 476 } 477 478 static void artpec6_crypto_start_dma(struct artpec6_crypto_req_common *common) 479 { 480 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 481 enum artpec6_crypto_variant variant = ac->variant; 482 void __iomem *base = ac->base; 483 struct artpec6_crypto_dma_descriptors *dma = common->dma; 484 u32 ind, statd, outd; 485 486 /* Make descriptor content visible to the DMA before starting it. */ 487 wmb(); 488 489 ind = FIELD_PREP(PDMA_IN_DESCRQ_PUSH_LEN, dma->in_cnt - 1) | 490 FIELD_PREP(PDMA_IN_DESCRQ_PUSH_ADDR, dma->in_dma_addr >> 6); 491 492 statd = FIELD_PREP(PDMA_IN_STATQ_PUSH_LEN, dma->in_cnt - 1) | 493 FIELD_PREP(PDMA_IN_STATQ_PUSH_ADDR, dma->stat_dma_addr >> 6); 494 495 outd = FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_LEN, dma->out_cnt - 1) | 496 FIELD_PREP(PDMA_OUT_DESCRQ_PUSH_ADDR, dma->out_dma_addr >> 6); 497 498 if (variant == ARTPEC6_CRYPTO) { 499 writel_relaxed(ind, base + A6_PDMA_IN_DESCRQ_PUSH); 500 writel_relaxed(statd, base + A6_PDMA_IN_STATQ_PUSH); 501 writel_relaxed(PDMA_IN_CMD_START, base + A6_PDMA_IN_CMD); 502 } else { 503 writel_relaxed(ind, base + A7_PDMA_IN_DESCRQ_PUSH); 504 writel_relaxed(statd, base + A7_PDMA_IN_STATQ_PUSH); 505 writel_relaxed(PDMA_IN_CMD_START, base + A7_PDMA_IN_CMD); 506 } 507 508 writel_relaxed(outd, base + PDMA_OUT_DESCRQ_PUSH); 509 writel_relaxed(PDMA_OUT_CMD_START, base + PDMA_OUT_CMD); 510 511 ac->pending_count++; 512 } 513 514 static void 515 artpec6_crypto_init_dma_operation(struct artpec6_crypto_req_common *common) 516 { 517 struct artpec6_crypto_dma_descriptors *dma = common->dma; 518 519 dma->out_cnt = 0; 520 dma->in_cnt = 0; 521 dma->map_count = 0; 522 INIT_LIST_HEAD(&dma->bounce_buffers); 523 } 524 525 static bool fault_inject_dma_descr(void) 526 { 527 #ifdef CONFIG_FAULT_INJECTION 528 return should_fail(&artpec6_crypto_fail_dma_array_full, 1); 529 #else 530 return false; 531 #endif 532 } 533 534 /** artpec6_crypto_setup_out_descr_phys - Setup an out channel with a 535 * physical address 536 * 537 * @addr: The physical address of the data buffer 538 * @len: The length of the data buffer 539 * @eop: True if this is the last buffer in the packet 540 * 541 * @return 0 on success or -ENOSPC if there are no more descriptors available 542 */ 543 static int 544 artpec6_crypto_setup_out_descr_phys(struct artpec6_crypto_req_common *common, 545 dma_addr_t addr, size_t len, bool eop) 546 { 547 struct artpec6_crypto_dma_descriptors *dma = common->dma; 548 struct pdma_descr *d; 549 550 if (dma->out_cnt >= PDMA_DESCR_COUNT || 551 fault_inject_dma_descr()) { 552 pr_err("No free OUT DMA descriptors available!\n"); 553 return -ENOSPC; 554 } 555 556 d = &dma->out[dma->out_cnt++]; 557 memset(d, 0, sizeof(*d)); 558 559 d->ctrl.short_descr = 0; 560 d->ctrl.eop = eop; 561 d->data.len = len; 562 d->data.buf = addr; 563 return 0; 564 } 565 566 /** artpec6_crypto_setup_out_descr_short - Setup a short out descriptor 567 * 568 * @dst: The virtual address of the data 569 * @len: The length of the data, must be between 1 to 7 bytes 570 * @eop: True if this is the last buffer in the packet 571 * 572 * @return 0 on success 573 * -ENOSPC if no more descriptors are available 574 * -EINVAL if the data length exceeds 7 bytes 575 */ 576 static int 577 artpec6_crypto_setup_out_descr_short(struct artpec6_crypto_req_common *common, 578 void *dst, unsigned int len, bool eop) 579 { 580 struct artpec6_crypto_dma_descriptors *dma = common->dma; 581 struct pdma_descr *d; 582 583 if (dma->out_cnt >= PDMA_DESCR_COUNT || 584 fault_inject_dma_descr()) { 585 pr_err("No free OUT DMA descriptors available!\n"); 586 return -ENOSPC; 587 } else if (len > 7 || len < 1) { 588 return -EINVAL; 589 } 590 d = &dma->out[dma->out_cnt++]; 591 memset(d, 0, sizeof(*d)); 592 593 d->ctrl.short_descr = 1; 594 d->ctrl.short_len = len; 595 d->ctrl.eop = eop; 596 memcpy(d->shrt.data, dst, len); 597 return 0; 598 } 599 600 static int artpec6_crypto_dma_map_page(struct artpec6_crypto_req_common *common, 601 struct page *page, size_t offset, 602 size_t size, 603 enum dma_data_direction dir, 604 dma_addr_t *dma_addr_out) 605 { 606 struct artpec6_crypto_dma_descriptors *dma = common->dma; 607 struct device *dev = artpec6_crypto_dev; 608 struct artpec6_crypto_dma_map *map; 609 dma_addr_t dma_addr; 610 611 *dma_addr_out = 0; 612 613 if (dma->map_count >= ARRAY_SIZE(dma->maps)) 614 return -ENOMEM; 615 616 dma_addr = dma_map_page(dev, page, offset, size, dir); 617 if (dma_mapping_error(dev, dma_addr)) 618 return -ENOMEM; 619 620 map = &dma->maps[dma->map_count++]; 621 map->size = size; 622 map->dma_addr = dma_addr; 623 map->dir = dir; 624 625 *dma_addr_out = dma_addr; 626 627 return 0; 628 } 629 630 static int 631 artpec6_crypto_dma_map_single(struct artpec6_crypto_req_common *common, 632 void *ptr, size_t size, 633 enum dma_data_direction dir, 634 dma_addr_t *dma_addr_out) 635 { 636 struct page *page = virt_to_page(ptr); 637 size_t offset = (uintptr_t)ptr & ~PAGE_MASK; 638 639 return artpec6_crypto_dma_map_page(common, page, offset, size, dir, 640 dma_addr_out); 641 } 642 643 static int 644 artpec6_crypto_dma_map_descs(struct artpec6_crypto_req_common *common) 645 { 646 struct artpec6_crypto_dma_descriptors *dma = common->dma; 647 int ret; 648 649 ret = artpec6_crypto_dma_map_single(common, dma->in, 650 sizeof(dma->in[0]) * dma->in_cnt, 651 DMA_TO_DEVICE, &dma->in_dma_addr); 652 if (ret) 653 return ret; 654 655 ret = artpec6_crypto_dma_map_single(common, dma->out, 656 sizeof(dma->out[0]) * dma->out_cnt, 657 DMA_TO_DEVICE, &dma->out_dma_addr); 658 if (ret) 659 return ret; 660 661 /* We only read one stat descriptor */ 662 dma->stat[dma->in_cnt - 1] = 0; 663 664 /* 665 * DMA_BIDIRECTIONAL since we need our zeroing of the stat descriptor 666 * to be written. 667 */ 668 return artpec6_crypto_dma_map_single(common, 669 dma->stat, 670 sizeof(dma->stat[0]) * dma->in_cnt, 671 DMA_BIDIRECTIONAL, 672 &dma->stat_dma_addr); 673 } 674 675 static void 676 artpec6_crypto_dma_unmap_all(struct artpec6_crypto_req_common *common) 677 { 678 struct artpec6_crypto_dma_descriptors *dma = common->dma; 679 struct device *dev = artpec6_crypto_dev; 680 int i; 681 682 for (i = 0; i < dma->map_count; i++) { 683 struct artpec6_crypto_dma_map *map = &dma->maps[i]; 684 685 dma_unmap_page(dev, map->dma_addr, map->size, map->dir); 686 } 687 688 dma->map_count = 0; 689 } 690 691 /** artpec6_crypto_setup_out_descr - Setup an out descriptor 692 * 693 * @dst: The virtual address of the data 694 * @len: The length of the data 695 * @eop: True if this is the last buffer in the packet 696 * @use_short: If this is true and the data length is 7 bytes or less then 697 * a short descriptor will be used 698 * 699 * @return 0 on success 700 * Any errors from artpec6_crypto_setup_out_descr_short() or 701 * setup_out_descr_phys() 702 */ 703 static int 704 artpec6_crypto_setup_out_descr(struct artpec6_crypto_req_common *common, 705 void *dst, unsigned int len, bool eop, 706 bool use_short) 707 { 708 if (use_short && len < 7) { 709 return artpec6_crypto_setup_out_descr_short(common, dst, len, 710 eop); 711 } else { 712 int ret; 713 dma_addr_t dma_addr; 714 715 ret = artpec6_crypto_dma_map_single(common, dst, len, 716 DMA_TO_DEVICE, 717 &dma_addr); 718 if (ret) 719 return ret; 720 721 return artpec6_crypto_setup_out_descr_phys(common, dma_addr, 722 len, eop); 723 } 724 } 725 726 /** artpec6_crypto_setup_in_descr_phys - Setup an in channel with a 727 * physical address 728 * 729 * @addr: The physical address of the data buffer 730 * @len: The length of the data buffer 731 * @intr: True if an interrupt should be fired after HW processing of this 732 * descriptor 733 * 734 */ 735 static int 736 artpec6_crypto_setup_in_descr_phys(struct artpec6_crypto_req_common *common, 737 dma_addr_t addr, unsigned int len, bool intr) 738 { 739 struct artpec6_crypto_dma_descriptors *dma = common->dma; 740 struct pdma_descr *d; 741 742 if (dma->in_cnt >= PDMA_DESCR_COUNT || 743 fault_inject_dma_descr()) { 744 pr_err("No free IN DMA descriptors available!\n"); 745 return -ENOSPC; 746 } 747 d = &dma->in[dma->in_cnt++]; 748 memset(d, 0, sizeof(*d)); 749 750 d->ctrl.intr = intr; 751 d->data.len = len; 752 d->data.buf = addr; 753 return 0; 754 } 755 756 /** artpec6_crypto_setup_in_descr - Setup an in channel descriptor 757 * 758 * @buffer: The virtual address to of the data buffer 759 * @len: The length of the data buffer 760 * @last: If this is the last data buffer in the request (i.e. an interrupt 761 * is needed 762 * 763 * Short descriptors are not used for the in channel 764 */ 765 static int 766 artpec6_crypto_setup_in_descr(struct artpec6_crypto_req_common *common, 767 void *buffer, unsigned int len, bool last) 768 { 769 dma_addr_t dma_addr; 770 int ret; 771 772 ret = artpec6_crypto_dma_map_single(common, buffer, len, 773 DMA_FROM_DEVICE, &dma_addr); 774 if (ret) 775 return ret; 776 777 return artpec6_crypto_setup_in_descr_phys(common, dma_addr, len, last); 778 } 779 780 static struct artpec6_crypto_bounce_buffer * 781 artpec6_crypto_alloc_bounce(gfp_t flags) 782 { 783 void *base; 784 size_t alloc_size = sizeof(struct artpec6_crypto_bounce_buffer) + 785 2 * ARTPEC_CACHE_LINE_MAX; 786 struct artpec6_crypto_bounce_buffer *bbuf = kzalloc(alloc_size, flags); 787 788 if (!bbuf) 789 return NULL; 790 791 base = bbuf + 1; 792 bbuf->buf = PTR_ALIGN(base, ARTPEC_CACHE_LINE_MAX); 793 return bbuf; 794 } 795 796 static int setup_bounce_buffer_in(struct artpec6_crypto_req_common *common, 797 struct artpec6_crypto_walk *walk, size_t size) 798 { 799 struct artpec6_crypto_bounce_buffer *bbuf; 800 int ret; 801 802 bbuf = artpec6_crypto_alloc_bounce(common->gfp_flags); 803 if (!bbuf) 804 return -ENOMEM; 805 806 bbuf->length = size; 807 bbuf->sg = walk->sg; 808 bbuf->offset = walk->offset; 809 810 ret = artpec6_crypto_setup_in_descr(common, bbuf->buf, size, false); 811 if (ret) { 812 kfree(bbuf); 813 return ret; 814 } 815 816 pr_debug("BOUNCE %zu offset %zu\n", size, walk->offset); 817 list_add_tail(&bbuf->list, &common->dma->bounce_buffers); 818 return 0; 819 } 820 821 static int 822 artpec6_crypto_setup_sg_descrs_in(struct artpec6_crypto_req_common *common, 823 struct artpec6_crypto_walk *walk, 824 size_t count) 825 { 826 size_t chunk; 827 int ret; 828 dma_addr_t addr; 829 830 while (walk->sg && count) { 831 chunk = min(count, artpec6_crypto_walk_chunklen(walk)); 832 addr = artpec6_crypto_walk_chunk_phys(walk); 833 834 /* When destination buffers are not aligned to the cache line 835 * size we need bounce buffers. The DMA-API requires that the 836 * entire line is owned by the DMA buffer and this holds also 837 * for the case when coherent DMA is used. 838 */ 839 if (!IS_ALIGNED(addr, ARTPEC_CACHE_LINE_MAX)) { 840 chunk = min_t(dma_addr_t, chunk, 841 ALIGN(addr, ARTPEC_CACHE_LINE_MAX) - 842 addr); 843 844 pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); 845 ret = setup_bounce_buffer_in(common, walk, chunk); 846 } else if (chunk < ARTPEC_CACHE_LINE_MAX) { 847 pr_debug("CHUNK-b %pad:%zu\n", &addr, chunk); 848 ret = setup_bounce_buffer_in(common, walk, chunk); 849 } else { 850 dma_addr_t dma_addr; 851 852 chunk = chunk & ~(ARTPEC_CACHE_LINE_MAX-1); 853 854 pr_debug("CHUNK %pad:%zu\n", &addr, chunk); 855 856 ret = artpec6_crypto_dma_map_page(common, 857 sg_page(walk->sg), 858 walk->sg->offset + 859 walk->offset, 860 chunk, 861 DMA_FROM_DEVICE, 862 &dma_addr); 863 if (ret) 864 return ret; 865 866 ret = artpec6_crypto_setup_in_descr_phys(common, 867 dma_addr, 868 chunk, false); 869 } 870 871 if (ret) 872 return ret; 873 874 count = count - chunk; 875 artpec6_crypto_walk_advance(walk, chunk); 876 } 877 878 if (count) 879 pr_err("EOL unexpected %zu bytes left\n", count); 880 881 return count ? -EINVAL : 0; 882 } 883 884 static int 885 artpec6_crypto_setup_sg_descrs_out(struct artpec6_crypto_req_common *common, 886 struct artpec6_crypto_walk *walk, 887 size_t count) 888 { 889 size_t chunk; 890 int ret; 891 dma_addr_t addr; 892 893 while (walk->sg && count) { 894 chunk = min(count, artpec6_crypto_walk_chunklen(walk)); 895 addr = artpec6_crypto_walk_chunk_phys(walk); 896 897 pr_debug("OUT-CHUNK %pad:%zu\n", &addr, chunk); 898 899 if (addr & 3) { 900 char buf[3]; 901 902 chunk = min_t(size_t, chunk, (4-(addr&3))); 903 904 sg_pcopy_to_buffer(walk->sg, 1, buf, chunk, 905 walk->offset); 906 907 ret = artpec6_crypto_setup_out_descr_short(common, buf, 908 chunk, 909 false); 910 } else { 911 dma_addr_t dma_addr; 912 913 ret = artpec6_crypto_dma_map_page(common, 914 sg_page(walk->sg), 915 walk->sg->offset + 916 walk->offset, 917 chunk, 918 DMA_TO_DEVICE, 919 &dma_addr); 920 if (ret) 921 return ret; 922 923 ret = artpec6_crypto_setup_out_descr_phys(common, 924 dma_addr, 925 chunk, false); 926 } 927 928 if (ret) 929 return ret; 930 931 count = count - chunk; 932 artpec6_crypto_walk_advance(walk, chunk); 933 } 934 935 if (count) 936 pr_err("EOL unexpected %zu bytes left\n", count); 937 938 return count ? -EINVAL : 0; 939 } 940 941 942 /** artpec6_crypto_terminate_out_descrs - Set the EOP on the last out descriptor 943 * 944 * If the out descriptor list is non-empty, then the eop flag on the 945 * last used out descriptor will be set. 946 * 947 * @return 0 on success 948 * -EINVAL if the out descriptor is empty or has overflown 949 */ 950 static int 951 artpec6_crypto_terminate_out_descrs(struct artpec6_crypto_req_common *common) 952 { 953 struct artpec6_crypto_dma_descriptors *dma = common->dma; 954 struct pdma_descr *d; 955 956 if (!dma->out_cnt || dma->out_cnt > PDMA_DESCR_COUNT) { 957 pr_err("%s: OUT descriptor list is %s\n", 958 MODULE_NAME, dma->out_cnt ? "empty" : "full"); 959 return -EINVAL; 960 961 } 962 963 d = &dma->out[dma->out_cnt-1]; 964 d->ctrl.eop = 1; 965 966 return 0; 967 } 968 969 /** artpec6_crypto_terminate_in_descrs - Set the interrupt flag on the last 970 * in descriptor 971 * 972 * See artpec6_crypto_terminate_out_descrs() for return values 973 */ 974 static int 975 artpec6_crypto_terminate_in_descrs(struct artpec6_crypto_req_common *common) 976 { 977 struct artpec6_crypto_dma_descriptors *dma = common->dma; 978 struct pdma_descr *d; 979 980 if (!dma->in_cnt || dma->in_cnt > PDMA_DESCR_COUNT) { 981 pr_err("%s: IN descriptor list is %s\n", 982 MODULE_NAME, dma->in_cnt ? "empty" : "full"); 983 return -EINVAL; 984 } 985 986 d = &dma->in[dma->in_cnt-1]; 987 d->ctrl.intr = 1; 988 return 0; 989 } 990 991 /** create_hash_pad - Create a Secure Hash conformant pad 992 * 993 * @dst: The destination buffer to write the pad. Must be at least 64 bytes 994 * @dgstlen: The total length of the hash digest in bytes 995 * @bitcount: The total length of the digest in bits 996 * 997 * @return The total number of padding bytes written to @dst 998 */ 999 static size_t 1000 create_hash_pad(int oper, unsigned char *dst, u64 dgstlen, u64 bitcount) 1001 { 1002 unsigned int mod, target, diff, pad_bytes, size_bytes; 1003 __be64 bits = __cpu_to_be64(bitcount); 1004 1005 switch (oper) { 1006 case regk_crypto_sha1: 1007 case regk_crypto_sha256: 1008 case regk_crypto_hmac_sha1: 1009 case regk_crypto_hmac_sha256: 1010 target = 448 / 8; 1011 mod = 512 / 8; 1012 size_bytes = 8; 1013 break; 1014 default: 1015 target = 896 / 8; 1016 mod = 1024 / 8; 1017 size_bytes = 16; 1018 break; 1019 } 1020 1021 target -= 1; 1022 diff = dgstlen & (mod - 1); 1023 pad_bytes = diff > target ? target + mod - diff : target - diff; 1024 1025 memset(dst + 1, 0, pad_bytes); 1026 dst[0] = 0x80; 1027 1028 if (size_bytes == 16) { 1029 memset(dst + 1 + pad_bytes, 0, 8); 1030 memcpy(dst + 1 + pad_bytes + 8, &bits, 8); 1031 } else { 1032 memcpy(dst + 1 + pad_bytes, &bits, 8); 1033 } 1034 1035 return pad_bytes + size_bytes + 1; 1036 } 1037 1038 static int artpec6_crypto_common_init(struct artpec6_crypto_req_common *common, 1039 struct crypto_async_request *parent, 1040 void (*complete)(struct crypto_async_request *req), 1041 struct scatterlist *dstsg, unsigned int nbytes) 1042 { 1043 gfp_t flags; 1044 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 1045 1046 flags = (parent->flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? 1047 GFP_KERNEL : GFP_ATOMIC; 1048 1049 common->gfp_flags = flags; 1050 common->dma = kmem_cache_alloc(ac->dma_cache, flags); 1051 if (!common->dma) 1052 return -ENOMEM; 1053 1054 common->req = parent; 1055 common->complete = complete; 1056 return 0; 1057 } 1058 1059 static void 1060 artpec6_crypto_bounce_destroy(struct artpec6_crypto_dma_descriptors *dma) 1061 { 1062 struct artpec6_crypto_bounce_buffer *b; 1063 struct artpec6_crypto_bounce_buffer *next; 1064 1065 list_for_each_entry_safe(b, next, &dma->bounce_buffers, list) { 1066 kfree(b); 1067 } 1068 } 1069 1070 static int 1071 artpec6_crypto_common_destroy(struct artpec6_crypto_req_common *common) 1072 { 1073 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 1074 1075 artpec6_crypto_dma_unmap_all(common); 1076 artpec6_crypto_bounce_destroy(common->dma); 1077 kmem_cache_free(ac->dma_cache, common->dma); 1078 common->dma = NULL; 1079 return 0; 1080 } 1081 1082 /* 1083 * Ciphering functions. 1084 */ 1085 static int artpec6_crypto_encrypt(struct skcipher_request *req) 1086 { 1087 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 1088 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); 1089 struct artpec6_crypto_request_context *req_ctx = NULL; 1090 void (*complete)(struct crypto_async_request *req); 1091 int ret; 1092 1093 req_ctx = skcipher_request_ctx(req); 1094 1095 switch (ctx->crypto_type) { 1096 case ARTPEC6_CRYPTO_CIPHER_AES_CBC: 1097 case ARTPEC6_CRYPTO_CIPHER_AES_ECB: 1098 case ARTPEC6_CRYPTO_CIPHER_AES_XTS: 1099 req_ctx->decrypt = 0; 1100 break; 1101 default: 1102 break; 1103 } 1104 1105 switch (ctx->crypto_type) { 1106 case ARTPEC6_CRYPTO_CIPHER_AES_CBC: 1107 complete = artpec6_crypto_complete_cbc_encrypt; 1108 break; 1109 default: 1110 complete = artpec6_crypto_complete_crypto; 1111 break; 1112 } 1113 1114 ret = artpec6_crypto_common_init(&req_ctx->common, 1115 &req->base, 1116 complete, 1117 req->dst, req->cryptlen); 1118 if (ret) 1119 return ret; 1120 1121 ret = artpec6_crypto_prepare_crypto(req); 1122 if (ret) { 1123 artpec6_crypto_common_destroy(&req_ctx->common); 1124 return ret; 1125 } 1126 1127 return artpec6_crypto_submit(&req_ctx->common); 1128 } 1129 1130 static int artpec6_crypto_decrypt(struct skcipher_request *req) 1131 { 1132 int ret; 1133 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 1134 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); 1135 struct artpec6_crypto_request_context *req_ctx = NULL; 1136 void (*complete)(struct crypto_async_request *req); 1137 1138 req_ctx = skcipher_request_ctx(req); 1139 1140 switch (ctx->crypto_type) { 1141 case ARTPEC6_CRYPTO_CIPHER_AES_CBC: 1142 case ARTPEC6_CRYPTO_CIPHER_AES_ECB: 1143 case ARTPEC6_CRYPTO_CIPHER_AES_XTS: 1144 req_ctx->decrypt = 1; 1145 break; 1146 default: 1147 break; 1148 } 1149 1150 1151 switch (ctx->crypto_type) { 1152 case ARTPEC6_CRYPTO_CIPHER_AES_CBC: 1153 complete = artpec6_crypto_complete_cbc_decrypt; 1154 break; 1155 default: 1156 complete = artpec6_crypto_complete_crypto; 1157 break; 1158 } 1159 1160 ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, 1161 complete, 1162 req->dst, req->cryptlen); 1163 if (ret) 1164 return ret; 1165 1166 ret = artpec6_crypto_prepare_crypto(req); 1167 if (ret) { 1168 artpec6_crypto_common_destroy(&req_ctx->common); 1169 return ret; 1170 } 1171 1172 return artpec6_crypto_submit(&req_ctx->common); 1173 } 1174 1175 static int 1176 artpec6_crypto_ctr_crypt(struct skcipher_request *req, bool encrypt) 1177 { 1178 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(req); 1179 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); 1180 size_t iv_len = crypto_skcipher_ivsize(cipher); 1181 unsigned int counter = be32_to_cpup((__be32 *) 1182 (req->iv + iv_len - 4)); 1183 unsigned int nblks = ALIGN(req->cryptlen, AES_BLOCK_SIZE) / 1184 AES_BLOCK_SIZE; 1185 1186 /* 1187 * The hardware uses only the last 32-bits as the counter while the 1188 * kernel tests (aes_ctr_enc_tv_template[4] for example) expect that 1189 * the whole IV is a counter. So fallback if the counter is going to 1190 * overlow. 1191 */ 1192 if (counter + nblks < counter) { 1193 int ret; 1194 1195 pr_debug("counter %x will overflow (nblks %u), falling back\n", 1196 counter, counter + nblks); 1197 1198 ret = crypto_sync_skcipher_setkey(ctx->fallback, ctx->aes_key, 1199 ctx->key_length); 1200 if (ret) 1201 return ret; 1202 1203 { 1204 SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback); 1205 1206 skcipher_request_set_sync_tfm(subreq, ctx->fallback); 1207 skcipher_request_set_callback(subreq, req->base.flags, 1208 NULL, NULL); 1209 skcipher_request_set_crypt(subreq, req->src, req->dst, 1210 req->cryptlen, req->iv); 1211 ret = encrypt ? crypto_skcipher_encrypt(subreq) 1212 : crypto_skcipher_decrypt(subreq); 1213 skcipher_request_zero(subreq); 1214 } 1215 return ret; 1216 } 1217 1218 return encrypt ? artpec6_crypto_encrypt(req) 1219 : artpec6_crypto_decrypt(req); 1220 } 1221 1222 static int artpec6_crypto_ctr_encrypt(struct skcipher_request *req) 1223 { 1224 return artpec6_crypto_ctr_crypt(req, true); 1225 } 1226 1227 static int artpec6_crypto_ctr_decrypt(struct skcipher_request *req) 1228 { 1229 return artpec6_crypto_ctr_crypt(req, false); 1230 } 1231 1232 /* 1233 * AEAD functions 1234 */ 1235 static int artpec6_crypto_aead_init(struct crypto_aead *tfm) 1236 { 1237 struct artpec6_cryptotfm_context *tfm_ctx = crypto_aead_ctx(tfm); 1238 1239 memset(tfm_ctx, 0, sizeof(*tfm_ctx)); 1240 1241 crypto_aead_set_reqsize(tfm, 1242 sizeof(struct artpec6_crypto_aead_req_ctx)); 1243 1244 return 0; 1245 } 1246 1247 static int artpec6_crypto_aead_set_key(struct crypto_aead *tfm, const u8 *key, 1248 unsigned int len) 1249 { 1250 struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(&tfm->base); 1251 1252 if (len != 16 && len != 24 && len != 32) { 1253 crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); 1254 return -1; 1255 } 1256 1257 ctx->key_length = len; 1258 1259 memcpy(ctx->aes_key, key, len); 1260 return 0; 1261 } 1262 1263 static int artpec6_crypto_aead_encrypt(struct aead_request *req) 1264 { 1265 int ret; 1266 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); 1267 1268 req_ctx->decrypt = false; 1269 ret = artpec6_crypto_common_init(&req_ctx->common, &req->base, 1270 artpec6_crypto_complete_aead, 1271 NULL, 0); 1272 if (ret) 1273 return ret; 1274 1275 ret = artpec6_crypto_prepare_aead(req); 1276 if (ret) { 1277 artpec6_crypto_common_destroy(&req_ctx->common); 1278 return ret; 1279 } 1280 1281 return artpec6_crypto_submit(&req_ctx->common); 1282 } 1283 1284 static int artpec6_crypto_aead_decrypt(struct aead_request *req) 1285 { 1286 int ret; 1287 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(req); 1288 1289 req_ctx->decrypt = true; 1290 if (req->cryptlen < AES_BLOCK_SIZE) 1291 return -EINVAL; 1292 1293 ret = artpec6_crypto_common_init(&req_ctx->common, 1294 &req->base, 1295 artpec6_crypto_complete_aead, 1296 NULL, 0); 1297 if (ret) 1298 return ret; 1299 1300 ret = artpec6_crypto_prepare_aead(req); 1301 if (ret) { 1302 artpec6_crypto_common_destroy(&req_ctx->common); 1303 return ret; 1304 } 1305 1306 return artpec6_crypto_submit(&req_ctx->common); 1307 } 1308 1309 static int artpec6_crypto_prepare_hash(struct ahash_request *areq) 1310 { 1311 struct artpec6_hashalg_context *ctx = crypto_tfm_ctx(areq->base.tfm); 1312 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(areq); 1313 size_t digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq)); 1314 size_t contextsize = digestsize; 1315 size_t blocksize = crypto_tfm_alg_blocksize( 1316 crypto_ahash_tfm(crypto_ahash_reqtfm(areq))); 1317 struct artpec6_crypto_req_common *common = &req_ctx->common; 1318 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 1319 enum artpec6_crypto_variant variant = ac->variant; 1320 u32 sel_ctx; 1321 bool ext_ctx = false; 1322 bool run_hw = false; 1323 int error = 0; 1324 1325 artpec6_crypto_init_dma_operation(common); 1326 1327 /* Upload HMAC key, must be first the first packet */ 1328 if (req_ctx->hash_flags & HASH_FLAG_HMAC) { 1329 if (variant == ARTPEC6_CRYPTO) { 1330 req_ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, 1331 a6_regk_crypto_dlkey); 1332 } else { 1333 req_ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, 1334 a7_regk_crypto_dlkey); 1335 } 1336 1337 /* Copy and pad up the key */ 1338 memcpy(req_ctx->key_buffer, ctx->hmac_key, 1339 ctx->hmac_key_length); 1340 memset(req_ctx->key_buffer + ctx->hmac_key_length, 0, 1341 blocksize - ctx->hmac_key_length); 1342 1343 error = artpec6_crypto_setup_out_descr(common, 1344 (void *)&req_ctx->key_md, 1345 sizeof(req_ctx->key_md), false, false); 1346 if (error) 1347 return error; 1348 1349 error = artpec6_crypto_setup_out_descr(common, 1350 req_ctx->key_buffer, blocksize, 1351 true, false); 1352 if (error) 1353 return error; 1354 } 1355 1356 if (!(req_ctx->hash_flags & HASH_FLAG_INIT_CTX)) { 1357 /* Restore context */ 1358 sel_ctx = regk_crypto_ext; 1359 ext_ctx = true; 1360 } else { 1361 sel_ctx = regk_crypto_init; 1362 } 1363 1364 if (variant == ARTPEC6_CRYPTO) { 1365 req_ctx->hash_md &= ~A6_CRY_MD_HASH_SEL_CTX; 1366 req_ctx->hash_md |= FIELD_PREP(A6_CRY_MD_HASH_SEL_CTX, sel_ctx); 1367 1368 /* If this is the final round, set the final flag */ 1369 if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) 1370 req_ctx->hash_md |= A6_CRY_MD_HASH_HMAC_FIN; 1371 } else { 1372 req_ctx->hash_md &= ~A7_CRY_MD_HASH_SEL_CTX; 1373 req_ctx->hash_md |= FIELD_PREP(A7_CRY_MD_HASH_SEL_CTX, sel_ctx); 1374 1375 /* If this is the final round, set the final flag */ 1376 if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) 1377 req_ctx->hash_md |= A7_CRY_MD_HASH_HMAC_FIN; 1378 } 1379 1380 /* Setup up metadata descriptors */ 1381 error = artpec6_crypto_setup_out_descr(common, 1382 (void *)&req_ctx->hash_md, 1383 sizeof(req_ctx->hash_md), false, false); 1384 if (error) 1385 return error; 1386 1387 error = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); 1388 if (error) 1389 return error; 1390 1391 if (ext_ctx) { 1392 error = artpec6_crypto_setup_out_descr(common, 1393 req_ctx->digeststate, 1394 contextsize, false, false); 1395 1396 if (error) 1397 return error; 1398 } 1399 1400 if (req_ctx->hash_flags & HASH_FLAG_UPDATE) { 1401 size_t done_bytes = 0; 1402 size_t total_bytes = areq->nbytes + req_ctx->partial_bytes; 1403 size_t ready_bytes = round_down(total_bytes, blocksize); 1404 struct artpec6_crypto_walk walk; 1405 1406 run_hw = ready_bytes > 0; 1407 if (req_ctx->partial_bytes && ready_bytes) { 1408 /* We have a partial buffer and will at least some bytes 1409 * to the HW. Empty this partial buffer before tackling 1410 * the SG lists 1411 */ 1412 memcpy(req_ctx->partial_buffer_out, 1413 req_ctx->partial_buffer, 1414 req_ctx->partial_bytes); 1415 1416 error = artpec6_crypto_setup_out_descr(common, 1417 req_ctx->partial_buffer_out, 1418 req_ctx->partial_bytes, 1419 false, true); 1420 if (error) 1421 return error; 1422 1423 /* Reset partial buffer */ 1424 done_bytes += req_ctx->partial_bytes; 1425 req_ctx->partial_bytes = 0; 1426 } 1427 1428 artpec6_crypto_walk_init(&walk, areq->src); 1429 1430 error = artpec6_crypto_setup_sg_descrs_out(common, &walk, 1431 ready_bytes - 1432 done_bytes); 1433 if (error) 1434 return error; 1435 1436 if (walk.sg) { 1437 size_t sg_skip = ready_bytes - done_bytes; 1438 size_t sg_rem = areq->nbytes - sg_skip; 1439 1440 sg_pcopy_to_buffer(areq->src, sg_nents(areq->src), 1441 req_ctx->partial_buffer + 1442 req_ctx->partial_bytes, 1443 sg_rem, sg_skip); 1444 1445 req_ctx->partial_bytes += sg_rem; 1446 } 1447 1448 req_ctx->digcnt += ready_bytes; 1449 req_ctx->hash_flags &= ~(HASH_FLAG_UPDATE); 1450 } 1451 1452 /* Finalize */ 1453 if (req_ctx->hash_flags & HASH_FLAG_FINALIZE) { 1454 size_t hash_pad_len; 1455 u64 digest_bits; 1456 u32 oper; 1457 1458 if (variant == ARTPEC6_CRYPTO) 1459 oper = FIELD_GET(A6_CRY_MD_OPER, req_ctx->hash_md); 1460 else 1461 oper = FIELD_GET(A7_CRY_MD_OPER, req_ctx->hash_md); 1462 1463 /* Write out the partial buffer if present */ 1464 if (req_ctx->partial_bytes) { 1465 memcpy(req_ctx->partial_buffer_out, 1466 req_ctx->partial_buffer, 1467 req_ctx->partial_bytes); 1468 error = artpec6_crypto_setup_out_descr(common, 1469 req_ctx->partial_buffer_out, 1470 req_ctx->partial_bytes, 1471 false, true); 1472 if (error) 1473 return error; 1474 1475 req_ctx->digcnt += req_ctx->partial_bytes; 1476 req_ctx->partial_bytes = 0; 1477 } 1478 1479 if (req_ctx->hash_flags & HASH_FLAG_HMAC) 1480 digest_bits = 8 * (req_ctx->digcnt + blocksize); 1481 else 1482 digest_bits = 8 * req_ctx->digcnt; 1483 1484 /* Add the hash pad */ 1485 hash_pad_len = create_hash_pad(oper, req_ctx->pad_buffer, 1486 req_ctx->digcnt, digest_bits); 1487 error = artpec6_crypto_setup_out_descr(common, 1488 req_ctx->pad_buffer, 1489 hash_pad_len, false, 1490 true); 1491 req_ctx->digcnt = 0; 1492 1493 if (error) 1494 return error; 1495 1496 /* Descriptor for the final result */ 1497 error = artpec6_crypto_setup_in_descr(common, areq->result, 1498 digestsize, 1499 true); 1500 if (error) 1501 return error; 1502 1503 } else { /* This is not the final operation for this request */ 1504 if (!run_hw) 1505 return ARTPEC6_CRYPTO_PREPARE_HASH_NO_START; 1506 1507 /* Save the result to the context */ 1508 error = artpec6_crypto_setup_in_descr(common, 1509 req_ctx->digeststate, 1510 contextsize, false); 1511 if (error) 1512 return error; 1513 /* fall through */ 1514 } 1515 1516 req_ctx->hash_flags &= ~(HASH_FLAG_INIT_CTX | HASH_FLAG_UPDATE | 1517 HASH_FLAG_FINALIZE); 1518 1519 error = artpec6_crypto_terminate_in_descrs(common); 1520 if (error) 1521 return error; 1522 1523 error = artpec6_crypto_terminate_out_descrs(common); 1524 if (error) 1525 return error; 1526 1527 error = artpec6_crypto_dma_map_descs(common); 1528 if (error) 1529 return error; 1530 1531 return ARTPEC6_CRYPTO_PREPARE_HASH_START; 1532 } 1533 1534 1535 static int artpec6_crypto_aes_ecb_init(struct crypto_skcipher *tfm) 1536 { 1537 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1538 1539 tfm->reqsize = sizeof(struct artpec6_crypto_request_context); 1540 ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_ECB; 1541 1542 return 0; 1543 } 1544 1545 static int artpec6_crypto_aes_ctr_init(struct crypto_skcipher *tfm) 1546 { 1547 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1548 1549 ctx->fallback = 1550 crypto_alloc_sync_skcipher(crypto_tfm_alg_name(&tfm->base), 1551 0, CRYPTO_ALG_NEED_FALLBACK); 1552 if (IS_ERR(ctx->fallback)) 1553 return PTR_ERR(ctx->fallback); 1554 1555 tfm->reqsize = sizeof(struct artpec6_crypto_request_context); 1556 ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CTR; 1557 1558 return 0; 1559 } 1560 1561 static int artpec6_crypto_aes_cbc_init(struct crypto_skcipher *tfm) 1562 { 1563 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1564 1565 tfm->reqsize = sizeof(struct artpec6_crypto_request_context); 1566 ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_CBC; 1567 1568 return 0; 1569 } 1570 1571 static int artpec6_crypto_aes_xts_init(struct crypto_skcipher *tfm) 1572 { 1573 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1574 1575 tfm->reqsize = sizeof(struct artpec6_crypto_request_context); 1576 ctx->crypto_type = ARTPEC6_CRYPTO_CIPHER_AES_XTS; 1577 1578 return 0; 1579 } 1580 1581 static void artpec6_crypto_aes_exit(struct crypto_skcipher *tfm) 1582 { 1583 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1584 1585 memset(ctx, 0, sizeof(*ctx)); 1586 } 1587 1588 static void artpec6_crypto_aes_ctr_exit(struct crypto_skcipher *tfm) 1589 { 1590 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(tfm); 1591 1592 crypto_free_sync_skcipher(ctx->fallback); 1593 artpec6_crypto_aes_exit(tfm); 1594 } 1595 1596 static int 1597 artpec6_crypto_cipher_set_key(struct crypto_skcipher *cipher, const u8 *key, 1598 unsigned int keylen) 1599 { 1600 struct artpec6_cryptotfm_context *ctx = 1601 crypto_skcipher_ctx(cipher); 1602 1603 switch (keylen) { 1604 case 16: 1605 case 24: 1606 case 32: 1607 break; 1608 default: 1609 crypto_skcipher_set_flags(cipher, 1610 CRYPTO_TFM_RES_BAD_KEY_LEN); 1611 return -EINVAL; 1612 } 1613 1614 memcpy(ctx->aes_key, key, keylen); 1615 ctx->key_length = keylen; 1616 return 0; 1617 } 1618 1619 static int 1620 artpec6_crypto_xts_set_key(struct crypto_skcipher *cipher, const u8 *key, 1621 unsigned int keylen) 1622 { 1623 struct artpec6_cryptotfm_context *ctx = 1624 crypto_skcipher_ctx(cipher); 1625 int ret; 1626 1627 ret = xts_check_key(&cipher->base, key, keylen); 1628 if (ret) 1629 return ret; 1630 1631 switch (keylen) { 1632 case 32: 1633 case 48: 1634 case 64: 1635 break; 1636 default: 1637 crypto_skcipher_set_flags(cipher, 1638 CRYPTO_TFM_RES_BAD_KEY_LEN); 1639 return -EINVAL; 1640 } 1641 1642 memcpy(ctx->aes_key, key, keylen); 1643 ctx->key_length = keylen; 1644 return 0; 1645 } 1646 1647 /** artpec6_crypto_process_crypto - Prepare an async block cipher crypto request 1648 * 1649 * @req: The asynch request to process 1650 * 1651 * @return 0 if the dma job was successfully prepared 1652 * <0 on error 1653 * 1654 * This function sets up the PDMA descriptors for a block cipher request. 1655 * 1656 * The required padding is added for AES-CTR using a statically defined 1657 * buffer. 1658 * 1659 * The PDMA descriptor list will be as follows: 1660 * 1661 * OUT: [KEY_MD][KEY][EOP]<CIPHER_MD>[IV]<data_0>...[data_n][AES-CTR_pad]<eop> 1662 * IN: <CIPHER_MD><data_0>...[data_n]<intr> 1663 * 1664 */ 1665 static int artpec6_crypto_prepare_crypto(struct skcipher_request *areq) 1666 { 1667 int ret; 1668 struct artpec6_crypto_walk walk; 1669 struct crypto_skcipher *cipher = crypto_skcipher_reqtfm(areq); 1670 struct artpec6_cryptotfm_context *ctx = crypto_skcipher_ctx(cipher); 1671 struct artpec6_crypto_request_context *req_ctx = NULL; 1672 size_t iv_len = crypto_skcipher_ivsize(cipher); 1673 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 1674 enum artpec6_crypto_variant variant = ac->variant; 1675 struct artpec6_crypto_req_common *common; 1676 bool cipher_decr = false; 1677 size_t cipher_klen; 1678 u32 cipher_len = 0; /* Same as regk_crypto_key_128 for NULL crypto */ 1679 u32 oper; 1680 1681 req_ctx = skcipher_request_ctx(areq); 1682 common = &req_ctx->common; 1683 1684 artpec6_crypto_init_dma_operation(common); 1685 1686 if (variant == ARTPEC6_CRYPTO) 1687 ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, a6_regk_crypto_dlkey); 1688 else 1689 ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, a7_regk_crypto_dlkey); 1690 1691 ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, 1692 sizeof(ctx->key_md), false, false); 1693 if (ret) 1694 return ret; 1695 1696 ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, 1697 ctx->key_length, true, false); 1698 if (ret) 1699 return ret; 1700 1701 req_ctx->cipher_md = 0; 1702 1703 if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) 1704 cipher_klen = ctx->key_length/2; 1705 else 1706 cipher_klen = ctx->key_length; 1707 1708 /* Metadata */ 1709 switch (cipher_klen) { 1710 case 16: 1711 cipher_len = regk_crypto_key_128; 1712 break; 1713 case 24: 1714 cipher_len = regk_crypto_key_192; 1715 break; 1716 case 32: 1717 cipher_len = regk_crypto_key_256; 1718 break; 1719 default: 1720 pr_err("%s: Invalid key length %d!\n", 1721 MODULE_NAME, ctx->key_length); 1722 return -EINVAL; 1723 } 1724 1725 switch (ctx->crypto_type) { 1726 case ARTPEC6_CRYPTO_CIPHER_AES_ECB: 1727 oper = regk_crypto_aes_ecb; 1728 cipher_decr = req_ctx->decrypt; 1729 break; 1730 1731 case ARTPEC6_CRYPTO_CIPHER_AES_CBC: 1732 oper = regk_crypto_aes_cbc; 1733 cipher_decr = req_ctx->decrypt; 1734 break; 1735 1736 case ARTPEC6_CRYPTO_CIPHER_AES_CTR: 1737 oper = regk_crypto_aes_ctr; 1738 cipher_decr = false; 1739 break; 1740 1741 case ARTPEC6_CRYPTO_CIPHER_AES_XTS: 1742 oper = regk_crypto_aes_xts; 1743 cipher_decr = req_ctx->decrypt; 1744 1745 if (variant == ARTPEC6_CRYPTO) 1746 req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DSEQ; 1747 else 1748 req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DSEQ; 1749 break; 1750 1751 default: 1752 pr_err("%s: Invalid cipher mode %d!\n", 1753 MODULE_NAME, ctx->crypto_type); 1754 return -EINVAL; 1755 } 1756 1757 if (variant == ARTPEC6_CRYPTO) { 1758 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, oper); 1759 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, 1760 cipher_len); 1761 if (cipher_decr) 1762 req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; 1763 } else { 1764 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, oper); 1765 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, 1766 cipher_len); 1767 if (cipher_decr) 1768 req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; 1769 } 1770 1771 ret = artpec6_crypto_setup_out_descr(common, 1772 &req_ctx->cipher_md, 1773 sizeof(req_ctx->cipher_md), 1774 false, false); 1775 if (ret) 1776 return ret; 1777 1778 ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); 1779 if (ret) 1780 return ret; 1781 1782 if (iv_len) { 1783 ret = artpec6_crypto_setup_out_descr(common, areq->iv, iv_len, 1784 false, false); 1785 if (ret) 1786 return ret; 1787 } 1788 /* Data out */ 1789 artpec6_crypto_walk_init(&walk, areq->src); 1790 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, areq->cryptlen); 1791 if (ret) 1792 return ret; 1793 1794 /* Data in */ 1795 artpec6_crypto_walk_init(&walk, areq->dst); 1796 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, areq->cryptlen); 1797 if (ret) 1798 return ret; 1799 1800 /* CTR-mode padding required by the HW. */ 1801 if (ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_CTR || 1802 ctx->crypto_type == ARTPEC6_CRYPTO_CIPHER_AES_XTS) { 1803 size_t pad = ALIGN(areq->cryptlen, AES_BLOCK_SIZE) - 1804 areq->cryptlen; 1805 1806 if (pad) { 1807 ret = artpec6_crypto_setup_out_descr(common, 1808 ac->pad_buffer, 1809 pad, false, false); 1810 if (ret) 1811 return ret; 1812 1813 ret = artpec6_crypto_setup_in_descr(common, 1814 ac->pad_buffer, pad, 1815 false); 1816 if (ret) 1817 return ret; 1818 } 1819 } 1820 1821 ret = artpec6_crypto_terminate_out_descrs(common); 1822 if (ret) 1823 return ret; 1824 1825 ret = artpec6_crypto_terminate_in_descrs(common); 1826 if (ret) 1827 return ret; 1828 1829 return artpec6_crypto_dma_map_descs(common); 1830 } 1831 1832 static int artpec6_crypto_prepare_aead(struct aead_request *areq) 1833 { 1834 size_t count; 1835 int ret; 1836 size_t input_length; 1837 struct artpec6_cryptotfm_context *ctx = crypto_tfm_ctx(areq->base.tfm); 1838 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); 1839 struct crypto_aead *cipher = crypto_aead_reqtfm(areq); 1840 struct artpec6_crypto_req_common *common = &req_ctx->common; 1841 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 1842 enum artpec6_crypto_variant variant = ac->variant; 1843 u32 md_cipher_len; 1844 1845 artpec6_crypto_init_dma_operation(common); 1846 1847 /* Key */ 1848 if (variant == ARTPEC6_CRYPTO) { 1849 ctx->key_md = FIELD_PREP(A6_CRY_MD_OPER, 1850 a6_regk_crypto_dlkey); 1851 } else { 1852 ctx->key_md = FIELD_PREP(A7_CRY_MD_OPER, 1853 a7_regk_crypto_dlkey); 1854 } 1855 ret = artpec6_crypto_setup_out_descr(common, (void *)&ctx->key_md, 1856 sizeof(ctx->key_md), false, false); 1857 if (ret) 1858 return ret; 1859 1860 ret = artpec6_crypto_setup_out_descr(common, ctx->aes_key, 1861 ctx->key_length, true, false); 1862 if (ret) 1863 return ret; 1864 1865 req_ctx->cipher_md = 0; 1866 1867 switch (ctx->key_length) { 1868 case 16: 1869 md_cipher_len = regk_crypto_key_128; 1870 break; 1871 case 24: 1872 md_cipher_len = regk_crypto_key_192; 1873 break; 1874 case 32: 1875 md_cipher_len = regk_crypto_key_256; 1876 break; 1877 default: 1878 return -EINVAL; 1879 } 1880 1881 if (variant == ARTPEC6_CRYPTO) { 1882 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_OPER, 1883 regk_crypto_aes_gcm); 1884 req_ctx->cipher_md |= FIELD_PREP(A6_CRY_MD_CIPHER_LEN, 1885 md_cipher_len); 1886 if (req_ctx->decrypt) 1887 req_ctx->cipher_md |= A6_CRY_MD_CIPHER_DECR; 1888 } else { 1889 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_OPER, 1890 regk_crypto_aes_gcm); 1891 req_ctx->cipher_md |= FIELD_PREP(A7_CRY_MD_CIPHER_LEN, 1892 md_cipher_len); 1893 if (req_ctx->decrypt) 1894 req_ctx->cipher_md |= A7_CRY_MD_CIPHER_DECR; 1895 } 1896 1897 ret = artpec6_crypto_setup_out_descr(common, 1898 (void *) &req_ctx->cipher_md, 1899 sizeof(req_ctx->cipher_md), false, 1900 false); 1901 if (ret) 1902 return ret; 1903 1904 ret = artpec6_crypto_setup_in_descr(common, ac->pad_buffer, 4, false); 1905 if (ret) 1906 return ret; 1907 1908 /* For the decryption, cryptlen includes the tag. */ 1909 input_length = areq->cryptlen; 1910 if (req_ctx->decrypt) 1911 input_length -= crypto_aead_authsize(cipher); 1912 1913 /* Prepare the context buffer */ 1914 req_ctx->hw_ctx.aad_length_bits = 1915 __cpu_to_be64(8*areq->assoclen); 1916 1917 req_ctx->hw_ctx.text_length_bits = 1918 __cpu_to_be64(8*input_length); 1919 1920 memcpy(req_ctx->hw_ctx.J0, areq->iv, crypto_aead_ivsize(cipher)); 1921 // The HW omits the initial increment of the counter field. 1922 memcpy(req_ctx->hw_ctx.J0 + GCM_AES_IV_SIZE, "\x00\x00\x00\x01", 4); 1923 1924 ret = artpec6_crypto_setup_out_descr(common, &req_ctx->hw_ctx, 1925 sizeof(struct artpec6_crypto_aead_hw_ctx), false, false); 1926 if (ret) 1927 return ret; 1928 1929 { 1930 struct artpec6_crypto_walk walk; 1931 1932 artpec6_crypto_walk_init(&walk, areq->src); 1933 1934 /* Associated data */ 1935 count = areq->assoclen; 1936 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); 1937 if (ret) 1938 return ret; 1939 1940 if (!IS_ALIGNED(areq->assoclen, 16)) { 1941 size_t assoc_pad = 16 - (areq->assoclen % 16); 1942 /* The HW mandates zero padding here */ 1943 ret = artpec6_crypto_setup_out_descr(common, 1944 ac->zero_buffer, 1945 assoc_pad, false, 1946 false); 1947 if (ret) 1948 return ret; 1949 } 1950 1951 /* Data to crypto */ 1952 count = input_length; 1953 ret = artpec6_crypto_setup_sg_descrs_out(common, &walk, count); 1954 if (ret) 1955 return ret; 1956 1957 if (!IS_ALIGNED(input_length, 16)) { 1958 size_t crypto_pad = 16 - (input_length % 16); 1959 /* The HW mandates zero padding here */ 1960 ret = artpec6_crypto_setup_out_descr(common, 1961 ac->zero_buffer, 1962 crypto_pad, 1963 false, 1964 false); 1965 if (ret) 1966 return ret; 1967 } 1968 } 1969 1970 /* Data from crypto */ 1971 { 1972 struct artpec6_crypto_walk walk; 1973 size_t output_len = areq->cryptlen; 1974 1975 if (req_ctx->decrypt) 1976 output_len -= crypto_aead_authsize(cipher); 1977 1978 artpec6_crypto_walk_init(&walk, areq->dst); 1979 1980 /* skip associated data in the output */ 1981 count = artpec6_crypto_walk_advance(&walk, areq->assoclen); 1982 if (count) 1983 return -EINVAL; 1984 1985 count = output_len; 1986 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, count); 1987 if (ret) 1988 return ret; 1989 1990 /* Put padding between the cryptotext and the auth tag */ 1991 if (!IS_ALIGNED(output_len, 16)) { 1992 size_t crypto_pad = 16 - (output_len % 16); 1993 1994 ret = artpec6_crypto_setup_in_descr(common, 1995 ac->pad_buffer, 1996 crypto_pad, false); 1997 if (ret) 1998 return ret; 1999 } 2000 2001 /* The authentication tag shall follow immediately after 2002 * the output ciphertext. For decryption it is put in a context 2003 * buffer for later compare against the input tag. 2004 */ 2005 2006 if (req_ctx->decrypt) { 2007 ret = artpec6_crypto_setup_in_descr(common, 2008 req_ctx->decryption_tag, AES_BLOCK_SIZE, false); 2009 if (ret) 2010 return ret; 2011 2012 } else { 2013 /* For encryption the requested tag size may be smaller 2014 * than the hardware's generated tag. 2015 */ 2016 size_t authsize = crypto_aead_authsize(cipher); 2017 2018 ret = artpec6_crypto_setup_sg_descrs_in(common, &walk, 2019 authsize); 2020 if (ret) 2021 return ret; 2022 2023 if (authsize < AES_BLOCK_SIZE) { 2024 count = AES_BLOCK_SIZE - authsize; 2025 ret = artpec6_crypto_setup_in_descr(common, 2026 ac->pad_buffer, 2027 count, false); 2028 if (ret) 2029 return ret; 2030 } 2031 } 2032 2033 } 2034 2035 ret = artpec6_crypto_terminate_in_descrs(common); 2036 if (ret) 2037 return ret; 2038 2039 ret = artpec6_crypto_terminate_out_descrs(common); 2040 if (ret) 2041 return ret; 2042 2043 return artpec6_crypto_dma_map_descs(common); 2044 } 2045 2046 static void artpec6_crypto_process_queue(struct artpec6_crypto *ac, 2047 struct list_head *completions) 2048 { 2049 struct artpec6_crypto_req_common *req; 2050 2051 while (!list_empty(&ac->queue) && !artpec6_crypto_busy()) { 2052 req = list_first_entry(&ac->queue, 2053 struct artpec6_crypto_req_common, 2054 list); 2055 list_move_tail(&req->list, &ac->pending); 2056 artpec6_crypto_start_dma(req); 2057 2058 list_add_tail(&req->complete_in_progress, completions); 2059 } 2060 2061 /* 2062 * In some cases, the hardware can raise an in_eop_flush interrupt 2063 * before actually updating the status, so we have an timer which will 2064 * recheck the status on timeout. Since the cases are expected to be 2065 * very rare, we use a relatively large timeout value. There should be 2066 * no noticeable negative effect if we timeout spuriously. 2067 */ 2068 if (ac->pending_count) 2069 mod_timer(&ac->timer, jiffies + msecs_to_jiffies(100)); 2070 else 2071 del_timer(&ac->timer); 2072 } 2073 2074 static void artpec6_crypto_timeout(struct timer_list *t) 2075 { 2076 struct artpec6_crypto *ac = from_timer(ac, t, timer); 2077 2078 dev_info_ratelimited(artpec6_crypto_dev, "timeout\n"); 2079 2080 tasklet_schedule(&ac->task); 2081 } 2082 2083 static void artpec6_crypto_task(unsigned long data) 2084 { 2085 struct artpec6_crypto *ac = (struct artpec6_crypto *)data; 2086 struct artpec6_crypto_req_common *req; 2087 struct artpec6_crypto_req_common *n; 2088 struct list_head complete_done; 2089 struct list_head complete_in_progress; 2090 2091 INIT_LIST_HEAD(&complete_done); 2092 INIT_LIST_HEAD(&complete_in_progress); 2093 2094 if (list_empty(&ac->pending)) { 2095 pr_debug("Spurious IRQ\n"); 2096 return; 2097 } 2098 2099 spin_lock_bh(&ac->queue_lock); 2100 2101 list_for_each_entry_safe(req, n, &ac->pending, list) { 2102 struct artpec6_crypto_dma_descriptors *dma = req->dma; 2103 u32 stat; 2104 dma_addr_t stataddr; 2105 2106 stataddr = dma->stat_dma_addr + 4 * (req->dma->in_cnt - 1); 2107 dma_sync_single_for_cpu(artpec6_crypto_dev, 2108 stataddr, 2109 4, 2110 DMA_BIDIRECTIONAL); 2111 2112 stat = req->dma->stat[req->dma->in_cnt-1]; 2113 2114 /* A non-zero final status descriptor indicates 2115 * this job has finished. 2116 */ 2117 pr_debug("Request %p status is %X\n", req, stat); 2118 if (!stat) 2119 break; 2120 2121 /* Allow testing of timeout handling with fault injection */ 2122 #ifdef CONFIG_FAULT_INJECTION 2123 if (should_fail(&artpec6_crypto_fail_status_read, 1)) 2124 continue; 2125 #endif 2126 2127 pr_debug("Completing request %p\n", req); 2128 2129 list_move_tail(&req->list, &complete_done); 2130 2131 ac->pending_count--; 2132 } 2133 2134 artpec6_crypto_process_queue(ac, &complete_in_progress); 2135 2136 spin_unlock_bh(&ac->queue_lock); 2137 2138 /* Perform the completion callbacks without holding the queue lock 2139 * to allow new request submissions from the callbacks. 2140 */ 2141 list_for_each_entry_safe(req, n, &complete_done, list) { 2142 artpec6_crypto_dma_unmap_all(req); 2143 artpec6_crypto_copy_bounce_buffers(req); 2144 artpec6_crypto_common_destroy(req); 2145 2146 req->complete(req->req); 2147 } 2148 2149 list_for_each_entry_safe(req, n, &complete_in_progress, 2150 complete_in_progress) { 2151 req->req->complete(req->req, -EINPROGRESS); 2152 } 2153 } 2154 2155 static void artpec6_crypto_complete_crypto(struct crypto_async_request *req) 2156 { 2157 req->complete(req, 0); 2158 } 2159 2160 static void 2161 artpec6_crypto_complete_cbc_decrypt(struct crypto_async_request *req) 2162 { 2163 struct skcipher_request *cipher_req = container_of(req, 2164 struct skcipher_request, base); 2165 2166 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->src, 2167 cipher_req->cryptlen - AES_BLOCK_SIZE, 2168 AES_BLOCK_SIZE, 0); 2169 req->complete(req, 0); 2170 } 2171 2172 static void 2173 artpec6_crypto_complete_cbc_encrypt(struct crypto_async_request *req) 2174 { 2175 struct skcipher_request *cipher_req = container_of(req, 2176 struct skcipher_request, base); 2177 2178 scatterwalk_map_and_copy(cipher_req->iv, cipher_req->dst, 2179 cipher_req->cryptlen - AES_BLOCK_SIZE, 2180 AES_BLOCK_SIZE, 0); 2181 req->complete(req, 0); 2182 } 2183 2184 static void artpec6_crypto_complete_aead(struct crypto_async_request *req) 2185 { 2186 int result = 0; 2187 2188 /* Verify GCM hashtag. */ 2189 struct aead_request *areq = container_of(req, 2190 struct aead_request, base); 2191 struct crypto_aead *aead = crypto_aead_reqtfm(areq); 2192 struct artpec6_crypto_aead_req_ctx *req_ctx = aead_request_ctx(areq); 2193 2194 if (req_ctx->decrypt) { 2195 u8 input_tag[AES_BLOCK_SIZE]; 2196 unsigned int authsize = crypto_aead_authsize(aead); 2197 2198 sg_pcopy_to_buffer(areq->src, 2199 sg_nents(areq->src), 2200 input_tag, 2201 authsize, 2202 areq->assoclen + areq->cryptlen - 2203 authsize); 2204 2205 if (crypto_memneq(req_ctx->decryption_tag, 2206 input_tag, 2207 authsize)) { 2208 pr_debug("***EBADMSG:\n"); 2209 print_hex_dump_debug("ref:", DUMP_PREFIX_ADDRESS, 32, 1, 2210 input_tag, authsize, true); 2211 print_hex_dump_debug("out:", DUMP_PREFIX_ADDRESS, 32, 1, 2212 req_ctx->decryption_tag, 2213 authsize, true); 2214 2215 result = -EBADMSG; 2216 } 2217 } 2218 2219 req->complete(req, result); 2220 } 2221 2222 static void artpec6_crypto_complete_hash(struct crypto_async_request *req) 2223 { 2224 req->complete(req, 0); 2225 } 2226 2227 2228 /*------------------- Hash functions -----------------------------------------*/ 2229 static int 2230 artpec6_crypto_hash_set_key(struct crypto_ahash *tfm, 2231 const u8 *key, unsigned int keylen) 2232 { 2233 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(&tfm->base); 2234 size_t blocksize; 2235 int ret; 2236 2237 if (!keylen) { 2238 pr_err("Invalid length (%d) of HMAC key\n", 2239 keylen); 2240 return -EINVAL; 2241 } 2242 2243 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); 2244 2245 blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); 2246 2247 if (keylen > blocksize) { 2248 SHASH_DESC_ON_STACK(hdesc, tfm_ctx->child_hash); 2249 2250 hdesc->tfm = tfm_ctx->child_hash; 2251 2252 tfm_ctx->hmac_key_length = blocksize; 2253 ret = crypto_shash_digest(hdesc, key, keylen, 2254 tfm_ctx->hmac_key); 2255 if (ret) 2256 return ret; 2257 2258 } else { 2259 memcpy(tfm_ctx->hmac_key, key, keylen); 2260 tfm_ctx->hmac_key_length = keylen; 2261 } 2262 2263 return 0; 2264 } 2265 2266 static int 2267 artpec6_crypto_init_hash(struct ahash_request *req, u8 type, int hmac) 2268 { 2269 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 2270 enum artpec6_crypto_variant variant = ac->variant; 2271 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2272 u32 oper; 2273 2274 memset(req_ctx, 0, sizeof(*req_ctx)); 2275 2276 req_ctx->hash_flags = HASH_FLAG_INIT_CTX; 2277 if (hmac) 2278 req_ctx->hash_flags |= (HASH_FLAG_HMAC | HASH_FLAG_UPDATE_KEY); 2279 2280 switch (type) { 2281 case ARTPEC6_CRYPTO_HASH_SHA1: 2282 oper = hmac ? regk_crypto_hmac_sha1 : regk_crypto_sha1; 2283 break; 2284 case ARTPEC6_CRYPTO_HASH_SHA256: 2285 oper = hmac ? regk_crypto_hmac_sha256 : regk_crypto_sha256; 2286 break; 2287 default: 2288 pr_err("%s: Unsupported hash type 0x%x\n", MODULE_NAME, type); 2289 return -EINVAL; 2290 } 2291 2292 if (variant == ARTPEC6_CRYPTO) 2293 req_ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, oper); 2294 else 2295 req_ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, oper); 2296 2297 return 0; 2298 } 2299 2300 static int artpec6_crypto_prepare_submit_hash(struct ahash_request *req) 2301 { 2302 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2303 int ret; 2304 2305 if (!req_ctx->common.dma) { 2306 ret = artpec6_crypto_common_init(&req_ctx->common, 2307 &req->base, 2308 artpec6_crypto_complete_hash, 2309 NULL, 0); 2310 2311 if (ret) 2312 return ret; 2313 } 2314 2315 ret = artpec6_crypto_prepare_hash(req); 2316 switch (ret) { 2317 case ARTPEC6_CRYPTO_PREPARE_HASH_START: 2318 ret = artpec6_crypto_submit(&req_ctx->common); 2319 break; 2320 2321 case ARTPEC6_CRYPTO_PREPARE_HASH_NO_START: 2322 ret = 0; 2323 /* Fallthrough */ 2324 2325 default: 2326 artpec6_crypto_common_destroy(&req_ctx->common); 2327 break; 2328 } 2329 2330 return ret; 2331 } 2332 2333 static int artpec6_crypto_hash_final(struct ahash_request *req) 2334 { 2335 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2336 2337 req_ctx->hash_flags |= HASH_FLAG_FINALIZE; 2338 2339 return artpec6_crypto_prepare_submit_hash(req); 2340 } 2341 2342 static int artpec6_crypto_hash_update(struct ahash_request *req) 2343 { 2344 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2345 2346 req_ctx->hash_flags |= HASH_FLAG_UPDATE; 2347 2348 return artpec6_crypto_prepare_submit_hash(req); 2349 } 2350 2351 static int artpec6_crypto_sha1_init(struct ahash_request *req) 2352 { 2353 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); 2354 } 2355 2356 static int artpec6_crypto_sha1_digest(struct ahash_request *req) 2357 { 2358 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2359 2360 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA1, 0); 2361 2362 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; 2363 2364 return artpec6_crypto_prepare_submit_hash(req); 2365 } 2366 2367 static int artpec6_crypto_sha256_init(struct ahash_request *req) 2368 { 2369 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); 2370 } 2371 2372 static int artpec6_crypto_sha256_digest(struct ahash_request *req) 2373 { 2374 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2375 2376 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 0); 2377 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; 2378 2379 return artpec6_crypto_prepare_submit_hash(req); 2380 } 2381 2382 static int artpec6_crypto_hmac_sha256_init(struct ahash_request *req) 2383 { 2384 return artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); 2385 } 2386 2387 static int artpec6_crypto_hmac_sha256_digest(struct ahash_request *req) 2388 { 2389 struct artpec6_hash_request_context *req_ctx = ahash_request_ctx(req); 2390 2391 artpec6_crypto_init_hash(req, ARTPEC6_CRYPTO_HASH_SHA256, 1); 2392 req_ctx->hash_flags |= HASH_FLAG_UPDATE | HASH_FLAG_FINALIZE; 2393 2394 return artpec6_crypto_prepare_submit_hash(req); 2395 } 2396 2397 static int artpec6_crypto_ahash_init_common(struct crypto_tfm *tfm, 2398 const char *base_hash_name) 2399 { 2400 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); 2401 2402 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), 2403 sizeof(struct artpec6_hash_request_context)); 2404 memset(tfm_ctx, 0, sizeof(*tfm_ctx)); 2405 2406 if (base_hash_name) { 2407 struct crypto_shash *child; 2408 2409 child = crypto_alloc_shash(base_hash_name, 0, 2410 CRYPTO_ALG_NEED_FALLBACK); 2411 2412 if (IS_ERR(child)) 2413 return PTR_ERR(child); 2414 2415 tfm_ctx->child_hash = child; 2416 } 2417 2418 return 0; 2419 } 2420 2421 static int artpec6_crypto_ahash_init(struct crypto_tfm *tfm) 2422 { 2423 return artpec6_crypto_ahash_init_common(tfm, NULL); 2424 } 2425 2426 static int artpec6_crypto_ahash_init_hmac_sha256(struct crypto_tfm *tfm) 2427 { 2428 return artpec6_crypto_ahash_init_common(tfm, "sha256"); 2429 } 2430 2431 static void artpec6_crypto_ahash_exit(struct crypto_tfm *tfm) 2432 { 2433 struct artpec6_hashalg_context *tfm_ctx = crypto_tfm_ctx(tfm); 2434 2435 if (tfm_ctx->child_hash) 2436 crypto_free_shash(tfm_ctx->child_hash); 2437 2438 memset(tfm_ctx->hmac_key, 0, sizeof(tfm_ctx->hmac_key)); 2439 tfm_ctx->hmac_key_length = 0; 2440 } 2441 2442 static int artpec6_crypto_hash_export(struct ahash_request *req, void *out) 2443 { 2444 const struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); 2445 struct artpec6_hash_export_state *state = out; 2446 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 2447 enum artpec6_crypto_variant variant = ac->variant; 2448 2449 BUILD_BUG_ON(sizeof(state->partial_buffer) != 2450 sizeof(ctx->partial_buffer)); 2451 BUILD_BUG_ON(sizeof(state->digeststate) != sizeof(ctx->digeststate)); 2452 2453 state->digcnt = ctx->digcnt; 2454 state->partial_bytes = ctx->partial_bytes; 2455 state->hash_flags = ctx->hash_flags; 2456 2457 if (variant == ARTPEC6_CRYPTO) 2458 state->oper = FIELD_GET(A6_CRY_MD_OPER, ctx->hash_md); 2459 else 2460 state->oper = FIELD_GET(A7_CRY_MD_OPER, ctx->hash_md); 2461 2462 memcpy(state->partial_buffer, ctx->partial_buffer, 2463 sizeof(state->partial_buffer)); 2464 memcpy(state->digeststate, ctx->digeststate, 2465 sizeof(state->digeststate)); 2466 2467 return 0; 2468 } 2469 2470 static int artpec6_crypto_hash_import(struct ahash_request *req, const void *in) 2471 { 2472 struct artpec6_hash_request_context *ctx = ahash_request_ctx(req); 2473 const struct artpec6_hash_export_state *state = in; 2474 struct artpec6_crypto *ac = dev_get_drvdata(artpec6_crypto_dev); 2475 enum artpec6_crypto_variant variant = ac->variant; 2476 2477 memset(ctx, 0, sizeof(*ctx)); 2478 2479 ctx->digcnt = state->digcnt; 2480 ctx->partial_bytes = state->partial_bytes; 2481 ctx->hash_flags = state->hash_flags; 2482 2483 if (variant == ARTPEC6_CRYPTO) 2484 ctx->hash_md = FIELD_PREP(A6_CRY_MD_OPER, state->oper); 2485 else 2486 ctx->hash_md = FIELD_PREP(A7_CRY_MD_OPER, state->oper); 2487 2488 memcpy(ctx->partial_buffer, state->partial_buffer, 2489 sizeof(state->partial_buffer)); 2490 memcpy(ctx->digeststate, state->digeststate, 2491 sizeof(state->digeststate)); 2492 2493 return 0; 2494 } 2495 2496 static int init_crypto_hw(struct artpec6_crypto *ac) 2497 { 2498 enum artpec6_crypto_variant variant = ac->variant; 2499 void __iomem *base = ac->base; 2500 u32 out_descr_buf_size; 2501 u32 out_data_buf_size; 2502 u32 in_data_buf_size; 2503 u32 in_descr_buf_size; 2504 u32 in_stat_buf_size; 2505 u32 in, out; 2506 2507 /* 2508 * The PDMA unit contains 1984 bytes of internal memory for the OUT 2509 * channels and 1024 bytes for the IN channel. This is an elastic 2510 * memory used to internally store the descriptors and data. The values 2511 * ares specified in 64 byte incremements. Trustzone buffers are not 2512 * used at this stage. 2513 */ 2514 out_data_buf_size = 16; /* 1024 bytes for data */ 2515 out_descr_buf_size = 15; /* 960 bytes for descriptors */ 2516 in_data_buf_size = 8; /* 512 bytes for data */ 2517 in_descr_buf_size = 4; /* 256 bytes for descriptors */ 2518 in_stat_buf_size = 4; /* 256 bytes for stat descrs */ 2519 2520 BUILD_BUG_ON_MSG((out_data_buf_size 2521 + out_descr_buf_size) * 64 > 1984, 2522 "Invalid OUT configuration"); 2523 2524 BUILD_BUG_ON_MSG((in_data_buf_size 2525 + in_descr_buf_size 2526 + in_stat_buf_size) * 64 > 1024, 2527 "Invalid IN configuration"); 2528 2529 in = FIELD_PREP(PDMA_IN_BUF_CFG_DATA_BUF_SIZE, in_data_buf_size) | 2530 FIELD_PREP(PDMA_IN_BUF_CFG_DESCR_BUF_SIZE, in_descr_buf_size) | 2531 FIELD_PREP(PDMA_IN_BUF_CFG_STAT_BUF_SIZE, in_stat_buf_size); 2532 2533 out = FIELD_PREP(PDMA_OUT_BUF_CFG_DATA_BUF_SIZE, out_data_buf_size) | 2534 FIELD_PREP(PDMA_OUT_BUF_CFG_DESCR_BUF_SIZE, out_descr_buf_size); 2535 2536 writel_relaxed(out, base + PDMA_OUT_BUF_CFG); 2537 writel_relaxed(PDMA_OUT_CFG_EN, base + PDMA_OUT_CFG); 2538 2539 if (variant == ARTPEC6_CRYPTO) { 2540 writel_relaxed(in, base + A6_PDMA_IN_BUF_CFG); 2541 writel_relaxed(PDMA_IN_CFG_EN, base + A6_PDMA_IN_CFG); 2542 writel_relaxed(A6_PDMA_INTR_MASK_IN_DATA | 2543 A6_PDMA_INTR_MASK_IN_EOP_FLUSH, 2544 base + A6_PDMA_INTR_MASK); 2545 } else { 2546 writel_relaxed(in, base + A7_PDMA_IN_BUF_CFG); 2547 writel_relaxed(PDMA_IN_CFG_EN, base + A7_PDMA_IN_CFG); 2548 writel_relaxed(A7_PDMA_INTR_MASK_IN_DATA | 2549 A7_PDMA_INTR_MASK_IN_EOP_FLUSH, 2550 base + A7_PDMA_INTR_MASK); 2551 } 2552 2553 return 0; 2554 } 2555 2556 static void artpec6_crypto_disable_hw(struct artpec6_crypto *ac) 2557 { 2558 enum artpec6_crypto_variant variant = ac->variant; 2559 void __iomem *base = ac->base; 2560 2561 if (variant == ARTPEC6_CRYPTO) { 2562 writel_relaxed(A6_PDMA_IN_CMD_STOP, base + A6_PDMA_IN_CMD); 2563 writel_relaxed(0, base + A6_PDMA_IN_CFG); 2564 writel_relaxed(A6_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); 2565 } else { 2566 writel_relaxed(A7_PDMA_IN_CMD_STOP, base + A7_PDMA_IN_CMD); 2567 writel_relaxed(0, base + A7_PDMA_IN_CFG); 2568 writel_relaxed(A7_PDMA_OUT_CMD_STOP, base + PDMA_OUT_CMD); 2569 } 2570 2571 writel_relaxed(0, base + PDMA_OUT_CFG); 2572 2573 } 2574 2575 static irqreturn_t artpec6_crypto_irq(int irq, void *dev_id) 2576 { 2577 struct artpec6_crypto *ac = dev_id; 2578 enum artpec6_crypto_variant variant = ac->variant; 2579 void __iomem *base = ac->base; 2580 u32 mask_in_data, mask_in_eop_flush; 2581 u32 in_cmd_flush_stat, in_cmd_reg; 2582 u32 ack_intr_reg; 2583 u32 ack = 0; 2584 u32 intr; 2585 2586 if (variant == ARTPEC6_CRYPTO) { 2587 intr = readl_relaxed(base + A6_PDMA_MASKED_INTR); 2588 mask_in_data = A6_PDMA_INTR_MASK_IN_DATA; 2589 mask_in_eop_flush = A6_PDMA_INTR_MASK_IN_EOP_FLUSH; 2590 in_cmd_flush_stat = A6_PDMA_IN_CMD_FLUSH_STAT; 2591 in_cmd_reg = A6_PDMA_IN_CMD; 2592 ack_intr_reg = A6_PDMA_ACK_INTR; 2593 } else { 2594 intr = readl_relaxed(base + A7_PDMA_MASKED_INTR); 2595 mask_in_data = A7_PDMA_INTR_MASK_IN_DATA; 2596 mask_in_eop_flush = A7_PDMA_INTR_MASK_IN_EOP_FLUSH; 2597 in_cmd_flush_stat = A7_PDMA_IN_CMD_FLUSH_STAT; 2598 in_cmd_reg = A7_PDMA_IN_CMD; 2599 ack_intr_reg = A7_PDMA_ACK_INTR; 2600 } 2601 2602 /* We get two interrupt notifications from each job. 2603 * The in_data means all data was sent to memory and then 2604 * we request a status flush command to write the per-job 2605 * status to its status vector. This ensures that the 2606 * tasklet can detect exactly how many submitted jobs 2607 * that have finished. 2608 */ 2609 if (intr & mask_in_data) 2610 ack |= mask_in_data; 2611 2612 if (intr & mask_in_eop_flush) 2613 ack |= mask_in_eop_flush; 2614 else 2615 writel_relaxed(in_cmd_flush_stat, base + in_cmd_reg); 2616 2617 writel_relaxed(ack, base + ack_intr_reg); 2618 2619 if (intr & mask_in_eop_flush) 2620 tasklet_schedule(&ac->task); 2621 2622 return IRQ_HANDLED; 2623 } 2624 2625 /*------------------- Algorithm definitions ----------------------------------*/ 2626 2627 /* Hashes */ 2628 static struct ahash_alg hash_algos[] = { 2629 /* SHA-1 */ 2630 { 2631 .init = artpec6_crypto_sha1_init, 2632 .update = artpec6_crypto_hash_update, 2633 .final = artpec6_crypto_hash_final, 2634 .digest = artpec6_crypto_sha1_digest, 2635 .import = artpec6_crypto_hash_import, 2636 .export = artpec6_crypto_hash_export, 2637 .halg.digestsize = SHA1_DIGEST_SIZE, 2638 .halg.statesize = sizeof(struct artpec6_hash_export_state), 2639 .halg.base = { 2640 .cra_name = "sha1", 2641 .cra_driver_name = "artpec-sha1", 2642 .cra_priority = 300, 2643 .cra_flags = CRYPTO_ALG_ASYNC, 2644 .cra_blocksize = SHA1_BLOCK_SIZE, 2645 .cra_ctxsize = sizeof(struct artpec6_hashalg_context), 2646 .cra_alignmask = 3, 2647 .cra_module = THIS_MODULE, 2648 .cra_init = artpec6_crypto_ahash_init, 2649 .cra_exit = artpec6_crypto_ahash_exit, 2650 } 2651 }, 2652 /* SHA-256 */ 2653 { 2654 .init = artpec6_crypto_sha256_init, 2655 .update = artpec6_crypto_hash_update, 2656 .final = artpec6_crypto_hash_final, 2657 .digest = artpec6_crypto_sha256_digest, 2658 .import = artpec6_crypto_hash_import, 2659 .export = artpec6_crypto_hash_export, 2660 .halg.digestsize = SHA256_DIGEST_SIZE, 2661 .halg.statesize = sizeof(struct artpec6_hash_export_state), 2662 .halg.base = { 2663 .cra_name = "sha256", 2664 .cra_driver_name = "artpec-sha256", 2665 .cra_priority = 300, 2666 .cra_flags = CRYPTO_ALG_ASYNC, 2667 .cra_blocksize = SHA256_BLOCK_SIZE, 2668 .cra_ctxsize = sizeof(struct artpec6_hashalg_context), 2669 .cra_alignmask = 3, 2670 .cra_module = THIS_MODULE, 2671 .cra_init = artpec6_crypto_ahash_init, 2672 .cra_exit = artpec6_crypto_ahash_exit, 2673 } 2674 }, 2675 /* HMAC SHA-256 */ 2676 { 2677 .init = artpec6_crypto_hmac_sha256_init, 2678 .update = artpec6_crypto_hash_update, 2679 .final = artpec6_crypto_hash_final, 2680 .digest = artpec6_crypto_hmac_sha256_digest, 2681 .import = artpec6_crypto_hash_import, 2682 .export = artpec6_crypto_hash_export, 2683 .setkey = artpec6_crypto_hash_set_key, 2684 .halg.digestsize = SHA256_DIGEST_SIZE, 2685 .halg.statesize = sizeof(struct artpec6_hash_export_state), 2686 .halg.base = { 2687 .cra_name = "hmac(sha256)", 2688 .cra_driver_name = "artpec-hmac-sha256", 2689 .cra_priority = 300, 2690 .cra_flags = CRYPTO_ALG_ASYNC, 2691 .cra_blocksize = SHA256_BLOCK_SIZE, 2692 .cra_ctxsize = sizeof(struct artpec6_hashalg_context), 2693 .cra_alignmask = 3, 2694 .cra_module = THIS_MODULE, 2695 .cra_init = artpec6_crypto_ahash_init_hmac_sha256, 2696 .cra_exit = artpec6_crypto_ahash_exit, 2697 } 2698 }, 2699 }; 2700 2701 /* Crypto */ 2702 static struct skcipher_alg crypto_algos[] = { 2703 /* AES - ECB */ 2704 { 2705 .base = { 2706 .cra_name = "ecb(aes)", 2707 .cra_driver_name = "artpec6-ecb-aes", 2708 .cra_priority = 300, 2709 .cra_flags = CRYPTO_ALG_ASYNC, 2710 .cra_blocksize = AES_BLOCK_SIZE, 2711 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), 2712 .cra_alignmask = 3, 2713 .cra_module = THIS_MODULE, 2714 }, 2715 .min_keysize = AES_MIN_KEY_SIZE, 2716 .max_keysize = AES_MAX_KEY_SIZE, 2717 .setkey = artpec6_crypto_cipher_set_key, 2718 .encrypt = artpec6_crypto_encrypt, 2719 .decrypt = artpec6_crypto_decrypt, 2720 .init = artpec6_crypto_aes_ecb_init, 2721 .exit = artpec6_crypto_aes_exit, 2722 }, 2723 /* AES - CTR */ 2724 { 2725 .base = { 2726 .cra_name = "ctr(aes)", 2727 .cra_driver_name = "artpec6-ctr-aes", 2728 .cra_priority = 300, 2729 .cra_flags = CRYPTO_ALG_ASYNC | 2730 CRYPTO_ALG_NEED_FALLBACK, 2731 .cra_blocksize = 1, 2732 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), 2733 .cra_alignmask = 3, 2734 .cra_module = THIS_MODULE, 2735 }, 2736 .min_keysize = AES_MIN_KEY_SIZE, 2737 .max_keysize = AES_MAX_KEY_SIZE, 2738 .ivsize = AES_BLOCK_SIZE, 2739 .setkey = artpec6_crypto_cipher_set_key, 2740 .encrypt = artpec6_crypto_ctr_encrypt, 2741 .decrypt = artpec6_crypto_ctr_decrypt, 2742 .init = artpec6_crypto_aes_ctr_init, 2743 .exit = artpec6_crypto_aes_ctr_exit, 2744 }, 2745 /* AES - CBC */ 2746 { 2747 .base = { 2748 .cra_name = "cbc(aes)", 2749 .cra_driver_name = "artpec6-cbc-aes", 2750 .cra_priority = 300, 2751 .cra_flags = CRYPTO_ALG_ASYNC, 2752 .cra_blocksize = AES_BLOCK_SIZE, 2753 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), 2754 .cra_alignmask = 3, 2755 .cra_module = THIS_MODULE, 2756 }, 2757 .min_keysize = AES_MIN_KEY_SIZE, 2758 .max_keysize = AES_MAX_KEY_SIZE, 2759 .ivsize = AES_BLOCK_SIZE, 2760 .setkey = artpec6_crypto_cipher_set_key, 2761 .encrypt = artpec6_crypto_encrypt, 2762 .decrypt = artpec6_crypto_decrypt, 2763 .init = artpec6_crypto_aes_cbc_init, 2764 .exit = artpec6_crypto_aes_exit 2765 }, 2766 /* AES - XTS */ 2767 { 2768 .base = { 2769 .cra_name = "xts(aes)", 2770 .cra_driver_name = "artpec6-xts-aes", 2771 .cra_priority = 300, 2772 .cra_flags = CRYPTO_ALG_ASYNC, 2773 .cra_blocksize = 1, 2774 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), 2775 .cra_alignmask = 3, 2776 .cra_module = THIS_MODULE, 2777 }, 2778 .min_keysize = 2*AES_MIN_KEY_SIZE, 2779 .max_keysize = 2*AES_MAX_KEY_SIZE, 2780 .ivsize = 16, 2781 .setkey = artpec6_crypto_xts_set_key, 2782 .encrypt = artpec6_crypto_encrypt, 2783 .decrypt = artpec6_crypto_decrypt, 2784 .init = artpec6_crypto_aes_xts_init, 2785 .exit = artpec6_crypto_aes_exit, 2786 }, 2787 }; 2788 2789 static struct aead_alg aead_algos[] = { 2790 { 2791 .init = artpec6_crypto_aead_init, 2792 .setkey = artpec6_crypto_aead_set_key, 2793 .encrypt = artpec6_crypto_aead_encrypt, 2794 .decrypt = artpec6_crypto_aead_decrypt, 2795 .ivsize = GCM_AES_IV_SIZE, 2796 .maxauthsize = AES_BLOCK_SIZE, 2797 2798 .base = { 2799 .cra_name = "gcm(aes)", 2800 .cra_driver_name = "artpec-gcm-aes", 2801 .cra_priority = 300, 2802 .cra_flags = CRYPTO_ALG_ASYNC | 2803 CRYPTO_ALG_KERN_DRIVER_ONLY, 2804 .cra_blocksize = 1, 2805 .cra_ctxsize = sizeof(struct artpec6_cryptotfm_context), 2806 .cra_alignmask = 3, 2807 .cra_module = THIS_MODULE, 2808 }, 2809 } 2810 }; 2811 2812 #ifdef CONFIG_DEBUG_FS 2813 2814 struct dbgfs_u32 { 2815 char *name; 2816 mode_t mode; 2817 u32 *flag; 2818 char *desc; 2819 }; 2820 2821 static struct dentry *dbgfs_root; 2822 2823 static void artpec6_crypto_init_debugfs(void) 2824 { 2825 dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); 2826 2827 #ifdef CONFIG_FAULT_INJECTION 2828 fault_create_debugfs_attr("fail_status_read", dbgfs_root, 2829 &artpec6_crypto_fail_status_read); 2830 2831 fault_create_debugfs_attr("fail_dma_array_full", dbgfs_root, 2832 &artpec6_crypto_fail_dma_array_full); 2833 #endif 2834 } 2835 2836 static void artpec6_crypto_free_debugfs(void) 2837 { 2838 debugfs_remove_recursive(dbgfs_root); 2839 dbgfs_root = NULL; 2840 } 2841 #endif 2842 2843 static const struct of_device_id artpec6_crypto_of_match[] = { 2844 { .compatible = "axis,artpec6-crypto", .data = (void *)ARTPEC6_CRYPTO }, 2845 { .compatible = "axis,artpec7-crypto", .data = (void *)ARTPEC7_CRYPTO }, 2846 {} 2847 }; 2848 MODULE_DEVICE_TABLE(of, artpec6_crypto_of_match); 2849 2850 static int artpec6_crypto_probe(struct platform_device *pdev) 2851 { 2852 const struct of_device_id *match; 2853 enum artpec6_crypto_variant variant; 2854 struct artpec6_crypto *ac; 2855 struct device *dev = &pdev->dev; 2856 void __iomem *base; 2857 int irq; 2858 int err; 2859 2860 if (artpec6_crypto_dev) 2861 return -ENODEV; 2862 2863 match = of_match_node(artpec6_crypto_of_match, dev->of_node); 2864 if (!match) 2865 return -EINVAL; 2866 2867 variant = (enum artpec6_crypto_variant)match->data; 2868 2869 base = devm_platform_ioremap_resource(pdev, 0); 2870 if (IS_ERR(base)) 2871 return PTR_ERR(base); 2872 2873 irq = platform_get_irq(pdev, 0); 2874 if (irq < 0) 2875 return -ENODEV; 2876 2877 ac = devm_kzalloc(&pdev->dev, sizeof(struct artpec6_crypto), 2878 GFP_KERNEL); 2879 if (!ac) 2880 return -ENOMEM; 2881 2882 platform_set_drvdata(pdev, ac); 2883 ac->variant = variant; 2884 2885 spin_lock_init(&ac->queue_lock); 2886 INIT_LIST_HEAD(&ac->queue); 2887 INIT_LIST_HEAD(&ac->pending); 2888 timer_setup(&ac->timer, artpec6_crypto_timeout, 0); 2889 2890 ac->base = base; 2891 2892 ac->dma_cache = kmem_cache_create("artpec6_crypto_dma", 2893 sizeof(struct artpec6_crypto_dma_descriptors), 2894 64, 2895 0, 2896 NULL); 2897 if (!ac->dma_cache) 2898 return -ENOMEM; 2899 2900 #ifdef CONFIG_DEBUG_FS 2901 artpec6_crypto_init_debugfs(); 2902 #endif 2903 2904 tasklet_init(&ac->task, artpec6_crypto_task, 2905 (unsigned long)ac); 2906 2907 ac->pad_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, 2908 GFP_KERNEL); 2909 if (!ac->pad_buffer) 2910 return -ENOMEM; 2911 ac->pad_buffer = PTR_ALIGN(ac->pad_buffer, ARTPEC_CACHE_LINE_MAX); 2912 2913 ac->zero_buffer = devm_kzalloc(&pdev->dev, 2 * ARTPEC_CACHE_LINE_MAX, 2914 GFP_KERNEL); 2915 if (!ac->zero_buffer) 2916 return -ENOMEM; 2917 ac->zero_buffer = PTR_ALIGN(ac->zero_buffer, ARTPEC_CACHE_LINE_MAX); 2918 2919 err = init_crypto_hw(ac); 2920 if (err) 2921 goto free_cache; 2922 2923 err = devm_request_irq(&pdev->dev, irq, artpec6_crypto_irq, 0, 2924 "artpec6-crypto", ac); 2925 if (err) 2926 goto disable_hw; 2927 2928 artpec6_crypto_dev = &pdev->dev; 2929 2930 err = crypto_register_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); 2931 if (err) { 2932 dev_err(dev, "Failed to register ahashes\n"); 2933 goto disable_hw; 2934 } 2935 2936 err = crypto_register_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); 2937 if (err) { 2938 dev_err(dev, "Failed to register ciphers\n"); 2939 goto unregister_ahashes; 2940 } 2941 2942 err = crypto_register_aeads(aead_algos, ARRAY_SIZE(aead_algos)); 2943 if (err) { 2944 dev_err(dev, "Failed to register aeads\n"); 2945 goto unregister_algs; 2946 } 2947 2948 return 0; 2949 2950 unregister_algs: 2951 crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); 2952 unregister_ahashes: 2953 crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); 2954 disable_hw: 2955 artpec6_crypto_disable_hw(ac); 2956 free_cache: 2957 kmem_cache_destroy(ac->dma_cache); 2958 return err; 2959 } 2960 2961 static int artpec6_crypto_remove(struct platform_device *pdev) 2962 { 2963 struct artpec6_crypto *ac = platform_get_drvdata(pdev); 2964 int irq = platform_get_irq(pdev, 0); 2965 2966 crypto_unregister_ahashes(hash_algos, ARRAY_SIZE(hash_algos)); 2967 crypto_unregister_skciphers(crypto_algos, ARRAY_SIZE(crypto_algos)); 2968 crypto_unregister_aeads(aead_algos, ARRAY_SIZE(aead_algos)); 2969 2970 tasklet_disable(&ac->task); 2971 devm_free_irq(&pdev->dev, irq, ac); 2972 tasklet_kill(&ac->task); 2973 del_timer_sync(&ac->timer); 2974 2975 artpec6_crypto_disable_hw(ac); 2976 2977 kmem_cache_destroy(ac->dma_cache); 2978 #ifdef CONFIG_DEBUG_FS 2979 artpec6_crypto_free_debugfs(); 2980 #endif 2981 return 0; 2982 } 2983 2984 static struct platform_driver artpec6_crypto_driver = { 2985 .probe = artpec6_crypto_probe, 2986 .remove = artpec6_crypto_remove, 2987 .driver = { 2988 .name = "artpec6-crypto", 2989 .of_match_table = artpec6_crypto_of_match, 2990 }, 2991 }; 2992 2993 module_platform_driver(artpec6_crypto_driver); 2994 2995 MODULE_AUTHOR("Axis Communications AB"); 2996 MODULE_DESCRIPTION("ARTPEC-6 Crypto driver"); 2997 MODULE_LICENSE("GPL"); 2998