// SPDX-License-Identifier: GPL-2.0-only
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

struct spu_qreg {
	struct spu_queue	*queue;
	unsigned long		type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset.  Test whether that offset lies in the
 * range (old_head, new_head]: exclusive of old_head, inclusive of
 * new_head.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
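
/* A worked example of the wrapped case above (the offsets are
 * illustrative only): if old_head == 0x380 and new_head == 0x40 after
 * the ring has wrapped, a job whose recorded offset is 0x3c0 or 0x40
 * is finished, while one recorded at 0x80 is still pending.
 */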

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}
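
/* Note that spu_queue_num_free() deliberately reserves one slot so
 * that "empty" (head == tail) and "full" stay distinguishable.  As an
 * illustration, head == tail yields CWQ_NUM_ENTRIES - 1 free entries,
 * while head == tail + CWQ_ENTRY_SIZE yields zero.
 */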

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
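
/* For reference, the hash path below builds the control word of its
 * first descriptor as:
 *
 *	control_word_base(nbytes, auth_key_len, 0, auth_type,
 *			  digest_size, false, true, false, false,
 *			  OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. start-of-block set and end-of-block clear; CONTROL_END_OF_BLOCK
 * is OR'ed into whichever descriptor ends up last in the chain.
 */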

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const u8		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash		*fallback_tfm;
};

#define N2_HASH_KEY_MAX			32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx		base;

	struct crypto_shash		*child_shash;

	int				hash_key_len;
	unsigned char			hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request		fallback_req;
};
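
/* The union above holds the raw hardware hash state, not just the
 * final digest: the SPU reads and writes hw_op_hashsz bytes of it (for
 * example, a full SHA-256 state when computing sha224), and only
 * digest_size bytes are eventually copied out to req->result.
 */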

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warn("Fallback driver '%s' could not be loaded!\n",
			fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warn("Child shash '%s' could not be loaded!\n",
			n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	SHASH_DESC_ON_STACK(shash, child_shash);
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	shash->tfm = child_shash;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}
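
/* An example of the key preparation above (lengths are illustrative):
 * for hmac(sha256), bs is 64 and ds is 32, so an 80-byte key is first
 * digested down to 32 bytes while a 20-byte key is copied into
 * ctx->hash_key verbatim.  A key that ends up longer than
 * N2_HASH_KEY_MAX is handled by deferring to the software fallback at
 * digest time.
 */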

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}

static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}
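
/* Note that the digest path above is fully synchronous: the descriptor
 * chain is built, submitted, and drained via submit_and_wait_for_tail()
 * while the queue lock is held, so the CWQ completion interrupt is
 * purely informational for hashing.
 */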

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}

struct n2_cipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct ablkcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */

struct n2_cipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct crypto_alg	alg;
};

static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct n2_cipher_alg, alg);
}

struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	int err;

	err = verify_ablkcipher_des_key(cipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	int err;

	err = verify_ablkcipher_des3_key(cipher, key);
	if (err)
		return err;

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}

static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
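
/* A quick worked example of cipher_descriptor_len() (values are
 * illustrative): with a 16-byte block size, nbytes == 0x1007 is
 * rounded down to 0x1000, and anything larger than 64KB is clamped to
 * the hardware's 2^16-byte per-descriptor limit.
 */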

static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
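
/* To summarize the splitting rules above: a new chunk is started
 * whenever the in-place property flips, the destination stops being
 * physically contiguous with the previous entry, N2_CHUNK_ARR_LEN
 * source entries have accumulated, or the chunk would grow past the
 * 2^16-byte total length limit.
 */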

static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}

static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}
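
/* The decrypt direction above walks the chunk list in reverse so that
 * each chunk's IV (the last ciphertext block preceding it) is read
 * from memory that has not yet been decrypted in place; for the same
 * reason the final output IV is stashed in rctx->temp_iv before its
 * source block is overwritten.
 */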

struct n2_cipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct ablkcipher_alg	ablkcipher;
};

static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{	.name		= "ecb(arc4)",
		.drv_name	= "ecb-arc4",
		.block_size	= 1,
		.enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 1,
			.max_keysize	= 256,
			.setkey		= n2_arc4_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},

	/* DES: ECB, CBC and CFB are supported */
	{	.name		= "ecb(des)",
		.drv_name	= "ecb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des)",
		.drv_name	= "cbc-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des)",
		.drv_name	= "cfb-des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_DES |
				   ENC_TYPE_CHAINING_CFB),
		.ablkcipher	= {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.setkey		= n2_des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},

	/* 3DES: ECB, CBC and CFB are supported */
	{	.name		= "ecb(des3_ede)",
		.drv_name	= "ecb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(des3_ede)",
		.drv_name	= "cbc-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= DES_BLOCK_SIZE,
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "cfb(des3_ede)",
		.drv_name	= "cfb-3des",
		.block_size	= DES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_3DES |
				   ENC_TYPE_CHAINING_CFB),
		.ablkcipher	= {
			.min_keysize	= 3 * DES_KEY_SIZE,
			.max_keysize	= 3 * DES_KEY_SIZE,
			.setkey		= n2_3des_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	/* AES: ECB, CBC and CTR are supported */
	{	.name		= "ecb(aes)",
		.drv_name	= "ecb-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_ECB),
		.ablkcipher	= {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_ecb,
			.decrypt	= n2_decrypt_ecb,
		},
	},
	{	.name		= "cbc(aes)",
		.drv_name	= "cbc-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_CBC),
		.ablkcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			.decrypt	= n2_decrypt_chaining,
		},
	},
	{	.name		= "ctr(aes)",
		.drv_name	= "ctr-aes",
		.block_size	= AES_BLOCK_SIZE,
		.enc_type	= (ENC_TYPE_ALG_AES128 |
				   ENC_TYPE_CHAINING_COUNTER),
		.ablkcipher	= {
			.ivsize		= AES_BLOCK_SIZE,
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= n2_aes_setkey,
			.encrypt	= n2_encrypt_chaining,
			/* CTR decryption is the same keystream XOR as
			 * encryption, so reuse the encrypt path.
			 */
			.decrypt	= n2_encrypt_chaining,
		},
	},

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);

struct n2_hash_tmpl {
	const char	*name;
	const u8	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};

static const u32 md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero_message_hash,
	  .hash_init	= md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero_message_hash,
	  .hash_init	= sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero_message_hash,
	  .hash_init	= sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero_message_hash,
	  .hash_init	= sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
	struct n2_cipher_alg *cipher, *cipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&cipher->alg);
		list_del(&cipher->entry);
		kfree(cipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_cipher_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
	return 0;
}

static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct crypto_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->cra_priority = N2_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_u.ablkcipher = tmpl->ablkcipher;
	alg->cra_init = n2_cipher_cra_init;
	alg->cra_module = THIS_MODULE;

	list_add(&p->entry, &cipher_algs);
	err = crypto_register_alg(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->cra_name);
	}
	return err;
}

static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}
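
/* A note on sha224 in the template table above: the hardware has no
 * separate SHA-224 engine, so it runs AUTH_TYPE_SHA256 with SHA-224
 * initial values and the result is truncated to SHA224_DIGEST_SIZE.
 * Its hmac_type of AUTH_TYPE_RESERVED is the cue used below to skip
 * HMAC registration for it.
 */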

static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;
	ahash->export = n2_hash_async_noexport;
	ahash->import = n2_hash_async_noimport;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}

static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_cipher(&cipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}

static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}

static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}

static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
	struct spu_qreg qr = { .queue = p, .type = q_type };

	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}
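
/* The qconf hypercall is issued via work_on_cpu_safe() on a CPU taken
 * from the queue's sharing mask, on the assumption that the hypervisor
 * requires the configuring CPU to be one that is actually attached to
 * the unit being configured.
 */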

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		/* Clear any dangling per-cpu mappings; this helper
		 * tears down both CWQ and MAU queue lists.
		 */
		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
			if (cpu_to_mau[i] == p)
				cpu_to_mau[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
				dev->dev.of_node);
			return -EINVAL;
		}
		cpumask_set_cpu(*id, &p->sharing);
		table[*id] = p;
	}
	return 0;
}

/* Process an 'exec-unit' MDESC node of type 'cwq' or 'mau'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	cpumask_clear(&p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		pr_err("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}

static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);

out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}

static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		=	"n2cp",
		.of_match_table	=	n2_crypto_match,
	},
	.probe		=	n2_crypto_probe,
	.remove		=	n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		=	"ncp",
		.of_match_table	=	n2_mau_match,
	},
	.probe		=	n2_mau_probe,
	.remove		=	n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);