/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME		"n2_crypto"
#define DRV_MODULE_VERSION	"0.2"
#define DRV_MODULE_RELDATE	"July 28, 2011"

static const char version[] =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY		200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
	cpumask_t		sharing;
	unsigned long		qhandle;

	spinlock_t		lock;
	u8			q_type;
	void			*q;
	unsigned long		head;
	unsigned long		tail;
	struct list_head	jobs;

	unsigned long		devino;

	char			irq_name[32];
	unsigned int		irq;

	struct list_head	list;
};

struct spu_qreg {
	struct spu_queue	*queue;
	unsigned long		type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
	if (q->q_type == HV_NCS_QTYPE_MAU) {
		off += MAU_ENTRY_SIZE;
		if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
			off = 0;
	} else {
		off += CWQ_ENTRY_SIZE;
		if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
			off = 0;
	}
	return off;
}

struct n2_request_common {
	struct list_head	entry;
	unsigned int		offset;
};
#define OFFSET_NOT_RUNNING	(~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; test to see if that offset is in
 * the range old_head, new_head, inclusive.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
				unsigned long old_head, unsigned long new_head)
{
	if (old_head <= new_head) {
		if (offset > old_head && offset <= new_head)
			return true;
	} else {
		if (offset > old_head || offset <= new_head)
			return true;
	}
	return false;
}
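/* A minimal sanity sketch (not compiled, in the spirit of the #if 0
 * block further below) of the wraparound semantics of job_finished(),
 * using hypothetical offsets.  An offset equal to old_head is NOT
 * counted as newly finished; it was consumed by a prior head advance.
 */
#if 0
static void job_finished_selftest(struct spu_queue *q)
{
	BUG_ON(!job_finished(q, 16, 8, 24));	/* no wrap: 8 < 16 <= 24 */
	BUG_ON(!job_finished(q, 8, 24, 8));	/* head wrapped past the end */
	BUG_ON(job_finished(q, 8, 8, 24));	/* already consumed earlier */
}
#endif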
/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
	unsigned long off, new_head, hv_ret;
	struct spu_queue *q = dev_id;

	pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	spin_lock(&q->lock);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

	pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), new_head, hv_ret);

	for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
		/* XXX ... XXX */
	}

	hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
	if (hv_ret == HV_EOK)
		q->head = new_head;

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
	struct spu_queue *q = dev_id;
	unsigned long head, hv_ret;

	spin_lock(&q->lock);

	pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
	       smp_processor_id(), q->qhandle);

	hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

	pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
	       smp_processor_id(), head, hv_ret);

	sun4v_ncs_sethead_marker(q->qhandle, head);

	spin_unlock(&q->lock);

	return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
	return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
	unsigned long head = q->head;
	unsigned long tail = q->tail;
	unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
	unsigned long diff;

	if (head > tail)
		diff = head - tail;
	else
		diff = (end - tail) + head;

	return (diff / CWQ_ENTRY_SIZE) - 1;
}

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
	int avail = spu_queue_num_free(q);

	if (avail >= num_entries)
		return q->q + q->tail;

	return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
	unsigned long hv_ret, new_tail;

	new_tail = spu_next_offset(q, last - q->q);

	hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
	if (hv_ret == HV_EOK)
		q->tail = new_tail;
	return hv_ret;
}
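/* Note on the ring bookkeeping above: one entry is kept permanently
 * unused (the "- 1" in spu_queue_num_free) so that head == tail
 * unambiguously means "empty".  An empty CWQ ring therefore reports
 * CWQ_NUM_ENTRIES - 1 free slots, and a submission can never advance
 * the tail all the way back onto the head.
 */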
static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
			     int enc_type, int auth_type,
			     unsigned int hash_len,
			     bool sfas, bool sob, bool eob, bool encrypt,
			     int opcode)
{
	u64 word = (len - 1) & CONTROL_LEN;

	word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
	word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
	word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
	if (sfas)
		word |= CONTROL_STORE_FINAL_AUTH_STATE;
	if (sob)
		word |= CONTROL_START_OF_BLOCK;
	if (eob)
		word |= CONTROL_END_OF_BLOCK;
	if (encrypt)
		word |= CONTROL_ENCRYPT;
	if (hmac_key_len)
		word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
	if (hash_len)
		word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

	return word;
}
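/* For illustration only: the first descriptor of the MD5 ahash path
 * below effectively builds its control word like this (start-of-block
 * set, end-of-block OR'd in later on the final descriptor, in-place
 * AUTH_MAC opcode, no HMAC key):
 */
#if 0
	u64 word = control_word_base(nbytes, 0, 0, AUTH_TYPE_MD5,
				     MD5_DIGEST_SIZE,
				     false, true, false, false,
				     OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
#endif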
#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
	if (this_len >= 64 ||
	    qp->head != qp->tail)
		return true;
	return false;
}
#endif

struct n2_ahash_alg {
	struct list_head	entry;
	const u8		*hash_zero;
	const u32		*hash_init;
	u8			hw_op_hashsz;
	u8			digest_size;
	u8			auth_type;
	u8			hmac_type;
	struct ahash_alg	alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
	const char		*child_alg;
	struct n2_ahash_alg	derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct ahash_alg *ahash_alg;

	ahash_alg = container_of(alg, struct ahash_alg, halg.base);

	return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
	struct crypto_ahash	*fallback_tfm;
};

#define N2_HASH_KEY_MAX		32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
	struct n2_hash_ctx	base;

	struct crypto_shash	*child_shash;

	int			hash_key_len;
	unsigned char		hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
	union {
		struct md5_state	md5;
		struct sha1_state	sha1;
		struct sha256_state	sha256;
	} u;

	struct ahash_request	fallback_req;
};

static int n2_hash_async_init(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
	rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
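/* Each ahash entry point above simply mirrors the request onto the
 * software fallback tfm, propagating only the MAY_SLEEP flag.  Partial
 * state export/import is refused with -ENOSYS, presumably because the
 * hardware path is single-shot and keeps no intermediate state of its
 * own that could be serialized.
 */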
static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
	struct crypto_ahash *fallback_tfm;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->fallback_tfm = fallback_tfm;
	return 0;

out:
	return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
	const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
	struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
	struct crypto_ahash *fallback_tfm;
	struct crypto_shash *child_shash;
	int err;

	fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(fallback_tfm)) {
		pr_warning("Fallback driver '%s' could not be loaded!\n",
			   fallback_driver_name);
		err = PTR_ERR(fallback_tfm);
		goto out;
	}

	child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
	if (IS_ERR(child_shash)) {
		pr_warning("Child shash '%s' could not be loaded!\n",
			   n2alg->child_alg);
		err = PTR_ERR(child_shash);
		goto out_free_fallback;
	}

	crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
					 crypto_ahash_reqsize(fallback_tfm)));

	ctx->child_shash = child_shash;
	ctx->base.fallback_tfm = fallback_tfm;
	return 0;

out_free_fallback:
	crypto_free_ahash(fallback_tfm);

out:
	return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

	crypto_free_ahash(ctx->base.fallback_tfm);
	crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
				unsigned int keylen)
{
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	struct crypto_shash *child_shash = ctx->child_shash;
	struct crypto_ahash *fallback_tfm;
	SHASH_DESC_ON_STACK(shash, child_shash);
	int err, bs, ds;

	fallback_tfm = ctx->base.fallback_tfm;
	err = crypto_ahash_setkey(fallback_tfm, key, keylen);
	if (err)
		return err;

	shash->tfm = child_shash;
	shash->flags = crypto_ahash_get_flags(tfm) &
		CRYPTO_TFM_REQ_MAY_SLEEP;

	bs = crypto_shash_blocksize(child_shash);
	ds = crypto_shash_digestsize(child_shash);
	BUG_ON(ds > N2_HASH_KEY_MAX);
	if (keylen > bs) {
		err = crypto_shash_digest(shash, key, keylen,
					  ctx->hash_key);
		if (err)
			return err;
		keylen = ds;
	} else if (keylen <= N2_HASH_KEY_MAX)
		memcpy(ctx->hash_key, key, keylen);

	ctx->hash_key_len = keylen;

	return err;
}

static unsigned long wait_for_tail(struct spu_queue *qp)
{
	unsigned long head, hv_ret;

	do {
		hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
		if (hv_ret != HV_EOK) {
			pr_err("Hypervisor error on gethead\n");
			break;
		}
		if (head == qp->tail) {
			qp->head = head;
			break;
		}
	} while (1);
	return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
					      struct cwq_initial_entry *ent)
{
	unsigned long hv_ret = spu_queue_submit(qp, ent);

	if (hv_ret == HV_EOK)
		hv_ret = wait_for_tail(qp);

	return hv_ret;
}
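/* Despite the asynchronous ahash/ablkcipher interfaces, completion is
 * synchronous in this driver: after queueing descriptors it busy-polls
 * the hardware head pointer (wait_for_tail above) until it catches up
 * with the tail, rather than relying on the CWQ completion interrupt.
 */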
static int n2_do_async_digest(struct ahash_request *req,
			      unsigned int auth_type, unsigned int digest_size,
			      unsigned int result_size, void *hash_loc,
			      unsigned long auth_key, unsigned int auth_key_len)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cwq_initial_entry *ent;
	struct crypto_hash_walk walk;
	struct spu_queue *qp;
	unsigned long flags;
	int err = -ENODEV;
	int nbytes, cpu;

	/* The total effective length of the operation may not
	 * exceed 2^16.
	 */
	if (unlikely(req->nbytes > (1 << 16))) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}

	nbytes = crypto_hash_walk_first(req, &walk);

	cpu = get_cpu();
	qp = cpu_to_cwq[cpu];
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	/* XXX can do better, improve this later by doing a by-hand scatterlist
	 * XXX walk, etc.
	 */
	ent = qp->q + qp->tail;

	ent->control = control_word_base(nbytes, auth_key_len, 0,
					 auth_type, digest_size,
					 false, true, false, false,
					 OPCODE_INPLACE_BIT |
					 OPCODE_AUTH_MAC);
	ent->src_addr = __pa(walk.data);
	ent->auth_key_addr = auth_key;
	ent->auth_iv_addr = __pa(hash_loc);
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = 0UL;
	ent->enc_iv_addr = 0UL;
	ent->dest_addr = __pa(hash_loc);

	nbytes = crypto_hash_walk_done(&walk, 0);
	while (nbytes > 0) {
		ent = spu_queue_next(qp, ent);

		ent->control = (nbytes - 1);
		ent->src_addr = __pa(walk.data);
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;

		nbytes = crypto_hash_walk_done(&walk, 0);
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
		err = -EINVAL;
	else
		err = 0;

	spin_unlock_irqrestore(&qp->lock, flags);

	if (!err)
		memcpy(req->result, hash_loc, result_size);
out:
	put_cpu();

	return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
	struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	int ds;

	ds = n2alg->digest_size;
	if (unlikely(req->nbytes == 0)) {
		memcpy(req->result, n2alg->hash_zero, ds);
		return 0;
	}
	memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->auth_type,
				  n2alg->hw_op_hashsz, ds,
				  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
	struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
	struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
	int ds;

	ds = n2alg->derived.digest_size;
	if (unlikely(req->nbytes == 0) ||
	    unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
		struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
		struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

		ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
		rctx->fallback_req.base.flags =
			req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
		rctx->fallback_req.nbytes = req->nbytes;
		rctx->fallback_req.src = req->src;
		rctx->fallback_req.result = req->result;

		return crypto_ahash_digest(&rctx->fallback_req);
	}
	memcpy(&rctx->u, n2alg->derived.hash_init,
	       n2alg->derived.hw_op_hashsz);

	return n2_do_async_digest(req, n2alg->derived.hmac_type,
				  n2alg->derived.hw_op_hashsz, ds,
				  &rctx->u,
				  __pa(&ctx->hash_key),
				  ctx->hash_key_len);
}
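/* Not part of this driver: a minimal sketch (not compiled) of how the
 * ahash registered here would typically be driven from other kernel
 * code, using only standard crypto API calls.  The helper name is
 * hypothetical, and the buffer must be in linearly-mapped memory for
 * sg_init_one().  With the n2 driver loaded, priority 200 makes
 * "sha1-n2" the preferred "sha1" implementation.
 */
#if 0
#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int n2_sha1_example(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_tfm:
	crypto_free_ahash(tfm);
	return err;
}
#endif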
struct n2_cipher_context {
	int			key_len;
	int			enc_type;
	union {
		u8		aes[AES_MAX_KEY_SIZE];
		u8		des[DES_KEY_SIZE];
		u8		des3[3 * DES_KEY_SIZE];
		u8		arc4[258]; /* S-box, X, Y */
	} key;
};

#define N2_CHUNK_ARR_LEN	16

struct n2_crypto_chunk {
	struct list_head	entry;
	unsigned long		iv_paddr : 44;
	unsigned long		arr_len : 20;
	unsigned long		dest_paddr;
	unsigned long		dest_final;
	struct {
		unsigned long	src_paddr : 44;
		unsigned long	src_len : 20;
	} arr[N2_CHUNK_ARR_LEN];
};

struct n2_request_context {
	struct ablkcipher_walk	walk;
	struct list_head	chunk_list;
	struct n2_crypto_chunk	chunk;
	u8			temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *	0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *	0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */

struct n2_cipher_alg {
	struct list_head	entry;
	u8			enc_type;
	struct crypto_alg	alg;
};

static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;

	return container_of(alg, struct n2_cipher_alg, alg);
}

struct n2_cipher_request_context {
	struct ablkcipher_walk	walk;
};

static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);

	ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->enc_type |= ENC_TYPE_ALG_AES128;
		break;
	case AES_KEYSIZE_192:
		ctx->enc_type |= ENC_TYPE_ALG_AES192;
		break;
	case AES_KEYSIZE_256:
		ctx->enc_type |= ENC_TYPE_ALG_AES256;
		break;
	default:
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.aes, key, keylen);
	return 0;
}

static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			 unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int err;

	ctx->enc_type = n2alg->enc_type;

	if (keylen != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = des_ekey(tmp, key);
	if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	ctx->key_len = keylen;
	memcpy(ctx->key.des, key, keylen);
	return 0;
}

static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u32 flags;
	int err;

	flags = crypto_ablkcipher_get_flags(cipher);
	err = __des3_verify_key(&flags, key);
	if (unlikely(err)) {
		crypto_ablkcipher_set_flags(cipher, flags);
		return err;
	}

	ctx->enc_type = n2alg->enc_type;

	ctx->key_len = keylen;
	memcpy(ctx->key.des3, key, keylen);
	return 0;
}

static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			  unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
	u8 *s = ctx->key.arc4;
	u8 *x = s + 256;
	u8 *y = x + 1;
	int i, j, k;

	ctx->enc_type = n2alg->enc_type;

	j = k = 0;
	*x = 0;
	*y = 0;
	for (i = 0; i < 256; i++)
		s[i] = i;
	for (i = 0; i < 256; i++) {
		u8 a = s[i];
		j = (j + key[k] + a) & 0xff;
		s[i] = s[j];
		s[j] = a;
		if (++k >= keylen)
			k = 0;
	}

	return 0;
}

static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
{
	int this_len = nbytes;

	this_len -= (nbytes & (block_size - 1));
	return this_len > (1 << 16) ? (1 << 16) : this_len;
}
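/* Example: with nbytes = 0x1801f and an AES block size of 16,
 * cipher_descriptor_len() first drops the trailing partial block
 * (0x1801f -> 0x18010) and then clamps to the 2^16 per-descriptor
 * limit, yielding 0x10000; the remainder is carried by later chunks
 * of the walk in n2_compute_chunks() below.
 */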
static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
			    struct spu_queue *qp, bool encrypt)
{
	struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
	struct cwq_initial_entry *ent;
	bool in_place;
	int i;

	ent = spu_queue_alloc(qp, cp->arr_len);
	if (!ent) {
		pr_info("queue_alloc() of %d fails\n",
			cp->arr_len);
		return -EBUSY;
	}

	in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

	ent->control = control_word_base(cp->arr[0].src_len,
					 0, ctx->enc_type, 0, 0,
					 false, true, false, encrypt,
					 OPCODE_ENCRYPT |
					 (in_place ? OPCODE_INPLACE_BIT : 0));
	ent->src_addr = cp->arr[0].src_paddr;
	ent->auth_key_addr = 0UL;
	ent->auth_iv_addr = 0UL;
	ent->final_auth_state_addr = 0UL;
	ent->enc_key_addr = __pa(&ctx->key);
	ent->enc_iv_addr = cp->iv_paddr;
	ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

	for (i = 1; i < cp->arr_len; i++) {
		ent = spu_queue_next(qp, ent);

		ent->control = cp->arr[i].src_len - 1;
		ent->src_addr = cp->arr[i].src_paddr;
		ent->auth_key_addr = 0UL;
		ent->auth_iv_addr = 0UL;
		ent->final_auth_state_addr = 0UL;
		ent->enc_key_addr = 0UL;
		ent->enc_iv_addr = 0UL;
		ent->dest_addr = 0UL;
	}
	ent->control |= CONTROL_END_OF_BLOCK;

	return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct ablkcipher_request *req)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct ablkcipher_walk *walk = &rctx->walk;
	struct n2_crypto_chunk *chunk;
	unsigned long dest_prev;
	unsigned int tot_len;
	bool prev_in_place;
	int err, nbytes;

	ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
	err = ablkcipher_walk_phys(req, walk);
	if (err)
		return err;

	INIT_LIST_HEAD(&rctx->chunk_list);

	chunk = &rctx->chunk;
	INIT_LIST_HEAD(&chunk->entry);

	chunk->iv_paddr = 0UL;
	chunk->arr_len = 0;
	chunk->dest_paddr = 0UL;

	prev_in_place = false;
	dest_prev = ~0UL;
	tot_len = 0;

	while ((nbytes = walk->nbytes) != 0) {
		unsigned long dest_paddr, src_paddr;
		bool in_place;
		int this_len;

		src_paddr = (page_to_phys(walk->src.page) +
			     walk->src.offset);
		dest_paddr = (page_to_phys(walk->dst.page) +
			      walk->dst.offset);
		in_place = (src_paddr == dest_paddr);
		this_len = cipher_descriptor_len(nbytes, walk->blocksize);

		if (chunk->arr_len != 0) {
			if (in_place != prev_in_place ||
			    (!prev_in_place &&
			     dest_paddr != dest_prev) ||
			    chunk->arr_len == N2_CHUNK_ARR_LEN ||
			    tot_len + this_len > (1 << 16)) {
				chunk->dest_final = dest_prev;
				list_add_tail(&chunk->entry,
					      &rctx->chunk_list);
				chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
				if (!chunk) {
					err = -ENOMEM;
					break;
				}
				INIT_LIST_HEAD(&chunk->entry);
			}
		}
		if (chunk->arr_len == 0) {
			chunk->dest_paddr = dest_paddr;
			tot_len = 0;
		}
		chunk->arr[chunk->arr_len].src_paddr = src_paddr;
		chunk->arr[chunk->arr_len].src_len = this_len;
		chunk->arr_len++;

		dest_prev = dest_paddr + this_len;
		prev_in_place = in_place;
		tot_len += this_len;

		err = ablkcipher_walk_done(req, walk, nbytes - this_len);
		if (err)
			break;
	}
	if (!err && chunk->arr_len != 0) {
		chunk->dest_final = dest_prev;
		list_add_tail(&chunk->entry, &rctx->chunk_list);
	}

	return err;
}
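/* n2_compute_chunks() starts a new chunk whenever any of the rules
 * enforced above would otherwise be violated: the in-place property
 * flips, an out-of-place destination stops being physically
 * contiguous, the chunk already holds N2_CHUNK_ARR_LEN (16)
 * descriptors, or the running byte count would exceed the 2^16
 * control-word length limit.
 */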
static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct n2_crypto_chunk *c, *tmp;

	if (final_iv)
		memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

	ablkcipher_walk_complete(&rctx->walk);
	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
}

static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	unsigned long flags, hv_ret;
	struct spu_queue *qp;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
		err = __n2_crypt_chunk(tfm, c, qp, encrypt);
		if (err)
			break;
		list_del(&c->entry);
		if (unlikely(c != &rctx->chunk))
			kfree(c);
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, NULL);
	return err;
}

static int n2_encrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct ablkcipher_request *req)
{
	return n2_do_ecb(req, false);
}
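/* IV handling for the chaining modes below: on encryption the IV for
 * each chunk is the last ciphertext block the previous chunk produced
 * (c->dest_final - blocksize).  On decryption the chunk list is walked
 * in reverse so that each chunk's IV (the preceding ciphertext block)
 * can still be read from the not-yet-decrypted source, and the final
 * IV is copied into temp_iv up front, before the data holding it is
 * overwritten.
 */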
static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
{
	struct n2_request_context *rctx = ablkcipher_request_ctx(req);
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned long flags, hv_ret, iv_paddr;
	int err = n2_compute_chunks(req);
	struct n2_crypto_chunk *c, *tmp;
	struct spu_queue *qp;
	void *final_iv_addr;

	final_iv_addr = NULL;

	if (err)
		return err;

	qp = cpu_to_cwq[get_cpu()];
	err = -ENODEV;
	if (!qp)
		goto out;

	spin_lock_irqsave(&qp->lock, flags);

	if (encrypt) {
		iv_paddr = __pa(rctx->walk.iv);
		list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
					 entry) {
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, true);
			if (err)
				break;
			iv_paddr = c->dest_final - rctx->walk.blocksize;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
		final_iv_addr = __va(iv_paddr);
	} else {
		list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
						 entry) {
			if (c == &rctx->chunk) {
				iv_paddr = __pa(rctx->walk.iv);
			} else {
				iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
					    tmp->arr[tmp->arr_len-1].src_len -
					    rctx->walk.blocksize);
			}
			if (!final_iv_addr) {
				unsigned long pa;

				pa = (c->arr[c->arr_len-1].src_paddr +
				      c->arr[c->arr_len-1].src_len -
				      rctx->walk.blocksize);
				final_iv_addr = rctx->temp_iv;
				memcpy(rctx->temp_iv, __va(pa),
				       rctx->walk.blocksize);
			}
			c->iv_paddr = iv_paddr;
			err = __n2_crypt_chunk(tfm, c, qp, false);
			if (err)
				break;
			list_del(&c->entry);
			if (unlikely(c != &rctx->chunk))
				kfree(c);
		}
	}
	if (!err) {
		hv_ret = wait_for_tail(qp);
		if (hv_ret != HV_EOK)
			err = -EINVAL;
	}

	spin_unlock_irqrestore(&qp->lock, flags);

out:
	put_cpu();

	n2_chunk_complete(req, err ? NULL : final_iv_addr);
	return err;
}

static int n2_encrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct ablkcipher_request *req)
{
	return n2_do_chaining(req, false);
}
struct n2_cipher_tmpl {
	const char		*name;
	const char		*drv_name;
	u8			block_size;
	u8			enc_type;
	struct ablkcipher_alg	ablkcipher;
};

static const struct n2_cipher_tmpl cipher_tmpls[] = {
	/* ARC4: only ECB is supported (chaining bits ignored) */
	{ .name		= "ecb(arc4)",
	  .drv_name	= "ecb-arc4",
	  .block_size	= 1,
	  .enc_type	= (ENC_TYPE_ALG_RC4_STREAM |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 1,
		  .max_keysize	= 256,
		  .setkey	= n2_arc4_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},

	/* DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des)",
	  .drv_name	= "ecb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des)",
	  .drv_name	= "cbc-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des)",
	  .drv_name	= "cfb-des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= DES_KEY_SIZE,
		  .max_keysize	= DES_KEY_SIZE,
		  .setkey	= n2_des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},

	/* 3DES: ECB CBC and CFB are supported */
	{ .name		= "ecb(des3_ede)",
	  .drv_name	= "ecb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(des3_ede)",
	  .drv_name	= "cbc-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= DES_BLOCK_SIZE,
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "cfb(des3_ede)",
	  .drv_name	= "cfb-3des",
	  .block_size	= DES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_3DES |
			   ENC_TYPE_CHAINING_CFB),
	  .ablkcipher	= {
		  .min_keysize	= 3 * DES_KEY_SIZE,
		  .max_keysize	= 3 * DES_KEY_SIZE,
		  .setkey	= n2_3des_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},

	/* AES: ECB CBC and CTR are supported */
	{ .name		= "ecb(aes)",
	  .drv_name	= "ecb-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_ECB),
	  .ablkcipher	= {
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_ecb,
		  .decrypt	= n2_decrypt_ecb,
	  },
	},
	{ .name		= "cbc(aes)",
	  .drv_name	= "cbc-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_CBC),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_decrypt_chaining,
	  },
	},
	{ .name		= "ctr(aes)",
	  .drv_name	= "ctr-aes",
	  .block_size	= AES_BLOCK_SIZE,
	  .enc_type	= (ENC_TYPE_ALG_AES128 |
			   ENC_TYPE_CHAINING_COUNTER),
	  .ablkcipher	= {
		  .ivsize	= AES_BLOCK_SIZE,
		  .min_keysize	= AES_MIN_KEY_SIZE,
		  .max_keysize	= AES_MAX_KEY_SIZE,
		  .setkey	= n2_aes_setkey,
		  .encrypt	= n2_encrypt_chaining,
		  .decrypt	= n2_encrypt_chaining,
	  },
	},
};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)

static LIST_HEAD(cipher_algs);
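/* Note that ctr(aes) above deliberately points .decrypt at
 * n2_encrypt_chaining: CTR mode is symmetric, so decryption is the
 * same keystream operation as encryption.
 *
 * Not part of this driver: a minimal sketch (not compiled) of driving
 * one of the registered ciphers through the standard skcipher API.
 * The helper name is hypothetical, the buffer must be in
 * linearly-mapped memory, and len must be a multiple of the block
 * size.
 */
#if 0
#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int n2_cbc_aes_example(u8 *buf, unsigned int len,
			      const u8 *key, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
	if (err)
		goto out_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);	/* in-place encryption */
	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv);

	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

	skcipher_request_free(req);
out_tfm:
	crypto_free_skcipher(tfm);
	return err;
}
#endif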
struct n2_hash_tmpl {
	const char	*name;
	const u8	*hash_zero;
	const u32	*hash_init;
	u8		hw_op_hashsz;
	u8		digest_size;
	u8		block_size;
	u8		auth_type;
	u8		hmac_type;
};

static const u32 md5_init[MD5_HASH_WORDS] = {
	cpu_to_le32(MD5_H0),
	cpu_to_le32(MD5_H1),
	cpu_to_le32(MD5_H2),
	cpu_to_le32(MD5_H3),
};
static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
	SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
	SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
	SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
	SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
	SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
	{ .name		= "md5",
	  .hash_zero	= md5_zero_message_hash,
	  .hash_init	= md5_init,
	  .auth_type	= AUTH_TYPE_MD5,
	  .hmac_type	= AUTH_TYPE_HMAC_MD5,
	  .hw_op_hashsz	= MD5_DIGEST_SIZE,
	  .digest_size	= MD5_DIGEST_SIZE,
	  .block_size	= MD5_HMAC_BLOCK_SIZE },
	{ .name		= "sha1",
	  .hash_zero	= sha1_zero_message_hash,
	  .hash_init	= sha1_init,
	  .auth_type	= AUTH_TYPE_SHA1,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA1,
	  .hw_op_hashsz	= SHA1_DIGEST_SIZE,
	  .digest_size	= SHA1_DIGEST_SIZE,
	  .block_size	= SHA1_BLOCK_SIZE },
	{ .name		= "sha256",
	  .hash_zero	= sha256_zero_message_hash,
	  .hash_init	= sha256_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_HMAC_SHA256,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA256_DIGEST_SIZE,
	  .block_size	= SHA256_BLOCK_SIZE },
	{ .name		= "sha224",
	  .hash_zero	= sha224_zero_message_hash,
	  .hash_init	= sha224_init,
	  .auth_type	= AUTH_TYPE_SHA256,
	  .hmac_type	= AUTH_TYPE_RESERVED,
	  .hw_op_hashsz	= SHA256_DIGEST_SIZE,
	  .digest_size	= SHA224_DIGEST_SIZE,
	  .block_size	= SHA224_BLOCK_SIZE },
};
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;
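/* Two details of the table above are worth noting: the MD5 initial
 * state is stored little-endian (cpu_to_le32) because MD5 is defined
 * on little-endian words, while the SHA constants are used as-is; and
 * sha224 runs as a full SHA256 hardware op (hw_op_hashsz) whose result
 * is truncated to digest_size, with hmac_type AUTH_TYPE_RESERVED so
 * that no hmac(sha224) variant gets registered.
 */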
static void __n2_unregister_algs(void)
{
	struct n2_cipher_alg *cipher, *cipher_tmp;
	struct n2_ahash_alg *alg, *alg_tmp;
	struct n2_hmac_alg *hmac, *hmac_tmp;

	list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
		crypto_unregister_alg(&cipher->alg);
		list_del(&cipher->entry);
		kfree(cipher);
	}
	list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
		crypto_unregister_ahash(&hmac->derived.alg);
		list_del(&hmac->derived.entry);
		kfree(hmac);
	}
	list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
		crypto_unregister_ahash(&alg->alg);
		list_del(&alg->entry);
		kfree(alg);
	}
}

static int n2_cipher_cra_init(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
	return 0;
}

static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
{
	struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct crypto_alg *alg;
	int err;

	if (!p)
		return -ENOMEM;

	alg = &p->alg;

	snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
	alg->cra_priority = N2_CRA_PRIORITY;
	alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
			 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
	alg->cra_blocksize = tmpl->block_size;
	p->enc_type = tmpl->enc_type;
	alg->cra_ctxsize = sizeof(struct n2_cipher_context);
	alg->cra_type = &crypto_ablkcipher_type;
	alg->cra_u.ablkcipher = tmpl->ablkcipher;
	alg->cra_init = n2_cipher_cra_init;
	alg->cra_module = THIS_MODULE;

	list_add(&p->entry, &cipher_algs);
	err = crypto_register_alg(alg);
	if (err) {
		pr_err("%s alg registration failed\n", alg->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", alg->cra_name);
	}
	return err;
}

static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
	struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct ahash_alg *ahash;
	struct crypto_alg *base;
	int err;

	if (!p)
		return -ENOMEM;

	p->child_alg = n2ahash->alg.halg.base.cra_name;
	memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
	INIT_LIST_HEAD(&p->derived.entry);

	ahash = &p->derived.alg;
	ahash->digest = n2_hmac_async_digest;
	ahash->setkey = n2_hmac_async_setkey;

	base = &ahash->halg.base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);

	base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
	base->cra_init = n2_hmac_cra_init;
	base->cra_exit = n2_hmac_cra_exit;

	list_add(&p->derived.entry, &hmac_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->derived.entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	return err;
}

static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
{
	struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
	struct hash_alg_common *halg;
	struct crypto_alg *base;
	struct ahash_alg *ahash;
	int err;

	if (!p)
		return -ENOMEM;

	p->hash_zero = tmpl->hash_zero;
	p->hash_init = tmpl->hash_init;
	p->auth_type = tmpl->auth_type;
	p->hmac_type = tmpl->hmac_type;
	p->hw_op_hashsz = tmpl->hw_op_hashsz;
	p->digest_size = tmpl->digest_size;

	ahash = &p->alg;
	ahash->init = n2_hash_async_init;
	ahash->update = n2_hash_async_update;
	ahash->final = n2_hash_async_final;
	ahash->finup = n2_hash_async_finup;
	ahash->digest = n2_hash_async_digest;
	ahash->export = n2_hash_async_noexport;
	ahash->import = n2_hash_async_noimport;

	halg = &ahash->halg;
	halg->digestsize = tmpl->digest_size;

	base = &halg->base;
	snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
	base->cra_priority = N2_CRA_PRIORITY;
	base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			  CRYPTO_ALG_NEED_FALLBACK;
	base->cra_blocksize = tmpl->block_size;
	base->cra_ctxsize = sizeof(struct n2_hash_ctx);
	base->cra_module = THIS_MODULE;
	base->cra_init = n2_hash_cra_init;
	base->cra_exit = n2_hash_cra_exit;

	list_add(&p->entry, &ahash_algs);
	err = crypto_register_ahash(ahash);
	if (err) {
		pr_err("%s alg registration failed\n", base->cra_name);
		list_del(&p->entry);
		kfree(p);
	} else {
		pr_info("%s alg registered\n", base->cra_name);
	}
	if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
		err = __n2_register_one_hmac(p);
	return err;
}

static int n2_register_algs(void)
{
	int i, err = 0;

	mutex_lock(&spu_lock);
	if (algs_registered++)
		goto out;

	for (i = 0; i < NUM_HASH_TMPLS; i++) {
		err = __n2_register_one_ahash(&hash_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}
	for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
		err = __n2_register_one_cipher(&cipher_tmpls[i]);
		if (err) {
			__n2_unregister_algs();
			goto out;
		}
	}

out:
	mutex_unlock(&spu_lock);
	return err;
}
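/* Registration is refcounted via algs_registered so that the n2cp and
 * ncp platform devices (and multiple instances of either) can probe
 * and remove independently: the algorithms are registered with the
 * crypto API on the first user and torn down with the last.
 */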
static void n2_unregister_algs(void)
{
	mutex_lock(&spu_lock);
	if (!--algs_registered)
		__n2_unregister_algs();
	mutex_unlock(&spu_lock);
}

/* To map CWQ queues to interrupt sources, the hypervisor API provides
 * a devino.  This isn't very useful to us because all of the
 * interrupts listed in the device_node have been translated to
 * Linux virtual IRQ cookie numbers.
 *
 * So we have to back-translate, going through the 'intr' and 'ino'
 * property tables of the n2cp MDESC node, matching it with the OF
 * 'interrupts' property entries, in order to figure out which
 * devino goes to which already-translated IRQ.
 */
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
			     unsigned long dev_ino)
{
	const unsigned int *dev_intrs;
	unsigned int intr;
	int i;

	for (i = 0; i < ip->num_intrs; i++) {
		if (ip->ino_table[i].ino == dev_ino)
			break;
	}
	if (i == ip->num_intrs)
		return -ENODEV;

	intr = ip->ino_table[i].intr;

	dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
	if (!dev_intrs)
		return -ENODEV;

	for (i = 0; i < dev->archdata.num_irqs; i++) {
		if (dev_intrs[i] == intr)
			return i;
	}

	return -ENODEV;
}

static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
		       const char *irq_name, struct spu_queue *p,
		       irq_handler_t handler)
{
	unsigned long herr;
	int index;

	herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
	if (herr)
		return -EINVAL;

	index = find_devino_index(dev, ip, p->devino);
	if (index < 0)
		return index;

	p->irq = dev->archdata.irqs[index];

	sprintf(p->irq_name, "%s-%d", irq_name, index);

	return request_irq(p->irq, handler, 0, p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
	return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
	kmem_cache_free(queue_cache[q_type - 1], p);
}

static int queue_cache_init(void)
{
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		queue_cache[HV_NCS_QTYPE_MAU - 1] =
			kmem_cache_create("mau_queue",
					  (MAU_NUM_ENTRIES *
					   MAU_ENTRY_SIZE),
					  MAU_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
		return -ENOMEM;

	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
		queue_cache[HV_NCS_QTYPE_CWQ - 1] =
			kmem_cache_create("cwq_queue",
					  (CWQ_NUM_ENTRIES *
					   CWQ_ENTRY_SIZE),
					  CWQ_ENTRY_SIZE, 0, NULL);
	if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
		kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
		queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void queue_cache_destroy(void)
{
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
	kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
	queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
	queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}
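/* Queue registration below is bounced via work_on_cpu_safe() to one of
 * the CPUs recorded in the queue's sharing mask, apparently because
 * the sun4v_ncs_qconf() hypercall associates the new queue with the
 * crypto unit serving the calling CPU.
 */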
static long spu_queue_register_workfn(void *arg)
{
	struct spu_qreg *qr = arg;
	struct spu_queue *p = qr->queue;
	unsigned long q_type = qr->type;
	unsigned long hv_ret;

	hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
				 CWQ_NUM_ENTRIES, &p->qhandle);
	if (!hv_ret)
		sun4v_ncs_sethead_marker(p->qhandle, 0);

	return hv_ret ? -EINVAL : 0;
}

static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
	int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
	struct spu_qreg qr = { .queue = p, .type = q_type };

	return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}

static int spu_queue_setup(struct spu_queue *p)
{
	int err;

	p->q = new_queue(p->q_type);
	if (!p->q)
		return -ENOMEM;

	err = spu_queue_register(p, p->q_type);
	if (err) {
		free_queue(p->q, p->q_type);
		p->q = NULL;
	}

	return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
	unsigned long hv_ret;

	if (!p->q)
		return;

	hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

	if (!hv_ret)
		free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
	struct spu_queue *p, *n;

	list_for_each_entry_safe(p, n, list, list) {
		int i;

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_to_cwq[i] == p)
				cpu_to_cwq[i] = NULL;
		}

		if (p->irq) {
			free_irq(p->irq, p);
			p->irq = 0;
		}
		spu_queue_destroy(p);
		list_del(&p->list);
		kfree(p);
	}
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
			       struct platform_device *dev,
			       u64 node, struct spu_queue *p,
			       struct spu_queue **table)
{
	u64 arc;

	mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
		u64 tgt = mdesc_arc_target(mdesc, arc);
		const char *name = mdesc_node_name(mdesc, tgt);
		const u64 *id;

		if (strcmp(name, "cpu"))
			continue;
		id = mdesc_get_property(mdesc, tgt, "id", NULL);
		if (table[*id] != NULL) {
			dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
				dev->dev.of_node);
			return -EINVAL;
		}
		cpumask_set_cpu(*id, &p->sharing);
		table[*id] = p;
	}
	return 0;
}
/* Process an 'exec-unit' MDESC node of type 'cwq'. */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
			    struct platform_device *dev, struct mdesc_handle *mdesc,
			    u64 node, const char *iname, unsigned long q_type,
			    irq_handler_t handler, struct spu_queue **table)
{
	struct spu_queue *p;
	int err;

	p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
	if (!p) {
		dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	cpumask_clear(&p->sharing);
	spin_lock_init(&p->lock);
	p->q_type = q_type;
	INIT_LIST_HEAD(&p->jobs);
	list_add(&p->list, list);

	err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
	if (err)
		return err;

	err = spu_queue_setup(p);
	if (err)
		return err;

	return spu_map_ino(dev, ip, iname, p, handler);
}

static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
			  struct spu_mdesc_info *ip, struct list_head *list,
			  const char *exec_name, unsigned long q_type,
			  irq_handler_t handler, struct spu_queue **table)
{
	int err = 0;
	u64 node;

	mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
		const char *type;

		type = mdesc_get_property(mdesc, node, "type", NULL);
		if (!type || strcmp(type, exec_name))
			continue;

		err = handle_exec_unit(ip, list, dev, mdesc, node,
				       exec_name, q_type, handler, table);
		if (err) {
			spu_list_destroy(list);
			break;
		}
	}

	return err;
}

static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
			 struct spu_mdesc_info *ip)
{
	const u64 *ino;
	int ino_len;
	int i;

	ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
	if (!ino) {
		printk("NO 'ino'\n");
		return -ENODEV;
	}

	ip->num_intrs = ino_len / sizeof(u64);
	ip->ino_table = kzalloc((sizeof(struct ino_blob) *
				 ip->num_intrs),
				GFP_KERNEL);
	if (!ip->ino_table)
		return -ENOMEM;

	for (i = 0; i < ip->num_intrs; i++) {
		struct ino_blob *b = &ip->ino_table[i];
		b->intr = i + 1;
		b->ino = ino[i];
	}

	return 0;
}

static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
				struct platform_device *dev,
				struct spu_mdesc_info *ip,
				const char *node_name)
{
	const unsigned int *reg;
	u64 node;

	reg = of_get_property(dev->dev.of_node, "reg", NULL);
	if (!reg)
		return -ENODEV;

	mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
		const char *name;
		const u64 *chdl;

		name = mdesc_get_property(mdesc, node, "name", NULL);
		if (!name || strcmp(name, node_name))
			continue;
		chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
		if (!chdl || (*chdl != *reg))
			continue;
		ip->cfg_handle = *chdl;
		return get_irq_props(mdesc, node, ip);
	}

	return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
	int err;

	n2_spu_hvapi_major = 2;
	n2_spu_hvapi_minor = 0;

	err = sun4v_hvapi_register(HV_GRP_NCS,
				   n2_spu_hvapi_major,
				   &n2_spu_hvapi_minor);

	if (!err)
		pr_info("Registered NCS HVAPI version %lu.%lu\n",
			n2_spu_hvapi_major,
			n2_spu_hvapi_minor);

	return err;
}
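/* Both platform drivers negotiate major version 2 of the NCS
 * hypervisor API group before touching any queues, and drop the group
 * again on final teardown.  All of the shared state (the HVAPI
 * registration, the queue slab caches, and the per-cpu queue tables)
 * is refcounted through global_ref under spu_lock, since the n2cp and
 * ncp devices probe independently.
 */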
static void n2_spu_hvapi_unregister(void)
{
	sun4v_hvapi_unregister(HV_GRP_NCS);
}

static int global_ref;

static int grab_global_resources(void)
{
	int err = 0;

	mutex_lock(&spu_lock);

	if (global_ref++)
		goto out;

	err = n2_spu_hvapi_register();
	if (err)
		goto out;

	err = queue_cache_init();
	if (err)
		goto out_hvapi_release;

	err = -ENOMEM;
	cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_cwq)
		goto out_queue_cache_destroy;

	cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
			     GFP_KERNEL);
	if (!cpu_to_mau)
		goto out_free_cwq_table;

	err = 0;

out:
	if (err)
		global_ref--;
	mutex_unlock(&spu_lock);
	return err;

out_free_cwq_table:
	kfree(cpu_to_cwq);
	cpu_to_cwq = NULL;

out_queue_cache_destroy:
	queue_cache_destroy();

out_hvapi_release:
	n2_spu_hvapi_unregister();
	goto out;
}

static void release_global_resources(void)
{
	mutex_lock(&spu_lock);
	if (!--global_ref) {
		kfree(cpu_to_cwq);
		cpu_to_cwq = NULL;

		kfree(cpu_to_mau);
		cpu_to_mau = NULL;

		queue_cache_destroy();
		n2_spu_hvapi_unregister();
	}
	mutex_unlock(&spu_lock);
}

static struct n2_crypto *alloc_n2cp(void)
{
	struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

	if (np)
		INIT_LIST_HEAD(&np->cwq_list);

	return np;
}

static void free_n2cp(struct n2_crypto *np)
{
	kfree(np->cwq_info.ino_table);
	np->cwq_info.ino_table = NULL;

	kfree(np);
}

static void n2_spu_driver_version(void)
{
	static int n2_spu_version_printed;

	if (n2_spu_version_printed++ == 0)
		pr_info("%s", version);
}

static int n2_crypto_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_crypto *np;
	int err;

	n2_spu_driver_version();

	pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

	np = alloc_n2cp();
	if (!np) {
		dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_n2cp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}
	err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
			     "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
			     cpu_to_cwq);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	err = n2_register_algs();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
			dev->dev.of_node);
		goto out_free_spu_list;
	}

	dev_set_drvdata(&dev->dev, np);

	return 0;

out_free_spu_list:
	spu_list_destroy(&np->cwq_list);
out_free_global:
	release_global_resources();

out_free_n2cp:
	free_n2cp(np);

	return err;
}

static int n2_crypto_remove(struct platform_device *dev)
{
	struct n2_crypto *np = dev_get_drvdata(&dev->dev);

	n2_unregister_algs();

	spu_list_destroy(&np->cwq_list);

	release_global_resources();

	free_n2cp(np);

	return 0;
}

static struct n2_mau *alloc_ncp(void)
{
	struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

	if (mp)
		INIT_LIST_HEAD(&mp->mau_list);

	return mp;
}

static void free_ncp(struct n2_mau *mp)
{
	kfree(mp->mau_info.ino_table);
	mp->mau_info.ino_table = NULL;

	kfree(mp);
}

static int n2_mau_probe(struct platform_device *dev)
{
	struct mdesc_handle *mdesc;
	struct n2_mau *mp;
	int err;

	n2_spu_driver_version();

	pr_info("Found NCP at %pOF\n", dev->dev.of_node);

	mp = alloc_ncp();
	if (!mp) {
		dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
			dev->dev.of_node);
		return -ENOMEM;
	}

	err = grab_global_resources();
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
			dev->dev.of_node);
		goto out_free_ncp;
	}

	mdesc = mdesc_grab();

	if (!mdesc) {
		dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
			dev->dev.of_node);
		err = -ENODEV;
		goto out_free_global;
	}

	err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
	if (err) {
		dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
			dev->dev.of_node);
		mdesc_release(mdesc);
		goto out_free_global;
	}

	err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
			     "mau", HV_NCS_QTYPE_MAU, mau_intr,
			     cpu_to_mau);
	mdesc_release(mdesc);

	if (err) {
		dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
			dev->dev.of_node);
		goto out_free_global;
	}

	dev_set_drvdata(&dev->dev, mp);

	return 0;

out_free_global:
	release_global_resources();

out_free_ncp:
	free_ncp(mp);

	return err;
}

static int n2_mau_remove(struct platform_device *dev)
{
	struct n2_mau *mp = dev_get_drvdata(&dev->dev);

	spu_list_destroy(&mp->mau_list);

	release_global_resources();

	free_ncp(mp);

	return 0;
}

static const struct of_device_id n2_crypto_match[] = {
	{
		.name = "n2cp",
		.compatible = "SUNW,n2-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,vf-cwq",
	},
	{
		.name = "n2cp",
		.compatible = "SUNW,kt-cwq",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
	.driver = {
		.name		= "n2cp",
		.of_match_table	= n2_crypto_match,
	},
	.probe		= n2_crypto_probe,
	.remove		= n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
	{
		.name = "ncp",
		.compatible = "SUNW,n2-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,vf-mau",
	},
	{
		.name = "ncp",
		.compatible = "SUNW,kt-mau",
	},
	{},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
	.driver = {
		.name		= "ncp",
		.of_match_table	= n2_mau_match,
	},
	.probe		= n2_mau_probe,
	.remove		= n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
	&n2_crypto_driver,
	&n2_mau_driver,
};

static int __init n2_init(void)
{
	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);