1 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support. 2 * 3 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net> 4 */ 5 6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 7 8 #include <linux/kernel.h> 9 #include <linux/module.h> 10 #include <linux/of.h> 11 #include <linux/of_device.h> 12 #include <linux/cpumask.h> 13 #include <linux/slab.h> 14 #include <linux/interrupt.h> 15 #include <linux/crypto.h> 16 #include <crypto/md5.h> 17 #include <crypto/sha.h> 18 #include <crypto/aes.h> 19 #include <crypto/des.h> 20 #include <linux/mutex.h> 21 #include <linux/delay.h> 22 #include <linux/sched.h> 23 24 #include <crypto/internal/hash.h> 25 #include <crypto/scatterwalk.h> 26 #include <crypto/algapi.h> 27 28 #include <asm/hypervisor.h> 29 #include <asm/mdesc.h> 30 31 #include "n2_core.h" 32 33 #define DRV_MODULE_NAME "n2_crypto" 34 #define DRV_MODULE_VERSION "0.2" 35 #define DRV_MODULE_RELDATE "July 28, 2011" 36 37 static const char version[] = 38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; 39 40 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); 41 MODULE_DESCRIPTION("Niagara2 Crypto driver"); 42 MODULE_LICENSE("GPL"); 43 MODULE_VERSION(DRV_MODULE_VERSION); 44 45 #define N2_CRA_PRIORITY 200 46 47 static DEFINE_MUTEX(spu_lock); 48 49 struct spu_queue { 50 cpumask_t sharing; 51 unsigned long qhandle; 52 53 spinlock_t lock; 54 u8 q_type; 55 void *q; 56 unsigned long head; 57 unsigned long tail; 58 struct list_head jobs; 59 60 unsigned long devino; 61 62 char irq_name[32]; 63 unsigned int irq; 64 65 struct list_head list; 66 }; 67 68 struct spu_qreg { 69 struct spu_queue *queue; 70 unsigned long type; 71 }; 72 73 static struct spu_queue **cpu_to_cwq; 74 static struct spu_queue **cpu_to_mau; 75 76 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off) 77 { 78 if (q->q_type == HV_NCS_QTYPE_MAU) { 79 off += MAU_ENTRY_SIZE; 80 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES)) 81 off = 0; 82 } else { 83 off += CWQ_ENTRY_SIZE; 84 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES)) 85 off = 0; 86 } 87 return off; 88 } 89 90 struct n2_request_common { 91 struct list_head entry; 92 unsigned int offset; 93 }; 94 #define OFFSET_NOT_RUNNING (~(unsigned int)0) 95 96 /* An async job request records the final tail value it used in 97 * n2_request_common->offset, test to see if that offset is in 98 * the range old_head, new_head, inclusive. 99 */ 100 static inline bool job_finished(struct spu_queue *q, unsigned int offset, 101 unsigned long old_head, unsigned long new_head) 102 { 103 if (old_head <= new_head) { 104 if (offset > old_head && offset <= new_head) 105 return true; 106 } else { 107 if (offset > old_head || offset <= new_head) 108 return true; 109 } 110 return false; 111 } 112 113 /* When the HEAD marker is unequal to the actual HEAD, we get 114 * a virtual device INO interrupt. We should process the 115 * completed CWQ entries and adjust the HEAD marker to clear 116 * the IRQ. 117 */ 118 static irqreturn_t cwq_intr(int irq, void *dev_id) 119 { 120 unsigned long off, new_head, hv_ret; 121 struct spu_queue *q = dev_id; 122 123 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n", 124 smp_processor_id(), q->qhandle); 125 126 spin_lock(&q->lock); 127 128 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head); 129 130 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n", 131 smp_processor_id(), new_head, hv_ret); 132 133 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) { 134 /* XXX ... XXX */ 135 } 136 137 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head); 138 if (hv_ret == HV_EOK) 139 q->head = new_head; 140 141 spin_unlock(&q->lock); 142 143 return IRQ_HANDLED; 144 } 145 146 static irqreturn_t mau_intr(int irq, void *dev_id) 147 { 148 struct spu_queue *q = dev_id; 149 unsigned long head, hv_ret; 150 151 spin_lock(&q->lock); 152 153 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n", 154 smp_processor_id(), q->qhandle); 155 156 hv_ret = sun4v_ncs_gethead(q->qhandle, &head); 157 158 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n", 159 smp_processor_id(), head, hv_ret); 160 161 sun4v_ncs_sethead_marker(q->qhandle, head); 162 163 spin_unlock(&q->lock); 164 165 return IRQ_HANDLED; 166 } 167 168 static void *spu_queue_next(struct spu_queue *q, void *cur) 169 { 170 return q->q + spu_next_offset(q, cur - q->q); 171 } 172 173 static int spu_queue_num_free(struct spu_queue *q) 174 { 175 unsigned long head = q->head; 176 unsigned long tail = q->tail; 177 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES); 178 unsigned long diff; 179 180 if (head > tail) 181 diff = head - tail; 182 else 183 diff = (end - tail) + head; 184 185 return (diff / CWQ_ENTRY_SIZE) - 1; 186 } 187 188 static void *spu_queue_alloc(struct spu_queue *q, int num_entries) 189 { 190 int avail = spu_queue_num_free(q); 191 192 if (avail >= num_entries) 193 return q->q + q->tail; 194 195 return NULL; 196 } 197 198 static unsigned long spu_queue_submit(struct spu_queue *q, void *last) 199 { 200 unsigned long hv_ret, new_tail; 201 202 new_tail = spu_next_offset(q, last - q->q); 203 204 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail); 205 if (hv_ret == HV_EOK) 206 q->tail = new_tail; 207 return hv_ret; 208 } 209 210 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len, 211 int enc_type, int auth_type, 212 unsigned int hash_len, 213 bool sfas, bool sob, bool eob, bool encrypt, 214 int opcode) 215 { 216 u64 word = (len - 1) & CONTROL_LEN; 217 218 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT); 219 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT); 220 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT); 221 if (sfas) 222 word |= CONTROL_STORE_FINAL_AUTH_STATE; 223 if (sob) 224 word |= CONTROL_START_OF_BLOCK; 225 if (eob) 226 word |= CONTROL_END_OF_BLOCK; 227 if (encrypt) 228 word |= CONTROL_ENCRYPT; 229 if (hmac_key_len) 230 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT; 231 if (hash_len) 232 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT; 233 234 return word; 235 } 236 237 #if 0 238 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len) 239 { 240 if (this_len >= 64 || 241 qp->head != qp->tail) 242 return true; 243 return false; 244 } 245 #endif 246 247 struct n2_ahash_alg { 248 struct list_head entry; 249 const u8 *hash_zero; 250 const u32 *hash_init; 251 u8 hw_op_hashsz; 252 u8 digest_size; 253 u8 auth_type; 254 u8 hmac_type; 255 struct ahash_alg alg; 256 }; 257 258 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm) 259 { 260 struct crypto_alg *alg = tfm->__crt_alg; 261 struct ahash_alg *ahash_alg; 262 263 ahash_alg = container_of(alg, struct ahash_alg, halg.base); 264 265 return container_of(ahash_alg, struct n2_ahash_alg, alg); 266 } 267 268 struct n2_hmac_alg { 269 const char *child_alg; 270 struct n2_ahash_alg derived; 271 }; 272 273 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm) 274 { 275 struct crypto_alg *alg = tfm->__crt_alg; 276 struct ahash_alg *ahash_alg; 277 278 ahash_alg = container_of(alg, struct ahash_alg, halg.base); 279 280 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg); 281 } 282 283 struct n2_hash_ctx { 284 struct crypto_ahash *fallback_tfm; 285 }; 286 287 #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */ 288 289 struct n2_hmac_ctx { 290 struct n2_hash_ctx base; 291 292 struct crypto_shash *child_shash; 293 294 int hash_key_len; 295 unsigned char hash_key[N2_HASH_KEY_MAX]; 296 }; 297 298 struct n2_hash_req_ctx { 299 union { 300 struct md5_state md5; 301 struct sha1_state sha1; 302 struct sha256_state sha256; 303 } u; 304 305 struct ahash_request fallback_req; 306 }; 307 308 static int n2_hash_async_init(struct ahash_request *req) 309 { 310 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 311 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 312 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 313 314 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 315 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 316 317 return crypto_ahash_init(&rctx->fallback_req); 318 } 319 320 static int n2_hash_async_update(struct ahash_request *req) 321 { 322 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 323 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 324 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 325 326 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 327 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 328 rctx->fallback_req.nbytes = req->nbytes; 329 rctx->fallback_req.src = req->src; 330 331 return crypto_ahash_update(&rctx->fallback_req); 332 } 333 334 static int n2_hash_async_final(struct ahash_request *req) 335 { 336 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 337 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 338 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 339 340 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 341 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 342 rctx->fallback_req.result = req->result; 343 344 return crypto_ahash_final(&rctx->fallback_req); 345 } 346 347 static int n2_hash_async_finup(struct ahash_request *req) 348 { 349 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 350 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 351 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 352 353 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 354 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 355 rctx->fallback_req.nbytes = req->nbytes; 356 rctx->fallback_req.src = req->src; 357 rctx->fallback_req.result = req->result; 358 359 return crypto_ahash_finup(&rctx->fallback_req); 360 } 361 362 static int n2_hash_cra_init(struct crypto_tfm *tfm) 363 { 364 const char *fallback_driver_name = crypto_tfm_alg_name(tfm); 365 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 366 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 367 struct crypto_ahash *fallback_tfm; 368 int err; 369 370 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 371 CRYPTO_ALG_NEED_FALLBACK); 372 if (IS_ERR(fallback_tfm)) { 373 pr_warning("Fallback driver '%s' could not be loaded!\n", 374 fallback_driver_name); 375 err = PTR_ERR(fallback_tfm); 376 goto out; 377 } 378 379 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 380 crypto_ahash_reqsize(fallback_tfm))); 381 382 ctx->fallback_tfm = fallback_tfm; 383 return 0; 384 385 out: 386 return err; 387 } 388 389 static void n2_hash_cra_exit(struct crypto_tfm *tfm) 390 { 391 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 392 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash); 393 394 crypto_free_ahash(ctx->fallback_tfm); 395 } 396 397 static int n2_hmac_cra_init(struct crypto_tfm *tfm) 398 { 399 const char *fallback_driver_name = crypto_tfm_alg_name(tfm); 400 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 401 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 402 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm); 403 struct crypto_ahash *fallback_tfm; 404 struct crypto_shash *child_shash; 405 int err; 406 407 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0, 408 CRYPTO_ALG_NEED_FALLBACK); 409 if (IS_ERR(fallback_tfm)) { 410 pr_warning("Fallback driver '%s' could not be loaded!\n", 411 fallback_driver_name); 412 err = PTR_ERR(fallback_tfm); 413 goto out; 414 } 415 416 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0); 417 if (IS_ERR(child_shash)) { 418 pr_warning("Child shash '%s' could not be loaded!\n", 419 n2alg->child_alg); 420 err = PTR_ERR(child_shash); 421 goto out_free_fallback; 422 } 423 424 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) + 425 crypto_ahash_reqsize(fallback_tfm))); 426 427 ctx->child_shash = child_shash; 428 ctx->base.fallback_tfm = fallback_tfm; 429 return 0; 430 431 out_free_fallback: 432 crypto_free_ahash(fallback_tfm); 433 434 out: 435 return err; 436 } 437 438 static void n2_hmac_cra_exit(struct crypto_tfm *tfm) 439 { 440 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); 441 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash); 442 443 crypto_free_ahash(ctx->base.fallback_tfm); 444 crypto_free_shash(ctx->child_shash); 445 } 446 447 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key, 448 unsigned int keylen) 449 { 450 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 451 struct crypto_shash *child_shash = ctx->child_shash; 452 struct crypto_ahash *fallback_tfm; 453 SHASH_DESC_ON_STACK(shash, child_shash); 454 int err, bs, ds; 455 456 fallback_tfm = ctx->base.fallback_tfm; 457 err = crypto_ahash_setkey(fallback_tfm, key, keylen); 458 if (err) 459 return err; 460 461 shash->tfm = child_shash; 462 shash->flags = crypto_ahash_get_flags(tfm) & 463 CRYPTO_TFM_REQ_MAY_SLEEP; 464 465 bs = crypto_shash_blocksize(child_shash); 466 ds = crypto_shash_digestsize(child_shash); 467 BUG_ON(ds > N2_HASH_KEY_MAX); 468 if (keylen > bs) { 469 err = crypto_shash_digest(shash, key, keylen, 470 ctx->hash_key); 471 if (err) 472 return err; 473 keylen = ds; 474 } else if (keylen <= N2_HASH_KEY_MAX) 475 memcpy(ctx->hash_key, key, keylen); 476 477 ctx->hash_key_len = keylen; 478 479 return err; 480 } 481 482 static unsigned long wait_for_tail(struct spu_queue *qp) 483 { 484 unsigned long head, hv_ret; 485 486 do { 487 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head); 488 if (hv_ret != HV_EOK) { 489 pr_err("Hypervisor error on gethead\n"); 490 break; 491 } 492 if (head == qp->tail) { 493 qp->head = head; 494 break; 495 } 496 } while (1); 497 return hv_ret; 498 } 499 500 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp, 501 struct cwq_initial_entry *ent) 502 { 503 unsigned long hv_ret = spu_queue_submit(qp, ent); 504 505 if (hv_ret == HV_EOK) 506 hv_ret = wait_for_tail(qp); 507 508 return hv_ret; 509 } 510 511 static int n2_do_async_digest(struct ahash_request *req, 512 unsigned int auth_type, unsigned int digest_size, 513 unsigned int result_size, void *hash_loc, 514 unsigned long auth_key, unsigned int auth_key_len) 515 { 516 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 517 struct cwq_initial_entry *ent; 518 struct crypto_hash_walk walk; 519 struct spu_queue *qp; 520 unsigned long flags; 521 int err = -ENODEV; 522 int nbytes, cpu; 523 524 /* The total effective length of the operation may not 525 * exceed 2^16. 526 */ 527 if (unlikely(req->nbytes > (1 << 16))) { 528 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 529 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 530 531 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 532 rctx->fallback_req.base.flags = 533 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 534 rctx->fallback_req.nbytes = req->nbytes; 535 rctx->fallback_req.src = req->src; 536 rctx->fallback_req.result = req->result; 537 538 return crypto_ahash_digest(&rctx->fallback_req); 539 } 540 541 nbytes = crypto_hash_walk_first(req, &walk); 542 543 cpu = get_cpu(); 544 qp = cpu_to_cwq[cpu]; 545 if (!qp) 546 goto out; 547 548 spin_lock_irqsave(&qp->lock, flags); 549 550 /* XXX can do better, improve this later by doing a by-hand scatterlist 551 * XXX walk, etc. 552 */ 553 ent = qp->q + qp->tail; 554 555 ent->control = control_word_base(nbytes, auth_key_len, 0, 556 auth_type, digest_size, 557 false, true, false, false, 558 OPCODE_INPLACE_BIT | 559 OPCODE_AUTH_MAC); 560 ent->src_addr = __pa(walk.data); 561 ent->auth_key_addr = auth_key; 562 ent->auth_iv_addr = __pa(hash_loc); 563 ent->final_auth_state_addr = 0UL; 564 ent->enc_key_addr = 0UL; 565 ent->enc_iv_addr = 0UL; 566 ent->dest_addr = __pa(hash_loc); 567 568 nbytes = crypto_hash_walk_done(&walk, 0); 569 while (nbytes > 0) { 570 ent = spu_queue_next(qp, ent); 571 572 ent->control = (nbytes - 1); 573 ent->src_addr = __pa(walk.data); 574 ent->auth_key_addr = 0UL; 575 ent->auth_iv_addr = 0UL; 576 ent->final_auth_state_addr = 0UL; 577 ent->enc_key_addr = 0UL; 578 ent->enc_iv_addr = 0UL; 579 ent->dest_addr = 0UL; 580 581 nbytes = crypto_hash_walk_done(&walk, 0); 582 } 583 ent->control |= CONTROL_END_OF_BLOCK; 584 585 if (submit_and_wait_for_tail(qp, ent) != HV_EOK) 586 err = -EINVAL; 587 else 588 err = 0; 589 590 spin_unlock_irqrestore(&qp->lock, flags); 591 592 if (!err) 593 memcpy(req->result, hash_loc, result_size); 594 out: 595 put_cpu(); 596 597 return err; 598 } 599 600 static int n2_hash_async_digest(struct ahash_request *req) 601 { 602 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm); 603 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 604 int ds; 605 606 ds = n2alg->digest_size; 607 if (unlikely(req->nbytes == 0)) { 608 memcpy(req->result, n2alg->hash_zero, ds); 609 return 0; 610 } 611 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz); 612 613 return n2_do_async_digest(req, n2alg->auth_type, 614 n2alg->hw_op_hashsz, ds, 615 &rctx->u, 0UL, 0); 616 } 617 618 static int n2_hmac_async_digest(struct ahash_request *req) 619 { 620 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm); 621 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 622 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); 623 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm); 624 int ds; 625 626 ds = n2alg->derived.digest_size; 627 if (unlikely(req->nbytes == 0) || 628 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) { 629 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req); 630 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm); 631 632 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm); 633 rctx->fallback_req.base.flags = 634 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP; 635 rctx->fallback_req.nbytes = req->nbytes; 636 rctx->fallback_req.src = req->src; 637 rctx->fallback_req.result = req->result; 638 639 return crypto_ahash_digest(&rctx->fallback_req); 640 } 641 memcpy(&rctx->u, n2alg->derived.hash_init, 642 n2alg->derived.hw_op_hashsz); 643 644 return n2_do_async_digest(req, n2alg->derived.hmac_type, 645 n2alg->derived.hw_op_hashsz, ds, 646 &rctx->u, 647 __pa(&ctx->hash_key), 648 ctx->hash_key_len); 649 } 650 651 struct n2_cipher_context { 652 int key_len; 653 int enc_type; 654 union { 655 u8 aes[AES_MAX_KEY_SIZE]; 656 u8 des[DES_KEY_SIZE]; 657 u8 des3[3 * DES_KEY_SIZE]; 658 u8 arc4[258]; /* S-box, X, Y */ 659 } key; 660 }; 661 662 #define N2_CHUNK_ARR_LEN 16 663 664 struct n2_crypto_chunk { 665 struct list_head entry; 666 unsigned long iv_paddr : 44; 667 unsigned long arr_len : 20; 668 unsigned long dest_paddr; 669 unsigned long dest_final; 670 struct { 671 unsigned long src_paddr : 44; 672 unsigned long src_len : 20; 673 } arr[N2_CHUNK_ARR_LEN]; 674 }; 675 676 struct n2_request_context { 677 struct ablkcipher_walk walk; 678 struct list_head chunk_list; 679 struct n2_crypto_chunk chunk; 680 u8 temp_iv[16]; 681 }; 682 683 /* The SPU allows some level of flexibility for partial cipher blocks 684 * being specified in a descriptor. 685 * 686 * It merely requires that every descriptor's length field is at least 687 * as large as the cipher block size. This means that a cipher block 688 * can span at most 2 descriptors. However, this does not allow a 689 * partial block to span into the final descriptor as that would 690 * violate the rule (since every descriptor's length must be at lest 691 * the block size). So, for example, assuming an 8 byte block size: 692 * 693 * 0xe --> 0xa --> 0x8 694 * 695 * is a valid length sequence, whereas: 696 * 697 * 0xe --> 0xb --> 0x7 698 * 699 * is not a valid sequence. 700 */ 701 702 struct n2_cipher_alg { 703 struct list_head entry; 704 u8 enc_type; 705 struct crypto_alg alg; 706 }; 707 708 static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm) 709 { 710 struct crypto_alg *alg = tfm->__crt_alg; 711 712 return container_of(alg, struct n2_cipher_alg, alg); 713 } 714 715 struct n2_cipher_request_context { 716 struct ablkcipher_walk walk; 717 }; 718 719 static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 720 unsigned int keylen) 721 { 722 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 723 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 724 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 725 726 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK); 727 728 switch (keylen) { 729 case AES_KEYSIZE_128: 730 ctx->enc_type |= ENC_TYPE_ALG_AES128; 731 break; 732 case AES_KEYSIZE_192: 733 ctx->enc_type |= ENC_TYPE_ALG_AES192; 734 break; 735 case AES_KEYSIZE_256: 736 ctx->enc_type |= ENC_TYPE_ALG_AES256; 737 break; 738 default: 739 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 740 return -EINVAL; 741 } 742 743 ctx->key_len = keylen; 744 memcpy(ctx->key.aes, key, keylen); 745 return 0; 746 } 747 748 static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 749 unsigned int keylen) 750 { 751 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 752 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 753 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 754 u32 tmp[DES_EXPKEY_WORDS]; 755 int err; 756 757 ctx->enc_type = n2alg->enc_type; 758 759 if (keylen != DES_KEY_SIZE) { 760 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 761 return -EINVAL; 762 } 763 764 err = des_ekey(tmp, key); 765 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { 766 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; 767 return -EINVAL; 768 } 769 770 ctx->key_len = keylen; 771 memcpy(ctx->key.des, key, keylen); 772 return 0; 773 } 774 775 static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 776 unsigned int keylen) 777 { 778 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 779 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 780 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 781 782 ctx->enc_type = n2alg->enc_type; 783 784 if (keylen != (3 * DES_KEY_SIZE)) { 785 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); 786 return -EINVAL; 787 } 788 ctx->key_len = keylen; 789 memcpy(ctx->key.des3, key, keylen); 790 return 0; 791 } 792 793 static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key, 794 unsigned int keylen) 795 { 796 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); 797 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 798 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm); 799 u8 *s = ctx->key.arc4; 800 u8 *x = s + 256; 801 u8 *y = x + 1; 802 int i, j, k; 803 804 ctx->enc_type = n2alg->enc_type; 805 806 j = k = 0; 807 *x = 0; 808 *y = 0; 809 for (i = 0; i < 256; i++) 810 s[i] = i; 811 for (i = 0; i < 256; i++) { 812 u8 a = s[i]; 813 j = (j + key[k] + a) & 0xff; 814 s[i] = s[j]; 815 s[j] = a; 816 if (++k >= keylen) 817 k = 0; 818 } 819 820 return 0; 821 } 822 823 static inline int cipher_descriptor_len(int nbytes, unsigned int block_size) 824 { 825 int this_len = nbytes; 826 827 this_len -= (nbytes & (block_size - 1)); 828 return this_len > (1 << 16) ? (1 << 16) : this_len; 829 } 830 831 static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp, 832 struct spu_queue *qp, bool encrypt) 833 { 834 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm); 835 struct cwq_initial_entry *ent; 836 bool in_place; 837 int i; 838 839 ent = spu_queue_alloc(qp, cp->arr_len); 840 if (!ent) { 841 pr_info("queue_alloc() of %d fails\n", 842 cp->arr_len); 843 return -EBUSY; 844 } 845 846 in_place = (cp->dest_paddr == cp->arr[0].src_paddr); 847 848 ent->control = control_word_base(cp->arr[0].src_len, 849 0, ctx->enc_type, 0, 0, 850 false, true, false, encrypt, 851 OPCODE_ENCRYPT | 852 (in_place ? OPCODE_INPLACE_BIT : 0)); 853 ent->src_addr = cp->arr[0].src_paddr; 854 ent->auth_key_addr = 0UL; 855 ent->auth_iv_addr = 0UL; 856 ent->final_auth_state_addr = 0UL; 857 ent->enc_key_addr = __pa(&ctx->key); 858 ent->enc_iv_addr = cp->iv_paddr; 859 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr); 860 861 for (i = 1; i < cp->arr_len; i++) { 862 ent = spu_queue_next(qp, ent); 863 864 ent->control = cp->arr[i].src_len - 1; 865 ent->src_addr = cp->arr[i].src_paddr; 866 ent->auth_key_addr = 0UL; 867 ent->auth_iv_addr = 0UL; 868 ent->final_auth_state_addr = 0UL; 869 ent->enc_key_addr = 0UL; 870 ent->enc_iv_addr = 0UL; 871 ent->dest_addr = 0UL; 872 } 873 ent->control |= CONTROL_END_OF_BLOCK; 874 875 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0; 876 } 877 878 static int n2_compute_chunks(struct ablkcipher_request *req) 879 { 880 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 881 struct ablkcipher_walk *walk = &rctx->walk; 882 struct n2_crypto_chunk *chunk; 883 unsigned long dest_prev; 884 unsigned int tot_len; 885 bool prev_in_place; 886 int err, nbytes; 887 888 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes); 889 err = ablkcipher_walk_phys(req, walk); 890 if (err) 891 return err; 892 893 INIT_LIST_HEAD(&rctx->chunk_list); 894 895 chunk = &rctx->chunk; 896 INIT_LIST_HEAD(&chunk->entry); 897 898 chunk->iv_paddr = 0UL; 899 chunk->arr_len = 0; 900 chunk->dest_paddr = 0UL; 901 902 prev_in_place = false; 903 dest_prev = ~0UL; 904 tot_len = 0; 905 906 while ((nbytes = walk->nbytes) != 0) { 907 unsigned long dest_paddr, src_paddr; 908 bool in_place; 909 int this_len; 910 911 src_paddr = (page_to_phys(walk->src.page) + 912 walk->src.offset); 913 dest_paddr = (page_to_phys(walk->dst.page) + 914 walk->dst.offset); 915 in_place = (src_paddr == dest_paddr); 916 this_len = cipher_descriptor_len(nbytes, walk->blocksize); 917 918 if (chunk->arr_len != 0) { 919 if (in_place != prev_in_place || 920 (!prev_in_place && 921 dest_paddr != dest_prev) || 922 chunk->arr_len == N2_CHUNK_ARR_LEN || 923 tot_len + this_len > (1 << 16)) { 924 chunk->dest_final = dest_prev; 925 list_add_tail(&chunk->entry, 926 &rctx->chunk_list); 927 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC); 928 if (!chunk) { 929 err = -ENOMEM; 930 break; 931 } 932 INIT_LIST_HEAD(&chunk->entry); 933 } 934 } 935 if (chunk->arr_len == 0) { 936 chunk->dest_paddr = dest_paddr; 937 tot_len = 0; 938 } 939 chunk->arr[chunk->arr_len].src_paddr = src_paddr; 940 chunk->arr[chunk->arr_len].src_len = this_len; 941 chunk->arr_len++; 942 943 dest_prev = dest_paddr + this_len; 944 prev_in_place = in_place; 945 tot_len += this_len; 946 947 err = ablkcipher_walk_done(req, walk, nbytes - this_len); 948 if (err) 949 break; 950 } 951 if (!err && chunk->arr_len != 0) { 952 chunk->dest_final = dest_prev; 953 list_add_tail(&chunk->entry, &rctx->chunk_list); 954 } 955 956 return err; 957 } 958 959 static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv) 960 { 961 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 962 struct n2_crypto_chunk *c, *tmp; 963 964 if (final_iv) 965 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize); 966 967 ablkcipher_walk_complete(&rctx->walk); 968 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { 969 list_del(&c->entry); 970 if (unlikely(c != &rctx->chunk)) 971 kfree(c); 972 } 973 974 } 975 976 static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt) 977 { 978 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 979 struct crypto_tfm *tfm = req->base.tfm; 980 int err = n2_compute_chunks(req); 981 struct n2_crypto_chunk *c, *tmp; 982 unsigned long flags, hv_ret; 983 struct spu_queue *qp; 984 985 if (err) 986 return err; 987 988 qp = cpu_to_cwq[get_cpu()]; 989 err = -ENODEV; 990 if (!qp) 991 goto out; 992 993 spin_lock_irqsave(&qp->lock, flags); 994 995 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) { 996 err = __n2_crypt_chunk(tfm, c, qp, encrypt); 997 if (err) 998 break; 999 list_del(&c->entry); 1000 if (unlikely(c != &rctx->chunk)) 1001 kfree(c); 1002 } 1003 if (!err) { 1004 hv_ret = wait_for_tail(qp); 1005 if (hv_ret != HV_EOK) 1006 err = -EINVAL; 1007 } 1008 1009 spin_unlock_irqrestore(&qp->lock, flags); 1010 1011 out: 1012 put_cpu(); 1013 1014 n2_chunk_complete(req, NULL); 1015 return err; 1016 } 1017 1018 static int n2_encrypt_ecb(struct ablkcipher_request *req) 1019 { 1020 return n2_do_ecb(req, true); 1021 } 1022 1023 static int n2_decrypt_ecb(struct ablkcipher_request *req) 1024 { 1025 return n2_do_ecb(req, false); 1026 } 1027 1028 static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt) 1029 { 1030 struct n2_request_context *rctx = ablkcipher_request_ctx(req); 1031 struct crypto_tfm *tfm = req->base.tfm; 1032 unsigned long flags, hv_ret, iv_paddr; 1033 int err = n2_compute_chunks(req); 1034 struct n2_crypto_chunk *c, *tmp; 1035 struct spu_queue *qp; 1036 void *final_iv_addr; 1037 1038 final_iv_addr = NULL; 1039 1040 if (err) 1041 return err; 1042 1043 qp = cpu_to_cwq[get_cpu()]; 1044 err = -ENODEV; 1045 if (!qp) 1046 goto out; 1047 1048 spin_lock_irqsave(&qp->lock, flags); 1049 1050 if (encrypt) { 1051 iv_paddr = __pa(rctx->walk.iv); 1052 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, 1053 entry) { 1054 c->iv_paddr = iv_paddr; 1055 err = __n2_crypt_chunk(tfm, c, qp, true); 1056 if (err) 1057 break; 1058 iv_paddr = c->dest_final - rctx->walk.blocksize; 1059 list_del(&c->entry); 1060 if (unlikely(c != &rctx->chunk)) 1061 kfree(c); 1062 } 1063 final_iv_addr = __va(iv_paddr); 1064 } else { 1065 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list, 1066 entry) { 1067 if (c == &rctx->chunk) { 1068 iv_paddr = __pa(rctx->walk.iv); 1069 } else { 1070 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr + 1071 tmp->arr[tmp->arr_len-1].src_len - 1072 rctx->walk.blocksize); 1073 } 1074 if (!final_iv_addr) { 1075 unsigned long pa; 1076 1077 pa = (c->arr[c->arr_len-1].src_paddr + 1078 c->arr[c->arr_len-1].src_len - 1079 rctx->walk.blocksize); 1080 final_iv_addr = rctx->temp_iv; 1081 memcpy(rctx->temp_iv, __va(pa), 1082 rctx->walk.blocksize); 1083 } 1084 c->iv_paddr = iv_paddr; 1085 err = __n2_crypt_chunk(tfm, c, qp, false); 1086 if (err) 1087 break; 1088 list_del(&c->entry); 1089 if (unlikely(c != &rctx->chunk)) 1090 kfree(c); 1091 } 1092 } 1093 if (!err) { 1094 hv_ret = wait_for_tail(qp); 1095 if (hv_ret != HV_EOK) 1096 err = -EINVAL; 1097 } 1098 1099 spin_unlock_irqrestore(&qp->lock, flags); 1100 1101 out: 1102 put_cpu(); 1103 1104 n2_chunk_complete(req, err ? NULL : final_iv_addr); 1105 return err; 1106 } 1107 1108 static int n2_encrypt_chaining(struct ablkcipher_request *req) 1109 { 1110 return n2_do_chaining(req, true); 1111 } 1112 1113 static int n2_decrypt_chaining(struct ablkcipher_request *req) 1114 { 1115 return n2_do_chaining(req, false); 1116 } 1117 1118 struct n2_cipher_tmpl { 1119 const char *name; 1120 const char *drv_name; 1121 u8 block_size; 1122 u8 enc_type; 1123 struct ablkcipher_alg ablkcipher; 1124 }; 1125 1126 static const struct n2_cipher_tmpl cipher_tmpls[] = { 1127 /* ARC4: only ECB is supported (chaining bits ignored) */ 1128 { .name = "ecb(arc4)", 1129 .drv_name = "ecb-arc4", 1130 .block_size = 1, 1131 .enc_type = (ENC_TYPE_ALG_RC4_STREAM | 1132 ENC_TYPE_CHAINING_ECB), 1133 .ablkcipher = { 1134 .min_keysize = 1, 1135 .max_keysize = 256, 1136 .setkey = n2_arc4_setkey, 1137 .encrypt = n2_encrypt_ecb, 1138 .decrypt = n2_decrypt_ecb, 1139 }, 1140 }, 1141 1142 /* DES: ECB CBC and CFB are supported */ 1143 { .name = "ecb(des)", 1144 .drv_name = "ecb-des", 1145 .block_size = DES_BLOCK_SIZE, 1146 .enc_type = (ENC_TYPE_ALG_DES | 1147 ENC_TYPE_CHAINING_ECB), 1148 .ablkcipher = { 1149 .min_keysize = DES_KEY_SIZE, 1150 .max_keysize = DES_KEY_SIZE, 1151 .setkey = n2_des_setkey, 1152 .encrypt = n2_encrypt_ecb, 1153 .decrypt = n2_decrypt_ecb, 1154 }, 1155 }, 1156 { .name = "cbc(des)", 1157 .drv_name = "cbc-des", 1158 .block_size = DES_BLOCK_SIZE, 1159 .enc_type = (ENC_TYPE_ALG_DES | 1160 ENC_TYPE_CHAINING_CBC), 1161 .ablkcipher = { 1162 .ivsize = DES_BLOCK_SIZE, 1163 .min_keysize = DES_KEY_SIZE, 1164 .max_keysize = DES_KEY_SIZE, 1165 .setkey = n2_des_setkey, 1166 .encrypt = n2_encrypt_chaining, 1167 .decrypt = n2_decrypt_chaining, 1168 }, 1169 }, 1170 { .name = "cfb(des)", 1171 .drv_name = "cfb-des", 1172 .block_size = DES_BLOCK_SIZE, 1173 .enc_type = (ENC_TYPE_ALG_DES | 1174 ENC_TYPE_CHAINING_CFB), 1175 .ablkcipher = { 1176 .min_keysize = DES_KEY_SIZE, 1177 .max_keysize = DES_KEY_SIZE, 1178 .setkey = n2_des_setkey, 1179 .encrypt = n2_encrypt_chaining, 1180 .decrypt = n2_decrypt_chaining, 1181 }, 1182 }, 1183 1184 /* 3DES: ECB CBC and CFB are supported */ 1185 { .name = "ecb(des3_ede)", 1186 .drv_name = "ecb-3des", 1187 .block_size = DES_BLOCK_SIZE, 1188 .enc_type = (ENC_TYPE_ALG_3DES | 1189 ENC_TYPE_CHAINING_ECB), 1190 .ablkcipher = { 1191 .min_keysize = 3 * DES_KEY_SIZE, 1192 .max_keysize = 3 * DES_KEY_SIZE, 1193 .setkey = n2_3des_setkey, 1194 .encrypt = n2_encrypt_ecb, 1195 .decrypt = n2_decrypt_ecb, 1196 }, 1197 }, 1198 { .name = "cbc(des3_ede)", 1199 .drv_name = "cbc-3des", 1200 .block_size = DES_BLOCK_SIZE, 1201 .enc_type = (ENC_TYPE_ALG_3DES | 1202 ENC_TYPE_CHAINING_CBC), 1203 .ablkcipher = { 1204 .ivsize = DES_BLOCK_SIZE, 1205 .min_keysize = 3 * DES_KEY_SIZE, 1206 .max_keysize = 3 * DES_KEY_SIZE, 1207 .setkey = n2_3des_setkey, 1208 .encrypt = n2_encrypt_chaining, 1209 .decrypt = n2_decrypt_chaining, 1210 }, 1211 }, 1212 { .name = "cfb(des3_ede)", 1213 .drv_name = "cfb-3des", 1214 .block_size = DES_BLOCK_SIZE, 1215 .enc_type = (ENC_TYPE_ALG_3DES | 1216 ENC_TYPE_CHAINING_CFB), 1217 .ablkcipher = { 1218 .min_keysize = 3 * DES_KEY_SIZE, 1219 .max_keysize = 3 * DES_KEY_SIZE, 1220 .setkey = n2_3des_setkey, 1221 .encrypt = n2_encrypt_chaining, 1222 .decrypt = n2_decrypt_chaining, 1223 }, 1224 }, 1225 /* AES: ECB CBC and CTR are supported */ 1226 { .name = "ecb(aes)", 1227 .drv_name = "ecb-aes", 1228 .block_size = AES_BLOCK_SIZE, 1229 .enc_type = (ENC_TYPE_ALG_AES128 | 1230 ENC_TYPE_CHAINING_ECB), 1231 .ablkcipher = { 1232 .min_keysize = AES_MIN_KEY_SIZE, 1233 .max_keysize = AES_MAX_KEY_SIZE, 1234 .setkey = n2_aes_setkey, 1235 .encrypt = n2_encrypt_ecb, 1236 .decrypt = n2_decrypt_ecb, 1237 }, 1238 }, 1239 { .name = "cbc(aes)", 1240 .drv_name = "cbc-aes", 1241 .block_size = AES_BLOCK_SIZE, 1242 .enc_type = (ENC_TYPE_ALG_AES128 | 1243 ENC_TYPE_CHAINING_CBC), 1244 .ablkcipher = { 1245 .ivsize = AES_BLOCK_SIZE, 1246 .min_keysize = AES_MIN_KEY_SIZE, 1247 .max_keysize = AES_MAX_KEY_SIZE, 1248 .setkey = n2_aes_setkey, 1249 .encrypt = n2_encrypt_chaining, 1250 .decrypt = n2_decrypt_chaining, 1251 }, 1252 }, 1253 { .name = "ctr(aes)", 1254 .drv_name = "ctr-aes", 1255 .block_size = AES_BLOCK_SIZE, 1256 .enc_type = (ENC_TYPE_ALG_AES128 | 1257 ENC_TYPE_CHAINING_COUNTER), 1258 .ablkcipher = { 1259 .ivsize = AES_BLOCK_SIZE, 1260 .min_keysize = AES_MIN_KEY_SIZE, 1261 .max_keysize = AES_MAX_KEY_SIZE, 1262 .setkey = n2_aes_setkey, 1263 .encrypt = n2_encrypt_chaining, 1264 .decrypt = n2_encrypt_chaining, 1265 }, 1266 }, 1267 1268 }; 1269 #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls) 1270 1271 static LIST_HEAD(cipher_algs); 1272 1273 struct n2_hash_tmpl { 1274 const char *name; 1275 const u8 *hash_zero; 1276 const u32 *hash_init; 1277 u8 hw_op_hashsz; 1278 u8 digest_size; 1279 u8 block_size; 1280 u8 auth_type; 1281 u8 hmac_type; 1282 }; 1283 1284 static const u32 md5_init[MD5_HASH_WORDS] = { 1285 cpu_to_le32(MD5_H0), 1286 cpu_to_le32(MD5_H1), 1287 cpu_to_le32(MD5_H2), 1288 cpu_to_le32(MD5_H3), 1289 }; 1290 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { 1291 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 1292 }; 1293 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { 1294 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, 1295 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, 1296 }; 1297 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { 1298 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, 1299 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, 1300 }; 1301 1302 static const struct n2_hash_tmpl hash_tmpls[] = { 1303 { .name = "md5", 1304 .hash_zero = md5_zero_message_hash, 1305 .hash_init = md5_init, 1306 .auth_type = AUTH_TYPE_MD5, 1307 .hmac_type = AUTH_TYPE_HMAC_MD5, 1308 .hw_op_hashsz = MD5_DIGEST_SIZE, 1309 .digest_size = MD5_DIGEST_SIZE, 1310 .block_size = MD5_HMAC_BLOCK_SIZE }, 1311 { .name = "sha1", 1312 .hash_zero = sha1_zero_message_hash, 1313 .hash_init = sha1_init, 1314 .auth_type = AUTH_TYPE_SHA1, 1315 .hmac_type = AUTH_TYPE_HMAC_SHA1, 1316 .hw_op_hashsz = SHA1_DIGEST_SIZE, 1317 .digest_size = SHA1_DIGEST_SIZE, 1318 .block_size = SHA1_BLOCK_SIZE }, 1319 { .name = "sha256", 1320 .hash_zero = sha256_zero_message_hash, 1321 .hash_init = sha256_init, 1322 .auth_type = AUTH_TYPE_SHA256, 1323 .hmac_type = AUTH_TYPE_HMAC_SHA256, 1324 .hw_op_hashsz = SHA256_DIGEST_SIZE, 1325 .digest_size = SHA256_DIGEST_SIZE, 1326 .block_size = SHA256_BLOCK_SIZE }, 1327 { .name = "sha224", 1328 .hash_zero = sha224_zero_message_hash, 1329 .hash_init = sha224_init, 1330 .auth_type = AUTH_TYPE_SHA256, 1331 .hmac_type = AUTH_TYPE_RESERVED, 1332 .hw_op_hashsz = SHA256_DIGEST_SIZE, 1333 .digest_size = SHA224_DIGEST_SIZE, 1334 .block_size = SHA224_BLOCK_SIZE }, 1335 }; 1336 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls) 1337 1338 static LIST_HEAD(ahash_algs); 1339 static LIST_HEAD(hmac_algs); 1340 1341 static int algs_registered; 1342 1343 static void __n2_unregister_algs(void) 1344 { 1345 struct n2_cipher_alg *cipher, *cipher_tmp; 1346 struct n2_ahash_alg *alg, *alg_tmp; 1347 struct n2_hmac_alg *hmac, *hmac_tmp; 1348 1349 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) { 1350 crypto_unregister_alg(&cipher->alg); 1351 list_del(&cipher->entry); 1352 kfree(cipher); 1353 } 1354 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) { 1355 crypto_unregister_ahash(&hmac->derived.alg); 1356 list_del(&hmac->derived.entry); 1357 kfree(hmac); 1358 } 1359 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) { 1360 crypto_unregister_ahash(&alg->alg); 1361 list_del(&alg->entry); 1362 kfree(alg); 1363 } 1364 } 1365 1366 static int n2_cipher_cra_init(struct crypto_tfm *tfm) 1367 { 1368 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context); 1369 return 0; 1370 } 1371 1372 static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl) 1373 { 1374 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1375 struct crypto_alg *alg; 1376 int err; 1377 1378 if (!p) 1379 return -ENOMEM; 1380 1381 alg = &p->alg; 1382 1383 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 1384 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name); 1385 alg->cra_priority = N2_CRA_PRIORITY; 1386 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | 1387 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC; 1388 alg->cra_blocksize = tmpl->block_size; 1389 p->enc_type = tmpl->enc_type; 1390 alg->cra_ctxsize = sizeof(struct n2_cipher_context); 1391 alg->cra_type = &crypto_ablkcipher_type; 1392 alg->cra_u.ablkcipher = tmpl->ablkcipher; 1393 alg->cra_init = n2_cipher_cra_init; 1394 alg->cra_module = THIS_MODULE; 1395 1396 list_add(&p->entry, &cipher_algs); 1397 err = crypto_register_alg(alg); 1398 if (err) { 1399 pr_err("%s alg registration failed\n", alg->cra_name); 1400 list_del(&p->entry); 1401 kfree(p); 1402 } else { 1403 pr_info("%s alg registered\n", alg->cra_name); 1404 } 1405 return err; 1406 } 1407 1408 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash) 1409 { 1410 struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1411 struct ahash_alg *ahash; 1412 struct crypto_alg *base; 1413 int err; 1414 1415 if (!p) 1416 return -ENOMEM; 1417 1418 p->child_alg = n2ahash->alg.halg.base.cra_name; 1419 memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg)); 1420 INIT_LIST_HEAD(&p->derived.entry); 1421 1422 ahash = &p->derived.alg; 1423 ahash->digest = n2_hmac_async_digest; 1424 ahash->setkey = n2_hmac_async_setkey; 1425 1426 base = &ahash->halg.base; 1427 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg); 1428 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg); 1429 1430 base->cra_ctxsize = sizeof(struct n2_hmac_ctx); 1431 base->cra_init = n2_hmac_cra_init; 1432 base->cra_exit = n2_hmac_cra_exit; 1433 1434 list_add(&p->derived.entry, &hmac_algs); 1435 err = crypto_register_ahash(ahash); 1436 if (err) { 1437 pr_err("%s alg registration failed\n", base->cra_name); 1438 list_del(&p->derived.entry); 1439 kfree(p); 1440 } else { 1441 pr_info("%s alg registered\n", base->cra_name); 1442 } 1443 return err; 1444 } 1445 1446 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl) 1447 { 1448 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL); 1449 struct hash_alg_common *halg; 1450 struct crypto_alg *base; 1451 struct ahash_alg *ahash; 1452 int err; 1453 1454 if (!p) 1455 return -ENOMEM; 1456 1457 p->hash_zero = tmpl->hash_zero; 1458 p->hash_init = tmpl->hash_init; 1459 p->auth_type = tmpl->auth_type; 1460 p->hmac_type = tmpl->hmac_type; 1461 p->hw_op_hashsz = tmpl->hw_op_hashsz; 1462 p->digest_size = tmpl->digest_size; 1463 1464 ahash = &p->alg; 1465 ahash->init = n2_hash_async_init; 1466 ahash->update = n2_hash_async_update; 1467 ahash->final = n2_hash_async_final; 1468 ahash->finup = n2_hash_async_finup; 1469 ahash->digest = n2_hash_async_digest; 1470 1471 halg = &ahash->halg; 1472 halg->digestsize = tmpl->digest_size; 1473 1474 base = &halg->base; 1475 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name); 1476 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name); 1477 base->cra_priority = N2_CRA_PRIORITY; 1478 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | 1479 CRYPTO_ALG_KERN_DRIVER_ONLY | 1480 CRYPTO_ALG_NEED_FALLBACK; 1481 base->cra_blocksize = tmpl->block_size; 1482 base->cra_ctxsize = sizeof(struct n2_hash_ctx); 1483 base->cra_module = THIS_MODULE; 1484 base->cra_init = n2_hash_cra_init; 1485 base->cra_exit = n2_hash_cra_exit; 1486 1487 list_add(&p->entry, &ahash_algs); 1488 err = crypto_register_ahash(ahash); 1489 if (err) { 1490 pr_err("%s alg registration failed\n", base->cra_name); 1491 list_del(&p->entry); 1492 kfree(p); 1493 } else { 1494 pr_info("%s alg registered\n", base->cra_name); 1495 } 1496 if (!err && p->hmac_type != AUTH_TYPE_RESERVED) 1497 err = __n2_register_one_hmac(p); 1498 return err; 1499 } 1500 1501 static int n2_register_algs(void) 1502 { 1503 int i, err = 0; 1504 1505 mutex_lock(&spu_lock); 1506 if (algs_registered++) 1507 goto out; 1508 1509 for (i = 0; i < NUM_HASH_TMPLS; i++) { 1510 err = __n2_register_one_ahash(&hash_tmpls[i]); 1511 if (err) { 1512 __n2_unregister_algs(); 1513 goto out; 1514 } 1515 } 1516 for (i = 0; i < NUM_CIPHER_TMPLS; i++) { 1517 err = __n2_register_one_cipher(&cipher_tmpls[i]); 1518 if (err) { 1519 __n2_unregister_algs(); 1520 goto out; 1521 } 1522 } 1523 1524 out: 1525 mutex_unlock(&spu_lock); 1526 return err; 1527 } 1528 1529 static void n2_unregister_algs(void) 1530 { 1531 mutex_lock(&spu_lock); 1532 if (!--algs_registered) 1533 __n2_unregister_algs(); 1534 mutex_unlock(&spu_lock); 1535 } 1536 1537 /* To map CWQ queues to interrupt sources, the hypervisor API provides 1538 * a devino. This isn't very useful to us because all of the 1539 * interrupts listed in the device_node have been translated to 1540 * Linux virtual IRQ cookie numbers. 1541 * 1542 * So we have to back-translate, going through the 'intr' and 'ino' 1543 * property tables of the n2cp MDESC node, matching it with the OF 1544 * 'interrupts' property entries, in order to to figure out which 1545 * devino goes to which already-translated IRQ. 1546 */ 1547 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip, 1548 unsigned long dev_ino) 1549 { 1550 const unsigned int *dev_intrs; 1551 unsigned int intr; 1552 int i; 1553 1554 for (i = 0; i < ip->num_intrs; i++) { 1555 if (ip->ino_table[i].ino == dev_ino) 1556 break; 1557 } 1558 if (i == ip->num_intrs) 1559 return -ENODEV; 1560 1561 intr = ip->ino_table[i].intr; 1562 1563 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL); 1564 if (!dev_intrs) 1565 return -ENODEV; 1566 1567 for (i = 0; i < dev->archdata.num_irqs; i++) { 1568 if (dev_intrs[i] == intr) 1569 return i; 1570 } 1571 1572 return -ENODEV; 1573 } 1574 1575 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip, 1576 const char *irq_name, struct spu_queue *p, 1577 irq_handler_t handler) 1578 { 1579 unsigned long herr; 1580 int index; 1581 1582 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino); 1583 if (herr) 1584 return -EINVAL; 1585 1586 index = find_devino_index(dev, ip, p->devino); 1587 if (index < 0) 1588 return index; 1589 1590 p->irq = dev->archdata.irqs[index]; 1591 1592 sprintf(p->irq_name, "%s-%d", irq_name, index); 1593 1594 return request_irq(p->irq, handler, 0, p->irq_name, p); 1595 } 1596 1597 static struct kmem_cache *queue_cache[2]; 1598 1599 static void *new_queue(unsigned long q_type) 1600 { 1601 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL); 1602 } 1603 1604 static void free_queue(void *p, unsigned long q_type) 1605 { 1606 kmem_cache_free(queue_cache[q_type - 1], p); 1607 } 1608 1609 static int queue_cache_init(void) 1610 { 1611 if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 1612 queue_cache[HV_NCS_QTYPE_MAU - 1] = 1613 kmem_cache_create("mau_queue", 1614 (MAU_NUM_ENTRIES * 1615 MAU_ENTRY_SIZE), 1616 MAU_ENTRY_SIZE, 0, NULL); 1617 if (!queue_cache[HV_NCS_QTYPE_MAU - 1]) 1618 return -ENOMEM; 1619 1620 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) 1621 queue_cache[HV_NCS_QTYPE_CWQ - 1] = 1622 kmem_cache_create("cwq_queue", 1623 (CWQ_NUM_ENTRIES * 1624 CWQ_ENTRY_SIZE), 1625 CWQ_ENTRY_SIZE, 0, NULL); 1626 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) { 1627 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1628 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; 1629 return -ENOMEM; 1630 } 1631 return 0; 1632 } 1633 1634 static void queue_cache_destroy(void) 1635 { 1636 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]); 1637 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]); 1638 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL; 1639 queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL; 1640 } 1641 1642 static long spu_queue_register_workfn(void *arg) 1643 { 1644 struct spu_qreg *qr = arg; 1645 struct spu_queue *p = qr->queue; 1646 unsigned long q_type = qr->type; 1647 unsigned long hv_ret; 1648 1649 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q), 1650 CWQ_NUM_ENTRIES, &p->qhandle); 1651 if (!hv_ret) 1652 sun4v_ncs_sethead_marker(p->qhandle, 0); 1653 1654 return hv_ret ? -EINVAL : 0; 1655 } 1656 1657 static int spu_queue_register(struct spu_queue *p, unsigned long q_type) 1658 { 1659 int cpu = cpumask_any_and(&p->sharing, cpu_online_mask); 1660 struct spu_qreg qr = { .queue = p, .type = q_type }; 1661 1662 return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr); 1663 } 1664 1665 static int spu_queue_setup(struct spu_queue *p) 1666 { 1667 int err; 1668 1669 p->q = new_queue(p->q_type); 1670 if (!p->q) 1671 return -ENOMEM; 1672 1673 err = spu_queue_register(p, p->q_type); 1674 if (err) { 1675 free_queue(p->q, p->q_type); 1676 p->q = NULL; 1677 } 1678 1679 return err; 1680 } 1681 1682 static void spu_queue_destroy(struct spu_queue *p) 1683 { 1684 unsigned long hv_ret; 1685 1686 if (!p->q) 1687 return; 1688 1689 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle); 1690 1691 if (!hv_ret) 1692 free_queue(p->q, p->q_type); 1693 } 1694 1695 static void spu_list_destroy(struct list_head *list) 1696 { 1697 struct spu_queue *p, *n; 1698 1699 list_for_each_entry_safe(p, n, list, list) { 1700 int i; 1701 1702 for (i = 0; i < NR_CPUS; i++) { 1703 if (cpu_to_cwq[i] == p) 1704 cpu_to_cwq[i] = NULL; 1705 } 1706 1707 if (p->irq) { 1708 free_irq(p->irq, p); 1709 p->irq = 0; 1710 } 1711 spu_queue_destroy(p); 1712 list_del(&p->list); 1713 kfree(p); 1714 } 1715 } 1716 1717 /* Walk the backward arcs of a CWQ 'exec-unit' node, 1718 * gathering cpu membership information. 1719 */ 1720 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc, 1721 struct platform_device *dev, 1722 u64 node, struct spu_queue *p, 1723 struct spu_queue **table) 1724 { 1725 u64 arc; 1726 1727 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) { 1728 u64 tgt = mdesc_arc_target(mdesc, arc); 1729 const char *name = mdesc_node_name(mdesc, tgt); 1730 const u64 *id; 1731 1732 if (strcmp(name, "cpu")) 1733 continue; 1734 id = mdesc_get_property(mdesc, tgt, "id", NULL); 1735 if (table[*id] != NULL) { 1736 dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n", 1737 dev->dev.of_node); 1738 return -EINVAL; 1739 } 1740 cpumask_set_cpu(*id, &p->sharing); 1741 table[*id] = p; 1742 } 1743 return 0; 1744 } 1745 1746 /* Process an 'exec-unit' MDESC node of type 'cwq'. */ 1747 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list, 1748 struct platform_device *dev, struct mdesc_handle *mdesc, 1749 u64 node, const char *iname, unsigned long q_type, 1750 irq_handler_t handler, struct spu_queue **table) 1751 { 1752 struct spu_queue *p; 1753 int err; 1754 1755 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL); 1756 if (!p) { 1757 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n", 1758 dev->dev.of_node); 1759 return -ENOMEM; 1760 } 1761 1762 cpumask_clear(&p->sharing); 1763 spin_lock_init(&p->lock); 1764 p->q_type = q_type; 1765 INIT_LIST_HEAD(&p->jobs); 1766 list_add(&p->list, list); 1767 1768 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table); 1769 if (err) 1770 return err; 1771 1772 err = spu_queue_setup(p); 1773 if (err) 1774 return err; 1775 1776 return spu_map_ino(dev, ip, iname, p, handler); 1777 } 1778 1779 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev, 1780 struct spu_mdesc_info *ip, struct list_head *list, 1781 const char *exec_name, unsigned long q_type, 1782 irq_handler_t handler, struct spu_queue **table) 1783 { 1784 int err = 0; 1785 u64 node; 1786 1787 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") { 1788 const char *type; 1789 1790 type = mdesc_get_property(mdesc, node, "type", NULL); 1791 if (!type || strcmp(type, exec_name)) 1792 continue; 1793 1794 err = handle_exec_unit(ip, list, dev, mdesc, node, 1795 exec_name, q_type, handler, table); 1796 if (err) { 1797 spu_list_destroy(list); 1798 break; 1799 } 1800 } 1801 1802 return err; 1803 } 1804 1805 static int get_irq_props(struct mdesc_handle *mdesc, u64 node, 1806 struct spu_mdesc_info *ip) 1807 { 1808 const u64 *ino; 1809 int ino_len; 1810 int i; 1811 1812 ino = mdesc_get_property(mdesc, node, "ino", &ino_len); 1813 if (!ino) { 1814 printk("NO 'ino'\n"); 1815 return -ENODEV; 1816 } 1817 1818 ip->num_intrs = ino_len / sizeof(u64); 1819 ip->ino_table = kzalloc((sizeof(struct ino_blob) * 1820 ip->num_intrs), 1821 GFP_KERNEL); 1822 if (!ip->ino_table) 1823 return -ENOMEM; 1824 1825 for (i = 0; i < ip->num_intrs; i++) { 1826 struct ino_blob *b = &ip->ino_table[i]; 1827 b->intr = i + 1; 1828 b->ino = ino[i]; 1829 } 1830 1831 return 0; 1832 } 1833 1834 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc, 1835 struct platform_device *dev, 1836 struct spu_mdesc_info *ip, 1837 const char *node_name) 1838 { 1839 const unsigned int *reg; 1840 u64 node; 1841 1842 reg = of_get_property(dev->dev.of_node, "reg", NULL); 1843 if (!reg) 1844 return -ENODEV; 1845 1846 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") { 1847 const char *name; 1848 const u64 *chdl; 1849 1850 name = mdesc_get_property(mdesc, node, "name", NULL); 1851 if (!name || strcmp(name, node_name)) 1852 continue; 1853 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL); 1854 if (!chdl || (*chdl != *reg)) 1855 continue; 1856 ip->cfg_handle = *chdl; 1857 return get_irq_props(mdesc, node, ip); 1858 } 1859 1860 return -ENODEV; 1861 } 1862 1863 static unsigned long n2_spu_hvapi_major; 1864 static unsigned long n2_spu_hvapi_minor; 1865 1866 static int n2_spu_hvapi_register(void) 1867 { 1868 int err; 1869 1870 n2_spu_hvapi_major = 2; 1871 n2_spu_hvapi_minor = 0; 1872 1873 err = sun4v_hvapi_register(HV_GRP_NCS, 1874 n2_spu_hvapi_major, 1875 &n2_spu_hvapi_minor); 1876 1877 if (!err) 1878 pr_info("Registered NCS HVAPI version %lu.%lu\n", 1879 n2_spu_hvapi_major, 1880 n2_spu_hvapi_minor); 1881 1882 return err; 1883 } 1884 1885 static void n2_spu_hvapi_unregister(void) 1886 { 1887 sun4v_hvapi_unregister(HV_GRP_NCS); 1888 } 1889 1890 static int global_ref; 1891 1892 static int grab_global_resources(void) 1893 { 1894 int err = 0; 1895 1896 mutex_lock(&spu_lock); 1897 1898 if (global_ref++) 1899 goto out; 1900 1901 err = n2_spu_hvapi_register(); 1902 if (err) 1903 goto out; 1904 1905 err = queue_cache_init(); 1906 if (err) 1907 goto out_hvapi_release; 1908 1909 err = -ENOMEM; 1910 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 1911 GFP_KERNEL); 1912 if (!cpu_to_cwq) 1913 goto out_queue_cache_destroy; 1914 1915 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS, 1916 GFP_KERNEL); 1917 if (!cpu_to_mau) 1918 goto out_free_cwq_table; 1919 1920 err = 0; 1921 1922 out: 1923 if (err) 1924 global_ref--; 1925 mutex_unlock(&spu_lock); 1926 return err; 1927 1928 out_free_cwq_table: 1929 kfree(cpu_to_cwq); 1930 cpu_to_cwq = NULL; 1931 1932 out_queue_cache_destroy: 1933 queue_cache_destroy(); 1934 1935 out_hvapi_release: 1936 n2_spu_hvapi_unregister(); 1937 goto out; 1938 } 1939 1940 static void release_global_resources(void) 1941 { 1942 mutex_lock(&spu_lock); 1943 if (!--global_ref) { 1944 kfree(cpu_to_cwq); 1945 cpu_to_cwq = NULL; 1946 1947 kfree(cpu_to_mau); 1948 cpu_to_mau = NULL; 1949 1950 queue_cache_destroy(); 1951 n2_spu_hvapi_unregister(); 1952 } 1953 mutex_unlock(&spu_lock); 1954 } 1955 1956 static struct n2_crypto *alloc_n2cp(void) 1957 { 1958 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL); 1959 1960 if (np) 1961 INIT_LIST_HEAD(&np->cwq_list); 1962 1963 return np; 1964 } 1965 1966 static void free_n2cp(struct n2_crypto *np) 1967 { 1968 kfree(np->cwq_info.ino_table); 1969 np->cwq_info.ino_table = NULL; 1970 1971 kfree(np); 1972 } 1973 1974 static void n2_spu_driver_version(void) 1975 { 1976 static int n2_spu_version_printed; 1977 1978 if (n2_spu_version_printed++ == 0) 1979 pr_info("%s", version); 1980 } 1981 1982 static int n2_crypto_probe(struct platform_device *dev) 1983 { 1984 struct mdesc_handle *mdesc; 1985 struct n2_crypto *np; 1986 int err; 1987 1988 n2_spu_driver_version(); 1989 1990 pr_info("Found N2CP at %pOF\n", dev->dev.of_node); 1991 1992 np = alloc_n2cp(); 1993 if (!np) { 1994 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n", 1995 dev->dev.of_node); 1996 return -ENOMEM; 1997 } 1998 1999 err = grab_global_resources(); 2000 if (err) { 2001 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", 2002 dev->dev.of_node); 2003 goto out_free_n2cp; 2004 } 2005 2006 mdesc = mdesc_grab(); 2007 2008 if (!mdesc) { 2009 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", 2010 dev->dev.of_node); 2011 err = -ENODEV; 2012 goto out_free_global; 2013 } 2014 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp"); 2015 if (err) { 2016 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", 2017 dev->dev.of_node); 2018 mdesc_release(mdesc); 2019 goto out_free_global; 2020 } 2021 2022 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list, 2023 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr, 2024 cpu_to_cwq); 2025 mdesc_release(mdesc); 2026 2027 if (err) { 2028 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n", 2029 dev->dev.of_node); 2030 goto out_free_global; 2031 } 2032 2033 err = n2_register_algs(); 2034 if (err) { 2035 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n", 2036 dev->dev.of_node); 2037 goto out_free_spu_list; 2038 } 2039 2040 dev_set_drvdata(&dev->dev, np); 2041 2042 return 0; 2043 2044 out_free_spu_list: 2045 spu_list_destroy(&np->cwq_list); 2046 2047 out_free_global: 2048 release_global_resources(); 2049 2050 out_free_n2cp: 2051 free_n2cp(np); 2052 2053 return err; 2054 } 2055 2056 static int n2_crypto_remove(struct platform_device *dev) 2057 { 2058 struct n2_crypto *np = dev_get_drvdata(&dev->dev); 2059 2060 n2_unregister_algs(); 2061 2062 spu_list_destroy(&np->cwq_list); 2063 2064 release_global_resources(); 2065 2066 free_n2cp(np); 2067 2068 return 0; 2069 } 2070 2071 static struct n2_mau *alloc_ncp(void) 2072 { 2073 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL); 2074 2075 if (mp) 2076 INIT_LIST_HEAD(&mp->mau_list); 2077 2078 return mp; 2079 } 2080 2081 static void free_ncp(struct n2_mau *mp) 2082 { 2083 kfree(mp->mau_info.ino_table); 2084 mp->mau_info.ino_table = NULL; 2085 2086 kfree(mp); 2087 } 2088 2089 static int n2_mau_probe(struct platform_device *dev) 2090 { 2091 struct mdesc_handle *mdesc; 2092 struct n2_mau *mp; 2093 int err; 2094 2095 n2_spu_driver_version(); 2096 2097 pr_info("Found NCP at %pOF\n", dev->dev.of_node); 2098 2099 mp = alloc_ncp(); 2100 if (!mp) { 2101 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n", 2102 dev->dev.of_node); 2103 return -ENOMEM; 2104 } 2105 2106 err = grab_global_resources(); 2107 if (err) { 2108 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n", 2109 dev->dev.of_node); 2110 goto out_free_ncp; 2111 } 2112 2113 mdesc = mdesc_grab(); 2114 2115 if (!mdesc) { 2116 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n", 2117 dev->dev.of_node); 2118 err = -ENODEV; 2119 goto out_free_global; 2120 } 2121 2122 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp"); 2123 if (err) { 2124 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n", 2125 dev->dev.of_node); 2126 mdesc_release(mdesc); 2127 goto out_free_global; 2128 } 2129 2130 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list, 2131 "mau", HV_NCS_QTYPE_MAU, mau_intr, 2132 cpu_to_mau); 2133 mdesc_release(mdesc); 2134 2135 if (err) { 2136 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n", 2137 dev->dev.of_node); 2138 goto out_free_global; 2139 } 2140 2141 dev_set_drvdata(&dev->dev, mp); 2142 2143 return 0; 2144 2145 out_free_global: 2146 release_global_resources(); 2147 2148 out_free_ncp: 2149 free_ncp(mp); 2150 2151 return err; 2152 } 2153 2154 static int n2_mau_remove(struct platform_device *dev) 2155 { 2156 struct n2_mau *mp = dev_get_drvdata(&dev->dev); 2157 2158 spu_list_destroy(&mp->mau_list); 2159 2160 release_global_resources(); 2161 2162 free_ncp(mp); 2163 2164 return 0; 2165 } 2166 2167 static const struct of_device_id n2_crypto_match[] = { 2168 { 2169 .name = "n2cp", 2170 .compatible = "SUNW,n2-cwq", 2171 }, 2172 { 2173 .name = "n2cp", 2174 .compatible = "SUNW,vf-cwq", 2175 }, 2176 { 2177 .name = "n2cp", 2178 .compatible = "SUNW,kt-cwq", 2179 }, 2180 {}, 2181 }; 2182 2183 MODULE_DEVICE_TABLE(of, n2_crypto_match); 2184 2185 static struct platform_driver n2_crypto_driver = { 2186 .driver = { 2187 .name = "n2cp", 2188 .of_match_table = n2_crypto_match, 2189 }, 2190 .probe = n2_crypto_probe, 2191 .remove = n2_crypto_remove, 2192 }; 2193 2194 static const struct of_device_id n2_mau_match[] = { 2195 { 2196 .name = "ncp", 2197 .compatible = "SUNW,n2-mau", 2198 }, 2199 { 2200 .name = "ncp", 2201 .compatible = "SUNW,vf-mau", 2202 }, 2203 { 2204 .name = "ncp", 2205 .compatible = "SUNW,kt-mau", 2206 }, 2207 {}, 2208 }; 2209 2210 MODULE_DEVICE_TABLE(of, n2_mau_match); 2211 2212 static struct platform_driver n2_mau_driver = { 2213 .driver = { 2214 .name = "ncp", 2215 .of_match_table = n2_mau_match, 2216 }, 2217 .probe = n2_mau_probe, 2218 .remove = n2_mau_remove, 2219 }; 2220 2221 static struct platform_driver * const drivers[] = { 2222 &n2_crypto_driver, 2223 &n2_mau_driver, 2224 }; 2225 2226 static int __init n2_init(void) 2227 { 2228 return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); 2229 } 2230 2231 static void __exit n2_exit(void) 2232 { 2233 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); 2234 } 2235 2236 module_init(n2_init); 2237 module_exit(n2_exit); 2238