// SPDX-License-Identifier: GPL-2.0+
/*
 * zcrypt 2.1.0
 *
 * Copyright IBM Corp. 2001, 2012
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * zcrypt_process_rescan(): Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		ap_bus_force_rescan();
		/*
		 * atomic_inc_return() both counts the rescan and reports
		 * the new count; a separate atomic_inc() here would count
		 * every rescan twice.
		 */
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
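
/*
 * Illustrative sketch of how a message type driver plugs into the
 * registry above; the name "msgtype_foo" and its callback are
 * hypothetical, the real instances live in zcrypt_msgtype6.c and
 * zcrypt_msgtype50.c:
 *
 *	static struct zcrypt_ops zcrypt_msgtype_foo_ops = {
 *		.name	     = "msgtype_foo",
 *		.variant     = 0,
 *		.rsa_modexpo = foo_rsa_modexpo,
 *	};
 *
 *	zcrypt_msgtype_register(&zcrypt_msgtype_foo_ops);
 *
 * Card/queue probing code later resolves the ops by name and variant:
 *
 *	struct zcrypt_ops *zops = zcrypt_msgtype("msgtype_foo", 0);
 */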

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
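
/*
 * How the request sprayer uses the helpers above (sketch): for each
 * eligible device the per-function speed rating ("weight") is added to
 * the device's current load, and the candidate with the lower sum wins;
 * the compare helpers return true when the new candidate is worse, so
 * the selection loops simply skip it. Example: a card with load 10 and
 * weight 5 scores 15, a second card with load 8 and the same weight
 * scores 13 and is preferred. On a tie the device that has seen fewer
 * requests so far is chosen.
 */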

/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
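
/*
 * zcrypt_rsa_crt() below is the same selection logic for private-key
 * operations in Chinese Remainder Theorem form: instead of a single
 * exponent, struct ica_rsa_modexpo_crt carries the key as the five CRT
 * components bp_key, bq_key, np_prime, nq_prime and u_mult_inv, which
 * allows the card to compute two half-size modular exponentiations.
 */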

static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	ap_init_message(&ap_msg);
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
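
/*
 * zcrypt_send_cprb() is exported since in-kernel users (for example the
 * pkey driver) submit CCA CPRBs through the same request sprayer rather
 * than through the ioctl interface.
 */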

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}

static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
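
/*
 * Illustrative userspace view of the target list consumed above (field
 * names are from the uapi struct ep11_urb, the values are made up):
 *
 *	struct ep11_target_dev tgt = { .ap_id = 2, .dom_id = 7 };
 *
 *	urb.targets_num = 1;
 *	urb.targets = (uint64_t)(unsigned long) &tgt;
 *
 * With targets_num == 0 the request may run on any online EP11 card,
 * otherwise only the listed adapter/domain pairs are eligible.
 */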

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
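
/*
 * All ioctl paths below share one error handling pattern: a request is
 * retried as long as the chosen queue reports -EAGAIN, and a final
 * -ENODEV triggers one forced bus rescan (zcrypt_process_rescan())
 * followed by a single further retry, so that requests survive a hot
 * (un)plug of crypto adapters.
 */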

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSECSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		/* reqcnt is a pointer here, so copy the full array length;
		 * sizeof(reqcnt) would only be the size of the pointer */
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t b_key;
	compat_uptr_t n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t inputdata;
	unsigned int inputdatalength;
	compat_uptr_t outputdata;
	unsigned int outputdatalength;
	compat_uptr_t bp_key;
	compat_uptr_t bq_key;
	compat_uptr_t np_prime;
	compat_uptr_t nq_prime;
	compat_uptr_t u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short agent_ID;
	unsigned int user_defined;
	unsigned short request_ID;
	unsigned int request_control_blk_length;
	unsigned char padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_control_blk_addr;
	unsigned int request_data_length;
	char padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t request_data_address;
	unsigned int reply_control_blk_length;
	char padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_control_blk_addr;
	unsigned int reply_data_length;
	char padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t reply_data_addr;
	unsigned short priority_window;
	unsigned int status;
} __packed;

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
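
/*
 * Illustrative userspace sketch for the misc device defined below
 * (error handling omitted; buffers and key parts must be set up as
 * required by struct ica_rsa_modexpo):
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex = { ... };
 *
 *	if (ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *		;	// mex.outputdatalength result bytes are valid
 *	close(fd);
 */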

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner = THIS_MODULE,
	.read = zcrypt_read,
	.write = zcrypt_write,
	.unlocked_ioctl = zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = zcrypt_compat_ioctl,
#endif
	.open = zcrypt_open,
	.release = zcrypt_release,
	.llseek = no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "z90crypt",
	.fops = &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}
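
/*
 * Buffering scheme used above (sketch): one zeroed page is filled per
 * card request; zcrypt_rng() returns the number of random bytes
 * obtained, which is converted into a count of u32 words and then
 * consumed from the end of the buffer, one word per ->data_read call,
 * until the index reaches zero and the page is refilled.
 */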

static struct hwrng zcrypt_rng_dev = {
	.name = "zcrypt",
	.data_read = zcrypt_rng_data_read,
	.quality = 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);