1 // SPDX-License-Identifier: GPL-2.0+ 2 /* 3 * Copyright IBM Corp. 2001, 2018 4 * Author(s): Robert Burroughs 5 * Eric Rossman (edrossma@us.ibm.com) 6 * Cornelia Huck <cornelia.huck@de.ibm.com> 7 * 8 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 9 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 10 * Ralph Wuerthner <rwuerthn@de.ibm.com> 11 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 12 * Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com> 13 */ 14 15 #include <linux/module.h> 16 #include <linux/init.h> 17 #include <linux/interrupt.h> 18 #include <linux/miscdevice.h> 19 #include <linux/fs.h> 20 #include <linux/compat.h> 21 #include <linux/slab.h> 22 #include <linux/atomic.h> 23 #include <linux/uaccess.h> 24 #include <linux/hw_random.h> 25 #include <linux/debugfs.h> 26 #include <linux/cdev.h> 27 #include <linux/ctype.h> 28 #include <linux/capability.h> 29 #include <asm/debug.h> 30 31 #define CREATE_TRACE_POINTS 32 #include <asm/trace/zcrypt.h> 33 34 #include "zcrypt_api.h" 35 #include "zcrypt_debug.h" 36 37 #include "zcrypt_msgtype6.h" 38 #include "zcrypt_msgtype50.h" 39 #include "zcrypt_ccamisc.h" 40 #include "zcrypt_ep11misc.h" 41 42 /* 43 * Module description. 44 */ 45 MODULE_AUTHOR("IBM Corporation"); 46 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 47 "Copyright IBM Corp. 2001, 2012"); 48 MODULE_LICENSE("GPL"); 49 50 /* 51 * zcrypt tracepoint functions 52 */ 53 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 54 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 55 56 static int zcrypt_hwrng_seed = 1; 57 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, 0440); 58 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 59 60 DEFINE_SPINLOCK(zcrypt_list_lock); 61 LIST_HEAD(zcrypt_card_list); 62 63 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 64 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 65 66 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 67 EXPORT_SYMBOL(zcrypt_rescan_req); 68 69 static LIST_HEAD(zcrypt_ops_list); 70 71 /* Zcrypt related debug feature stuff. */ 72 debug_info_t *zcrypt_dbf_info; 73 74 /* 75 * Process a rescan of the transport layer. 76 * 77 * Returns 1, if the rescan has been processed, otherwise 0. 78 */ 79 static inline int zcrypt_process_rescan(void) 80 { 81 if (atomic_read(&zcrypt_rescan_req)) { 82 atomic_set(&zcrypt_rescan_req, 0); 83 atomic_inc(&zcrypt_rescan_count); 84 ap_bus_force_rescan(); 85 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n", 86 atomic_inc_return(&zcrypt_rescan_count)); 87 return 1; 88 } 89 return 0; 90 } 91 92 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 93 { 94 list_add_tail(&zops->list, &zcrypt_ops_list); 95 } 96 97 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 98 { 99 list_del_init(&zops->list); 100 } 101 102 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 103 { 104 struct zcrypt_ops *zops; 105 106 list_for_each_entry(zops, &zcrypt_ops_list, list) 107 if ((zops->variant == variant) && 108 (!strncmp(zops->name, name, sizeof(zops->name)))) 109 return zops; 110 return NULL; 111 } 112 EXPORT_SYMBOL(zcrypt_msgtype); 113 114 /* 115 * Multi device nodes extension functions. 116 */ 117 118 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 119 120 struct zcdn_device; 121 122 static struct class *zcrypt_class; 123 static dev_t zcrypt_devt; 124 static struct cdev zcrypt_cdev; 125 126 struct zcdn_device { 127 struct device device; 128 struct ap_perms perms; 129 }; 130 131 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device) 132 133 #define ZCDN_MAX_NAME 32 134 135 static int zcdn_create(const char *name); 136 static int zcdn_destroy(const char *name); 137 138 /* 139 * Find zcdn device by name. 140 * Returns reference to the zcdn device which needs to be released 141 * with put_device() after use. 142 */ 143 static inline struct zcdn_device *find_zcdndev_by_name(const char *name) 144 { 145 struct device *dev = class_find_device_by_name(zcrypt_class, name); 146 147 return dev ? to_zcdn_dev(dev) : NULL; 148 } 149 150 /* 151 * Find zcdn device by devt value. 152 * Returns reference to the zcdn device which needs to be released 153 * with put_device() after use. 154 */ 155 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt) 156 { 157 struct device *dev = class_find_device_by_devt(zcrypt_class, devt); 158 159 return dev ? to_zcdn_dev(dev) : NULL; 160 } 161 162 static ssize_t ioctlmask_show(struct device *dev, 163 struct device_attribute *attr, 164 char *buf) 165 { 166 int i, rc; 167 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 168 169 if (mutex_lock_interruptible(&ap_perms_mutex)) 170 return -ERESTARTSYS; 171 172 buf[0] = '0'; 173 buf[1] = 'x'; 174 for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++) 175 snprintf(buf + 2 + 2 * i * sizeof(long), 176 PAGE_SIZE - 2 - 2 * i * sizeof(long), 177 "%016lx", zcdndev->perms.ioctlm[i]); 178 buf[2 + 2 * i * sizeof(long)] = '\n'; 179 buf[2 + 2 * i * sizeof(long) + 1] = '\0'; 180 rc = 2 + 2 * i * sizeof(long) + 1; 181 182 mutex_unlock(&ap_perms_mutex); 183 184 return rc; 185 } 186 187 static ssize_t ioctlmask_store(struct device *dev, 188 struct device_attribute *attr, 189 const char *buf, size_t count) 190 { 191 int rc; 192 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 193 194 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm, 195 AP_IOCTLS, &ap_perms_mutex); 196 if (rc) 197 return rc; 198 199 return count; 200 } 201 202 static DEVICE_ATTR_RW(ioctlmask); 203 204 static ssize_t apmask_show(struct device *dev, 205 struct device_attribute *attr, 206 char *buf) 207 { 208 int i, rc; 209 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 210 211 if (mutex_lock_interruptible(&ap_perms_mutex)) 212 return -ERESTARTSYS; 213 214 buf[0] = '0'; 215 buf[1] = 'x'; 216 for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++) 217 snprintf(buf + 2 + 2 * i * sizeof(long), 218 PAGE_SIZE - 2 - 2 * i * sizeof(long), 219 "%016lx", zcdndev->perms.apm[i]); 220 buf[2 + 2 * i * sizeof(long)] = '\n'; 221 buf[2 + 2 * i * sizeof(long) + 1] = '\0'; 222 rc = 2 + 2 * i * sizeof(long) + 1; 223 224 mutex_unlock(&ap_perms_mutex); 225 226 return rc; 227 } 228 229 static ssize_t apmask_store(struct device *dev, 230 struct device_attribute *attr, 231 const char *buf, size_t count) 232 { 233 int rc; 234 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 235 236 rc = ap_parse_mask_str(buf, zcdndev->perms.apm, 237 AP_DEVICES, &ap_perms_mutex); 238 if (rc) 239 return rc; 240 241 return count; 242 } 243 244 static DEVICE_ATTR_RW(apmask); 245 246 static ssize_t aqmask_show(struct device *dev, 247 struct device_attribute *attr, 248 char *buf) 249 { 250 int i, rc; 251 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 252 253 if (mutex_lock_interruptible(&ap_perms_mutex)) 254 return -ERESTARTSYS; 255 256 buf[0] = '0'; 257 buf[1] = 'x'; 258 for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++) 259 snprintf(buf + 2 + 2 * i * sizeof(long), 260 PAGE_SIZE - 2 - 2 * i * sizeof(long), 261 "%016lx", zcdndev->perms.aqm[i]); 262 buf[2 + 2 * i * sizeof(long)] = '\n'; 263 buf[2 + 2 * i * sizeof(long) + 1] = '\0'; 264 rc = 2 + 2 * i * sizeof(long) + 1; 265 266 mutex_unlock(&ap_perms_mutex); 267 268 return rc; 269 } 270 271 static ssize_t aqmask_store(struct device *dev, 272 struct device_attribute *attr, 273 const char *buf, size_t count) 274 { 275 int rc; 276 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 277 278 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm, 279 AP_DOMAINS, &ap_perms_mutex); 280 if (rc) 281 return rc; 282 283 return count; 284 } 285 286 static DEVICE_ATTR_RW(aqmask); 287 288 static struct attribute *zcdn_dev_attrs[] = { 289 &dev_attr_ioctlmask.attr, 290 &dev_attr_apmask.attr, 291 &dev_attr_aqmask.attr, 292 NULL 293 }; 294 295 static struct attribute_group zcdn_dev_attr_group = { 296 .attrs = zcdn_dev_attrs 297 }; 298 299 static const struct attribute_group *zcdn_dev_attr_groups[] = { 300 &zcdn_dev_attr_group, 301 NULL 302 }; 303 304 static ssize_t zcdn_create_store(struct class *class, 305 struct class_attribute *attr, 306 const char *buf, size_t count) 307 { 308 int rc; 309 char name[ZCDN_MAX_NAME]; 310 311 strncpy(name, skip_spaces(buf), sizeof(name)); 312 name[sizeof(name) - 1] = '\0'; 313 314 rc = zcdn_create(strim(name)); 315 316 return rc ? rc : count; 317 } 318 319 static const struct class_attribute class_attr_zcdn_create = 320 __ATTR(create, 0600, NULL, zcdn_create_store); 321 322 static ssize_t zcdn_destroy_store(struct class *class, 323 struct class_attribute *attr, 324 const char *buf, size_t count) 325 { 326 int rc; 327 char name[ZCDN_MAX_NAME]; 328 329 strncpy(name, skip_spaces(buf), sizeof(name)); 330 name[sizeof(name) - 1] = '\0'; 331 332 rc = zcdn_destroy(strim(name)); 333 334 return rc ? rc : count; 335 } 336 337 static const struct class_attribute class_attr_zcdn_destroy = 338 __ATTR(destroy, 0600, NULL, zcdn_destroy_store); 339 340 static void zcdn_device_release(struct device *dev) 341 { 342 struct zcdn_device *zcdndev = to_zcdn_dev(dev); 343 344 ZCRYPT_DBF(DBF_INFO, "releasing zcdn device %d:%d\n", 345 MAJOR(dev->devt), MINOR(dev->devt)); 346 347 kfree(zcdndev); 348 } 349 350 static int zcdn_create(const char *name) 351 { 352 dev_t devt; 353 int i, rc = 0; 354 char nodename[ZCDN_MAX_NAME]; 355 struct zcdn_device *zcdndev; 356 357 if (mutex_lock_interruptible(&ap_perms_mutex)) 358 return -ERESTARTSYS; 359 360 /* check if device node with this name already exists */ 361 if (name[0]) { 362 zcdndev = find_zcdndev_by_name(name); 363 if (zcdndev) { 364 put_device(&zcdndev->device); 365 rc = -EEXIST; 366 goto unlockout; 367 } 368 } 369 370 /* find an unused minor number */ 371 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 372 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 373 zcdndev = find_zcdndev_by_devt(devt); 374 if (zcdndev) 375 put_device(&zcdndev->device); 376 else 377 break; 378 } 379 if (i == ZCRYPT_MAX_MINOR_NODES) { 380 rc = -ENOSPC; 381 goto unlockout; 382 } 383 384 /* alloc and prepare a new zcdn device */ 385 zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL); 386 if (!zcdndev) { 387 rc = -ENOMEM; 388 goto unlockout; 389 } 390 zcdndev->device.release = zcdn_device_release; 391 zcdndev->device.class = zcrypt_class; 392 zcdndev->device.devt = devt; 393 zcdndev->device.groups = zcdn_dev_attr_groups; 394 if (name[0]) 395 strncpy(nodename, name, sizeof(nodename)); 396 else 397 snprintf(nodename, sizeof(nodename), 398 ZCRYPT_NAME "_%d", (int) MINOR(devt)); 399 nodename[sizeof(nodename)-1] = '\0'; 400 if (dev_set_name(&zcdndev->device, nodename)) { 401 rc = -EINVAL; 402 goto unlockout; 403 } 404 rc = device_register(&zcdndev->device); 405 if (rc) { 406 put_device(&zcdndev->device); 407 goto unlockout; 408 } 409 410 ZCRYPT_DBF(DBF_INFO, "created zcdn device %d:%d\n", 411 MAJOR(devt), MINOR(devt)); 412 413 unlockout: 414 mutex_unlock(&ap_perms_mutex); 415 return rc; 416 } 417 418 static int zcdn_destroy(const char *name) 419 { 420 int rc = 0; 421 struct zcdn_device *zcdndev; 422 423 if (mutex_lock_interruptible(&ap_perms_mutex)) 424 return -ERESTARTSYS; 425 426 /* try to find this zcdn device */ 427 zcdndev = find_zcdndev_by_name(name); 428 if (!zcdndev) { 429 rc = -ENOENT; 430 goto unlockout; 431 } 432 433 /* 434 * The zcdn device is not hard destroyed. It is subject to 435 * reference counting and thus just needs to be unregistered. 436 */ 437 put_device(&zcdndev->device); 438 device_unregister(&zcdndev->device); 439 440 unlockout: 441 mutex_unlock(&ap_perms_mutex); 442 return rc; 443 } 444 445 static void zcdn_destroy_all(void) 446 { 447 int i; 448 dev_t devt; 449 struct zcdn_device *zcdndev; 450 451 mutex_lock(&ap_perms_mutex); 452 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) { 453 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i); 454 zcdndev = find_zcdndev_by_devt(devt); 455 if (zcdndev) { 456 put_device(&zcdndev->device); 457 device_unregister(&zcdndev->device); 458 } 459 } 460 mutex_unlock(&ap_perms_mutex); 461 } 462 463 #endif 464 465 /* 466 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 467 * 468 * This function is not supported beyond zcrypt 1.3.1. 469 */ 470 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 471 size_t count, loff_t *f_pos) 472 { 473 return -EPERM; 474 } 475 476 /* 477 * zcrypt_write(): Not allowed. 478 * 479 * Write is is not allowed 480 */ 481 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 482 size_t count, loff_t *f_pos) 483 { 484 return -EPERM; 485 } 486 487 /* 488 * zcrypt_open(): Count number of users. 489 * 490 * Device open function to count number of users. 491 */ 492 static int zcrypt_open(struct inode *inode, struct file *filp) 493 { 494 struct ap_perms *perms = &ap_perms; 495 496 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 497 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 498 struct zcdn_device *zcdndev; 499 500 if (mutex_lock_interruptible(&ap_perms_mutex)) 501 return -ERESTARTSYS; 502 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 503 /* find returns a reference, no get_device() needed */ 504 mutex_unlock(&ap_perms_mutex); 505 if (zcdndev) 506 perms = &zcdndev->perms; 507 } 508 #endif 509 filp->private_data = (void *) perms; 510 511 atomic_inc(&zcrypt_open_count); 512 return stream_open(inode, filp); 513 } 514 515 /* 516 * zcrypt_release(): Count number of users. 517 * 518 * Device close function to count number of users. 519 */ 520 static int zcrypt_release(struct inode *inode, struct file *filp) 521 { 522 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 523 if (filp->f_inode->i_cdev == &zcrypt_cdev) { 524 struct zcdn_device *zcdndev; 525 526 mutex_lock(&ap_perms_mutex); 527 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev); 528 mutex_unlock(&ap_perms_mutex); 529 if (zcdndev) { 530 /* 2 puts here: one for find, one for open */ 531 put_device(&zcdndev->device); 532 put_device(&zcdndev->device); 533 } 534 } 535 #endif 536 537 atomic_dec(&zcrypt_open_count); 538 return 0; 539 } 540 541 static inline int zcrypt_check_ioctl(struct ap_perms *perms, 542 unsigned int cmd) 543 { 544 int rc = -EPERM; 545 int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT; 546 547 if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) { 548 if (test_bit_inv(ioctlnr, perms->ioctlm)) 549 rc = 0; 550 } 551 552 if (rc) 553 ZCRYPT_DBF(DBF_WARN, 554 "ioctl check failed: ioctlnr=0x%04x rc=%d\n", 555 ioctlnr, rc); 556 557 return rc; 558 } 559 560 static inline bool zcrypt_check_card(struct ap_perms *perms, int card) 561 { 562 return test_bit_inv(card, perms->apm) ? true : false; 563 } 564 565 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue) 566 { 567 return test_bit_inv(queue, perms->aqm) ? true : false; 568 } 569 570 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 571 struct zcrypt_queue *zq, 572 struct module **pmod, 573 unsigned int weight) 574 { 575 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner)) 576 return NULL; 577 zcrypt_queue_get(zq); 578 get_device(&zq->queue->ap_dev.device); 579 atomic_add(weight, &zc->load); 580 atomic_add(weight, &zq->load); 581 zq->request_count++; 582 *pmod = zq->queue->ap_dev.device.driver->owner; 583 return zq; 584 } 585 586 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 587 struct zcrypt_queue *zq, 588 struct module *mod, 589 unsigned int weight) 590 { 591 zq->request_count--; 592 atomic_sub(weight, &zc->load); 593 atomic_sub(weight, &zq->load); 594 put_device(&zq->queue->ap_dev.device); 595 zcrypt_queue_put(zq); 596 module_put(mod); 597 } 598 599 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 600 struct zcrypt_card *pref_zc, 601 unsigned int weight, 602 unsigned int pref_weight) 603 { 604 if (!pref_zc) 605 return true; 606 weight += atomic_read(&zc->load); 607 pref_weight += atomic_read(&pref_zc->load); 608 if (weight == pref_weight) 609 return atomic64_read(&zc->card->total_request_count) < 610 atomic64_read(&pref_zc->card->total_request_count); 611 return weight < pref_weight; 612 } 613 614 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 615 struct zcrypt_queue *pref_zq, 616 unsigned int weight, 617 unsigned int pref_weight) 618 { 619 if (!pref_zq) 620 return true; 621 weight += atomic_read(&zq->load); 622 pref_weight += atomic_read(&pref_zq->load); 623 if (weight == pref_weight) 624 return zq->queue->total_request_count < 625 pref_zq->queue->total_request_count; 626 return weight < pref_weight; 627 } 628 629 /* 630 * zcrypt ioctls. 631 */ 632 static long zcrypt_rsa_modexpo(struct ap_perms *perms, 633 struct zcrypt_track *tr, 634 struct ica_rsa_modexpo *mex) 635 { 636 struct zcrypt_card *zc, *pref_zc; 637 struct zcrypt_queue *zq, *pref_zq; 638 struct ap_message ap_msg; 639 unsigned int wgt = 0, pref_wgt = 0; 640 unsigned int func_code; 641 int cpen, qpen, qid = 0, rc = -ENODEV; 642 struct module *mod; 643 644 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 645 646 ap_init_message(&ap_msg); 647 648 #ifdef CONFIG_ZCRYPT_DEBUG 649 if (tr && tr->fi.cmd) 650 ap_msg.fi.cmd = tr->fi.cmd; 651 #endif 652 653 if (mex->outputdatalength < mex->inputdatalength) { 654 func_code = 0; 655 rc = -EINVAL; 656 goto out; 657 } 658 659 /* 660 * As long as outputdatalength is big enough, we can set the 661 * outputdatalength equal to the inputdatalength, since that is the 662 * number of bytes we will copy in any case 663 */ 664 mex->outputdatalength = mex->inputdatalength; 665 666 rc = get_rsa_modex_fc(mex, &func_code); 667 if (rc) 668 goto out; 669 670 pref_zc = NULL; 671 pref_zq = NULL; 672 spin_lock(&zcrypt_list_lock); 673 for_each_zcrypt_card(zc) { 674 /* Check for useable accelarator or CCA card */ 675 if (!zc->online || !zc->card->config || 676 !(zc->card->functions & 0x18000000)) 677 continue; 678 /* Check for size limits */ 679 if (zc->min_mod_size > mex->inputdatalength || 680 zc->max_mod_size < mex->inputdatalength) 681 continue; 682 /* check if device node has admission for this card */ 683 if (!zcrypt_check_card(perms, zc->card->id)) 684 continue; 685 /* get weight index of the card device */ 686 wgt = zc->speed_rating[func_code]; 687 /* penalty if this msg was previously sent via this card */ 688 cpen = (tr && tr->again_counter && tr->last_qid && 689 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 690 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 691 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 692 continue; 693 for_each_zcrypt_queue(zq, zc) { 694 /* check if device is useable and eligible */ 695 if (!zq->online || !zq->ops->rsa_modexpo || 696 !zq->queue->config) 697 continue; 698 /* check if device node has admission for this queue */ 699 if (!zcrypt_check_queue(perms, 700 AP_QID_QUEUE(zq->queue->qid))) 701 continue; 702 /* penalty if the msg was previously sent at this qid */ 703 qpen = (tr && tr->again_counter && tr->last_qid && 704 tr->last_qid == zq->queue->qid) ? 705 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 706 if (!zcrypt_queue_compare(zq, pref_zq, 707 wgt + cpen + qpen, pref_wgt)) 708 continue; 709 pref_zc = zc; 710 pref_zq = zq; 711 pref_wgt = wgt + cpen + qpen; 712 } 713 } 714 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 715 spin_unlock(&zcrypt_list_lock); 716 717 if (!pref_zq) { 718 rc = -ENODEV; 719 goto out; 720 } 721 722 qid = pref_zq->queue->qid; 723 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg); 724 725 spin_lock(&zcrypt_list_lock); 726 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 727 spin_unlock(&zcrypt_list_lock); 728 729 out: 730 ap_release_message(&ap_msg); 731 if (tr) { 732 tr->last_rc = rc; 733 tr->last_qid = qid; 734 } 735 trace_s390_zcrypt_rep(mex, func_code, rc, 736 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 737 return rc; 738 } 739 740 static long zcrypt_rsa_crt(struct ap_perms *perms, 741 struct zcrypt_track *tr, 742 struct ica_rsa_modexpo_crt *crt) 743 { 744 struct zcrypt_card *zc, *pref_zc; 745 struct zcrypt_queue *zq, *pref_zq; 746 struct ap_message ap_msg; 747 unsigned int wgt = 0, pref_wgt = 0; 748 unsigned int func_code; 749 int cpen, qpen, qid = 0, rc = -ENODEV; 750 struct module *mod; 751 752 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 753 754 ap_init_message(&ap_msg); 755 756 #ifdef CONFIG_ZCRYPT_DEBUG 757 if (tr && tr->fi.cmd) 758 ap_msg.fi.cmd = tr->fi.cmd; 759 #endif 760 761 if (crt->outputdatalength < crt->inputdatalength) { 762 func_code = 0; 763 rc = -EINVAL; 764 goto out; 765 } 766 767 /* 768 * As long as outputdatalength is big enough, we can set the 769 * outputdatalength equal to the inputdatalength, since that is the 770 * number of bytes we will copy in any case 771 */ 772 crt->outputdatalength = crt->inputdatalength; 773 774 rc = get_rsa_crt_fc(crt, &func_code); 775 if (rc) 776 goto out; 777 778 pref_zc = NULL; 779 pref_zq = NULL; 780 spin_lock(&zcrypt_list_lock); 781 for_each_zcrypt_card(zc) { 782 /* Check for useable accelarator or CCA card */ 783 if (!zc->online || !zc->card->config || 784 !(zc->card->functions & 0x18000000)) 785 continue; 786 /* Check for size limits */ 787 if (zc->min_mod_size > crt->inputdatalength || 788 zc->max_mod_size < crt->inputdatalength) 789 continue; 790 /* check if device node has admission for this card */ 791 if (!zcrypt_check_card(perms, zc->card->id)) 792 continue; 793 /* get weight index of the card device */ 794 wgt = zc->speed_rating[func_code]; 795 /* penalty if this msg was previously sent via this card */ 796 cpen = (tr && tr->again_counter && tr->last_qid && 797 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 798 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 799 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 800 continue; 801 for_each_zcrypt_queue(zq, zc) { 802 /* check if device is useable and eligible */ 803 if (!zq->online || !zq->ops->rsa_modexpo_crt || 804 !zq->queue->config) 805 continue; 806 /* check if device node has admission for this queue */ 807 if (!zcrypt_check_queue(perms, 808 AP_QID_QUEUE(zq->queue->qid))) 809 continue; 810 /* penalty if the msg was previously sent at this qid */ 811 qpen = (tr && tr->again_counter && tr->last_qid && 812 tr->last_qid == zq->queue->qid) ? 813 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 814 if (!zcrypt_queue_compare(zq, pref_zq, 815 wgt + cpen + qpen, pref_wgt)) 816 continue; 817 pref_zc = zc; 818 pref_zq = zq; 819 pref_wgt = wgt + cpen + qpen; 820 } 821 } 822 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 823 spin_unlock(&zcrypt_list_lock); 824 825 if (!pref_zq) { 826 rc = -ENODEV; 827 goto out; 828 } 829 830 qid = pref_zq->queue->qid; 831 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg); 832 833 spin_lock(&zcrypt_list_lock); 834 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 835 spin_unlock(&zcrypt_list_lock); 836 837 out: 838 ap_release_message(&ap_msg); 839 if (tr) { 840 tr->last_rc = rc; 841 tr->last_qid = qid; 842 } 843 trace_s390_zcrypt_rep(crt, func_code, rc, 844 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 845 return rc; 846 } 847 848 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms, 849 struct zcrypt_track *tr, 850 struct ica_xcRB *xcRB) 851 { 852 struct zcrypt_card *zc, *pref_zc; 853 struct zcrypt_queue *zq, *pref_zq; 854 struct ap_message ap_msg; 855 unsigned int wgt = 0, pref_wgt = 0; 856 unsigned int func_code; 857 unsigned short *domain, tdom; 858 int cpen, qpen, qid = 0, rc = -ENODEV; 859 struct module *mod; 860 861 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 862 863 xcRB->status = 0; 864 ap_init_message(&ap_msg); 865 866 #ifdef CONFIG_ZCRYPT_DEBUG 867 if (tr && tr->fi.cmd) 868 ap_msg.fi.cmd = tr->fi.cmd; 869 if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) { 870 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n", 871 __func__, tr->fi.cmd); 872 xcRB->agent_ID = 0x4646; 873 } 874 #endif 875 876 rc = get_cprb_fc(userspace, xcRB, &ap_msg, &func_code, &domain); 877 if (rc) 878 goto out; 879 880 /* 881 * If a valid target domain is set and this domain is NOT a usage 882 * domain but a control only domain, use the default domain as target. 883 */ 884 tdom = *domain; 885 if (tdom < AP_DOMAINS && 886 !ap_test_config_usage_domain(tdom) && 887 ap_test_config_ctrl_domain(tdom) && 888 ap_domain_index >= 0) 889 tdom = ap_domain_index; 890 891 pref_zc = NULL; 892 pref_zq = NULL; 893 spin_lock(&zcrypt_list_lock); 894 for_each_zcrypt_card(zc) { 895 /* Check for useable CCA card */ 896 if (!zc->online || !zc->card->config || 897 !(zc->card->functions & 0x10000000)) 898 continue; 899 /* Check for user selected CCA card */ 900 if (xcRB->user_defined != AUTOSELECT && 901 xcRB->user_defined != zc->card->id) 902 continue; 903 /* check if request size exceeds card max msg size */ 904 if (ap_msg.len > zc->card->maxmsgsize) 905 continue; 906 /* check if device node has admission for this card */ 907 if (!zcrypt_check_card(perms, zc->card->id)) 908 continue; 909 /* get weight index of the card device */ 910 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 911 /* penalty if this msg was previously sent via this card */ 912 cpen = (tr && tr->again_counter && tr->last_qid && 913 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 914 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 915 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 916 continue; 917 for_each_zcrypt_queue(zq, zc) { 918 /* check for device useable and eligible */ 919 if (!zq->online || 920 !zq->ops->send_cprb || 921 !zq->queue->config || 922 (tdom != AUTOSEL_DOM && 923 tdom != AP_QID_QUEUE(zq->queue->qid))) 924 continue; 925 /* check if device node has admission for this queue */ 926 if (!zcrypt_check_queue(perms, 927 AP_QID_QUEUE(zq->queue->qid))) 928 continue; 929 /* penalty if the msg was previously sent at this qid */ 930 qpen = (tr && tr->again_counter && tr->last_qid && 931 tr->last_qid == zq->queue->qid) ? 932 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 933 if (!zcrypt_queue_compare(zq, pref_zq, 934 wgt + cpen + qpen, pref_wgt)) 935 continue; 936 pref_zc = zc; 937 pref_zq = zq; 938 pref_wgt = wgt + cpen + qpen; 939 } 940 } 941 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 942 spin_unlock(&zcrypt_list_lock); 943 944 if (!pref_zq) { 945 rc = -ENODEV; 946 goto out; 947 } 948 949 /* in case of auto select, provide the correct domain */ 950 qid = pref_zq->queue->qid; 951 if (*domain == AUTOSEL_DOM) 952 *domain = AP_QID_QUEUE(qid); 953 954 #ifdef CONFIG_ZCRYPT_DEBUG 955 if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) { 956 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n", 957 __func__, tr->fi.cmd); 958 *domain = 99; 959 } 960 #endif 961 962 rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcRB, &ap_msg); 963 964 spin_lock(&zcrypt_list_lock); 965 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 966 spin_unlock(&zcrypt_list_lock); 967 968 out: 969 ap_release_message(&ap_msg); 970 if (tr) { 971 tr->last_rc = rc; 972 tr->last_qid = qid; 973 } 974 trace_s390_zcrypt_rep(xcRB, func_code, rc, 975 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 976 return rc; 977 } 978 979 long zcrypt_send_cprb(struct ica_xcRB *xcRB) 980 { 981 return _zcrypt_send_cprb(false, &ap_perms, NULL, xcRB); 982 } 983 EXPORT_SYMBOL(zcrypt_send_cprb); 984 985 static bool is_desired_ep11_card(unsigned int dev_id, 986 unsigned short target_num, 987 struct ep11_target_dev *targets) 988 { 989 while (target_num-- > 0) { 990 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP) 991 return true; 992 targets++; 993 } 994 return false; 995 } 996 997 static bool is_desired_ep11_queue(unsigned int dev_qid, 998 unsigned short target_num, 999 struct ep11_target_dev *targets) 1000 { 1001 int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid); 1002 1003 while (target_num-- > 0) { 1004 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) && 1005 (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM)) 1006 return true; 1007 targets++; 1008 } 1009 return false; 1010 } 1011 1012 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms, 1013 struct zcrypt_track *tr, 1014 struct ep11_urb *xcrb) 1015 { 1016 struct zcrypt_card *zc, *pref_zc; 1017 struct zcrypt_queue *zq, *pref_zq; 1018 struct ep11_target_dev *targets; 1019 unsigned short target_num; 1020 unsigned int wgt = 0, pref_wgt = 0; 1021 unsigned int func_code; 1022 struct ap_message ap_msg; 1023 int cpen, qpen, qid = 0, rc = -ENODEV; 1024 struct module *mod; 1025 1026 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 1027 1028 ap_init_message(&ap_msg); 1029 1030 #ifdef CONFIG_ZCRYPT_DEBUG 1031 if (tr && tr->fi.cmd) 1032 ap_msg.fi.cmd = tr->fi.cmd; 1033 #endif 1034 1035 target_num = (unsigned short) xcrb->targets_num; 1036 1037 /* empty list indicates autoselect (all available targets) */ 1038 targets = NULL; 1039 if (target_num != 0) { 1040 struct ep11_target_dev __user *uptr; 1041 1042 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 1043 if (!targets) { 1044 func_code = 0; 1045 rc = -ENOMEM; 1046 goto out; 1047 } 1048 1049 uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 1050 if (z_copy_from_user(userspace, targets, uptr, 1051 target_num * sizeof(*targets))) { 1052 func_code = 0; 1053 rc = -EFAULT; 1054 goto out_free; 1055 } 1056 } 1057 1058 rc = get_ep11cprb_fc(userspace, xcrb, &ap_msg, &func_code); 1059 if (rc) 1060 goto out_free; 1061 1062 pref_zc = NULL; 1063 pref_zq = NULL; 1064 spin_lock(&zcrypt_list_lock); 1065 for_each_zcrypt_card(zc) { 1066 /* Check for useable EP11 card */ 1067 if (!zc->online || !zc->card->config || 1068 !(zc->card->functions & 0x04000000)) 1069 continue; 1070 /* Check for user selected EP11 card */ 1071 if (targets && 1072 !is_desired_ep11_card(zc->card->id, target_num, targets)) 1073 continue; 1074 /* check if request size exceeds card max msg size */ 1075 if (ap_msg.len > zc->card->maxmsgsize) 1076 continue; 1077 /* check if device node has admission for this card */ 1078 if (!zcrypt_check_card(perms, zc->card->id)) 1079 continue; 1080 /* get weight index of the card device */ 1081 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 1082 /* penalty if this msg was previously sent via this card */ 1083 cpen = (tr && tr->again_counter && tr->last_qid && 1084 AP_QID_CARD(tr->last_qid) == zc->card->id) ? 1085 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0; 1086 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt)) 1087 continue; 1088 for_each_zcrypt_queue(zq, zc) { 1089 /* check if device is useable and eligible */ 1090 if (!zq->online || 1091 !zq->ops->send_ep11_cprb || 1092 !zq->queue->config || 1093 (targets && 1094 !is_desired_ep11_queue(zq->queue->qid, 1095 target_num, targets))) 1096 continue; 1097 /* check if device node has admission for this queue */ 1098 if (!zcrypt_check_queue(perms, 1099 AP_QID_QUEUE(zq->queue->qid))) 1100 continue; 1101 /* penalty if the msg was previously sent at this qid */ 1102 qpen = (tr && tr->again_counter && tr->last_qid && 1103 tr->last_qid == zq->queue->qid) ? 1104 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0; 1105 if (!zcrypt_queue_compare(zq, pref_zq, 1106 wgt + cpen + qpen, pref_wgt)) 1107 continue; 1108 pref_zc = zc; 1109 pref_zq = zq; 1110 pref_wgt = wgt + cpen + qpen; 1111 } 1112 } 1113 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 1114 spin_unlock(&zcrypt_list_lock); 1115 1116 if (!pref_zq) { 1117 rc = -ENODEV; 1118 goto out_free; 1119 } 1120 1121 qid = pref_zq->queue->qid; 1122 rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg); 1123 1124 spin_lock(&zcrypt_list_lock); 1125 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 1126 spin_unlock(&zcrypt_list_lock); 1127 1128 out_free: 1129 kfree(targets); 1130 out: 1131 ap_release_message(&ap_msg); 1132 if (tr) { 1133 tr->last_rc = rc; 1134 tr->last_qid = qid; 1135 } 1136 trace_s390_zcrypt_rep(xcrb, func_code, rc, 1137 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1138 return rc; 1139 } 1140 1141 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 1142 { 1143 return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb); 1144 } 1145 EXPORT_SYMBOL(zcrypt_send_ep11_cprb); 1146 1147 static long zcrypt_rng(char *buffer) 1148 { 1149 struct zcrypt_card *zc, *pref_zc; 1150 struct zcrypt_queue *zq, *pref_zq; 1151 unsigned int wgt = 0, pref_wgt = 0; 1152 unsigned int func_code; 1153 struct ap_message ap_msg; 1154 unsigned int domain; 1155 int qid = 0, rc = -ENODEV; 1156 struct module *mod; 1157 1158 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1159 1160 ap_init_message(&ap_msg); 1161 rc = get_rng_fc(&ap_msg, &func_code, &domain); 1162 if (rc) 1163 goto out; 1164 1165 pref_zc = NULL; 1166 pref_zq = NULL; 1167 spin_lock(&zcrypt_list_lock); 1168 for_each_zcrypt_card(zc) { 1169 /* Check for useable CCA card */ 1170 if (!zc->online || !zc->card->config || 1171 !(zc->card->functions & 0x10000000)) 1172 continue; 1173 /* get weight index of the card device */ 1174 wgt = zc->speed_rating[func_code]; 1175 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt)) 1176 continue; 1177 for_each_zcrypt_queue(zq, zc) { 1178 /* check if device is useable and eligible */ 1179 if (!zq->online || !zq->ops->rng || 1180 !zq->queue->config) 1181 continue; 1182 if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt)) 1183 continue; 1184 pref_zc = zc; 1185 pref_zq = zq; 1186 pref_wgt = wgt; 1187 } 1188 } 1189 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt); 1190 spin_unlock(&zcrypt_list_lock); 1191 1192 if (!pref_zq) { 1193 rc = -ENODEV; 1194 goto out; 1195 } 1196 1197 qid = pref_zq->queue->qid; 1198 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 1199 1200 spin_lock(&zcrypt_list_lock); 1201 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt); 1202 spin_unlock(&zcrypt_list_lock); 1203 1204 out: 1205 ap_release_message(&ap_msg); 1206 trace_s390_zcrypt_rep(buffer, func_code, rc, 1207 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 1208 return rc; 1209 } 1210 1211 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus) 1212 { 1213 struct zcrypt_card *zc; 1214 struct zcrypt_queue *zq; 1215 struct zcrypt_device_status *stat; 1216 int card, queue; 1217 1218 memset(devstatus, 0, MAX_ZDEV_ENTRIES 1219 * sizeof(struct zcrypt_device_status)); 1220 1221 spin_lock(&zcrypt_list_lock); 1222 for_each_zcrypt_card(zc) { 1223 for_each_zcrypt_queue(zq, zc) { 1224 card = AP_QID_CARD(zq->queue->qid); 1225 if (card >= MAX_ZDEV_CARDIDS) 1226 continue; 1227 queue = AP_QID_QUEUE(zq->queue->qid); 1228 stat = &devstatus[card * AP_DOMAINS + queue]; 1229 stat->hwtype = zc->card->ap_dev.device_type; 1230 stat->functions = zc->card->functions >> 26; 1231 stat->qid = zq->queue->qid; 1232 stat->online = zq->online ? 0x01 : 0x00; 1233 } 1234 } 1235 spin_unlock(&zcrypt_list_lock); 1236 } 1237 1238 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus) 1239 { 1240 struct zcrypt_card *zc; 1241 struct zcrypt_queue *zq; 1242 struct zcrypt_device_status_ext *stat; 1243 int card, queue; 1244 1245 memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT 1246 * sizeof(struct zcrypt_device_status_ext)); 1247 1248 spin_lock(&zcrypt_list_lock); 1249 for_each_zcrypt_card(zc) { 1250 for_each_zcrypt_queue(zq, zc) { 1251 card = AP_QID_CARD(zq->queue->qid); 1252 queue = AP_QID_QUEUE(zq->queue->qid); 1253 stat = &devstatus[card * AP_DOMAINS + queue]; 1254 stat->hwtype = zc->card->ap_dev.device_type; 1255 stat->functions = zc->card->functions >> 26; 1256 stat->qid = zq->queue->qid; 1257 stat->online = zq->online ? 0x01 : 0x00; 1258 } 1259 } 1260 spin_unlock(&zcrypt_list_lock); 1261 } 1262 EXPORT_SYMBOL(zcrypt_device_status_mask_ext); 1263 1264 int zcrypt_device_status_ext(int card, int queue, 1265 struct zcrypt_device_status_ext *devstat) 1266 { 1267 struct zcrypt_card *zc; 1268 struct zcrypt_queue *zq; 1269 1270 memset(devstat, 0, sizeof(*devstat)); 1271 1272 spin_lock(&zcrypt_list_lock); 1273 for_each_zcrypt_card(zc) { 1274 for_each_zcrypt_queue(zq, zc) { 1275 if (card == AP_QID_CARD(zq->queue->qid) && 1276 queue == AP_QID_QUEUE(zq->queue->qid)) { 1277 devstat->hwtype = zc->card->ap_dev.device_type; 1278 devstat->functions = zc->card->functions >> 26; 1279 devstat->qid = zq->queue->qid; 1280 devstat->online = zq->online ? 0x01 : 0x00; 1281 spin_unlock(&zcrypt_list_lock); 1282 return 0; 1283 } 1284 } 1285 } 1286 spin_unlock(&zcrypt_list_lock); 1287 1288 return -ENODEV; 1289 } 1290 EXPORT_SYMBOL(zcrypt_device_status_ext); 1291 1292 static void zcrypt_status_mask(char status[], size_t max_adapters) 1293 { 1294 struct zcrypt_card *zc; 1295 struct zcrypt_queue *zq; 1296 int card; 1297 1298 memset(status, 0, max_adapters); 1299 spin_lock(&zcrypt_list_lock); 1300 for_each_zcrypt_card(zc) { 1301 for_each_zcrypt_queue(zq, zc) { 1302 card = AP_QID_CARD(zq->queue->qid); 1303 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1304 || card >= max_adapters) 1305 continue; 1306 status[card] = zc->online ? zc->user_space_type : 0x0d; 1307 } 1308 } 1309 spin_unlock(&zcrypt_list_lock); 1310 } 1311 1312 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters) 1313 { 1314 struct zcrypt_card *zc; 1315 struct zcrypt_queue *zq; 1316 int card; 1317 1318 memset(qdepth, 0, max_adapters); 1319 spin_lock(&zcrypt_list_lock); 1320 local_bh_disable(); 1321 for_each_zcrypt_card(zc) { 1322 for_each_zcrypt_queue(zq, zc) { 1323 card = AP_QID_CARD(zq->queue->qid); 1324 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1325 || card >= max_adapters) 1326 continue; 1327 spin_lock(&zq->queue->lock); 1328 qdepth[card] = 1329 zq->queue->pendingq_count + 1330 zq->queue->requestq_count; 1331 spin_unlock(&zq->queue->lock); 1332 } 1333 } 1334 local_bh_enable(); 1335 spin_unlock(&zcrypt_list_lock); 1336 } 1337 1338 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters) 1339 { 1340 struct zcrypt_card *zc; 1341 struct zcrypt_queue *zq; 1342 int card; 1343 u64 cnt; 1344 1345 memset(reqcnt, 0, sizeof(int) * max_adapters); 1346 spin_lock(&zcrypt_list_lock); 1347 local_bh_disable(); 1348 for_each_zcrypt_card(zc) { 1349 for_each_zcrypt_queue(zq, zc) { 1350 card = AP_QID_CARD(zq->queue->qid); 1351 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index 1352 || card >= max_adapters) 1353 continue; 1354 spin_lock(&zq->queue->lock); 1355 cnt = zq->queue->total_request_count; 1356 spin_unlock(&zq->queue->lock); 1357 reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX; 1358 } 1359 } 1360 local_bh_enable(); 1361 spin_unlock(&zcrypt_list_lock); 1362 } 1363 1364 static int zcrypt_pendingq_count(void) 1365 { 1366 struct zcrypt_card *zc; 1367 struct zcrypt_queue *zq; 1368 int pendingq_count; 1369 1370 pendingq_count = 0; 1371 spin_lock(&zcrypt_list_lock); 1372 local_bh_disable(); 1373 for_each_zcrypt_card(zc) { 1374 for_each_zcrypt_queue(zq, zc) { 1375 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1376 continue; 1377 spin_lock(&zq->queue->lock); 1378 pendingq_count += zq->queue->pendingq_count; 1379 spin_unlock(&zq->queue->lock); 1380 } 1381 } 1382 local_bh_enable(); 1383 spin_unlock(&zcrypt_list_lock); 1384 return pendingq_count; 1385 } 1386 1387 static int zcrypt_requestq_count(void) 1388 { 1389 struct zcrypt_card *zc; 1390 struct zcrypt_queue *zq; 1391 int requestq_count; 1392 1393 requestq_count = 0; 1394 spin_lock(&zcrypt_list_lock); 1395 local_bh_disable(); 1396 for_each_zcrypt_card(zc) { 1397 for_each_zcrypt_queue(zq, zc) { 1398 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1399 continue; 1400 spin_lock(&zq->queue->lock); 1401 requestq_count += zq->queue->requestq_count; 1402 spin_unlock(&zq->queue->lock); 1403 } 1404 } 1405 local_bh_enable(); 1406 spin_unlock(&zcrypt_list_lock); 1407 return requestq_count; 1408 } 1409 1410 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg) 1411 { 1412 int rc; 1413 struct zcrypt_track tr; 1414 struct ica_rsa_modexpo mex; 1415 struct ica_rsa_modexpo __user *umex = (void __user *) arg; 1416 1417 memset(&tr, 0, sizeof(tr)); 1418 if (copy_from_user(&mex, umex, sizeof(mex))) 1419 return -EFAULT; 1420 1421 #ifdef CONFIG_ZCRYPT_DEBUG 1422 if (mex.inputdatalength & (1U << 31)) { 1423 if (!capable(CAP_SYS_ADMIN)) 1424 return -EPERM; 1425 tr.fi.cmd = (u16)(mex.inputdatalength >> 16); 1426 } 1427 mex.inputdatalength &= 0x0000FFFF; 1428 #endif 1429 1430 do { 1431 rc = zcrypt_rsa_modexpo(perms, &tr, &mex); 1432 if (rc == -EAGAIN) 1433 tr.again_counter++; 1434 #ifdef CONFIG_ZCRYPT_DEBUG 1435 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1436 break; 1437 #endif 1438 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1439 /* on failure: retry once again after a requested rescan */ 1440 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1441 do { 1442 rc = zcrypt_rsa_modexpo(perms, &tr, &mex); 1443 if (rc == -EAGAIN) 1444 tr.again_counter++; 1445 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1446 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1447 rc = -EIO; 1448 if (rc) { 1449 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); 1450 return rc; 1451 } 1452 return put_user(mex.outputdatalength, &umex->outputdatalength); 1453 } 1454 1455 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg) 1456 { 1457 int rc; 1458 struct zcrypt_track tr; 1459 struct ica_rsa_modexpo_crt crt; 1460 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 1461 1462 memset(&tr, 0, sizeof(tr)); 1463 if (copy_from_user(&crt, ucrt, sizeof(crt))) 1464 return -EFAULT; 1465 1466 #ifdef CONFIG_ZCRYPT_DEBUG 1467 if (crt.inputdatalength & (1U << 31)) { 1468 if (!capable(CAP_SYS_ADMIN)) 1469 return -EPERM; 1470 tr.fi.cmd = (u16)(crt.inputdatalength >> 16); 1471 } 1472 crt.inputdatalength &= 0x0000FFFF; 1473 #endif 1474 1475 do { 1476 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1477 if (rc == -EAGAIN) 1478 tr.again_counter++; 1479 #ifdef CONFIG_ZCRYPT_DEBUG 1480 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1481 break; 1482 #endif 1483 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1484 /* on failure: retry once again after a requested rescan */ 1485 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1486 do { 1487 rc = zcrypt_rsa_crt(perms, &tr, &crt); 1488 if (rc == -EAGAIN) 1489 tr.again_counter++; 1490 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1491 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1492 rc = -EIO; 1493 if (rc) { 1494 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); 1495 return rc; 1496 } 1497 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 1498 } 1499 1500 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg) 1501 { 1502 int rc; 1503 struct ica_xcRB xcRB; 1504 struct zcrypt_track tr; 1505 struct ica_xcRB __user *uxcRB = (void __user *) arg; 1506 1507 memset(&tr, 0, sizeof(tr)); 1508 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 1509 return -EFAULT; 1510 1511 #ifdef CONFIG_ZCRYPT_DEBUG 1512 if (xcRB.status & (1U << 31)) { 1513 if (!capable(CAP_SYS_ADMIN)) 1514 return -EPERM; 1515 tr.fi.cmd = (u16)(xcRB.status >> 16); 1516 } 1517 xcRB.status &= 0x0000FFFF; 1518 #endif 1519 1520 do { 1521 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); 1522 if (rc == -EAGAIN) 1523 tr.again_counter++; 1524 #ifdef CONFIG_ZCRYPT_DEBUG 1525 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1526 break; 1527 #endif 1528 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1529 /* on failure: retry once again after a requested rescan */ 1530 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1531 do { 1532 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB); 1533 if (rc == -EAGAIN) 1534 tr.again_counter++; 1535 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1536 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1537 rc = -EIO; 1538 if (rc) 1539 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d status=0x%x\n", 1540 rc, xcRB.status); 1541 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 1542 return -EFAULT; 1543 return rc; 1544 } 1545 1546 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg) 1547 { 1548 int rc; 1549 struct ep11_urb xcrb; 1550 struct zcrypt_track tr; 1551 struct ep11_urb __user *uxcrb = (void __user *)arg; 1552 1553 memset(&tr, 0, sizeof(tr)); 1554 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 1555 return -EFAULT; 1556 1557 #ifdef CONFIG_ZCRYPT_DEBUG 1558 if (xcrb.req_len & (1ULL << 63)) { 1559 if (!capable(CAP_SYS_ADMIN)) 1560 return -EPERM; 1561 tr.fi.cmd = (u16)(xcrb.req_len >> 48); 1562 } 1563 xcrb.req_len &= 0x0000FFFFFFFFFFFFULL; 1564 #endif 1565 1566 do { 1567 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1568 if (rc == -EAGAIN) 1569 tr.again_counter++; 1570 #ifdef CONFIG_ZCRYPT_DEBUG 1571 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY)) 1572 break; 1573 #endif 1574 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1575 /* on failure: retry once again after a requested rescan */ 1576 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1577 do { 1578 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb); 1579 if (rc == -EAGAIN) 1580 tr.again_counter++; 1581 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1582 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1583 rc = -EIO; 1584 if (rc) 1585 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); 1586 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 1587 return -EFAULT; 1588 return rc; 1589 } 1590 1591 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 1592 unsigned long arg) 1593 { 1594 int rc; 1595 struct ap_perms *perms = 1596 (struct ap_perms *) filp->private_data; 1597 1598 rc = zcrypt_check_ioctl(perms, cmd); 1599 if (rc) 1600 return rc; 1601 1602 switch (cmd) { 1603 case ICARSAMODEXPO: 1604 return icarsamodexpo_ioctl(perms, arg); 1605 case ICARSACRT: 1606 return icarsacrt_ioctl(perms, arg); 1607 case ZSECSENDCPRB: 1608 return zsecsendcprb_ioctl(perms, arg); 1609 case ZSENDEP11CPRB: 1610 return zsendep11cprb_ioctl(perms, arg); 1611 case ZCRYPT_DEVICE_STATUS: { 1612 struct zcrypt_device_status_ext *device_status; 1613 size_t total_size = MAX_ZDEV_ENTRIES_EXT 1614 * sizeof(struct zcrypt_device_status_ext); 1615 1616 device_status = kzalloc(total_size, GFP_KERNEL); 1617 if (!device_status) 1618 return -ENOMEM; 1619 zcrypt_device_status_mask_ext(device_status); 1620 if (copy_to_user((char __user *) arg, device_status, 1621 total_size)) 1622 rc = -EFAULT; 1623 kfree(device_status); 1624 return rc; 1625 } 1626 case ZCRYPT_STATUS_MASK: { 1627 char status[AP_DEVICES]; 1628 1629 zcrypt_status_mask(status, AP_DEVICES); 1630 if (copy_to_user((char __user *) arg, status, sizeof(status))) 1631 return -EFAULT; 1632 return 0; 1633 } 1634 case ZCRYPT_QDEPTH_MASK: { 1635 char qdepth[AP_DEVICES]; 1636 1637 zcrypt_qdepth_mask(qdepth, AP_DEVICES); 1638 if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) 1639 return -EFAULT; 1640 return 0; 1641 } 1642 case ZCRYPT_PERDEV_REQCNT: { 1643 u32 *reqcnt; 1644 1645 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL); 1646 if (!reqcnt) 1647 return -ENOMEM; 1648 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); 1649 if (copy_to_user((int __user *) arg, reqcnt, 1650 sizeof(u32) * AP_DEVICES)) 1651 rc = -EFAULT; 1652 kfree(reqcnt); 1653 return rc; 1654 } 1655 case Z90STAT_REQUESTQ_COUNT: 1656 return put_user(zcrypt_requestq_count(), (int __user *) arg); 1657 case Z90STAT_PENDINGQ_COUNT: 1658 return put_user(zcrypt_pendingq_count(), (int __user *) arg); 1659 case Z90STAT_TOTALOPEN_COUNT: 1660 return put_user(atomic_read(&zcrypt_open_count), 1661 (int __user *) arg); 1662 case Z90STAT_DOMAIN_INDEX: 1663 return put_user(ap_domain_index, (int __user *) arg); 1664 /* 1665 * Deprecated ioctls 1666 */ 1667 case ZDEVICESTATUS: { 1668 /* the old ioctl supports only 64 adapters */ 1669 struct zcrypt_device_status *device_status; 1670 size_t total_size = MAX_ZDEV_ENTRIES 1671 * sizeof(struct zcrypt_device_status); 1672 1673 device_status = kzalloc(total_size, GFP_KERNEL); 1674 if (!device_status) 1675 return -ENOMEM; 1676 zcrypt_device_status_mask(device_status); 1677 if (copy_to_user((char __user *) arg, device_status, 1678 total_size)) 1679 rc = -EFAULT; 1680 kfree(device_status); 1681 return rc; 1682 } 1683 case Z90STAT_STATUS_MASK: { 1684 /* the old ioctl supports only 64 adapters */ 1685 char status[MAX_ZDEV_CARDIDS]; 1686 1687 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS); 1688 if (copy_to_user((char __user *) arg, status, sizeof(status))) 1689 return -EFAULT; 1690 return 0; 1691 } 1692 case Z90STAT_QDEPTH_MASK: { 1693 /* the old ioctl supports only 64 adapters */ 1694 char qdepth[MAX_ZDEV_CARDIDS]; 1695 1696 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS); 1697 if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth))) 1698 return -EFAULT; 1699 return 0; 1700 } 1701 case Z90STAT_PERDEV_REQCNT: { 1702 /* the old ioctl supports only 64 adapters */ 1703 u32 reqcnt[MAX_ZDEV_CARDIDS]; 1704 1705 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS); 1706 if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) 1707 return -EFAULT; 1708 return 0; 1709 } 1710 /* unknown ioctl number */ 1711 default: 1712 ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd); 1713 return -ENOIOCTLCMD; 1714 } 1715 } 1716 1717 #ifdef CONFIG_COMPAT 1718 /* 1719 * ioctl32 conversion routines 1720 */ 1721 struct compat_ica_rsa_modexpo { 1722 compat_uptr_t inputdata; 1723 unsigned int inputdatalength; 1724 compat_uptr_t outputdata; 1725 unsigned int outputdatalength; 1726 compat_uptr_t b_key; 1727 compat_uptr_t n_modulus; 1728 }; 1729 1730 static long trans_modexpo32(struct ap_perms *perms, struct file *filp, 1731 unsigned int cmd, unsigned long arg) 1732 { 1733 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 1734 struct compat_ica_rsa_modexpo mex32; 1735 struct ica_rsa_modexpo mex64; 1736 struct zcrypt_track tr; 1737 long rc; 1738 1739 memset(&tr, 0, sizeof(tr)); 1740 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 1741 return -EFAULT; 1742 mex64.inputdata = compat_ptr(mex32.inputdata); 1743 mex64.inputdatalength = mex32.inputdatalength; 1744 mex64.outputdata = compat_ptr(mex32.outputdata); 1745 mex64.outputdatalength = mex32.outputdatalength; 1746 mex64.b_key = compat_ptr(mex32.b_key); 1747 mex64.n_modulus = compat_ptr(mex32.n_modulus); 1748 do { 1749 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); 1750 if (rc == -EAGAIN) 1751 tr.again_counter++; 1752 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1753 /* on failure: retry once again after a requested rescan */ 1754 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1755 do { 1756 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64); 1757 if (rc == -EAGAIN) 1758 tr.again_counter++; 1759 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1760 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1761 rc = -EIO; 1762 if (rc) 1763 return rc; 1764 return put_user(mex64.outputdatalength, 1765 &umex32->outputdatalength); 1766 } 1767 1768 struct compat_ica_rsa_modexpo_crt { 1769 compat_uptr_t inputdata; 1770 unsigned int inputdatalength; 1771 compat_uptr_t outputdata; 1772 unsigned int outputdatalength; 1773 compat_uptr_t bp_key; 1774 compat_uptr_t bq_key; 1775 compat_uptr_t np_prime; 1776 compat_uptr_t nq_prime; 1777 compat_uptr_t u_mult_inv; 1778 }; 1779 1780 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp, 1781 unsigned int cmd, unsigned long arg) 1782 { 1783 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1784 struct compat_ica_rsa_modexpo_crt crt32; 1785 struct ica_rsa_modexpo_crt crt64; 1786 struct zcrypt_track tr; 1787 long rc; 1788 1789 memset(&tr, 0, sizeof(tr)); 1790 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1791 return -EFAULT; 1792 crt64.inputdata = compat_ptr(crt32.inputdata); 1793 crt64.inputdatalength = crt32.inputdatalength; 1794 crt64.outputdata = compat_ptr(crt32.outputdata); 1795 crt64.outputdatalength = crt32.outputdatalength; 1796 crt64.bp_key = compat_ptr(crt32.bp_key); 1797 crt64.bq_key = compat_ptr(crt32.bq_key); 1798 crt64.np_prime = compat_ptr(crt32.np_prime); 1799 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1800 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1801 do { 1802 rc = zcrypt_rsa_crt(perms, &tr, &crt64); 1803 if (rc == -EAGAIN) 1804 tr.again_counter++; 1805 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1806 /* on failure: retry once again after a requested rescan */ 1807 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1808 do { 1809 rc = zcrypt_rsa_crt(perms, &tr, &crt64); 1810 if (rc == -EAGAIN) 1811 tr.again_counter++; 1812 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1813 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1814 rc = -EIO; 1815 if (rc) 1816 return rc; 1817 return put_user(crt64.outputdatalength, 1818 &ucrt32->outputdatalength); 1819 } 1820 1821 struct compat_ica_xcRB { 1822 unsigned short agent_ID; 1823 unsigned int user_defined; 1824 unsigned short request_ID; 1825 unsigned int request_control_blk_length; 1826 unsigned char padding1[16 - sizeof(compat_uptr_t)]; 1827 compat_uptr_t request_control_blk_addr; 1828 unsigned int request_data_length; 1829 char padding2[16 - sizeof(compat_uptr_t)]; 1830 compat_uptr_t request_data_address; 1831 unsigned int reply_control_blk_length; 1832 char padding3[16 - sizeof(compat_uptr_t)]; 1833 compat_uptr_t reply_control_blk_addr; 1834 unsigned int reply_data_length; 1835 char padding4[16 - sizeof(compat_uptr_t)]; 1836 compat_uptr_t reply_data_addr; 1837 unsigned short priority_window; 1838 unsigned int status; 1839 } __packed; 1840 1841 static long trans_xcRB32(struct ap_perms *perms, struct file *filp, 1842 unsigned int cmd, unsigned long arg) 1843 { 1844 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1845 struct compat_ica_xcRB xcRB32; 1846 struct zcrypt_track tr; 1847 struct ica_xcRB xcRB64; 1848 long rc; 1849 1850 memset(&tr, 0, sizeof(tr)); 1851 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1852 return -EFAULT; 1853 xcRB64.agent_ID = xcRB32.agent_ID; 1854 xcRB64.user_defined = xcRB32.user_defined; 1855 xcRB64.request_ID = xcRB32.request_ID; 1856 xcRB64.request_control_blk_length = 1857 xcRB32.request_control_blk_length; 1858 xcRB64.request_control_blk_addr = 1859 compat_ptr(xcRB32.request_control_blk_addr); 1860 xcRB64.request_data_length = 1861 xcRB32.request_data_length; 1862 xcRB64.request_data_address = 1863 compat_ptr(xcRB32.request_data_address); 1864 xcRB64.reply_control_blk_length = 1865 xcRB32.reply_control_blk_length; 1866 xcRB64.reply_control_blk_addr = 1867 compat_ptr(xcRB32.reply_control_blk_addr); 1868 xcRB64.reply_data_length = xcRB32.reply_data_length; 1869 xcRB64.reply_data_addr = 1870 compat_ptr(xcRB32.reply_data_addr); 1871 xcRB64.priority_window = xcRB32.priority_window; 1872 xcRB64.status = xcRB32.status; 1873 do { 1874 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); 1875 if (rc == -EAGAIN) 1876 tr.again_counter++; 1877 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1878 /* on failure: retry once again after a requested rescan */ 1879 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1880 do { 1881 rc = _zcrypt_send_cprb(true, perms, &tr, &xcRB64); 1882 if (rc == -EAGAIN) 1883 tr.again_counter++; 1884 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX); 1885 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX) 1886 rc = -EIO; 1887 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1888 xcRB32.reply_data_length = xcRB64.reply_data_length; 1889 xcRB32.status = xcRB64.status; 1890 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1891 return -EFAULT; 1892 return rc; 1893 } 1894 1895 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1896 unsigned long arg) 1897 { 1898 int rc; 1899 struct ap_perms *perms = 1900 (struct ap_perms *) filp->private_data; 1901 1902 rc = zcrypt_check_ioctl(perms, cmd); 1903 if (rc) 1904 return rc; 1905 1906 if (cmd == ICARSAMODEXPO) 1907 return trans_modexpo32(perms, filp, cmd, arg); 1908 if (cmd == ICARSACRT) 1909 return trans_modexpo_crt32(perms, filp, cmd, arg); 1910 if (cmd == ZSECSENDCPRB) 1911 return trans_xcRB32(perms, filp, cmd, arg); 1912 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1913 } 1914 #endif 1915 1916 /* 1917 * Misc device file operations. 1918 */ 1919 static const struct file_operations zcrypt_fops = { 1920 .owner = THIS_MODULE, 1921 .read = zcrypt_read, 1922 .write = zcrypt_write, 1923 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1924 #ifdef CONFIG_COMPAT 1925 .compat_ioctl = zcrypt_compat_ioctl, 1926 #endif 1927 .open = zcrypt_open, 1928 .release = zcrypt_release, 1929 .llseek = no_llseek, 1930 }; 1931 1932 /* 1933 * Misc device. 1934 */ 1935 static struct miscdevice zcrypt_misc_device = { 1936 .minor = MISC_DYNAMIC_MINOR, 1937 .name = "z90crypt", 1938 .fops = &zcrypt_fops, 1939 }; 1940 1941 static int zcrypt_rng_device_count; 1942 static u32 *zcrypt_rng_buffer; 1943 static int zcrypt_rng_buffer_index; 1944 static DEFINE_MUTEX(zcrypt_rng_mutex); 1945 1946 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1947 { 1948 int rc; 1949 1950 /* 1951 * We don't need locking here because the RNG API guarantees serialized 1952 * read method calls. 1953 */ 1954 if (zcrypt_rng_buffer_index == 0) { 1955 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1956 /* on failure: retry once again after a requested rescan */ 1957 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1958 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1959 if (rc < 0) 1960 return -EIO; 1961 zcrypt_rng_buffer_index = rc / sizeof(*data); 1962 } 1963 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1964 return sizeof(*data); 1965 } 1966 1967 static struct hwrng zcrypt_rng_dev = { 1968 .name = "zcrypt", 1969 .data_read = zcrypt_rng_data_read, 1970 .quality = 990, 1971 }; 1972 1973 int zcrypt_rng_device_add(void) 1974 { 1975 int rc = 0; 1976 1977 mutex_lock(&zcrypt_rng_mutex); 1978 if (zcrypt_rng_device_count == 0) { 1979 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 1980 if (!zcrypt_rng_buffer) { 1981 rc = -ENOMEM; 1982 goto out; 1983 } 1984 zcrypt_rng_buffer_index = 0; 1985 if (!zcrypt_hwrng_seed) 1986 zcrypt_rng_dev.quality = 0; 1987 rc = hwrng_register(&zcrypt_rng_dev); 1988 if (rc) 1989 goto out_free; 1990 zcrypt_rng_device_count = 1; 1991 } else 1992 zcrypt_rng_device_count++; 1993 mutex_unlock(&zcrypt_rng_mutex); 1994 return 0; 1995 1996 out_free: 1997 free_page((unsigned long) zcrypt_rng_buffer); 1998 out: 1999 mutex_unlock(&zcrypt_rng_mutex); 2000 return rc; 2001 } 2002 2003 void zcrypt_rng_device_remove(void) 2004 { 2005 mutex_lock(&zcrypt_rng_mutex); 2006 zcrypt_rng_device_count--; 2007 if (zcrypt_rng_device_count == 0) { 2008 hwrng_unregister(&zcrypt_rng_dev); 2009 free_page((unsigned long) zcrypt_rng_buffer); 2010 } 2011 mutex_unlock(&zcrypt_rng_mutex); 2012 } 2013 2014 /* 2015 * Wait until the zcrypt api is operational. 2016 * The AP bus scan and the binding of ap devices to device drivers is 2017 * an asynchronous job. This function waits until these initial jobs 2018 * are done and so the zcrypt api should be ready to serve crypto 2019 * requests - if there are resources available. The function uses an 2020 * internal timeout of 60s. The very first caller will either wait for 2021 * ap bus bindings complete or the timeout happens. This state will be 2022 * remembered for further callers which will only be blocked until a 2023 * decision is made (timeout or bindings complete). 2024 * On timeout -ETIME is returned, on success the return value is 0. 2025 */ 2026 int zcrypt_wait_api_operational(void) 2027 { 2028 static DEFINE_MUTEX(zcrypt_wait_api_lock); 2029 static int zcrypt_wait_api_state; 2030 int rc; 2031 2032 rc = mutex_lock_interruptible(&zcrypt_wait_api_lock); 2033 if (rc) 2034 return rc; 2035 2036 switch (zcrypt_wait_api_state) { 2037 case 0: 2038 /* initial state, invoke wait for the ap bus complete */ 2039 rc = ap_wait_init_apqn_bindings_complete( 2040 msecs_to_jiffies(60 * 1000)); 2041 switch (rc) { 2042 case 0: 2043 /* ap bus bindings are complete */ 2044 zcrypt_wait_api_state = 1; 2045 break; 2046 case -EINTR: 2047 /* interrupted, go back to caller */ 2048 break; 2049 case -ETIME: 2050 /* timeout */ 2051 ZCRYPT_DBF(DBF_WARN, 2052 "%s ap_wait_init_apqn_bindings_complete() returned with ETIME\n", 2053 __func__); 2054 zcrypt_wait_api_state = -ETIME; 2055 break; 2056 default: 2057 /* other failure */ 2058 ZCRYPT_DBF(DBF_DEBUG, 2059 "%s ap_wait_init_apqn_bindings_complete() failure rc=%d\n", 2060 __func__, rc); 2061 break; 2062 } 2063 break; 2064 case 1: 2065 /* a previous caller already found ap bus bindings complete */ 2066 rc = 0; 2067 break; 2068 default: 2069 /* a previous caller had timeout or other failure */ 2070 rc = zcrypt_wait_api_state; 2071 break; 2072 } 2073 2074 mutex_unlock(&zcrypt_wait_api_lock); 2075 2076 return rc; 2077 } 2078 EXPORT_SYMBOL(zcrypt_wait_api_operational); 2079 2080 int __init zcrypt_debug_init(void) 2081 { 2082 zcrypt_dbf_info = debug_register("zcrypt", 1, 1, 2083 DBF_MAX_SPRINTF_ARGS * sizeof(long)); 2084 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 2085 debug_set_level(zcrypt_dbf_info, DBF_ERR); 2086 2087 return 0; 2088 } 2089 2090 void zcrypt_debug_exit(void) 2091 { 2092 debug_unregister(zcrypt_dbf_info); 2093 } 2094 2095 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2096 2097 static int __init zcdn_init(void) 2098 { 2099 int rc; 2100 2101 /* create a new class 'zcrypt' */ 2102 zcrypt_class = class_create(THIS_MODULE, ZCRYPT_NAME); 2103 if (IS_ERR(zcrypt_class)) { 2104 rc = PTR_ERR(zcrypt_class); 2105 goto out_class_create_failed; 2106 } 2107 zcrypt_class->dev_release = zcdn_device_release; 2108 2109 /* alloc device minor range */ 2110 rc = alloc_chrdev_region(&zcrypt_devt, 2111 0, ZCRYPT_MAX_MINOR_NODES, 2112 ZCRYPT_NAME); 2113 if (rc) 2114 goto out_alloc_chrdev_failed; 2115 2116 cdev_init(&zcrypt_cdev, &zcrypt_fops); 2117 zcrypt_cdev.owner = THIS_MODULE; 2118 rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2119 if (rc) 2120 goto out_cdev_add_failed; 2121 2122 /* need some class specific sysfs attributes */ 2123 rc = class_create_file(zcrypt_class, &class_attr_zcdn_create); 2124 if (rc) 2125 goto out_class_create_file_1_failed; 2126 rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy); 2127 if (rc) 2128 goto out_class_create_file_2_failed; 2129 2130 return 0; 2131 2132 out_class_create_file_2_failed: 2133 class_remove_file(zcrypt_class, &class_attr_zcdn_create); 2134 out_class_create_file_1_failed: 2135 cdev_del(&zcrypt_cdev); 2136 out_cdev_add_failed: 2137 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2138 out_alloc_chrdev_failed: 2139 class_destroy(zcrypt_class); 2140 out_class_create_failed: 2141 return rc; 2142 } 2143 2144 static void zcdn_exit(void) 2145 { 2146 class_remove_file(zcrypt_class, &class_attr_zcdn_create); 2147 class_remove_file(zcrypt_class, &class_attr_zcdn_destroy); 2148 zcdn_destroy_all(); 2149 cdev_del(&zcrypt_cdev); 2150 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES); 2151 class_destroy(zcrypt_class); 2152 } 2153 2154 #endif 2155 2156 /* 2157 * zcrypt_api_init(): Module initialization. 2158 * 2159 * The module initialization code. 2160 */ 2161 int __init zcrypt_api_init(void) 2162 { 2163 int rc; 2164 2165 rc = zcrypt_debug_init(); 2166 if (rc) 2167 goto out; 2168 2169 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2170 rc = zcdn_init(); 2171 if (rc) 2172 goto out; 2173 #endif 2174 2175 /* Register the request sprayer. */ 2176 rc = misc_register(&zcrypt_misc_device); 2177 if (rc < 0) 2178 goto out_misc_register_failed; 2179 2180 zcrypt_msgtype6_init(); 2181 zcrypt_msgtype50_init(); 2182 2183 return 0; 2184 2185 out_misc_register_failed: 2186 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2187 zcdn_exit(); 2188 #endif 2189 zcrypt_debug_exit(); 2190 out: 2191 return rc; 2192 } 2193 2194 /* 2195 * zcrypt_api_exit(): Module termination. 2196 * 2197 * The module termination code. 2198 */ 2199 void __exit zcrypt_api_exit(void) 2200 { 2201 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES 2202 zcdn_exit(); 2203 #endif 2204 misc_deregister(&zcrypt_misc_device); 2205 zcrypt_msgtype6_exit(); 2206 zcrypt_msgtype50_exit(); 2207 zcrypt_ccamisc_exit(); 2208 zcrypt_ep11misc_exit(); 2209 zcrypt_debug_exit(); 2210 } 2211 2212 module_init(zcrypt_api_init); 2213 module_exit(zcrypt_api_exit); 2214