// SPDX-License-Identifier: GPL-2.0+
/*
 * zcrypt 2.1.0
 *
 * Copyright IBM Corp. 2001, 2012
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		 Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * zcrypt_process_rescan(): Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		atomic_inc(&zcrypt_rescan_count);
		ap_bus_force_rescan();
		/* only read the counter here; the debug macro may not
		 * evaluate its arguments when the event is filtered */
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_read(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
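/*
 * Usage sketch (illustrative only, not compiled here): a message type
 * module fills a struct zcrypt_ops and registers it, after which queue
 * drivers can look it up by name and variant. The callback name below is
 * hypothetical; MSGTYPE50_NAME and MSGTYPE50_VARIANT_DEFAULT come from
 * zcrypt_msgtype50.h.
 *
 *	static struct zcrypt_ops example_ops = {
 *		.name	     = MSGTYPE50_NAME,
 *		.variant     = MSGTYPE50_VARIANT_DEFAULT,
 *		.rsa_modexpo = example_rsa_modexpo,
 *	};
 *
 *	zcrypt_msgtype_register(&example_ops);
 *	...
 *	zops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
 */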
/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
		       atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
		       pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
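/*
 * All request routing functions below share the same load balancing
 * pattern (sketch):
 *
 *	spin_lock(&zcrypt_list_lock);
 *	for_each_zcrypt_card(zc)
 *		for_each_zcrypt_queue(zq, zc)
 *			... remember the eligible (zc, zq) pair with the
 *			    lowest weighted load as (pref_zc, pref_zq) ...
 *	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
 *	spin_unlock(&zcrypt_list_lock);
 *
 *	rc = pref_zq->ops-><callback>(pref_zq, ...);
 *
 *	spin_lock(&zcrypt_list_lock);
 *	zcrypt_drop_queue(pref_zc, pref_zq, weight);
 *	spin_unlock(&zcrypt_list_lock);
 *
 * zcrypt_pick_queue() takes the module, device and load references while
 * the list lock is held; zcrypt_drop_queue() releases them again after
 * the request has been processed.
 */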
/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
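/*
 * Userspace sketch for the modular exponentiation path (illustrative
 * only; the caller must supply valid buffers and key parts):
 *
 *	struct ica_rsa_modexpo mex = { ... };
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *		... result is in mex.outputdata,
 *		    mex.outputdatalength bytes long ...
 */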
static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}
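/*
 * Sketch of a caller-provided EP11 target list (hypothetical values;
 * the exact field widths are defined by the zcrypt user space API):
 *
 *	struct ep11_target_dev tgts[2] = {
 *		{ .ap_id = 3, .dom_id = 13 },
 *		{ .ap_id = 4, .dom_id = 13 },
 *	};
 *
 *	urb.targets_num = 2;
 *	urb.targets = (u64)(unsigned long) tgts;
 *
 * An empty list (targets_num == 0) means autoselect among all online
 * EP11 queues.
 */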
static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			goto out_free;	/* don't leak the targets array */
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		/* go through out so the reply tracepoint fires as well */
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
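/*
 * Layout note for both status functions above: the devstatus array is
 * card-major, i.e. the entry for AP queue XX.YYYY is found at
 * devstatus[XX * AP_DOMAINS + YYYY].
 */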
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
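/*
 * Retry policy of the ioctl handlers below: a request is repeated while
 * the selected queue returns -EAGAIN; if it fails with -ENODEV and a bus
 * rescan was requested in the meantime, zcrypt_process_rescan() triggers
 * the rescan and the request is retried once more.
 */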
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSECSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		/* reqcnt is a pointer here; copy the full array, not
		 * sizeof(reqcnt) */
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}
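/*
 * All compat handlers follow the same recipe: copy the 32-bit structure
 * in, widen each compat_uptr_t with compat_ptr(), run the regular 64-bit
 * path, and copy the user-visible output fields back.
 */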
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	    = MISC_DYNAMIC_MINOR,
	.name	    = "z90crypt",
	.fops	    = &zcrypt_fops,
};
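/*
 * Random number pool for the hwrng interface: zcrypt_rng() fills one page
 * with hardware random bytes, and zcrypt_rng_data_read() below hands that
 * page out in u32 portions, refilling it once the index reaches zero.
 */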
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}
/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);