// SPDX-License-Identifier: GPL-2.0+
/*
 * zcrypt 2.1.0
 *
 * Copyright IBM Corp. 2001, 2012
 * Author(s): Robert Burroughs
 *	      Eric Rossman (edrossma@us.ibm.com)
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *
 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
 *				 Ralph Wuerthner <rwuerthn@de.ibm.com>
 * MSGTYPE restruct:		  Holger Dengler <hd@linux.vnet.ibm.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <asm/debug.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");

/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);

static int zcrypt_hwrng_seed = 1;
module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP);
MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on).");

DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
int zcrypt_device_count;

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);

atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
EXPORT_SYMBOL(zcrypt_rescan_req);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;

/**
 * Process a rescan of the transport layer.
 *
 * Returns 1 if the rescan has been processed, otherwise 0.
 */
static inline int zcrypt_process_rescan(void)
{
	if (atomic_read(&zcrypt_rescan_req)) {
		atomic_set(&zcrypt_rescan_req, 0);
		ap_bus_force_rescan();
		ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n",
			   atomic_inc_return(&zcrypt_rescan_count));
		return 1;
	}
	return 0;
}

void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}

struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if ((zops->variant == variant) &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
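
/*
 * Illustrative example (not part of the original file): a message type
 * driver fills a struct zcrypt_ops on module init, registers it, and
 * other code looks it up by name and variant. Roughly:
 *
 *	static struct zcrypt_ops example_ops = {
 *		.name	 = MSGTYPE50_NAME,
 *		.variant = MSGTYPE50_VARIANT_DEFAULT,
 *		.rsa_modexpo = ...,
 *	};
 *
 *	zcrypt_msgtype_register(&example_ops);
 *	zops = zcrypt_msgtype(MSGTYPE50_NAME, MSGTYPE50_VARIANT_DEFAULT);
 */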

/**
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}

/**
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	atomic_inc(&zcrypt_open_count);
	return nonseekable_open(inode, filp);
}

/**
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	atomic_dec(&zcrypt_open_count);
	return 0;
}

static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	return zq;
}

static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     unsigned int weight)
{
	struct module *mod = zq->queue->ap_dev.drv->driver.owner;

	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}

static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return false;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic_read(&zc->card->total_request_count) >
			atomic_read(&pref_zc->card->total_request_count);
	return weight > pref_weight;
}

static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return false;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count >
			pref_zq->queue->total_request_count;
	return weight > pref_weight;
}
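
/*
 * Worked example (illustrative): a candidate card with static weight 10
 * and current load 90 sums to 100; the preferred card with weight 100
 * and load 10 sums to 110. The compare returns false, so the candidate
 * replaces the preferred card. Ties on the sum are broken in favour of
 * the device with the smaller total_request_count.
 */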

/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	if (mex->outputdatalength < mex->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
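
/*
 * A minimal userspace sketch (illustrative; assumes the uapi definitions
 * from asm/zcrypt.h, error handling omitted):
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata	  = input,
 *		.inputdatalength  = len,
 *		.outputdata	  = output,
 *		.outputdatalength = len,
 *		.b_key		  = exponent,
 *		.n_modulus	  = modulus,
 *	};
 *	ioctl(fd, ICARSAMODEXPO, &mex);
 */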

static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	if (crt->outputdatalength < crt->inputdatalength) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case.
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online accelerator and CCA cards */
		if (!zc->online || !(zc->card->functions & 0x18000000))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

long zcrypt_send_cprb(struct ica_xcRB *xcRB)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	unsigned short *domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);

	ap_init_message(&ap_msg);
	rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* Check for user selected CCA card */
		if (xcRB->user_defined != AUTOSELECT &&
		    xcRB->user_defined != zc->card->id)
			continue;
		/* get weight index of the card device */
		weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_cprb ||
			    ((*domain != (unsigned short) AUTOSELECT) &&
			     (*domain != AP_QID_QUEUE(zq->queue->qid))))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == (unsigned short) AUTOSELECT)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcRB, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
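
/*
 * Usage note (illustrative): a caller that does not care which CCA card
 * handles the request sets xcRB->user_defined = AUTOSELECT and leaves the
 * domain in the CPRB at AUTOSELECT as well; the code above then picks the
 * least loaded APQN and writes the chosen domain back before sending.
 */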

static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (dev_id == targets->ap_id)
			return true;
		targets++;
	}
	return false;
}

static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid)
			return true;
		targets++;
	}
	return false;
}

static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short) xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
		if (copy_from_user(targets, uptr,
				   target_num * sizeof(*targets))) {
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code);
	if (rc)
		goto out_free;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online EP11 cards */
		if (!zc->online || !(zc->card->functions & 0x04000000))
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* get weight index of the card device */
		weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online ||
			    !zq->ops->send_ep11_cprb ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
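
/*
 * Illustrative example (hypothetical values): to address two specific
 * EP11 APQNs, userspace fills an array of struct ep11_target_dev and
 * points the urb at it:
 *
 *	struct ep11_target_dev targets[2] = {
 *		{ .ap_id = 2, .dom_id = 5 },
 *		{ .ap_id = 3, .dom_id = 5 },
 *	};
 *	urb.targets_num = 2;
 *	urb.targets = (uint64_t)(unsigned long) targets;
 *
 * A targets_num of 0 selects among all online EP11 queues.
 */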

static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int weight, pref_weight;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = get_rng_fc(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for online CCA cards */
		if (!zc->online || !(zc->card->functions & 0x10000000))
			continue;
		/* get weight index of the card device */
		weight = zc->speed_rating[func_code];
		if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is online and eligible */
			if (!zq->online || !zq->ops->rng)
				continue;
			if (zcrypt_queue_compare(zq, pref_zq,
						 weight, pref_weight))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_weight = weight;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, weight);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}

static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
	       * sizeof(struct zcrypt_device_status_ext));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->functions >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
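
/*
 * Illustrative note: both status arrays above are indexed by APQN, so
 * the entry for card 5, domain 17 lives at devstatus[5 * AP_DOMAINS + 17];
 * positions without a device keep the all-zero pattern written by the
 * memset.
 */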

static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index
			    || card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			reqcnt[card] = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}

static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}

static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
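
/*
 * Usage note (illustrative): the helpers above only report queues in the
 * default domain (ap_domain_index). Userspace reads the summed counters
 * through the Z90STAT ioctls handled below, e.g.:
 *
 *	int count;
 *	ioctl(fd, Z90STAT_REQUESTQ_COUNT, &count);
 */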

static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc = 0;

	switch (cmd) {
	case ICARSAMODEXPO: {
		struct ica_rsa_modexpo __user *umex = (void __user *) arg;
		struct ica_rsa_modexpo mex;

		if (copy_from_user(&mex, umex, sizeof(mex)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_modexpo(&mex);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_modexpo(&mex);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc);
			return rc;
		}
		return put_user(mex.outputdatalength, &umex->outputdatalength);
	}
	case ICARSACRT: {
		struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg;
		struct ica_rsa_modexpo_crt crt;

		if (copy_from_user(&crt, ucrt, sizeof(crt)))
			return -EFAULT;
		do {
			rc = zcrypt_rsa_crt(&crt);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_rsa_crt(&crt);
			} while (rc == -EAGAIN);
		if (rc) {
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc);
			return rc;
		}
		return put_user(crt.outputdatalength, &ucrt->outputdatalength);
	}
	case ZSECSENDCPRB: {
		struct ica_xcRB __user *uxcRB = (void __user *) arg;
		struct ica_xcRB xcRB;

		if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB)))
			return -EFAULT;
		do {
			rc = zcrypt_send_cprb(&xcRB);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_cprb(&xcRB);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSECSENDCPRB rc=%d\n", rc);
		if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB)))
			return -EFAULT;
		return rc;
	}
	case ZSENDEP11CPRB: {
		struct ep11_urb __user *uxcrb = (void __user *)arg;
		struct ep11_urb xcrb;

		if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
			return -EFAULT;
		do {
			rc = zcrypt_send_ep11_cprb(&xcrb);
		} while (rc == -EAGAIN);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			do {
				rc = zcrypt_send_ep11_cprb(&xcrb);
			} while (rc == -EAGAIN);
		if (rc)
			ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc);
		if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
			return -EFAULT;
		return rc;
	}
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		int *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *) arg, reqcnt,
				 sizeof(int) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *) arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *) arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *) arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *) arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *) arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *) arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		int reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		ZCRYPT_DBF(DBF_DEBUG, "unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
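
/*
 * A minimal userspace sketch for one of the status ioctls above
 * (illustrative; the array must hold AP_DEVICES entries):
 *
 *	int reqcnt[AP_DEVICES];
 *	ioctl(fd, ZCRYPT_PERDEV_REQCNT, reqcnt);
 *
 * Note the retry pattern used throughout this handler: a request is
 * repeated while it returns -EAGAIN, and retried once more after a
 * forced bus rescan when it returns -ENODEV.
 */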

#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};

static long trans_modexpo32(struct file *filp, unsigned int cmd,
			    unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	long rc;

	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(&mex64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_modexpo(&mex64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}

struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};

static long trans_modexpo_crt32(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	long rc;

	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(&crt64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_rsa_crt(&crt64);
		} while (rc == -EAGAIN);
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}
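
/*
 * Layout note (illustrative): in the 64-bit struct ica_xcRB each pointer
 * member sits in a 16-byte slot together with its padding; the padding
 * arrays of 16 - sizeof(compat_uptr_t) bytes in the compat variant below
 * keep the field offsets identical, so only the pointer values
 * themselves need conversion through compat_ptr().
 */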

struct compat_ica_xcRB {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
} __attribute__((packed));

static long trans_xcRB32(struct file *filp, unsigned int cmd,
			 unsigned long arg)
{
	struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg);
	struct compat_ica_xcRB xcRB32;
	struct ica_xcRB xcRB64;
	long rc;

	if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32)))
		return -EFAULT;
	xcRB64.agent_ID = xcRB32.agent_ID;
	xcRB64.user_defined = xcRB32.user_defined;
	xcRB64.request_ID = xcRB32.request_ID;
	xcRB64.request_control_blk_length =
		xcRB32.request_control_blk_length;
	xcRB64.request_control_blk_addr =
		compat_ptr(xcRB32.request_control_blk_addr);
	xcRB64.request_data_length =
		xcRB32.request_data_length;
	xcRB64.request_data_address =
		compat_ptr(xcRB32.request_data_address);
	xcRB64.reply_control_blk_length =
		xcRB32.reply_control_blk_length;
	xcRB64.reply_control_blk_addr =
		compat_ptr(xcRB32.reply_control_blk_addr);
	xcRB64.reply_data_length = xcRB32.reply_data_length;
	xcRB64.reply_data_addr =
		compat_ptr(xcRB32.reply_data_addr);
	xcRB64.priority_window = xcRB32.priority_window;
	xcRB64.status = xcRB32.status;
	do {
		rc = zcrypt_send_cprb(&xcRB64);
	} while (rc == -EAGAIN);
	/* on failure: retry once again after a requested rescan */
	if ((rc == -ENODEV) && (zcrypt_process_rescan()))
		do {
			rc = zcrypt_send_cprb(&xcRB64);
		} while (rc == -EAGAIN);
	xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length;
	xcRB32.reply_data_length = xcRB64.reply_data_length;
	xcRB32.status = xcRB64.status;
	if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32)))
		return -EFAULT;
	return rc;
}

static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcRB32(filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif

/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
	.llseek		= no_llseek,
};

/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	= MISC_DYNAMIC_MINOR,
	.name	= "z90crypt",
	.fops	= &zcrypt_fops,
};

static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);

static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		/* on failure: retry once again after a requested rescan */
		if ((rc == -ENODEV) && (zcrypt_process_rescan()))
			rc = zcrypt_rng((char *) zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}

static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};

int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		if (!zcrypt_hwrng_seed)
			zcrypt_rng_dev.quality = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else
		zcrypt_rng_device_count++;
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long) zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}

void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long) zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
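
/*
 * Usage note (illustrative): once a suitable card is available,
 * zcrypt_rng_device_add() registers the "zcrypt" hwrng and its output
 * becomes readable through /dev/hwrng. Loading the module with
 * hwrng_seed=0 registers the device with quality 0, so the kernel will
 * not use it for automatic entropy seeding.
 */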

int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 1, 1,
					 DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}

void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}

/**
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();
	return 0;

out:
	return rc;
}

/**
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_debug_exit();
}

module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);