1 /* 2 * zcrypt 2.1.0 3 * 4 * Copyright IBM Corp. 2001, 2012 5 * Author(s): Robert Burroughs 6 * Eric Rossman (edrossma@us.ibm.com) 7 * Cornelia Huck <cornelia.huck@de.ibm.com> 8 * 9 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 10 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 11 * Ralph Wuerthner <rwuerthn@de.ibm.com> 12 * MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com> 13 * 14 * This program is free software; you can redistribute it and/or modify 15 * it under the terms of the GNU General Public License as published by 16 * the Free Software Foundation; either version 2, or (at your option) 17 * any later version. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 * 24 * You should have received a copy of the GNU General Public License 25 * along with this program; if not, write to the Free Software 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 27 */ 28 29 #include <linux/module.h> 30 #include <linux/init.h> 31 #include <linux/interrupt.h> 32 #include <linux/miscdevice.h> 33 #include <linux/fs.h> 34 #include <linux/proc_fs.h> 35 #include <linux/seq_file.h> 36 #include <linux/compat.h> 37 #include <linux/slab.h> 38 #include <linux/atomic.h> 39 #include <linux/uaccess.h> 40 #include <linux/hw_random.h> 41 #include <linux/debugfs.h> 42 #include <asm/debug.h> 43 44 #define CREATE_TRACE_POINTS 45 #include <asm/trace/zcrypt.h> 46 47 #include "zcrypt_api.h" 48 #include "zcrypt_debug.h" 49 50 #include "zcrypt_msgtype6.h" 51 #include "zcrypt_msgtype50.h" 52 53 /* 54 * Module description. 55 */ 56 MODULE_AUTHOR("IBM Corporation"); 57 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \ 58 "Copyright IBM Corp. 2001, 2012"); 59 MODULE_LICENSE("GPL"); 60 61 /* 62 * zcrypt tracepoint functions 63 */ 64 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req); 65 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep); 66 67 static int zcrypt_hwrng_seed = 1; 68 module_param_named(hwrng_seed, zcrypt_hwrng_seed, int, S_IRUSR|S_IRGRP); 69 MODULE_PARM_DESC(hwrng_seed, "Turn on/off hwrng auto seed, default is 1 (on)."); 70 71 DEFINE_SPINLOCK(zcrypt_list_lock); 72 LIST_HEAD(zcrypt_card_list); 73 int zcrypt_device_count; 74 75 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 76 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0); 77 78 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0); 79 EXPORT_SYMBOL(zcrypt_rescan_req); 80 81 static LIST_HEAD(zcrypt_ops_list); 82 83 /* Zcrypt related debug feature stuff. */ 84 debug_info_t *zcrypt_dbf_info; 85 86 /** 87 * Process a rescan of the transport layer. 88 * 89 * Returns 1, if the rescan has been processed, otherwise 0. 90 */ 91 static inline int zcrypt_process_rescan(void) 92 { 93 if (atomic_read(&zcrypt_rescan_req)) { 94 atomic_set(&zcrypt_rescan_req, 0); 95 atomic_inc(&zcrypt_rescan_count); 96 ap_bus_force_rescan(); 97 ZCRYPT_DBF(DBF_INFO, "rescan count=%07d\n", 98 atomic_inc_return(&zcrypt_rescan_count)); 99 return 1; 100 } 101 return 0; 102 } 103 104 void zcrypt_msgtype_register(struct zcrypt_ops *zops) 105 { 106 list_add_tail(&zops->list, &zcrypt_ops_list); 107 } 108 109 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops) 110 { 111 list_del_init(&zops->list); 112 } 113 114 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant) 115 { 116 struct zcrypt_ops *zops; 117 118 list_for_each_entry(zops, &zcrypt_ops_list, list) 119 if ((zops->variant == variant) && 120 (!strncmp(zops->name, name, sizeof(zops->name)))) 121 return zops; 122 return NULL; 123 } 124 EXPORT_SYMBOL(zcrypt_msgtype); 125 126 /** 127 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 128 * 129 * This function is not supported beyond zcrypt 1.3.1. 130 */ 131 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 132 size_t count, loff_t *f_pos) 133 { 134 return -EPERM; 135 } 136 137 /** 138 * zcrypt_write(): Not allowed. 139 * 140 * Write is is not allowed 141 */ 142 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 143 size_t count, loff_t *f_pos) 144 { 145 return -EPERM; 146 } 147 148 /** 149 * zcrypt_open(): Count number of users. 150 * 151 * Device open function to count number of users. 152 */ 153 static int zcrypt_open(struct inode *inode, struct file *filp) 154 { 155 atomic_inc(&zcrypt_open_count); 156 return nonseekable_open(inode, filp); 157 } 158 159 /** 160 * zcrypt_release(): Count number of users. 161 * 162 * Device close function to count number of users. 163 */ 164 static int zcrypt_release(struct inode *inode, struct file *filp) 165 { 166 atomic_dec(&zcrypt_open_count); 167 return 0; 168 } 169 170 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 171 struct zcrypt_queue *zq, 172 unsigned int weight) 173 { 174 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 175 return NULL; 176 zcrypt_queue_get(zq); 177 get_device(&zq->queue->ap_dev.device); 178 atomic_add(weight, &zc->load); 179 atomic_add(weight, &zq->load); 180 zq->request_count++; 181 return zq; 182 } 183 184 static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 185 struct zcrypt_queue *zq, 186 unsigned int weight) 187 { 188 struct module *mod = zq->queue->ap_dev.drv->driver.owner; 189 190 zq->request_count--; 191 atomic_sub(weight, &zc->load); 192 atomic_sub(weight, &zq->load); 193 put_device(&zq->queue->ap_dev.device); 194 zcrypt_queue_put(zq); 195 module_put(mod); 196 } 197 198 static inline bool zcrypt_card_compare(struct zcrypt_card *zc, 199 struct zcrypt_card *pref_zc, 200 unsigned weight, unsigned pref_weight) 201 { 202 if (!pref_zc) 203 return false; 204 weight += atomic_read(&zc->load); 205 pref_weight += atomic_read(&pref_zc->load); 206 if (weight == pref_weight) 207 return atomic_read(&zc->card->total_request_count) > 208 atomic_read(&pref_zc->card->total_request_count); 209 return weight > pref_weight; 210 } 211 212 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq, 213 struct zcrypt_queue *pref_zq, 214 unsigned weight, unsigned pref_weight) 215 { 216 if (!pref_zq) 217 return false; 218 weight += atomic_read(&zq->load); 219 pref_weight += atomic_read(&pref_zq->load); 220 if (weight == pref_weight) 221 return &zq->queue->total_request_count > 222 &pref_zq->queue->total_request_count; 223 return weight > pref_weight; 224 } 225 226 /* 227 * zcrypt ioctls. 228 */ 229 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 230 { 231 struct zcrypt_card *zc, *pref_zc; 232 struct zcrypt_queue *zq, *pref_zq; 233 unsigned int weight, pref_weight; 234 unsigned int func_code; 235 int qid = 0, rc = -ENODEV; 236 237 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 238 239 if (mex->outputdatalength < mex->inputdatalength) { 240 rc = -EINVAL; 241 goto out; 242 } 243 244 /* 245 * As long as outputdatalength is big enough, we can set the 246 * outputdatalength equal to the inputdatalength, since that is the 247 * number of bytes we will copy in any case 248 */ 249 mex->outputdatalength = mex->inputdatalength; 250 251 rc = get_rsa_modex_fc(mex, &func_code); 252 if (rc) 253 goto out; 254 255 pref_zc = NULL; 256 pref_zq = NULL; 257 spin_lock(&zcrypt_list_lock); 258 for_each_zcrypt_card(zc) { 259 /* Check for online accelarator and CCA cards */ 260 if (!zc->online || !(zc->card->functions & 0x18000000)) 261 continue; 262 /* Check for size limits */ 263 if (zc->min_mod_size > mex->inputdatalength || 264 zc->max_mod_size < mex->inputdatalength) 265 continue; 266 /* get weight index of the card device */ 267 weight = zc->speed_rating[func_code]; 268 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 269 continue; 270 for_each_zcrypt_queue(zq, zc) { 271 /* check if device is online and eligible */ 272 if (!zq->online || !zq->ops->rsa_modexpo) 273 continue; 274 if (zcrypt_queue_compare(zq, pref_zq, 275 weight, pref_weight)) 276 continue; 277 pref_zc = zc; 278 pref_zq = zq; 279 pref_weight = weight; 280 } 281 } 282 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 283 spin_unlock(&zcrypt_list_lock); 284 285 if (!pref_zq) { 286 rc = -ENODEV; 287 goto out; 288 } 289 290 qid = pref_zq->queue->qid; 291 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 292 293 spin_lock(&zcrypt_list_lock); 294 zcrypt_drop_queue(pref_zc, pref_zq, weight); 295 spin_unlock(&zcrypt_list_lock); 296 297 out: 298 trace_s390_zcrypt_rep(mex, func_code, rc, 299 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 300 return rc; 301 } 302 303 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 304 { 305 struct zcrypt_card *zc, *pref_zc; 306 struct zcrypt_queue *zq, *pref_zq; 307 unsigned int weight, pref_weight; 308 unsigned int func_code; 309 int qid = 0, rc = -ENODEV; 310 311 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 312 313 if (crt->outputdatalength < crt->inputdatalength) { 314 rc = -EINVAL; 315 goto out; 316 } 317 318 /* 319 * As long as outputdatalength is big enough, we can set the 320 * outputdatalength equal to the inputdatalength, since that is the 321 * number of bytes we will copy in any case 322 */ 323 crt->outputdatalength = crt->inputdatalength; 324 325 rc = get_rsa_crt_fc(crt, &func_code); 326 if (rc) 327 goto out; 328 329 pref_zc = NULL; 330 pref_zq = NULL; 331 spin_lock(&zcrypt_list_lock); 332 for_each_zcrypt_card(zc) { 333 /* Check for online accelarator and CCA cards */ 334 if (!zc->online || !(zc->card->functions & 0x18000000)) 335 continue; 336 /* Check for size limits */ 337 if (zc->min_mod_size > crt->inputdatalength || 338 zc->max_mod_size < crt->inputdatalength) 339 continue; 340 /* get weight index of the card device */ 341 weight = zc->speed_rating[func_code]; 342 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 343 continue; 344 for_each_zcrypt_queue(zq, zc) { 345 /* check if device is online and eligible */ 346 if (!zq->online || !zq->ops->rsa_modexpo_crt) 347 continue; 348 if (zcrypt_queue_compare(zq, pref_zq, 349 weight, pref_weight)) 350 continue; 351 pref_zc = zc; 352 pref_zq = zq; 353 pref_weight = weight; 354 } 355 } 356 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 357 spin_unlock(&zcrypt_list_lock); 358 359 if (!pref_zq) { 360 rc = -ENODEV; 361 goto out; 362 } 363 364 qid = pref_zq->queue->qid; 365 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 366 367 spin_lock(&zcrypt_list_lock); 368 zcrypt_drop_queue(pref_zc, pref_zq, weight); 369 spin_unlock(&zcrypt_list_lock); 370 371 out: 372 trace_s390_zcrypt_rep(crt, func_code, rc, 373 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 374 return rc; 375 } 376 377 long zcrypt_send_cprb(struct ica_xcRB *xcRB) 378 { 379 struct zcrypt_card *zc, *pref_zc; 380 struct zcrypt_queue *zq, *pref_zq; 381 struct ap_message ap_msg; 382 unsigned int weight, pref_weight; 383 unsigned int func_code; 384 unsigned short *domain; 385 int qid = 0, rc = -ENODEV; 386 387 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 388 389 rc = get_cprb_fc(xcRB, &ap_msg, &func_code, &domain); 390 if (rc) 391 goto out; 392 393 pref_zc = NULL; 394 pref_zq = NULL; 395 spin_lock(&zcrypt_list_lock); 396 for_each_zcrypt_card(zc) { 397 /* Check for online CCA cards */ 398 if (!zc->online || !(zc->card->functions & 0x10000000)) 399 continue; 400 /* Check for user selected CCA card */ 401 if (xcRB->user_defined != AUTOSELECT && 402 xcRB->user_defined != zc->card->id) 403 continue; 404 /* get weight index of the card device */ 405 weight = speed_idx_cca(func_code) * zc->speed_rating[SECKEY]; 406 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 407 continue; 408 for_each_zcrypt_queue(zq, zc) { 409 /* check if device is online and eligible */ 410 if (!zq->online || 411 !zq->ops->send_cprb || 412 ((*domain != (unsigned short) AUTOSELECT) && 413 (*domain != AP_QID_QUEUE(zq->queue->qid)))) 414 continue; 415 if (zcrypt_queue_compare(zq, pref_zq, 416 weight, pref_weight)) 417 continue; 418 pref_zc = zc; 419 pref_zq = zq; 420 pref_weight = weight; 421 } 422 } 423 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 424 spin_unlock(&zcrypt_list_lock); 425 426 if (!pref_zq) { 427 rc = -ENODEV; 428 goto out; 429 } 430 431 /* in case of auto select, provide the correct domain */ 432 qid = pref_zq->queue->qid; 433 if (*domain == (unsigned short) AUTOSELECT) 434 *domain = AP_QID_QUEUE(qid); 435 436 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 437 438 spin_lock(&zcrypt_list_lock); 439 zcrypt_drop_queue(pref_zc, pref_zq, weight); 440 spin_unlock(&zcrypt_list_lock); 441 442 out: 443 trace_s390_zcrypt_rep(xcRB, func_code, rc, 444 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 445 return rc; 446 } 447 EXPORT_SYMBOL(zcrypt_send_cprb); 448 449 static bool is_desired_ep11_card(unsigned int dev_id, 450 unsigned short target_num, 451 struct ep11_target_dev *targets) 452 { 453 while (target_num-- > 0) { 454 if (dev_id == targets->ap_id) 455 return true; 456 targets++; 457 } 458 return false; 459 } 460 461 static bool is_desired_ep11_queue(unsigned int dev_qid, 462 unsigned short target_num, 463 struct ep11_target_dev *targets) 464 { 465 while (target_num-- > 0) { 466 if (AP_MKQID(targets->ap_id, targets->dom_id) == dev_qid) 467 return true; 468 targets++; 469 } 470 return false; 471 } 472 473 static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb) 474 { 475 struct zcrypt_card *zc, *pref_zc; 476 struct zcrypt_queue *zq, *pref_zq; 477 struct ep11_target_dev *targets; 478 unsigned short target_num; 479 unsigned int weight, pref_weight; 480 unsigned int func_code; 481 struct ap_message ap_msg; 482 int qid = 0, rc = -ENODEV; 483 484 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 485 486 target_num = (unsigned short) xcrb->targets_num; 487 488 /* empty list indicates autoselect (all available targets) */ 489 targets = NULL; 490 if (target_num != 0) { 491 struct ep11_target_dev __user *uptr; 492 493 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL); 494 if (!targets) { 495 rc = -ENOMEM; 496 goto out; 497 } 498 499 uptr = (struct ep11_target_dev __force __user *) xcrb->targets; 500 if (copy_from_user(targets, uptr, 501 target_num * sizeof(*targets))) { 502 rc = -EFAULT; 503 goto out; 504 } 505 } 506 507 rc = get_ep11cprb_fc(xcrb, &ap_msg, &func_code); 508 if (rc) 509 goto out_free; 510 511 pref_zc = NULL; 512 pref_zq = NULL; 513 spin_lock(&zcrypt_list_lock); 514 for_each_zcrypt_card(zc) { 515 /* Check for online EP11 cards */ 516 if (!zc->online || !(zc->card->functions & 0x04000000)) 517 continue; 518 /* Check for user selected EP11 card */ 519 if (targets && 520 !is_desired_ep11_card(zc->card->id, target_num, targets)) 521 continue; 522 /* get weight index of the card device */ 523 weight = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY]; 524 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 525 continue; 526 for_each_zcrypt_queue(zq, zc) { 527 /* check if device is online and eligible */ 528 if (!zq->online || 529 !zq->ops->send_ep11_cprb || 530 (targets && 531 !is_desired_ep11_queue(zq->queue->qid, 532 target_num, targets))) 533 continue; 534 if (zcrypt_queue_compare(zq, pref_zq, 535 weight, pref_weight)) 536 continue; 537 pref_zc = zc; 538 pref_zq = zq; 539 pref_weight = weight; 540 } 541 } 542 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 543 spin_unlock(&zcrypt_list_lock); 544 545 if (!pref_zq) { 546 rc = -ENODEV; 547 goto out_free; 548 } 549 550 qid = pref_zq->queue->qid; 551 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 552 553 spin_lock(&zcrypt_list_lock); 554 zcrypt_drop_queue(pref_zc, pref_zq, weight); 555 spin_unlock(&zcrypt_list_lock); 556 557 out_free: 558 kfree(targets); 559 out: 560 trace_s390_zcrypt_rep(xcrb, func_code, rc, 561 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 562 return rc; 563 } 564 565 static long zcrypt_rng(char *buffer) 566 { 567 struct zcrypt_card *zc, *pref_zc; 568 struct zcrypt_queue *zq, *pref_zq; 569 unsigned int weight, pref_weight; 570 unsigned int func_code; 571 struct ap_message ap_msg; 572 unsigned int domain; 573 int qid = 0, rc = -ENODEV; 574 575 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 576 577 rc = get_rng_fc(&ap_msg, &func_code, &domain); 578 if (rc) 579 goto out; 580 581 pref_zc = NULL; 582 pref_zq = NULL; 583 spin_lock(&zcrypt_list_lock); 584 for_each_zcrypt_card(zc) { 585 /* Check for online CCA cards */ 586 if (!zc->online || !(zc->card->functions & 0x10000000)) 587 continue; 588 /* get weight index of the card device */ 589 weight = zc->speed_rating[func_code]; 590 if (zcrypt_card_compare(zc, pref_zc, weight, pref_weight)) 591 continue; 592 for_each_zcrypt_queue(zq, zc) { 593 /* check if device is online and eligible */ 594 if (!zq->online || !zq->ops->rng) 595 continue; 596 if (zcrypt_queue_compare(zq, pref_zq, 597 weight, pref_weight)) 598 continue; 599 pref_zc = zc; 600 pref_zq = zq; 601 pref_weight = weight; 602 } 603 } 604 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 605 spin_unlock(&zcrypt_list_lock); 606 607 if (!pref_zq) 608 return -ENODEV; 609 610 qid = pref_zq->queue->qid; 611 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 612 613 spin_lock(&zcrypt_list_lock); 614 zcrypt_drop_queue(pref_zc, pref_zq, weight); 615 spin_unlock(&zcrypt_list_lock); 616 617 out: 618 trace_s390_zcrypt_rep(buffer, func_code, rc, 619 AP_QID_CARD(qid), AP_QID_QUEUE(qid)); 620 return rc; 621 } 622 623 void zcrypt_device_status_mask(struct zcrypt_device_matrix *matrix) 624 { 625 struct zcrypt_card *zc; 626 struct zcrypt_queue *zq; 627 struct zcrypt_device_status *stat; 628 629 memset(matrix, 0, sizeof(*matrix)); 630 spin_lock(&zcrypt_list_lock); 631 for_each_zcrypt_card(zc) { 632 for_each_zcrypt_queue(zq, zc) { 633 stat = matrix->device; 634 stat += AP_QID_CARD(zq->queue->qid) * MAX_ZDEV_DOMAINS; 635 stat += AP_QID_QUEUE(zq->queue->qid); 636 stat->hwtype = zc->card->ap_dev.device_type; 637 stat->functions = zc->card->functions >> 26; 638 stat->qid = zq->queue->qid; 639 stat->online = zq->online ? 0x01 : 0x00; 640 } 641 } 642 spin_unlock(&zcrypt_list_lock); 643 } 644 EXPORT_SYMBOL(zcrypt_device_status_mask); 645 646 static void zcrypt_status_mask(char status[AP_DEVICES]) 647 { 648 struct zcrypt_card *zc; 649 struct zcrypt_queue *zq; 650 651 memset(status, 0, sizeof(char) * AP_DEVICES); 652 spin_lock(&zcrypt_list_lock); 653 for_each_zcrypt_card(zc) { 654 for_each_zcrypt_queue(zq, zc) { 655 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 656 continue; 657 status[AP_QID_CARD(zq->queue->qid)] = 658 zc->online ? zc->user_space_type : 0x0d; 659 } 660 } 661 spin_unlock(&zcrypt_list_lock); 662 } 663 664 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 665 { 666 struct zcrypt_card *zc; 667 struct zcrypt_queue *zq; 668 669 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 670 spin_lock(&zcrypt_list_lock); 671 local_bh_disable(); 672 for_each_zcrypt_card(zc) { 673 for_each_zcrypt_queue(zq, zc) { 674 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 675 continue; 676 spin_lock(&zq->queue->lock); 677 qdepth[AP_QID_CARD(zq->queue->qid)] = 678 zq->queue->pendingq_count + 679 zq->queue->requestq_count; 680 spin_unlock(&zq->queue->lock); 681 } 682 } 683 local_bh_enable(); 684 spin_unlock(&zcrypt_list_lock); 685 } 686 687 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 688 { 689 struct zcrypt_card *zc; 690 struct zcrypt_queue *zq; 691 692 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 693 spin_lock(&zcrypt_list_lock); 694 local_bh_disable(); 695 for_each_zcrypt_card(zc) { 696 for_each_zcrypt_queue(zq, zc) { 697 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 698 continue; 699 spin_lock(&zq->queue->lock); 700 reqcnt[AP_QID_CARD(zq->queue->qid)] = 701 zq->queue->total_request_count; 702 spin_unlock(&zq->queue->lock); 703 } 704 } 705 local_bh_enable(); 706 spin_unlock(&zcrypt_list_lock); 707 } 708 709 static int zcrypt_pendingq_count(void) 710 { 711 struct zcrypt_card *zc; 712 struct zcrypt_queue *zq; 713 int pendingq_count; 714 715 pendingq_count = 0; 716 spin_lock(&zcrypt_list_lock); 717 local_bh_disable(); 718 for_each_zcrypt_card(zc) { 719 for_each_zcrypt_queue(zq, zc) { 720 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 721 continue; 722 spin_lock(&zq->queue->lock); 723 pendingq_count += zq->queue->pendingq_count; 724 spin_unlock(&zq->queue->lock); 725 } 726 } 727 local_bh_enable(); 728 spin_unlock(&zcrypt_list_lock); 729 return pendingq_count; 730 } 731 732 static int zcrypt_requestq_count(void) 733 { 734 struct zcrypt_card *zc; 735 struct zcrypt_queue *zq; 736 int requestq_count; 737 738 requestq_count = 0; 739 spin_lock(&zcrypt_list_lock); 740 local_bh_disable(); 741 for_each_zcrypt_card(zc) { 742 for_each_zcrypt_queue(zq, zc) { 743 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 744 continue; 745 spin_lock(&zq->queue->lock); 746 requestq_count += zq->queue->requestq_count; 747 spin_unlock(&zq->queue->lock); 748 } 749 } 750 local_bh_enable(); 751 spin_unlock(&zcrypt_list_lock); 752 return requestq_count; 753 } 754 755 static int zcrypt_count_type(int type) 756 { 757 struct zcrypt_card *zc; 758 struct zcrypt_queue *zq; 759 int device_count; 760 761 device_count = 0; 762 spin_lock(&zcrypt_list_lock); 763 for_each_zcrypt_card(zc) { 764 if (zc->card->id != type) 765 continue; 766 for_each_zcrypt_queue(zq, zc) { 767 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 768 continue; 769 device_count++; 770 } 771 } 772 spin_unlock(&zcrypt_list_lock); 773 return device_count; 774 } 775 776 /** 777 * zcrypt_ica_status(): Old, depracted combi status call. 778 * 779 * Old, deprecated combi status call. 780 */ 781 static long zcrypt_ica_status(struct file *filp, unsigned long arg) 782 { 783 struct ica_z90_status *pstat; 784 int ret; 785 786 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); 787 if (!pstat) 788 return -ENOMEM; 789 pstat->totalcount = zcrypt_device_count; 790 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); 791 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); 792 pstat->requestqWaitCount = zcrypt_requestq_count(); 793 pstat->pendingqWaitCount = zcrypt_pendingq_count(); 794 pstat->totalOpenCount = atomic_read(&zcrypt_open_count); 795 pstat->cryptoDomain = ap_domain_index; 796 zcrypt_status_mask(pstat->status); 797 zcrypt_qdepth_mask(pstat->qdepth); 798 ret = 0; 799 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) 800 ret = -EFAULT; 801 kfree(pstat); 802 return ret; 803 } 804 805 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 806 unsigned long arg) 807 { 808 int rc; 809 810 switch (cmd) { 811 case ICARSAMODEXPO: { 812 struct ica_rsa_modexpo __user *umex = (void __user *) arg; 813 struct ica_rsa_modexpo mex; 814 if (copy_from_user(&mex, umex, sizeof(mex))) 815 return -EFAULT; 816 do { 817 rc = zcrypt_rsa_modexpo(&mex); 818 } while (rc == -EAGAIN); 819 /* on failure: retry once again after a requested rescan */ 820 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 821 do { 822 rc = zcrypt_rsa_modexpo(&mex); 823 } while (rc == -EAGAIN); 824 if (rc) { 825 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSAMODEXPO rc=%d\n", rc); 826 return rc; 827 } 828 return put_user(mex.outputdatalength, &umex->outputdatalength); 829 } 830 case ICARSACRT: { 831 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 832 struct ica_rsa_modexpo_crt crt; 833 if (copy_from_user(&crt, ucrt, sizeof(crt))) 834 return -EFAULT; 835 do { 836 rc = zcrypt_rsa_crt(&crt); 837 } while (rc == -EAGAIN); 838 /* on failure: retry once again after a requested rescan */ 839 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 840 do { 841 rc = zcrypt_rsa_crt(&crt); 842 } while (rc == -EAGAIN); 843 if (rc) { 844 ZCRYPT_DBF(DBF_DEBUG, "ioctl ICARSACRT rc=%d\n", rc); 845 return rc; 846 } 847 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 848 } 849 case ZSECSENDCPRB: { 850 struct ica_xcRB __user *uxcRB = (void __user *) arg; 851 struct ica_xcRB xcRB; 852 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 853 return -EFAULT; 854 do { 855 rc = zcrypt_send_cprb(&xcRB); 856 } while (rc == -EAGAIN); 857 /* on failure: retry once again after a requested rescan */ 858 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 859 do { 860 rc = zcrypt_send_cprb(&xcRB); 861 } while (rc == -EAGAIN); 862 if (rc) 863 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDCPRB rc=%d\n", rc); 864 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 865 return -EFAULT; 866 return rc; 867 } 868 case ZSENDEP11CPRB: { 869 struct ep11_urb __user *uxcrb = (void __user *)arg; 870 struct ep11_urb xcrb; 871 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb))) 872 return -EFAULT; 873 do { 874 rc = zcrypt_send_ep11_cprb(&xcrb); 875 } while (rc == -EAGAIN); 876 /* on failure: retry once again after a requested rescan */ 877 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 878 do { 879 rc = zcrypt_send_ep11_cprb(&xcrb); 880 } while (rc == -EAGAIN); 881 if (rc) 882 ZCRYPT_DBF(DBF_DEBUG, "ioctl ZSENDEP11CPRB rc=%d\n", rc); 883 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb))) 884 return -EFAULT; 885 return rc; 886 } 887 case ZDEVICESTATUS: { 888 struct zcrypt_device_matrix *device_status; 889 890 device_status = kzalloc(sizeof(struct zcrypt_device_matrix), 891 GFP_KERNEL); 892 if (!device_status) 893 return -ENOMEM; 894 895 zcrypt_device_status_mask(device_status); 896 897 if (copy_to_user((char __user *) arg, device_status, 898 sizeof(struct zcrypt_device_matrix))) { 899 kfree(device_status); 900 return -EFAULT; 901 } 902 903 kfree(device_status); 904 return 0; 905 } 906 case Z90STAT_STATUS_MASK: { 907 char status[AP_DEVICES]; 908 zcrypt_status_mask(status); 909 if (copy_to_user((char __user *) arg, status, 910 sizeof(char) * AP_DEVICES)) 911 return -EFAULT; 912 return 0; 913 } 914 case Z90STAT_QDEPTH_MASK: { 915 char qdepth[AP_DEVICES]; 916 zcrypt_qdepth_mask(qdepth); 917 if (copy_to_user((char __user *) arg, qdepth, 918 sizeof(char) * AP_DEVICES)) 919 return -EFAULT; 920 return 0; 921 } 922 case Z90STAT_PERDEV_REQCNT: { 923 int reqcnt[AP_DEVICES]; 924 zcrypt_perdev_reqcnt(reqcnt); 925 if (copy_to_user((int __user *) arg, reqcnt, 926 sizeof(int) * AP_DEVICES)) 927 return -EFAULT; 928 return 0; 929 } 930 case Z90STAT_REQUESTQ_COUNT: 931 return put_user(zcrypt_requestq_count(), (int __user *) arg); 932 case Z90STAT_PENDINGQ_COUNT: 933 return put_user(zcrypt_pendingq_count(), (int __user *) arg); 934 case Z90STAT_TOTALOPEN_COUNT: 935 return put_user(atomic_read(&zcrypt_open_count), 936 (int __user *) arg); 937 case Z90STAT_DOMAIN_INDEX: 938 return put_user(ap_domain_index, (int __user *) arg); 939 /* 940 * Deprecated ioctls. Don't add another device count ioctl, 941 * you can count them yourself in the user space with the 942 * output of the Z90STAT_STATUS_MASK ioctl. 943 */ 944 case ICAZ90STATUS: 945 return zcrypt_ica_status(filp, arg); 946 case Z90STAT_TOTALCOUNT: 947 return put_user(zcrypt_device_count, (int __user *) arg); 948 case Z90STAT_PCICACOUNT: 949 return put_user(zcrypt_count_type(ZCRYPT_PCICA), 950 (int __user *) arg); 951 case Z90STAT_PCICCCOUNT: 952 return put_user(zcrypt_count_type(ZCRYPT_PCICC), 953 (int __user *) arg); 954 case Z90STAT_PCIXCCMCL2COUNT: 955 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), 956 (int __user *) arg); 957 case Z90STAT_PCIXCCMCL3COUNT: 958 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 959 (int __user *) arg); 960 case Z90STAT_PCIXCCCOUNT: 961 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + 962 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 963 (int __user *) arg); 964 case Z90STAT_CEX2CCOUNT: 965 return put_user(zcrypt_count_type(ZCRYPT_CEX2C), 966 (int __user *) arg); 967 case Z90STAT_CEX2ACOUNT: 968 return put_user(zcrypt_count_type(ZCRYPT_CEX2A), 969 (int __user *) arg); 970 default: 971 /* unknown ioctl number */ 972 return -ENOIOCTLCMD; 973 } 974 } 975 976 #ifdef CONFIG_COMPAT 977 /* 978 * ioctl32 conversion routines 979 */ 980 struct compat_ica_rsa_modexpo { 981 compat_uptr_t inputdata; 982 unsigned int inputdatalength; 983 compat_uptr_t outputdata; 984 unsigned int outputdatalength; 985 compat_uptr_t b_key; 986 compat_uptr_t n_modulus; 987 }; 988 989 static long trans_modexpo32(struct file *filp, unsigned int cmd, 990 unsigned long arg) 991 { 992 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 993 struct compat_ica_rsa_modexpo mex32; 994 struct ica_rsa_modexpo mex64; 995 long rc; 996 997 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 998 return -EFAULT; 999 mex64.inputdata = compat_ptr(mex32.inputdata); 1000 mex64.inputdatalength = mex32.inputdatalength; 1001 mex64.outputdata = compat_ptr(mex32.outputdata); 1002 mex64.outputdatalength = mex32.outputdatalength; 1003 mex64.b_key = compat_ptr(mex32.b_key); 1004 mex64.n_modulus = compat_ptr(mex32.n_modulus); 1005 do { 1006 rc = zcrypt_rsa_modexpo(&mex64); 1007 } while (rc == -EAGAIN); 1008 /* on failure: retry once again after a requested rescan */ 1009 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1010 do { 1011 rc = zcrypt_rsa_modexpo(&mex64); 1012 } while (rc == -EAGAIN); 1013 if (rc) 1014 return rc; 1015 return put_user(mex64.outputdatalength, 1016 &umex32->outputdatalength); 1017 } 1018 1019 struct compat_ica_rsa_modexpo_crt { 1020 compat_uptr_t inputdata; 1021 unsigned int inputdatalength; 1022 compat_uptr_t outputdata; 1023 unsigned int outputdatalength; 1024 compat_uptr_t bp_key; 1025 compat_uptr_t bq_key; 1026 compat_uptr_t np_prime; 1027 compat_uptr_t nq_prime; 1028 compat_uptr_t u_mult_inv; 1029 }; 1030 1031 static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, 1032 unsigned long arg) 1033 { 1034 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 1035 struct compat_ica_rsa_modexpo_crt crt32; 1036 struct ica_rsa_modexpo_crt crt64; 1037 long rc; 1038 1039 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 1040 return -EFAULT; 1041 crt64.inputdata = compat_ptr(crt32.inputdata); 1042 crt64.inputdatalength = crt32.inputdatalength; 1043 crt64.outputdata= compat_ptr(crt32.outputdata); 1044 crt64.outputdatalength = crt32.outputdatalength; 1045 crt64.bp_key = compat_ptr(crt32.bp_key); 1046 crt64.bq_key = compat_ptr(crt32.bq_key); 1047 crt64.np_prime = compat_ptr(crt32.np_prime); 1048 crt64.nq_prime = compat_ptr(crt32.nq_prime); 1049 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 1050 do { 1051 rc = zcrypt_rsa_crt(&crt64); 1052 } while (rc == -EAGAIN); 1053 /* on failure: retry once again after a requested rescan */ 1054 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1055 do { 1056 rc = zcrypt_rsa_crt(&crt64); 1057 } while (rc == -EAGAIN); 1058 if (rc) 1059 return rc; 1060 return put_user(crt64.outputdatalength, 1061 &ucrt32->outputdatalength); 1062 } 1063 1064 struct compat_ica_xcRB { 1065 unsigned short agent_ID; 1066 unsigned int user_defined; 1067 unsigned short request_ID; 1068 unsigned int request_control_blk_length; 1069 unsigned char padding1[16 - sizeof (compat_uptr_t)]; 1070 compat_uptr_t request_control_blk_addr; 1071 unsigned int request_data_length; 1072 char padding2[16 - sizeof (compat_uptr_t)]; 1073 compat_uptr_t request_data_address; 1074 unsigned int reply_control_blk_length; 1075 char padding3[16 - sizeof (compat_uptr_t)]; 1076 compat_uptr_t reply_control_blk_addr; 1077 unsigned int reply_data_length; 1078 char padding4[16 - sizeof (compat_uptr_t)]; 1079 compat_uptr_t reply_data_addr; 1080 unsigned short priority_window; 1081 unsigned int status; 1082 } __attribute__((packed)); 1083 1084 static long trans_xcRB32(struct file *filp, unsigned int cmd, 1085 unsigned long arg) 1086 { 1087 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 1088 struct compat_ica_xcRB xcRB32; 1089 struct ica_xcRB xcRB64; 1090 long rc; 1091 1092 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 1093 return -EFAULT; 1094 xcRB64.agent_ID = xcRB32.agent_ID; 1095 xcRB64.user_defined = xcRB32.user_defined; 1096 xcRB64.request_ID = xcRB32.request_ID; 1097 xcRB64.request_control_blk_length = 1098 xcRB32.request_control_blk_length; 1099 xcRB64.request_control_blk_addr = 1100 compat_ptr(xcRB32.request_control_blk_addr); 1101 xcRB64.request_data_length = 1102 xcRB32.request_data_length; 1103 xcRB64.request_data_address = 1104 compat_ptr(xcRB32.request_data_address); 1105 xcRB64.reply_control_blk_length = 1106 xcRB32.reply_control_blk_length; 1107 xcRB64.reply_control_blk_addr = 1108 compat_ptr(xcRB32.reply_control_blk_addr); 1109 xcRB64.reply_data_length = xcRB32.reply_data_length; 1110 xcRB64.reply_data_addr = 1111 compat_ptr(xcRB32.reply_data_addr); 1112 xcRB64.priority_window = xcRB32.priority_window; 1113 xcRB64.status = xcRB32.status; 1114 do { 1115 rc = zcrypt_send_cprb(&xcRB64); 1116 } while (rc == -EAGAIN); 1117 /* on failure: retry once again after a requested rescan */ 1118 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1119 do { 1120 rc = zcrypt_send_cprb(&xcRB64); 1121 } while (rc == -EAGAIN); 1122 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 1123 xcRB32.reply_data_length = xcRB64.reply_data_length; 1124 xcRB32.status = xcRB64.status; 1125 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 1126 return -EFAULT; 1127 return rc; 1128 } 1129 1130 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 1131 unsigned long arg) 1132 { 1133 if (cmd == ICARSAMODEXPO) 1134 return trans_modexpo32(filp, cmd, arg); 1135 if (cmd == ICARSACRT) 1136 return trans_modexpo_crt32(filp, cmd, arg); 1137 if (cmd == ZSECSENDCPRB) 1138 return trans_xcRB32(filp, cmd, arg); 1139 return zcrypt_unlocked_ioctl(filp, cmd, arg); 1140 } 1141 #endif 1142 1143 /* 1144 * Misc device file operations. 1145 */ 1146 static const struct file_operations zcrypt_fops = { 1147 .owner = THIS_MODULE, 1148 .read = zcrypt_read, 1149 .write = zcrypt_write, 1150 .unlocked_ioctl = zcrypt_unlocked_ioctl, 1151 #ifdef CONFIG_COMPAT 1152 .compat_ioctl = zcrypt_compat_ioctl, 1153 #endif 1154 .open = zcrypt_open, 1155 .release = zcrypt_release, 1156 .llseek = no_llseek, 1157 }; 1158 1159 /* 1160 * Misc device. 1161 */ 1162 static struct miscdevice zcrypt_misc_device = { 1163 .minor = MISC_DYNAMIC_MINOR, 1164 .name = "z90crypt", 1165 .fops = &zcrypt_fops, 1166 }; 1167 1168 /* 1169 * Deprecated /proc entry support. 1170 */ 1171 static struct proc_dir_entry *zcrypt_entry; 1172 1173 static void sprintcl(struct seq_file *m, unsigned char *addr, unsigned int len) 1174 { 1175 int i; 1176 1177 for (i = 0; i < len; i++) 1178 seq_printf(m, "%01x", (unsigned int) addr[i]); 1179 seq_putc(m, ' '); 1180 } 1181 1182 static void sprintrw(struct seq_file *m, unsigned char *addr, unsigned int len) 1183 { 1184 int inl, c, cx; 1185 1186 seq_printf(m, " "); 1187 inl = 0; 1188 for (c = 0; c < (len / 16); c++) { 1189 sprintcl(m, addr+inl, 16); 1190 inl += 16; 1191 } 1192 cx = len%16; 1193 if (cx) { 1194 sprintcl(m, addr+inl, cx); 1195 inl += cx; 1196 } 1197 seq_putc(m, '\n'); 1198 } 1199 1200 static void sprinthx(unsigned char *title, struct seq_file *m, 1201 unsigned char *addr, unsigned int len) 1202 { 1203 int inl, r, rx; 1204 1205 seq_printf(m, "\n%s\n", title); 1206 inl = 0; 1207 for (r = 0; r < (len / 64); r++) { 1208 sprintrw(m, addr+inl, 64); 1209 inl += 64; 1210 } 1211 rx = len % 64; 1212 if (rx) { 1213 sprintrw(m, addr+inl, rx); 1214 inl += rx; 1215 } 1216 seq_putc(m, '\n'); 1217 } 1218 1219 static void sprinthx4(unsigned char *title, struct seq_file *m, 1220 unsigned int *array, unsigned int len) 1221 { 1222 seq_printf(m, "\n%s\n", title); 1223 seq_hex_dump(m, " ", DUMP_PREFIX_NONE, 32, 4, array, len, false); 1224 seq_putc(m, '\n'); 1225 } 1226 1227 static int zcrypt_proc_show(struct seq_file *m, void *v) 1228 { 1229 char workarea[sizeof(int) * AP_DEVICES]; 1230 1231 seq_printf(m, "\nzcrypt version: %d.%d.%d\n", 1232 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 1233 seq_printf(m, "Cryptographic domain: %d\n", ap_domain_index); 1234 seq_printf(m, "Total device count: %d\n", zcrypt_device_count); 1235 seq_printf(m, "PCICA count: %d\n", zcrypt_count_type(ZCRYPT_PCICA)); 1236 seq_printf(m, "PCICC count: %d\n", zcrypt_count_type(ZCRYPT_PCICC)); 1237 seq_printf(m, "PCIXCC MCL2 count: %d\n", 1238 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 1239 seq_printf(m, "PCIXCC MCL3 count: %d\n", 1240 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); 1241 seq_printf(m, "CEX2C count: %d\n", zcrypt_count_type(ZCRYPT_CEX2C)); 1242 seq_printf(m, "CEX2A count: %d\n", zcrypt_count_type(ZCRYPT_CEX2A)); 1243 seq_printf(m, "CEX3C count: %d\n", zcrypt_count_type(ZCRYPT_CEX3C)); 1244 seq_printf(m, "CEX3A count: %d\n", zcrypt_count_type(ZCRYPT_CEX3A)); 1245 seq_printf(m, "requestq count: %d\n", zcrypt_requestq_count()); 1246 seq_printf(m, "pendingq count: %d\n", zcrypt_pendingq_count()); 1247 seq_printf(m, "Total open handles: %d\n\n", 1248 atomic_read(&zcrypt_open_count)); 1249 zcrypt_status_mask(workarea); 1250 sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1251 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A 7=CEX3C 8=CEX3A", 1252 m, workarea, AP_DEVICES); 1253 zcrypt_qdepth_mask(workarea); 1254 sprinthx("Waiting work element counts", m, workarea, AP_DEVICES); 1255 zcrypt_perdev_reqcnt((int *) workarea); 1256 sprinthx4("Per-device successfully completed request counts", 1257 m, (unsigned int *) workarea, AP_DEVICES); 1258 return 0; 1259 } 1260 1261 static int zcrypt_proc_open(struct inode *inode, struct file *file) 1262 { 1263 return single_open(file, zcrypt_proc_show, NULL); 1264 } 1265 1266 static void zcrypt_disable_card(int index) 1267 { 1268 struct zcrypt_card *zc; 1269 struct zcrypt_queue *zq; 1270 1271 spin_lock(&zcrypt_list_lock); 1272 for_each_zcrypt_card(zc) { 1273 for_each_zcrypt_queue(zq, zc) { 1274 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1275 continue; 1276 zq->online = 0; 1277 ap_flush_queue(zq->queue); 1278 } 1279 } 1280 spin_unlock(&zcrypt_list_lock); 1281 } 1282 1283 static void zcrypt_enable_card(int index) 1284 { 1285 struct zcrypt_card *zc; 1286 struct zcrypt_queue *zq; 1287 1288 spin_lock(&zcrypt_list_lock); 1289 for_each_zcrypt_card(zc) { 1290 for_each_zcrypt_queue(zq, zc) { 1291 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index) 1292 continue; 1293 zq->online = 1; 1294 ap_flush_queue(zq->queue); 1295 } 1296 } 1297 spin_unlock(&zcrypt_list_lock); 1298 } 1299 1300 static ssize_t zcrypt_proc_write(struct file *file, const char __user *buffer, 1301 size_t count, loff_t *pos) 1302 { 1303 unsigned char *lbuf, *ptr; 1304 size_t local_count; 1305 int j; 1306 1307 if (count <= 0) 1308 return 0; 1309 1310 #define LBUFSIZE 1200UL 1311 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1312 if (!lbuf) 1313 return 0; 1314 1315 local_count = min(LBUFSIZE - 1, count); 1316 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1317 kfree(lbuf); 1318 return -EFAULT; 1319 } 1320 lbuf[local_count] = '\0'; 1321 1322 ptr = strstr(lbuf, "Online devices"); 1323 if (!ptr) 1324 goto out; 1325 ptr = strstr(ptr, "\n"); 1326 if (!ptr) 1327 goto out; 1328 ptr++; 1329 1330 if (strstr(ptr, "Waiting work element counts") == NULL) 1331 goto out; 1332 1333 for (j = 0; j < 64 && *ptr; ptr++) { 1334 /* 1335 * '0' for no device, '1' for PCICA, '2' for PCICC, 1336 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1337 * '5' for CEX2C and '6' for CEX2A' 1338 * '7' for CEX3C and '8' for CEX3A 1339 */ 1340 if (*ptr >= '0' && *ptr <= '8') 1341 j++; 1342 else if (*ptr == 'd' || *ptr == 'D') 1343 zcrypt_disable_card(j++); 1344 else if (*ptr == 'e' || *ptr == 'E') 1345 zcrypt_enable_card(j++); 1346 else if (*ptr != ' ' && *ptr != '\t') 1347 break; 1348 } 1349 out: 1350 kfree(lbuf); 1351 return count; 1352 } 1353 1354 static const struct file_operations zcrypt_proc_fops = { 1355 .owner = THIS_MODULE, 1356 .open = zcrypt_proc_open, 1357 .read = seq_read, 1358 .llseek = seq_lseek, 1359 .release = single_release, 1360 .write = zcrypt_proc_write, 1361 }; 1362 1363 static int zcrypt_rng_device_count; 1364 static u32 *zcrypt_rng_buffer; 1365 static int zcrypt_rng_buffer_index; 1366 static DEFINE_MUTEX(zcrypt_rng_mutex); 1367 1368 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1369 { 1370 int rc; 1371 1372 /* 1373 * We don't need locking here because the RNG API guarantees serialized 1374 * read method calls. 1375 */ 1376 if (zcrypt_rng_buffer_index == 0) { 1377 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1378 /* on failure: retry once again after a requested rescan */ 1379 if ((rc == -ENODEV) && (zcrypt_process_rescan())) 1380 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1381 if (rc < 0) 1382 return -EIO; 1383 zcrypt_rng_buffer_index = rc / sizeof *data; 1384 } 1385 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1386 return sizeof *data; 1387 } 1388 1389 static struct hwrng zcrypt_rng_dev = { 1390 .name = "zcrypt", 1391 .data_read = zcrypt_rng_data_read, 1392 .quality = 990, 1393 }; 1394 1395 int zcrypt_rng_device_add(void) 1396 { 1397 int rc = 0; 1398 1399 mutex_lock(&zcrypt_rng_mutex); 1400 if (zcrypt_rng_device_count == 0) { 1401 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 1402 if (!zcrypt_rng_buffer) { 1403 rc = -ENOMEM; 1404 goto out; 1405 } 1406 zcrypt_rng_buffer_index = 0; 1407 if (!zcrypt_hwrng_seed) 1408 zcrypt_rng_dev.quality = 0; 1409 rc = hwrng_register(&zcrypt_rng_dev); 1410 if (rc) 1411 goto out_free; 1412 zcrypt_rng_device_count = 1; 1413 } else 1414 zcrypt_rng_device_count++; 1415 mutex_unlock(&zcrypt_rng_mutex); 1416 return 0; 1417 1418 out_free: 1419 free_page((unsigned long) zcrypt_rng_buffer); 1420 out: 1421 mutex_unlock(&zcrypt_rng_mutex); 1422 return rc; 1423 } 1424 1425 void zcrypt_rng_device_remove(void) 1426 { 1427 mutex_lock(&zcrypt_rng_mutex); 1428 zcrypt_rng_device_count--; 1429 if (zcrypt_rng_device_count == 0) { 1430 hwrng_unregister(&zcrypt_rng_dev); 1431 free_page((unsigned long) zcrypt_rng_buffer); 1432 } 1433 mutex_unlock(&zcrypt_rng_mutex); 1434 } 1435 1436 int __init zcrypt_debug_init(void) 1437 { 1438 zcrypt_dbf_info = debug_register("zcrypt", 1, 1, 1439 DBF_MAX_SPRINTF_ARGS * sizeof(long)); 1440 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view); 1441 debug_set_level(zcrypt_dbf_info, DBF_ERR); 1442 1443 return 0; 1444 } 1445 1446 void zcrypt_debug_exit(void) 1447 { 1448 debug_unregister(zcrypt_dbf_info); 1449 } 1450 1451 /** 1452 * zcrypt_api_init(): Module initialization. 1453 * 1454 * The module initialization code. 1455 */ 1456 int __init zcrypt_api_init(void) 1457 { 1458 int rc; 1459 1460 rc = zcrypt_debug_init(); 1461 if (rc) 1462 goto out; 1463 1464 atomic_set(&zcrypt_rescan_req, 0); 1465 1466 /* Register the request sprayer. */ 1467 rc = misc_register(&zcrypt_misc_device); 1468 if (rc < 0) 1469 goto out; 1470 1471 /* Set up the proc file system */ 1472 zcrypt_entry = proc_create("driver/z90crypt", 0644, NULL, 1473 &zcrypt_proc_fops); 1474 if (!zcrypt_entry) { 1475 rc = -ENOMEM; 1476 goto out_misc; 1477 } 1478 1479 zcrypt_msgtype6_init(); 1480 zcrypt_msgtype50_init(); 1481 return 0; 1482 1483 out_misc: 1484 misc_deregister(&zcrypt_misc_device); 1485 out: 1486 return rc; 1487 } 1488 1489 /** 1490 * zcrypt_api_exit(): Module termination. 1491 * 1492 * The module termination code. 1493 */ 1494 void __exit zcrypt_api_exit(void) 1495 { 1496 remove_proc_entry("driver/z90crypt", NULL); 1497 misc_deregister(&zcrypt_misc_device); 1498 zcrypt_msgtype6_exit(); 1499 zcrypt_msgtype50_exit(); 1500 zcrypt_debug_exit(); 1501 } 1502 1503 module_init(zcrypt_api_init); 1504 module_exit(zcrypt_api_exit); 1505