1 /* 2 * linux/drivers/s390/crypto/zcrypt_api.c 3 * 4 * zcrypt 2.1.0 5 * 6 * Copyright (C) 2001, 2006 IBM Corporation 7 * Author(s): Robert Burroughs 8 * Eric Rossman (edrossma@us.ibm.com) 9 * Cornelia Huck <cornelia.huck@de.ibm.com> 10 * 11 * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) 12 * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> 13 * Ralph Wuerthner <rwuerthn@de.ibm.com> 14 * 15 * This program is free software; you can redistribute it and/or modify 16 * it under the terms of the GNU General Public License as published by 17 * the Free Software Foundation; either version 2, or (at your option) 18 * any later version. 19 * 20 * This program is distributed in the hope that it will be useful, 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * GNU General Public License for more details. 24 * 25 * You should have received a copy of the GNU General Public License 26 * along with this program; if not, write to the Free Software 27 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 28 */ 29 30 #include <linux/module.h> 31 #include <linux/init.h> 32 #include <linux/interrupt.h> 33 #include <linux/miscdevice.h> 34 #include <linux/fs.h> 35 #include <linux/proc_fs.h> 36 #include <linux/compat.h> 37 #include <linux/smp_lock.h> 38 #include <asm/atomic.h> 39 #include <asm/uaccess.h> 40 #include <linux/hw_random.h> 41 42 #include "zcrypt_api.h" 43 44 /* 45 * Module description. 46 */ 47 MODULE_AUTHOR("IBM Corporation"); 48 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " 49 "Copyright 2001, 2006 IBM Corporation"); 50 MODULE_LICENSE("GPL"); 51 52 static DEFINE_SPINLOCK(zcrypt_device_lock); 53 static LIST_HEAD(zcrypt_device_list); 54 static int zcrypt_device_count = 0; 55 static atomic_t zcrypt_open_count = ATOMIC_INIT(0); 56 57 static int zcrypt_rng_device_add(void); 58 static void zcrypt_rng_device_remove(void); 59 60 /* 61 * Device attributes common for all crypto devices. 62 */ 63 static ssize_t zcrypt_type_show(struct device *dev, 64 struct device_attribute *attr, char *buf) 65 { 66 struct zcrypt_device *zdev = to_ap_dev(dev)->private; 67 return snprintf(buf, PAGE_SIZE, "%s\n", zdev->type_string); 68 } 69 70 static DEVICE_ATTR(type, 0444, zcrypt_type_show, NULL); 71 72 static ssize_t zcrypt_online_show(struct device *dev, 73 struct device_attribute *attr, char *buf) 74 { 75 struct zcrypt_device *zdev = to_ap_dev(dev)->private; 76 return snprintf(buf, PAGE_SIZE, "%d\n", zdev->online); 77 } 78 79 static ssize_t zcrypt_online_store(struct device *dev, 80 struct device_attribute *attr, 81 const char *buf, size_t count) 82 { 83 struct zcrypt_device *zdev = to_ap_dev(dev)->private; 84 int online; 85 86 if (sscanf(buf, "%d\n", &online) != 1 || online < 0 || online > 1) 87 return -EINVAL; 88 zdev->online = online; 89 if (!online) 90 ap_flush_queue(zdev->ap_dev); 91 return count; 92 } 93 94 static DEVICE_ATTR(online, 0644, zcrypt_online_show, zcrypt_online_store); 95 96 static struct attribute * zcrypt_device_attrs[] = { 97 &dev_attr_type.attr, 98 &dev_attr_online.attr, 99 NULL, 100 }; 101 102 static struct attribute_group zcrypt_device_attr_group = { 103 .attrs = zcrypt_device_attrs, 104 }; 105 106 /** 107 * __zcrypt_increase_preference(): Increase preference of a crypto device. 108 * @zdev: Pointer the crypto device 109 * 110 * Move the device towards the head of the device list. 111 * Need to be called while holding the zcrypt device list lock. 112 * Note: cards with speed_rating of 0 are kept at the end of the list. 113 */ 114 static void __zcrypt_increase_preference(struct zcrypt_device *zdev) 115 { 116 struct zcrypt_device *tmp; 117 struct list_head *l; 118 119 if (zdev->speed_rating == 0) 120 return; 121 for (l = zdev->list.prev; l != &zcrypt_device_list; l = l->prev) { 122 tmp = list_entry(l, struct zcrypt_device, list); 123 if ((tmp->request_count + 1) * tmp->speed_rating <= 124 (zdev->request_count + 1) * zdev->speed_rating && 125 tmp->speed_rating != 0) 126 break; 127 } 128 if (l == zdev->list.prev) 129 return; 130 /* Move zdev behind l */ 131 list_move(&zdev->list, l); 132 } 133 134 /** 135 * __zcrypt_decrease_preference(): Decrease preference of a crypto device. 136 * @zdev: Pointer to a crypto device. 137 * 138 * Move the device towards the tail of the device list. 139 * Need to be called while holding the zcrypt device list lock. 140 * Note: cards with speed_rating of 0 are kept at the end of the list. 141 */ 142 static void __zcrypt_decrease_preference(struct zcrypt_device *zdev) 143 { 144 struct zcrypt_device *tmp; 145 struct list_head *l; 146 147 if (zdev->speed_rating == 0) 148 return; 149 for (l = zdev->list.next; l != &zcrypt_device_list; l = l->next) { 150 tmp = list_entry(l, struct zcrypt_device, list); 151 if ((tmp->request_count + 1) * tmp->speed_rating > 152 (zdev->request_count + 1) * zdev->speed_rating || 153 tmp->speed_rating == 0) 154 break; 155 } 156 if (l == zdev->list.next) 157 return; 158 /* Move zdev before l */ 159 list_move_tail(&zdev->list, l); 160 } 161 162 static void zcrypt_device_release(struct kref *kref) 163 { 164 struct zcrypt_device *zdev = 165 container_of(kref, struct zcrypt_device, refcount); 166 zcrypt_device_free(zdev); 167 } 168 169 void zcrypt_device_get(struct zcrypt_device *zdev) 170 { 171 kref_get(&zdev->refcount); 172 } 173 EXPORT_SYMBOL(zcrypt_device_get); 174 175 int zcrypt_device_put(struct zcrypt_device *zdev) 176 { 177 return kref_put(&zdev->refcount, zcrypt_device_release); 178 } 179 EXPORT_SYMBOL(zcrypt_device_put); 180 181 struct zcrypt_device *zcrypt_device_alloc(size_t max_response_size) 182 { 183 struct zcrypt_device *zdev; 184 185 zdev = kzalloc(sizeof(struct zcrypt_device), GFP_KERNEL); 186 if (!zdev) 187 return NULL; 188 zdev->reply.message = kmalloc(max_response_size, GFP_KERNEL); 189 if (!zdev->reply.message) 190 goto out_free; 191 zdev->reply.length = max_response_size; 192 spin_lock_init(&zdev->lock); 193 INIT_LIST_HEAD(&zdev->list); 194 return zdev; 195 196 out_free: 197 kfree(zdev); 198 return NULL; 199 } 200 EXPORT_SYMBOL(zcrypt_device_alloc); 201 202 void zcrypt_device_free(struct zcrypt_device *zdev) 203 { 204 kfree(zdev->reply.message); 205 kfree(zdev); 206 } 207 EXPORT_SYMBOL(zcrypt_device_free); 208 209 /** 210 * zcrypt_device_register() - Register a crypto device. 211 * @zdev: Pointer to a crypto device 212 * 213 * Register a crypto device. Returns 0 if successful. 214 */ 215 int zcrypt_device_register(struct zcrypt_device *zdev) 216 { 217 int rc; 218 219 rc = sysfs_create_group(&zdev->ap_dev->device.kobj, 220 &zcrypt_device_attr_group); 221 if (rc) 222 goto out; 223 get_device(&zdev->ap_dev->device); 224 kref_init(&zdev->refcount); 225 spin_lock_bh(&zcrypt_device_lock); 226 zdev->online = 1; /* New devices are online by default. */ 227 list_add_tail(&zdev->list, &zcrypt_device_list); 228 __zcrypt_increase_preference(zdev); 229 zcrypt_device_count++; 230 spin_unlock_bh(&zcrypt_device_lock); 231 if (zdev->ops->rng) { 232 rc = zcrypt_rng_device_add(); 233 if (rc) 234 goto out_unregister; 235 } 236 return 0; 237 238 out_unregister: 239 spin_lock_bh(&zcrypt_device_lock); 240 zcrypt_device_count--; 241 list_del_init(&zdev->list); 242 spin_unlock_bh(&zcrypt_device_lock); 243 sysfs_remove_group(&zdev->ap_dev->device.kobj, 244 &zcrypt_device_attr_group); 245 put_device(&zdev->ap_dev->device); 246 zcrypt_device_put(zdev); 247 out: 248 return rc; 249 } 250 EXPORT_SYMBOL(zcrypt_device_register); 251 252 /** 253 * zcrypt_device_unregister(): Unregister a crypto device. 254 * @zdev: Pointer to crypto device 255 * 256 * Unregister a crypto device. 257 */ 258 void zcrypt_device_unregister(struct zcrypt_device *zdev) 259 { 260 if (zdev->ops->rng) 261 zcrypt_rng_device_remove(); 262 spin_lock_bh(&zcrypt_device_lock); 263 zcrypt_device_count--; 264 list_del_init(&zdev->list); 265 spin_unlock_bh(&zcrypt_device_lock); 266 sysfs_remove_group(&zdev->ap_dev->device.kobj, 267 &zcrypt_device_attr_group); 268 put_device(&zdev->ap_dev->device); 269 zcrypt_device_put(zdev); 270 } 271 EXPORT_SYMBOL(zcrypt_device_unregister); 272 273 /** 274 * zcrypt_read (): Not supported beyond zcrypt 1.3.1. 275 * 276 * This function is not supported beyond zcrypt 1.3.1. 277 */ 278 static ssize_t zcrypt_read(struct file *filp, char __user *buf, 279 size_t count, loff_t *f_pos) 280 { 281 return -EPERM; 282 } 283 284 /** 285 * zcrypt_write(): Not allowed. 286 * 287 * Write is is not allowed 288 */ 289 static ssize_t zcrypt_write(struct file *filp, const char __user *buf, 290 size_t count, loff_t *f_pos) 291 { 292 return -EPERM; 293 } 294 295 /** 296 * zcrypt_open(): Count number of users. 297 * 298 * Device open function to count number of users. 299 */ 300 static int zcrypt_open(struct inode *inode, struct file *filp) 301 { 302 lock_kernel(); 303 atomic_inc(&zcrypt_open_count); 304 unlock_kernel(); 305 return 0; 306 } 307 308 /** 309 * zcrypt_release(): Count number of users. 310 * 311 * Device close function to count number of users. 312 */ 313 static int zcrypt_release(struct inode *inode, struct file *filp) 314 { 315 atomic_dec(&zcrypt_open_count); 316 return 0; 317 } 318 319 /* 320 * zcrypt ioctls. 321 */ 322 static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex) 323 { 324 struct zcrypt_device *zdev; 325 int rc; 326 327 if (mex->outputdatalength < mex->inputdatalength) 328 return -EINVAL; 329 /* 330 * As long as outputdatalength is big enough, we can set the 331 * outputdatalength equal to the inputdatalength, since that is the 332 * number of bytes we will copy in any case 333 */ 334 mex->outputdatalength = mex->inputdatalength; 335 336 spin_lock_bh(&zcrypt_device_lock); 337 list_for_each_entry(zdev, &zcrypt_device_list, list) { 338 if (!zdev->online || 339 !zdev->ops->rsa_modexpo || 340 zdev->min_mod_size > mex->inputdatalength || 341 zdev->max_mod_size < mex->inputdatalength) 342 continue; 343 zcrypt_device_get(zdev); 344 get_device(&zdev->ap_dev->device); 345 zdev->request_count++; 346 __zcrypt_decrease_preference(zdev); 347 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 348 spin_unlock_bh(&zcrypt_device_lock); 349 rc = zdev->ops->rsa_modexpo(zdev, mex); 350 spin_lock_bh(&zcrypt_device_lock); 351 module_put(zdev->ap_dev->drv->driver.owner); 352 } 353 else 354 rc = -EAGAIN; 355 zdev->request_count--; 356 __zcrypt_increase_preference(zdev); 357 put_device(&zdev->ap_dev->device); 358 zcrypt_device_put(zdev); 359 spin_unlock_bh(&zcrypt_device_lock); 360 return rc; 361 } 362 spin_unlock_bh(&zcrypt_device_lock); 363 return -ENODEV; 364 } 365 366 static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt) 367 { 368 struct zcrypt_device *zdev; 369 unsigned long long z1, z2, z3; 370 int rc, copied; 371 372 if (crt->outputdatalength < crt->inputdatalength || 373 (crt->inputdatalength & 1)) 374 return -EINVAL; 375 /* 376 * As long as outputdatalength is big enough, we can set the 377 * outputdatalength equal to the inputdatalength, since that is the 378 * number of bytes we will copy in any case 379 */ 380 crt->outputdatalength = crt->inputdatalength; 381 382 copied = 0; 383 restart: 384 spin_lock_bh(&zcrypt_device_lock); 385 list_for_each_entry(zdev, &zcrypt_device_list, list) { 386 if (!zdev->online || 387 !zdev->ops->rsa_modexpo_crt || 388 zdev->min_mod_size > crt->inputdatalength || 389 zdev->max_mod_size < crt->inputdatalength) 390 continue; 391 if (zdev->short_crt && crt->inputdatalength > 240) { 392 /* 393 * Check inputdata for leading zeros for cards 394 * that can't handle np_prime, bp_key, or 395 * u_mult_inv > 128 bytes. 396 */ 397 if (copied == 0) { 398 int len; 399 spin_unlock_bh(&zcrypt_device_lock); 400 /* len is max 256 / 2 - 120 = 8 */ 401 len = crt->inputdatalength / 2 - 120; 402 z1 = z2 = z3 = 0; 403 if (copy_from_user(&z1, crt->np_prime, len) || 404 copy_from_user(&z2, crt->bp_key, len) || 405 copy_from_user(&z3, crt->u_mult_inv, len)) 406 return -EFAULT; 407 copied = 1; 408 /* 409 * We have to restart device lookup - 410 * the device list may have changed by now. 411 */ 412 goto restart; 413 } 414 if (z1 != 0ULL || z2 != 0ULL || z3 != 0ULL) 415 /* The device can't handle this request. */ 416 continue; 417 } 418 zcrypt_device_get(zdev); 419 get_device(&zdev->ap_dev->device); 420 zdev->request_count++; 421 __zcrypt_decrease_preference(zdev); 422 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 423 spin_unlock_bh(&zcrypt_device_lock); 424 rc = zdev->ops->rsa_modexpo_crt(zdev, crt); 425 spin_lock_bh(&zcrypt_device_lock); 426 module_put(zdev->ap_dev->drv->driver.owner); 427 } 428 else 429 rc = -EAGAIN; 430 zdev->request_count--; 431 __zcrypt_increase_preference(zdev); 432 put_device(&zdev->ap_dev->device); 433 zcrypt_device_put(zdev); 434 spin_unlock_bh(&zcrypt_device_lock); 435 return rc; 436 } 437 spin_unlock_bh(&zcrypt_device_lock); 438 return -ENODEV; 439 } 440 441 static long zcrypt_send_cprb(struct ica_xcRB *xcRB) 442 { 443 struct zcrypt_device *zdev; 444 int rc; 445 446 spin_lock_bh(&zcrypt_device_lock); 447 list_for_each_entry(zdev, &zcrypt_device_list, list) { 448 if (!zdev->online || !zdev->ops->send_cprb || 449 (xcRB->user_defined != AUTOSELECT && 450 AP_QID_DEVICE(zdev->ap_dev->qid) != xcRB->user_defined) 451 ) 452 continue; 453 zcrypt_device_get(zdev); 454 get_device(&zdev->ap_dev->device); 455 zdev->request_count++; 456 __zcrypt_decrease_preference(zdev); 457 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 458 spin_unlock_bh(&zcrypt_device_lock); 459 rc = zdev->ops->send_cprb(zdev, xcRB); 460 spin_lock_bh(&zcrypt_device_lock); 461 module_put(zdev->ap_dev->drv->driver.owner); 462 } 463 else 464 rc = -EAGAIN; 465 zdev->request_count--; 466 __zcrypt_increase_preference(zdev); 467 put_device(&zdev->ap_dev->device); 468 zcrypt_device_put(zdev); 469 spin_unlock_bh(&zcrypt_device_lock); 470 return rc; 471 } 472 spin_unlock_bh(&zcrypt_device_lock); 473 return -ENODEV; 474 } 475 476 static long zcrypt_rng(char *buffer) 477 { 478 struct zcrypt_device *zdev; 479 int rc; 480 481 spin_lock_bh(&zcrypt_device_lock); 482 list_for_each_entry(zdev, &zcrypt_device_list, list) { 483 if (!zdev->online || !zdev->ops->rng) 484 continue; 485 zcrypt_device_get(zdev); 486 get_device(&zdev->ap_dev->device); 487 zdev->request_count++; 488 __zcrypt_decrease_preference(zdev); 489 if (try_module_get(zdev->ap_dev->drv->driver.owner)) { 490 spin_unlock_bh(&zcrypt_device_lock); 491 rc = zdev->ops->rng(zdev, buffer); 492 spin_lock_bh(&zcrypt_device_lock); 493 module_put(zdev->ap_dev->drv->driver.owner); 494 } else 495 rc = -EAGAIN; 496 zdev->request_count--; 497 __zcrypt_increase_preference(zdev); 498 put_device(&zdev->ap_dev->device); 499 zcrypt_device_put(zdev); 500 spin_unlock_bh(&zcrypt_device_lock); 501 return rc; 502 } 503 spin_unlock_bh(&zcrypt_device_lock); 504 return -ENODEV; 505 } 506 507 static void zcrypt_status_mask(char status[AP_DEVICES]) 508 { 509 struct zcrypt_device *zdev; 510 511 memset(status, 0, sizeof(char) * AP_DEVICES); 512 spin_lock_bh(&zcrypt_device_lock); 513 list_for_each_entry(zdev, &zcrypt_device_list, list) 514 status[AP_QID_DEVICE(zdev->ap_dev->qid)] = 515 zdev->online ? zdev->user_space_type : 0x0d; 516 spin_unlock_bh(&zcrypt_device_lock); 517 } 518 519 static void zcrypt_qdepth_mask(char qdepth[AP_DEVICES]) 520 { 521 struct zcrypt_device *zdev; 522 523 memset(qdepth, 0, sizeof(char) * AP_DEVICES); 524 spin_lock_bh(&zcrypt_device_lock); 525 list_for_each_entry(zdev, &zcrypt_device_list, list) { 526 spin_lock(&zdev->ap_dev->lock); 527 qdepth[AP_QID_DEVICE(zdev->ap_dev->qid)] = 528 zdev->ap_dev->pendingq_count + 529 zdev->ap_dev->requestq_count; 530 spin_unlock(&zdev->ap_dev->lock); 531 } 532 spin_unlock_bh(&zcrypt_device_lock); 533 } 534 535 static void zcrypt_perdev_reqcnt(int reqcnt[AP_DEVICES]) 536 { 537 struct zcrypt_device *zdev; 538 539 memset(reqcnt, 0, sizeof(int) * AP_DEVICES); 540 spin_lock_bh(&zcrypt_device_lock); 541 list_for_each_entry(zdev, &zcrypt_device_list, list) { 542 spin_lock(&zdev->ap_dev->lock); 543 reqcnt[AP_QID_DEVICE(zdev->ap_dev->qid)] = 544 zdev->ap_dev->total_request_count; 545 spin_unlock(&zdev->ap_dev->lock); 546 } 547 spin_unlock_bh(&zcrypt_device_lock); 548 } 549 550 static int zcrypt_pendingq_count(void) 551 { 552 struct zcrypt_device *zdev; 553 int pendingq_count = 0; 554 555 spin_lock_bh(&zcrypt_device_lock); 556 list_for_each_entry(zdev, &zcrypt_device_list, list) { 557 spin_lock(&zdev->ap_dev->lock); 558 pendingq_count += zdev->ap_dev->pendingq_count; 559 spin_unlock(&zdev->ap_dev->lock); 560 } 561 spin_unlock_bh(&zcrypt_device_lock); 562 return pendingq_count; 563 } 564 565 static int zcrypt_requestq_count(void) 566 { 567 struct zcrypt_device *zdev; 568 int requestq_count = 0; 569 570 spin_lock_bh(&zcrypt_device_lock); 571 list_for_each_entry(zdev, &zcrypt_device_list, list) { 572 spin_lock(&zdev->ap_dev->lock); 573 requestq_count += zdev->ap_dev->requestq_count; 574 spin_unlock(&zdev->ap_dev->lock); 575 } 576 spin_unlock_bh(&zcrypt_device_lock); 577 return requestq_count; 578 } 579 580 static int zcrypt_count_type(int type) 581 { 582 struct zcrypt_device *zdev; 583 int device_count = 0; 584 585 spin_lock_bh(&zcrypt_device_lock); 586 list_for_each_entry(zdev, &zcrypt_device_list, list) 587 if (zdev->user_space_type == type) 588 device_count++; 589 spin_unlock_bh(&zcrypt_device_lock); 590 return device_count; 591 } 592 593 /** 594 * zcrypt_ica_status(): Old, depracted combi status call. 595 * 596 * Old, deprecated combi status call. 597 */ 598 static long zcrypt_ica_status(struct file *filp, unsigned long arg) 599 { 600 struct ica_z90_status *pstat; 601 int ret; 602 603 pstat = kzalloc(sizeof(*pstat), GFP_KERNEL); 604 if (!pstat) 605 return -ENOMEM; 606 pstat->totalcount = zcrypt_device_count; 607 pstat->leedslitecount = zcrypt_count_type(ZCRYPT_PCICA); 608 pstat->leeds2count = zcrypt_count_type(ZCRYPT_PCICC); 609 pstat->requestqWaitCount = zcrypt_requestq_count(); 610 pstat->pendingqWaitCount = zcrypt_pendingq_count(); 611 pstat->totalOpenCount = atomic_read(&zcrypt_open_count); 612 pstat->cryptoDomain = ap_domain_index; 613 zcrypt_status_mask(pstat->status); 614 zcrypt_qdepth_mask(pstat->qdepth); 615 ret = 0; 616 if (copy_to_user((void __user *) arg, pstat, sizeof(*pstat))) 617 ret = -EFAULT; 618 kfree(pstat); 619 return ret; 620 } 621 622 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, 623 unsigned long arg) 624 { 625 int rc; 626 627 switch (cmd) { 628 case ICARSAMODEXPO: { 629 struct ica_rsa_modexpo __user *umex = (void __user *) arg; 630 struct ica_rsa_modexpo mex; 631 if (copy_from_user(&mex, umex, sizeof(mex))) 632 return -EFAULT; 633 do { 634 rc = zcrypt_rsa_modexpo(&mex); 635 } while (rc == -EAGAIN); 636 if (rc) 637 return rc; 638 return put_user(mex.outputdatalength, &umex->outputdatalength); 639 } 640 case ICARSACRT: { 641 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *) arg; 642 struct ica_rsa_modexpo_crt crt; 643 if (copy_from_user(&crt, ucrt, sizeof(crt))) 644 return -EFAULT; 645 do { 646 rc = zcrypt_rsa_crt(&crt); 647 } while (rc == -EAGAIN); 648 if (rc) 649 return rc; 650 return put_user(crt.outputdatalength, &ucrt->outputdatalength); 651 } 652 case ZSECSENDCPRB: { 653 struct ica_xcRB __user *uxcRB = (void __user *) arg; 654 struct ica_xcRB xcRB; 655 if (copy_from_user(&xcRB, uxcRB, sizeof(xcRB))) 656 return -EFAULT; 657 do { 658 rc = zcrypt_send_cprb(&xcRB); 659 } while (rc == -EAGAIN); 660 if (copy_to_user(uxcRB, &xcRB, sizeof(xcRB))) 661 return -EFAULT; 662 return rc; 663 } 664 case Z90STAT_STATUS_MASK: { 665 char status[AP_DEVICES]; 666 zcrypt_status_mask(status); 667 if (copy_to_user((char __user *) arg, status, 668 sizeof(char) * AP_DEVICES)) 669 return -EFAULT; 670 return 0; 671 } 672 case Z90STAT_QDEPTH_MASK: { 673 char qdepth[AP_DEVICES]; 674 zcrypt_qdepth_mask(qdepth); 675 if (copy_to_user((char __user *) arg, qdepth, 676 sizeof(char) * AP_DEVICES)) 677 return -EFAULT; 678 return 0; 679 } 680 case Z90STAT_PERDEV_REQCNT: { 681 int reqcnt[AP_DEVICES]; 682 zcrypt_perdev_reqcnt(reqcnt); 683 if (copy_to_user((int __user *) arg, reqcnt, 684 sizeof(int) * AP_DEVICES)) 685 return -EFAULT; 686 return 0; 687 } 688 case Z90STAT_REQUESTQ_COUNT: 689 return put_user(zcrypt_requestq_count(), (int __user *) arg); 690 case Z90STAT_PENDINGQ_COUNT: 691 return put_user(zcrypt_pendingq_count(), (int __user *) arg); 692 case Z90STAT_TOTALOPEN_COUNT: 693 return put_user(atomic_read(&zcrypt_open_count), 694 (int __user *) arg); 695 case Z90STAT_DOMAIN_INDEX: 696 return put_user(ap_domain_index, (int __user *) arg); 697 /* 698 * Deprecated ioctls. Don't add another device count ioctl, 699 * you can count them yourself in the user space with the 700 * output of the Z90STAT_STATUS_MASK ioctl. 701 */ 702 case ICAZ90STATUS: 703 return zcrypt_ica_status(filp, arg); 704 case Z90STAT_TOTALCOUNT: 705 return put_user(zcrypt_device_count, (int __user *) arg); 706 case Z90STAT_PCICACOUNT: 707 return put_user(zcrypt_count_type(ZCRYPT_PCICA), 708 (int __user *) arg); 709 case Z90STAT_PCICCCOUNT: 710 return put_user(zcrypt_count_type(ZCRYPT_PCICC), 711 (int __user *) arg); 712 case Z90STAT_PCIXCCMCL2COUNT: 713 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2), 714 (int __user *) arg); 715 case Z90STAT_PCIXCCMCL3COUNT: 716 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 717 (int __user *) arg); 718 case Z90STAT_PCIXCCCOUNT: 719 return put_user(zcrypt_count_type(ZCRYPT_PCIXCC_MCL2) + 720 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3), 721 (int __user *) arg); 722 case Z90STAT_CEX2CCOUNT: 723 return put_user(zcrypt_count_type(ZCRYPT_CEX2C), 724 (int __user *) arg); 725 case Z90STAT_CEX2ACOUNT: 726 return put_user(zcrypt_count_type(ZCRYPT_CEX2A), 727 (int __user *) arg); 728 default: 729 /* unknown ioctl number */ 730 return -ENOIOCTLCMD; 731 } 732 } 733 734 #ifdef CONFIG_COMPAT 735 /* 736 * ioctl32 conversion routines 737 */ 738 struct compat_ica_rsa_modexpo { 739 compat_uptr_t inputdata; 740 unsigned int inputdatalength; 741 compat_uptr_t outputdata; 742 unsigned int outputdatalength; 743 compat_uptr_t b_key; 744 compat_uptr_t n_modulus; 745 }; 746 747 static long trans_modexpo32(struct file *filp, unsigned int cmd, 748 unsigned long arg) 749 { 750 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg); 751 struct compat_ica_rsa_modexpo mex32; 752 struct ica_rsa_modexpo mex64; 753 long rc; 754 755 if (copy_from_user(&mex32, umex32, sizeof(mex32))) 756 return -EFAULT; 757 mex64.inputdata = compat_ptr(mex32.inputdata); 758 mex64.inputdatalength = mex32.inputdatalength; 759 mex64.outputdata = compat_ptr(mex32.outputdata); 760 mex64.outputdatalength = mex32.outputdatalength; 761 mex64.b_key = compat_ptr(mex32.b_key); 762 mex64.n_modulus = compat_ptr(mex32.n_modulus); 763 do { 764 rc = zcrypt_rsa_modexpo(&mex64); 765 } while (rc == -EAGAIN); 766 if (!rc) 767 rc = put_user(mex64.outputdatalength, 768 &umex32->outputdatalength); 769 return rc; 770 } 771 772 struct compat_ica_rsa_modexpo_crt { 773 compat_uptr_t inputdata; 774 unsigned int inputdatalength; 775 compat_uptr_t outputdata; 776 unsigned int outputdatalength; 777 compat_uptr_t bp_key; 778 compat_uptr_t bq_key; 779 compat_uptr_t np_prime; 780 compat_uptr_t nq_prime; 781 compat_uptr_t u_mult_inv; 782 }; 783 784 static long trans_modexpo_crt32(struct file *filp, unsigned int cmd, 785 unsigned long arg) 786 { 787 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg); 788 struct compat_ica_rsa_modexpo_crt crt32; 789 struct ica_rsa_modexpo_crt crt64; 790 long rc; 791 792 if (copy_from_user(&crt32, ucrt32, sizeof(crt32))) 793 return -EFAULT; 794 crt64.inputdata = compat_ptr(crt32.inputdata); 795 crt64.inputdatalength = crt32.inputdatalength; 796 crt64.outputdata= compat_ptr(crt32.outputdata); 797 crt64.outputdatalength = crt32.outputdatalength; 798 crt64.bp_key = compat_ptr(crt32.bp_key); 799 crt64.bq_key = compat_ptr(crt32.bq_key); 800 crt64.np_prime = compat_ptr(crt32.np_prime); 801 crt64.nq_prime = compat_ptr(crt32.nq_prime); 802 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv); 803 do { 804 rc = zcrypt_rsa_crt(&crt64); 805 } while (rc == -EAGAIN); 806 if (!rc) 807 rc = put_user(crt64.outputdatalength, 808 &ucrt32->outputdatalength); 809 return rc; 810 } 811 812 struct compat_ica_xcRB { 813 unsigned short agent_ID; 814 unsigned int user_defined; 815 unsigned short request_ID; 816 unsigned int request_control_blk_length; 817 unsigned char padding1[16 - sizeof (compat_uptr_t)]; 818 compat_uptr_t request_control_blk_addr; 819 unsigned int request_data_length; 820 char padding2[16 - sizeof (compat_uptr_t)]; 821 compat_uptr_t request_data_address; 822 unsigned int reply_control_blk_length; 823 char padding3[16 - sizeof (compat_uptr_t)]; 824 compat_uptr_t reply_control_blk_addr; 825 unsigned int reply_data_length; 826 char padding4[16 - sizeof (compat_uptr_t)]; 827 compat_uptr_t reply_data_addr; 828 unsigned short priority_window; 829 unsigned int status; 830 } __attribute__((packed)); 831 832 static long trans_xcRB32(struct file *filp, unsigned int cmd, 833 unsigned long arg) 834 { 835 struct compat_ica_xcRB __user *uxcRB32 = compat_ptr(arg); 836 struct compat_ica_xcRB xcRB32; 837 struct ica_xcRB xcRB64; 838 long rc; 839 840 if (copy_from_user(&xcRB32, uxcRB32, sizeof(xcRB32))) 841 return -EFAULT; 842 xcRB64.agent_ID = xcRB32.agent_ID; 843 xcRB64.user_defined = xcRB32.user_defined; 844 xcRB64.request_ID = xcRB32.request_ID; 845 xcRB64.request_control_blk_length = 846 xcRB32.request_control_blk_length; 847 xcRB64.request_control_blk_addr = 848 compat_ptr(xcRB32.request_control_blk_addr); 849 xcRB64.request_data_length = 850 xcRB32.request_data_length; 851 xcRB64.request_data_address = 852 compat_ptr(xcRB32.request_data_address); 853 xcRB64.reply_control_blk_length = 854 xcRB32.reply_control_blk_length; 855 xcRB64.reply_control_blk_addr = 856 compat_ptr(xcRB32.reply_control_blk_addr); 857 xcRB64.reply_data_length = xcRB32.reply_data_length; 858 xcRB64.reply_data_addr = 859 compat_ptr(xcRB32.reply_data_addr); 860 xcRB64.priority_window = xcRB32.priority_window; 861 xcRB64.status = xcRB32.status; 862 do { 863 rc = zcrypt_send_cprb(&xcRB64); 864 } while (rc == -EAGAIN); 865 xcRB32.reply_control_blk_length = xcRB64.reply_control_blk_length; 866 xcRB32.reply_data_length = xcRB64.reply_data_length; 867 xcRB32.status = xcRB64.status; 868 if (copy_to_user(uxcRB32, &xcRB32, sizeof(xcRB32))) 869 return -EFAULT; 870 return rc; 871 } 872 873 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd, 874 unsigned long arg) 875 { 876 if (cmd == ICARSAMODEXPO) 877 return trans_modexpo32(filp, cmd, arg); 878 if (cmd == ICARSACRT) 879 return trans_modexpo_crt32(filp, cmd, arg); 880 if (cmd == ZSECSENDCPRB) 881 return trans_xcRB32(filp, cmd, arg); 882 return zcrypt_unlocked_ioctl(filp, cmd, arg); 883 } 884 #endif 885 886 /* 887 * Misc device file operations. 888 */ 889 static const struct file_operations zcrypt_fops = { 890 .owner = THIS_MODULE, 891 .read = zcrypt_read, 892 .write = zcrypt_write, 893 .unlocked_ioctl = zcrypt_unlocked_ioctl, 894 #ifdef CONFIG_COMPAT 895 .compat_ioctl = zcrypt_compat_ioctl, 896 #endif 897 .open = zcrypt_open, 898 .release = zcrypt_release 899 }; 900 901 /* 902 * Misc device. 903 */ 904 static struct miscdevice zcrypt_misc_device = { 905 .minor = MISC_DYNAMIC_MINOR, 906 .name = "z90crypt", 907 .fops = &zcrypt_fops, 908 }; 909 910 /* 911 * Deprecated /proc entry support. 912 */ 913 static struct proc_dir_entry *zcrypt_entry; 914 915 static int sprintcl(unsigned char *outaddr, unsigned char *addr, 916 unsigned int len) 917 { 918 int hl, i; 919 920 hl = 0; 921 for (i = 0; i < len; i++) 922 hl += sprintf(outaddr+hl, "%01x", (unsigned int) addr[i]); 923 hl += sprintf(outaddr+hl, " "); 924 return hl; 925 } 926 927 static int sprintrw(unsigned char *outaddr, unsigned char *addr, 928 unsigned int len) 929 { 930 int hl, inl, c, cx; 931 932 hl = sprintf(outaddr, " "); 933 inl = 0; 934 for (c = 0; c < (len / 16); c++) { 935 hl += sprintcl(outaddr+hl, addr+inl, 16); 936 inl += 16; 937 } 938 cx = len%16; 939 if (cx) { 940 hl += sprintcl(outaddr+hl, addr+inl, cx); 941 inl += cx; 942 } 943 hl += sprintf(outaddr+hl, "\n"); 944 return hl; 945 } 946 947 static int sprinthx(unsigned char *title, unsigned char *outaddr, 948 unsigned char *addr, unsigned int len) 949 { 950 int hl, inl, r, rx; 951 952 hl = sprintf(outaddr, "\n%s\n", title); 953 inl = 0; 954 for (r = 0; r < (len / 64); r++) { 955 hl += sprintrw(outaddr+hl, addr+inl, 64); 956 inl += 64; 957 } 958 rx = len % 64; 959 if (rx) { 960 hl += sprintrw(outaddr+hl, addr+inl, rx); 961 inl += rx; 962 } 963 hl += sprintf(outaddr+hl, "\n"); 964 return hl; 965 } 966 967 static int sprinthx4(unsigned char *title, unsigned char *outaddr, 968 unsigned int *array, unsigned int len) 969 { 970 int hl, r; 971 972 hl = sprintf(outaddr, "\n%s\n", title); 973 for (r = 0; r < len; r++) { 974 if ((r % 8) == 0) 975 hl += sprintf(outaddr+hl, " "); 976 hl += sprintf(outaddr+hl, "%08X ", array[r]); 977 if ((r % 8) == 7) 978 hl += sprintf(outaddr+hl, "\n"); 979 } 980 hl += sprintf(outaddr+hl, "\n"); 981 return hl; 982 } 983 984 static int zcrypt_status_read(char *resp_buff, char **start, off_t offset, 985 int count, int *eof, void *data) 986 { 987 unsigned char *workarea; 988 int len; 989 990 len = 0; 991 992 /* resp_buff is a page. Use the right half for a work area */ 993 workarea = resp_buff + 2000; 994 len += sprintf(resp_buff + len, "\nzcrypt version: %d.%d.%d\n", 995 ZCRYPT_VERSION, ZCRYPT_RELEASE, ZCRYPT_VARIANT); 996 len += sprintf(resp_buff + len, "Cryptographic domain: %d\n", 997 ap_domain_index); 998 len += sprintf(resp_buff + len, "Total device count: %d\n", 999 zcrypt_device_count); 1000 len += sprintf(resp_buff + len, "PCICA count: %d\n", 1001 zcrypt_count_type(ZCRYPT_PCICA)); 1002 len += sprintf(resp_buff + len, "PCICC count: %d\n", 1003 zcrypt_count_type(ZCRYPT_PCICC)); 1004 len += sprintf(resp_buff + len, "PCIXCC MCL2 count: %d\n", 1005 zcrypt_count_type(ZCRYPT_PCIXCC_MCL2)); 1006 len += sprintf(resp_buff + len, "PCIXCC MCL3 count: %d\n", 1007 zcrypt_count_type(ZCRYPT_PCIXCC_MCL3)); 1008 len += sprintf(resp_buff + len, "CEX2C count: %d\n", 1009 zcrypt_count_type(ZCRYPT_CEX2C)); 1010 len += sprintf(resp_buff + len, "CEX2A count: %d\n", 1011 zcrypt_count_type(ZCRYPT_CEX2A)); 1012 len += sprintf(resp_buff + len, "requestq count: %d\n", 1013 zcrypt_requestq_count()); 1014 len += sprintf(resp_buff + len, "pendingq count: %d\n", 1015 zcrypt_pendingq_count()); 1016 len += sprintf(resp_buff + len, "Total open handles: %d\n\n", 1017 atomic_read(&zcrypt_open_count)); 1018 zcrypt_status_mask(workarea); 1019 len += sprinthx("Online devices: 1=PCICA 2=PCICC 3=PCIXCC(MCL2) " 1020 "4=PCIXCC(MCL3) 5=CEX2C 6=CEX2A", 1021 resp_buff+len, workarea, AP_DEVICES); 1022 zcrypt_qdepth_mask(workarea); 1023 len += sprinthx("Waiting work element counts", 1024 resp_buff+len, workarea, AP_DEVICES); 1025 zcrypt_perdev_reqcnt((int *) workarea); 1026 len += sprinthx4("Per-device successfully completed request counts", 1027 resp_buff+len,(unsigned int *) workarea, AP_DEVICES); 1028 *eof = 1; 1029 memset((void *) workarea, 0x00, AP_DEVICES * sizeof(unsigned int)); 1030 return len; 1031 } 1032 1033 static void zcrypt_disable_card(int index) 1034 { 1035 struct zcrypt_device *zdev; 1036 1037 spin_lock_bh(&zcrypt_device_lock); 1038 list_for_each_entry(zdev, &zcrypt_device_list, list) 1039 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1040 zdev->online = 0; 1041 ap_flush_queue(zdev->ap_dev); 1042 break; 1043 } 1044 spin_unlock_bh(&zcrypt_device_lock); 1045 } 1046 1047 static void zcrypt_enable_card(int index) 1048 { 1049 struct zcrypt_device *zdev; 1050 1051 spin_lock_bh(&zcrypt_device_lock); 1052 list_for_each_entry(zdev, &zcrypt_device_list, list) 1053 if (AP_QID_DEVICE(zdev->ap_dev->qid) == index) { 1054 zdev->online = 1; 1055 break; 1056 } 1057 spin_unlock_bh(&zcrypt_device_lock); 1058 } 1059 1060 static int zcrypt_status_write(struct file *file, const char __user *buffer, 1061 unsigned long count, void *data) 1062 { 1063 unsigned char *lbuf, *ptr; 1064 unsigned long local_count; 1065 int j; 1066 1067 if (count <= 0) 1068 return 0; 1069 1070 #define LBUFSIZE 1200UL 1071 lbuf = kmalloc(LBUFSIZE, GFP_KERNEL); 1072 if (!lbuf) 1073 return 0; 1074 1075 local_count = min(LBUFSIZE - 1, count); 1076 if (copy_from_user(lbuf, buffer, local_count) != 0) { 1077 kfree(lbuf); 1078 return -EFAULT; 1079 } 1080 lbuf[local_count] = '\0'; 1081 1082 ptr = strstr(lbuf, "Online devices"); 1083 if (!ptr) 1084 goto out; 1085 ptr = strstr(ptr, "\n"); 1086 if (!ptr) 1087 goto out; 1088 ptr++; 1089 1090 if (strstr(ptr, "Waiting work element counts") == NULL) 1091 goto out; 1092 1093 for (j = 0; j < 64 && *ptr; ptr++) { 1094 /* 1095 * '0' for no device, '1' for PCICA, '2' for PCICC, 1096 * '3' for PCIXCC_MCL2, '4' for PCIXCC_MCL3, 1097 * '5' for CEX2C and '6' for CEX2A' 1098 */ 1099 if (*ptr >= '0' && *ptr <= '6') 1100 j++; 1101 else if (*ptr == 'd' || *ptr == 'D') 1102 zcrypt_disable_card(j++); 1103 else if (*ptr == 'e' || *ptr == 'E') 1104 zcrypt_enable_card(j++); 1105 else if (*ptr != ' ' && *ptr != '\t') 1106 break; 1107 } 1108 out: 1109 kfree(lbuf); 1110 return count; 1111 } 1112 1113 static int zcrypt_rng_device_count; 1114 static u32 *zcrypt_rng_buffer; 1115 static int zcrypt_rng_buffer_index; 1116 static DEFINE_MUTEX(zcrypt_rng_mutex); 1117 1118 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data) 1119 { 1120 int rc; 1121 1122 /* 1123 * We don't need locking here because the RNG API guarantees serialized 1124 * read method calls. 1125 */ 1126 if (zcrypt_rng_buffer_index == 0) { 1127 rc = zcrypt_rng((char *) zcrypt_rng_buffer); 1128 if (rc < 0) 1129 return -EIO; 1130 zcrypt_rng_buffer_index = rc / sizeof *data; 1131 } 1132 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index]; 1133 return sizeof *data; 1134 } 1135 1136 static struct hwrng zcrypt_rng_dev = { 1137 .name = "zcrypt", 1138 .data_read = zcrypt_rng_data_read, 1139 }; 1140 1141 static int zcrypt_rng_device_add(void) 1142 { 1143 int rc = 0; 1144 1145 mutex_lock(&zcrypt_rng_mutex); 1146 if (zcrypt_rng_device_count == 0) { 1147 zcrypt_rng_buffer = (u32 *) get_zeroed_page(GFP_KERNEL); 1148 if (!zcrypt_rng_buffer) { 1149 rc = -ENOMEM; 1150 goto out; 1151 } 1152 zcrypt_rng_buffer_index = 0; 1153 rc = hwrng_register(&zcrypt_rng_dev); 1154 if (rc) 1155 goto out_free; 1156 zcrypt_rng_device_count = 1; 1157 } else 1158 zcrypt_rng_device_count++; 1159 mutex_unlock(&zcrypt_rng_mutex); 1160 return 0; 1161 1162 out_free: 1163 free_page((unsigned long) zcrypt_rng_buffer); 1164 out: 1165 mutex_unlock(&zcrypt_rng_mutex); 1166 return rc; 1167 } 1168 1169 static void zcrypt_rng_device_remove(void) 1170 { 1171 mutex_lock(&zcrypt_rng_mutex); 1172 zcrypt_rng_device_count--; 1173 if (zcrypt_rng_device_count == 0) { 1174 hwrng_unregister(&zcrypt_rng_dev); 1175 free_page((unsigned long) zcrypt_rng_buffer); 1176 } 1177 mutex_unlock(&zcrypt_rng_mutex); 1178 } 1179 1180 /** 1181 * zcrypt_api_init(): Module initialization. 1182 * 1183 * The module initialization code. 1184 */ 1185 int __init zcrypt_api_init(void) 1186 { 1187 int rc; 1188 1189 /* Register the request sprayer. */ 1190 rc = misc_register(&zcrypt_misc_device); 1191 if (rc < 0) 1192 goto out; 1193 1194 /* Set up the proc file system */ 1195 zcrypt_entry = create_proc_entry("driver/z90crypt", 0644, NULL); 1196 if (!zcrypt_entry) { 1197 rc = -ENOMEM; 1198 goto out_misc; 1199 } 1200 zcrypt_entry->data = NULL; 1201 zcrypt_entry->read_proc = zcrypt_status_read; 1202 zcrypt_entry->write_proc = zcrypt_status_write; 1203 1204 return 0; 1205 1206 out_misc: 1207 misc_deregister(&zcrypt_misc_device); 1208 out: 1209 return rc; 1210 } 1211 1212 /** 1213 * zcrypt_api_exit(): Module termination. 1214 * 1215 * The module termination code. 1216 */ 1217 void zcrypt_api_exit(void) 1218 { 1219 remove_proc_entry("driver/z90crypt", NULL); 1220 misc_deregister(&zcrypt_misc_device); 1221 } 1222 1223 #ifndef CONFIG_ZCRYPT_MONOLITHIC 1224 module_init(zcrypt_api_init); 1225 module_exit(zcrypt_api_exit); 1226 #endif 1227