1 /* 2 * linux/drivers/s390/crypto/ap_bus.c 3 * 4 * Copyright (C) 2006 IBM Corporation 5 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Ralph Wuerthner <rwuerthn@de.ibm.com> 8 * Felix Beck <felix.beck@de.ibm.com> 9 * 10 * Adjunct processor bus. 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; if not, write to the Free Software 24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 25 */ 26 27 #define KMSG_COMPONENT "ap" 28 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 29 30 #include <linux/kernel_stat.h> 31 #include <linux/module.h> 32 #include <linux/init.h> 33 #include <linux/delay.h> 34 #include <linux/err.h> 35 #include <linux/interrupt.h> 36 #include <linux/workqueue.h> 37 #include <linux/slab.h> 38 #include <linux/notifier.h> 39 #include <linux/kthread.h> 40 #include <linux/mutex.h> 41 #include <asm/reset.h> 42 #include <asm/airq.h> 43 #include <asm/atomic.h> 44 #include <asm/system.h> 45 #include <asm/isc.h> 46 #include <linux/hrtimer.h> 47 #include <linux/ktime.h> 48 49 #include "ap_bus.h" 50 51 /* Some prototypes. 
 */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(void *unused1, void *unused2);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
		   "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");

/*
 * Module parameter
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet & timer for AP request polling and interrupts
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
/* Non-NULL iff adapter interrupts are in use; see ap_using_interrupts(). */
static void *ap_interrupt_indicator;
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
 * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000;

/* Suspend flag */
static int ap_suspend_flag;
/* Flag to check if domain was set through module parameter domain=. This is
 * important when suspend and resume is done in a z/VM environment where the
 * domain might change. */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/**
 * ap_using_interrupts() - Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_interrupt_indicator != NULL;
}

/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0,0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	/* Issue TAPQ on queue 0/0. On success the "la" clears reg1 to 0;
	 * if PQAP program-checks, the extable fixup resumes at 1:, skipping
	 * the "la" and leaving -ENODEV in reg1. */
	asm volatile(
		" .long 0xb2af0000\n" /* PQAP(TAPQ) */
		"0: la %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	return test_facility(2) && test_facility(65);
}

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	/* TAPQ returns device type and queue depth packed into GR2. */
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	/* Function code 0x01 in bits 32-39 selects RAPQ. */
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000" /* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

#ifdef CONFIG_64BIT
/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	/* Function code 0x03 in bits 32-39 selects AQIC. */
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;
	asm volatile(
		".long 0xb2af0000" /* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc" );
	return reg1_out;
}
#endif

/*
 * __ap_4096_commands_available(): issue TAPQ with the T bit (bit 23) set
 * and report via @support whether the facility bits in GR2 indicate
 * 4096-bit RSA capability. Returns the AP queue status.
 */
static inline struct ap_queue_status __ap_4096_commands_available(ap_qid_t qid,
								  int *support)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000\n"
		"0: la %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "=d" (reg1), "=d" (reg2)
		:
		: "cc");

	if (reg2 & 0x6000000000000000ULL)
		*support = 1;
	else
		*support = 0;

	return reg1;
}

/**
 * ap_4096_commands_available(): Check for availability of 4096 bit RSA
 * support.
 * @qid: The AP queue number
 *
 * Returns 1 if 4096 bit RSA keys are supported for the AP, returns 0 if not.
 */
int ap_4096_commands_available(ap_qid_t qid)
{
	struct ap_queue_status status;
	int i, support = 0;
	status = __ap_4096_commands_available(qid, &support);

	/* Retry transient conditions up to AP_MAX_RESET times. */
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return support;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			/* Queue is gone - no 4096 bit support. */
			return 0;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_4096_commands_available(qid, &support);
		}
	}
	return support;
}
EXPORT_SYMBOL(ap_4096_commands_available);

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on AP queue via ap_queue_interruption_control(). Based
 * on the return value it waits a while and tests the AP queue if interrupts
 * have been switched on using ap_test_queue().
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	/* Poll the queue status until int_enabled shows up or the
	 * retry budget is exhausted. */
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	/* AQIC is a 64-bit only facility. */
	return -EINVAL;
#endif
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	/* VLA typedef so the "m" constraint covers the whole message
	 * buffer, not just its first byte. */
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	if (special == 1)
		reg0 |= 0x400000UL;

	/* Repeat on condition code 2 (segment boundary reached). */
	asm volatile (
		"0: .long 0xb2ad0042\n" /* NQAP */
		" brc 2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}

/*
 * ap_send(): queue a message on an AP device, mapping the raw AP queue
 * status to a kernel error code (0, -EBUSY, -EINVAL or -ENODEV).
 */
int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	/* VLA typedef so the "=m" constraint covers the whole reply
	 * buffer, not just its first byte. */
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;


	/* Repeat on condition code 1 or 2 (incomplete receive). */
	asm volatile(
		"0: .long 0xb2ae0064\n" /* DQAP */
		" brc 6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	/* DQAP returns the PSMID split across GR6 (high) and GR7 (low). */
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

/*
 * ap_recv(): dequeue a reply from an AP device, mapping the raw AP queue
 * status to a kernel error code (0, -ENOENT, -EBUSY or -ENODEV).
 */
int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	/* rc stays -EBUSY while the response is transient; only that
	 * value keeps the retry loop going. */
	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fallthrough */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		/* Only -ENODEV (initial) and -EBUSY keep us polling. */
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator);
		/* If interruption mode is supported by the machine,
		 * but an AP can not be enabled for interruption then
		 * the AP will be discarded. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		/* Device went from idle to busy: start the watchdog. */
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}

/*
 * AP device related attributes.
 */

/* sysfs "hwtype": the AP device type. */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

/* sysfs "depth": the AP queue depth. */
static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

/* sysfs "request_count": total requests handled by this device. */
static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

/* sysfs "modalias": matches the MODALIAS uevent variable. */
static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct
attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

/**
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

/*
 * ap_bus_suspend(): bus suspend callback. The first device suspended
 * shuts down bus scanning and polling; then the device is drained and
 * marked unregistered.
 */
static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, thus we do not want to scan
		 * for them after removing.
 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}

/*
 * ap_bus_resume(): bus resume callback. The first device resumed
 * re-checks interrupt availability, re-selects the domain (unless the
 * user pinned one via the domain= parameter), and restarts the config
 * timer, workqueue, tasklet and poll thread. Every device is then
 * re-qualified with the (possibly new) domain and a bus rescan queued.
 */
static int ap_bus_resume(struct device *dev)
{
	int rc = 0;
	struct ap_device *ap_dev = to_ap_dev(dev);

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (!ap_interrupts_available())
			ap_interrupt_indicator = NULL;
		if (!user_set_domain) {
			/* Domain may differ after z/VM migration. */
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
	}
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};

/*
 * ap_device_probe(): driver-model probe callback; binds the driver to
 * the device and adds it to the bus-private device list on success.
 */
static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver
*ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (!rc) {
		spin_lock_bh(&ap_device_list_lock);
		list_add(&ap_dev->list, &ap_device_list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	/* Complete every queued message with -ENODEV. */
	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

/* Locked wrapper around __ap_flush_queue(). */
void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

/*
 * ap_device_remove(): driver-model remove callback; flushes pending
 * work, unlinks the device and corrects the global request counter.
 */
static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

/*
 * ap_driver_register(): register an AP driver with the driver core,
 * wiring up the common AP bus probe/remove callbacks.
 */
int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner
= owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

/*
 * AP bus attributes.
 */

/* sysfs "ap_domain": the selected AP usage domain. */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

/* sysfs "config_time": bus rescan interval in seconds. */
static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

/* sysfs "ap_interrupts": 1 if adapter interrupts are in use. */
static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

/* sysfs "config_time" writer: accepts 5..120 seconds and re-arms the
 * config timer with the new interval. */
static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

/* sysfs "poll_thread": 1 if the polling kthread is running. */
static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ?
1 : 0);
}

/* sysfs "poll_thread" writer: nonzero starts, zero stops the kthread. */
static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

/* sysfs "poll_timeout": hrtimer poll interval in nanoseconds. */
static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

/* sysfs "poll_timeout" writer: accepts 1ns..120s and re-arms the
 * high resolution poll timer with the new interval. */
static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};

/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick one of the 16 AP domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	/* Count responding queues per domain; keep the busiest one. */
	for (i = 0; i < AP_DOMAINS; i++) {
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			ap_qid_t qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Find the device type if query queue returned a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	/* Canned test request; the format of the reply distinguishes
	 * PCICC from PCICA cards (see the reply check below). */
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer.
*/ 1093 if (reply[0] == 0x00 && reply[1] == 0x86) 1094 ap_dev->device_type = AP_DEVICE_TYPE_PCICC; 1095 else 1096 ap_dev->device_type = AP_DEVICE_TYPE_PCICA; 1097 rc = 0; 1098 } else 1099 rc = -ENODEV; 1100 1101 out_free: 1102 free_page((unsigned long) reply); 1103 out: 1104 return rc; 1105 } 1106 1107 static void ap_interrupt_handler(void *unused1, void *unused2) 1108 { 1109 kstat_cpu(smp_processor_id()).irqs[IOINT_APB]++; 1110 tasklet_schedule(&ap_tasklet); 1111 } 1112 1113 /** 1114 * __ap_scan_bus(): Scan the AP bus. 1115 * @dev: Pointer to device 1116 * @data: Pointer to data 1117 * 1118 * Scan the AP bus for new devices. 1119 */ 1120 static int __ap_scan_bus(struct device *dev, void *data) 1121 { 1122 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1123 } 1124 1125 static void ap_device_release(struct device *dev) 1126 { 1127 struct ap_device *ap_dev = to_ap_dev(dev); 1128 1129 kfree(ap_dev); 1130 } 1131 1132 static void ap_scan_bus(struct work_struct *unused) 1133 { 1134 struct ap_device *ap_dev; 1135 struct device *dev; 1136 ap_qid_t qid; 1137 int queue_depth, device_type; 1138 int rc, i; 1139 1140 if (ap_select_domain() != 0) 1141 return; 1142 for (i = 0; i < AP_DEVICES; i++) { 1143 qid = AP_MKQID(i, ap_domain_index); 1144 dev = bus_find_device(&ap_bus_type, NULL, 1145 (void *)(unsigned long)qid, 1146 __ap_scan_bus); 1147 rc = ap_query_queue(qid, &queue_depth, &device_type); 1148 if (dev) { 1149 if (rc == -EBUSY) { 1150 set_current_state(TASK_UNINTERRUPTIBLE); 1151 schedule_timeout(AP_RESET_TIMEOUT); 1152 rc = ap_query_queue(qid, &queue_depth, 1153 &device_type); 1154 } 1155 ap_dev = to_ap_dev(dev); 1156 spin_lock_bh(&ap_dev->lock); 1157 if (rc || ap_dev->unregistered) { 1158 spin_unlock_bh(&ap_dev->lock); 1159 if (ap_dev->unregistered) 1160 i--; 1161 device_unregister(dev); 1162 put_device(dev); 1163 continue; 1164 } 1165 spin_unlock_bh(&ap_dev->lock); 1166 put_device(dev); 1167 continue; 1168 } 1169 if (rc) 1170 continue; 1171 rc = 
ap_init_queue(qid); 1172 if (rc) 1173 continue; 1174 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); 1175 if (!ap_dev) 1176 break; 1177 ap_dev->qid = qid; 1178 ap_dev->queue_depth = queue_depth; 1179 ap_dev->unregistered = 1; 1180 spin_lock_init(&ap_dev->lock); 1181 INIT_LIST_HEAD(&ap_dev->pendingq); 1182 INIT_LIST_HEAD(&ap_dev->requestq); 1183 INIT_LIST_HEAD(&ap_dev->list); 1184 setup_timer(&ap_dev->timeout, ap_request_timeout, 1185 (unsigned long) ap_dev); 1186 if (device_type == 0) 1187 ap_probe_device_type(ap_dev); 1188 else 1189 ap_dev->device_type = device_type; 1190 1191 ap_dev->device.bus = &ap_bus_type; 1192 ap_dev->device.parent = ap_root_device; 1193 if (dev_set_name(&ap_dev->device, "card%02x", 1194 AP_QID_DEVICE(ap_dev->qid))) { 1195 kfree(ap_dev); 1196 continue; 1197 } 1198 ap_dev->device.release = ap_device_release; 1199 rc = device_register(&ap_dev->device); 1200 if (rc) { 1201 put_device(&ap_dev->device); 1202 continue; 1203 } 1204 /* Add device attributes. */ 1205 rc = sysfs_create_group(&ap_dev->device.kobj, 1206 &ap_dev_attr_group); 1207 if (!rc) { 1208 spin_lock_bh(&ap_dev->lock); 1209 ap_dev->unregistered = 0; 1210 spin_unlock_bh(&ap_dev->lock); 1211 } 1212 else 1213 device_unregister(&ap_dev->device); 1214 } 1215 } 1216 1217 static void 1218 ap_config_timeout(unsigned long ptr) 1219 { 1220 queue_work(ap_work_queue, &ap_config_work); 1221 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1222 add_timer(&ap_config_timer); 1223 } 1224 1225 /** 1226 * ap_schedule_poll_timer(): Schedule poll timer. 
 *
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	/* Interrupt mode and suspend do not need the poll timer. */
	if (ap_using_interrupts() || ap_suspend_flag)
		goto out;
	if (hrtimer_is_queued(&ap_poll_timer))
		goto out;
	/* Only re-arm once the previous expiry has passed. */
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}

/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 * Must be called with ap_dev->lock held.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		/* Match the reply to its pending request by psmid and
		 * hand it to the driver's receive callback. */
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows.
			 * Re-queue everything that was pending. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not,
 * -EINVAL if the request was rejected by the card.
 * Must be called with ap_dev->lock held.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		/* Transient conditions: retry on the next poll. */
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * Returns 0.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful; -EBUSY if the
 * message had to be parked on the software request queue; -EINVAL or
 * -ENODEV if the card rejected the message (the driver's receive
 * callback is then invoked with an ERR_PTR).
 * Must be called with ap_dev->lock held.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	/* Send directly only if nothing is queued ahead of us and the
	 * hardware queue still has room. */
	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

/*
 * ap_queue_message(): Public entry point to submit a request to an
 * AP device. On a device that is (or becomes) gone, the message is
 * completed with ERR_PTR(-ENODEV) and the device is unregistered
 * (outside the lock).
 */
void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	/* NOTE(review): flags is handed to ap_poll_queue() without
	 * initialization; the callees only OR bits into it and the
	 * value is never read here, so this is benign. */
	unsigned long flags;
	int rc;

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		/* Decide which counter to decrement: pendingq if the psmid
		 * is found there, otherwise the message sits on requestq. */
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_reset(): Reset a not responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue. Marks the device unregistered
 * if the reset reports the device as gone.
 * Must be called with ap_dev->lock held.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}

/*
 * Poll a single device and perform a pending reset if one was armed
 * by the request timeout. Called with ap_dev->lock held.
 * Always returns 0.
 */
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used. Thus new interrupts
	 * can be received. Doing it at the beginning of the tasklet is
	 * important so that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg((u8 *)ap_interrupt_indicator, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}

/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	/* Lowest priority: only run when the cpu is otherwise idle. */
	set_user_nice(current, 19);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			schedule();
			continue;
		}
		/* Standard waitqueue protocol: register, set state, then
		 * re-check the condition before sleeping. */
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

/*
 * Start the poll thread unless interrupts are in use, we are
 * suspending, or it is already running. Returns 0 on success or the
 * kthread_run() error code.
 */
static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_using_interrupts() || ap_suspend_flag)
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
		if (rc)
			ap_poll_kthread = NULL;
	}
	else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

/* Stop the poll thread if it is running. */
static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

/**
 * ap_request_timeout(): Handling of request timeouts
 * @data: Holds the AP device.
 *
 * Handles request timeouts.
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	/* Escalate an armed reset to "do it" and let the poll path
	 * (tasklet or poll thread) perform the actual reset. */
	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

/* Reset all queues of the currently selected domain (if any). */
static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

/* Reset every queue of every domain; used as the system reset call. */
static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module: validates the domain parameter, sets up the
 * adapter interrupt (if available), registers the bus and root device,
 * creates the rescan workqueue/timer, arms the hrtimer-based polling
 * and optionally starts the poll thread. On failure all resources
 * acquired so far are unwound via the goto chain.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* In resume callback we need to know if the user had set the domain.
	 * If so, we can not just reset it.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		isc_register(AP_ISC);
		ap_interrupt_indicator = s390_register_adapter_interrupt(
			&ap_interrupt_handler, NULL, AP_ISC);
		/* Interrupt setup failure is not fatal: fall back to
		 * timer-based polling. */
		if (IS_ERR(ap_interrupt_indicator)) {
			ap_interrupt_indicator = NULL;
			isc_unregister(AP_ISC);
		}
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	/* Initial synchronous scan before the periodic rescan starts. */
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to z/VM polling rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
	return rc;
}

/* bus_find_device() helper that matches every device. */
static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_modules_exit(): The module termination code
 *
 * Terminates the module: stops polling, tears down timers and the
 * workqueue, unregisters every remaining AP device and finally the
 * bus, reset call and adapter interrupt.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all)))
	{
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts()) {
		s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC);
		isc_unregister(AP_ISC);
	}
}

#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(ap_module_init);
module_exit(ap_module_exit);
#endif