/*
 * Copyright IBM Corp. 2006, 2012
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *	      Felix Beck <felix.beck@de.ibm.com>
 *	      Holger Dengler <hd@linux.vnet.ibm.com>
 *
 * Adjunct processor bus.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define KMSG_COMPONENT "ap"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/reset.h>
#include <asm/airq.h>
#include <linux/atomic.h>
#include <asm/isc.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <asm/facility.h>

#include "ap_bus.h"

/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(struct airq_struct *airq);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
static int ap_select_domain(void);
static void ap_query_configuration(void);

/*
 * Module description.
 */
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
		   "Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
MODULE_ALIAS("z90crypt");

/*
 * Module parameter
 */
int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 0;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");

static struct device *ap_root_device = NULL;
static struct ap_config_info *ap_configuration;
static DEFINE_SPINLOCK(ap_device_list_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet & timer for AP request polling and interrupts
 */
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);
static DEFINE_SPINLOCK(ap_poll_timer_lock);
static struct hrtimer ap_poll_timer;
/* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds.
 * If running under z/VM, change to 1500000 nanoseconds to adjust to the
 * z/VM polling rate. */
static unsigned long long poll_timeout = 250000;

/* Suspend flag */
static int ap_suspend_flag;
/* Flag to check if the domain was set through the module parameter domain=.
 * This is important when suspend and resume is done in a z/VM environment
 * where the domain might change. */
static int user_set_domain = 0;
static struct bus_type ap_bus_type;

/* Adapter interrupt definitions */
static int ap_airq_flag;

static struct airq_struct ap_airq = {
	.handler = ap_interrupt_handler,
	.isc = AP_ISC,
};

/**
 * ap_using_interrupts() - Returns non-zero if interrupt support is
 * available.
 */
static inline int ap_using_interrupts(void)
{
	return ap_airq_flag;
}

/**
 * ap_instructions_available() - Test if AP instructions are available.
 *
 * Returns 0 if the AP instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

/**
 * ap_interrupts_available(): Test if AP interrupts are available.
 *
 * Returns 1 if AP interrupts are available.
 */
static int ap_interrupts_available(void)
{
	return test_facility(2) && test_facility(65);
}

/**
 * ap_configuration_available(): Test if AP configuration
 * information is available.
 *
 * Returns 1 if AP configuration information is available.
 */
#ifdef CONFIG_64BIT
static int ap_configuration_available(void)
{
	return test_facility(2) && test_facility(12);
}
#endif

/**
 * ap_test_queue(): Test adjunct processor queue.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * ap_reset_queue(): Reset adjunct processor queue.
 * @qid: The AP queue number
 *
 * Returns AP queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

#ifdef CONFIG_64BIT
/**
 * ap_queue_interruption_control(): Enable interruption for a specific AP.
 * @qid: The AP queue number
 * @ind: The notification indicator byte
 *
 * Returns AP queue status.
 */
static inline struct ap_queue_status
ap_queue_interruption_control(ap_qid_t qid, void *ind)
{
	register unsigned long reg0 asm ("0") = qid | 0x03000000UL;
	register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC;
	register struct ap_queue_status reg1_out asm ("1");
	register void *reg2 asm ("2") = ind;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(AQIC) */
		: "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2)
		:
		: "cc");
	return reg1_out;
}
#endif

#ifdef CONFIG_64BIT
static inline struct ap_queue_status
__ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
	register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23);
	register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID;
	register unsigned long reg2 asm ("2");

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0:\n"
		EX_TABLE(0b, 0b)
		: "+d" (reg0), "+d" (reg1), "=d" (reg2)
		:
		: "cc");

	*functions = (unsigned int)(reg2 >> 32);
	return reg1;
}
#endif

#ifdef CONFIG_64BIT
static inline int __ap_query_configuration(struct ap_config_info *config)
{
	register unsigned long reg0 asm ("0") = 0x04000000UL;
	register unsigned long reg1 asm ("1") = -EINVAL;
	register unsigned char *reg2 asm ("2") = (unsigned char *)config;

	asm volatile(
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2)
		:
		: "cc");

	return reg1;
}
#endif

/**
 * ap_query_functions(): Query supported functions.
 * @qid: The AP queue number
 * @functions: Pointer to functions field.
 *
 * Returns
 *	 0 on success.
 *	 -ENODEV if queue not valid.
 *	 -EBUSY if device busy.
 *	 -EINVAL if query function is not supported
 */
static int ap_query_functions(ap_qid_t qid, unsigned int *functions)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int i;

	status = __ap_query_functions(qid, functions);

	for (i = 0; i < AP_MAX_RESET; i++) {
		if (ap_queue_status_invalid_test(&status))
			return -ENODEV;

		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			return 0;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = __ap_query_functions(qid, functions);
		}
	}
	return -EBUSY;
#else
	return -EINVAL;
#endif
}

/**
 * ap_queue_enable_interruption(): Enable interruption on an AP.
 * @qid: The AP queue number
 * @ind: the notification indicator byte
 *
 * Enables interruption on an AP queue via ap_queue_interruption_control().
 * Based on the return value it waits a while and then uses ap_test_queue()
 * to check whether interrupts have been switched on.
 */
static int ap_queue_enable_interruption(ap_qid_t qid, void *ind)
{
#ifdef CONFIG_64BIT
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	status = ap_queue_interruption_control(qid, ind);

	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.int_enabled)
				return 0;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
		case AP_RESPONSE_BUSY:
			if (i < AP_MAX_RESET - 1) {
				udelay(5);
				status = ap_queue_interruption_control(qid,
								       ind);
				continue;
			}
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
		case AP_RESPONSE_INVALID_ADDRESS:
			return -ENODEV;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			if (status.int_enabled)
				return 0;
			break;
		default:
			break;
		}
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &t_depth, &t_device_type);
		}
	}
	return rc;
#else
	return -EINVAL;
#endif
}

/**
 * __ap_send(): Send message to adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: The program supplied message identifier
 * @msg: The message text
 * @length: The message length
 * @special: Special Bit
 *
 * Returns AP queue status structure.
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length,
	  unsigned int special)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = psmid & 0xffffffff;

	if (special == 1)
		reg0 |= 0x400000UL;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc");
	return reg1;
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length, 0);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

/**
 * __ap_recv(): Receive message from adjunct processor queue.
 * @qid: The AP queue number
 * @psmid: Pointer to program supplied message identifier
 * @msg: The message text
 * @length: The message length
 *
 * Returns AP queue status structure.
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc");
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);

/**
 * ap_query_queue(): Check if an AP queue is available.
 * @qid: The AP queue number
 * @queue_depth: Pointer to queue depth value
 * @device_type: Pointer to device type value
 *
 * The test is repeated for AP_MAX_RESET times.
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_INVALID_ADDRESS:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_OTHERWISE_CHANGED:
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

/**
 * ap_init_queue(): Reset an AP queue.
 * @qid: The AP queue number
 *
 * Reset an AP queue and wait for it to become available again.
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
			/* fall through */
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	if (rc == 0 && ap_using_interrupts()) {
		rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr);
		/* If interruption mode is supported by the machine,
		 * but an AP cannot be enabled for interruption then
		 * the AP will be discarded. */
		if (rc)
			pr_err("Registering adapter interrupts for "
			       "AP %d failed\n", AP_QID_DEVICE(qid));
	}
	return rc;
}

/**
 * ap_increase_queue_count(): Arm request timeout.
 * @ap_dev: Pointer to an AP device.
 *
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * ap_decrease_queue_count(): Decrease queue count.
 * @ap_dev: Pointer to an AP device.
 *
 * If AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}

/*
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}

static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}

static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_requestq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL);

static ssize_t ap_pendingq_count_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static ssize_t ap_functions_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions);
}

static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_requestq_count.attr,
	&dev_attr_pendingq_count.attr,
	&dev_attr_modalias.attr,
	&dev_attr_ap_functions.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};

/**
 * ap_bus_match()
 * @dev: Pointer to device
 * @drv: Pointer to device_driver
 *
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * ap_uevent(): Uevent function for AP devices.
 * @dev: Pointer to device
 * @env: Pointer to kobj_uevent_env
 *
 * It sets up a single environment variable DEV_TYPE which contains the
 * hardware device type.
 */
static int ap_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type);

	return retval;
}

static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	unsigned long flags;

	if (!ap_suspend_flag) {
		ap_suspend_flag = 1;

		/* Disable scanning for devices, so we do not scan for them
		 * again after they have been removed.
		 */
		del_timer_sync(&ap_config_timer);
		if (ap_work_queue != NULL) {
			destroy_workqueue(ap_work_queue);
			ap_work_queue = NULL;
		}

		tasklet_disable(&ap_tasklet);
	}
	/* Poll on the device until all requests are finished. */
	do {
		flags = 0;
		spin_lock_bh(&ap_dev->lock);
		__ap_poll_device(ap_dev, &flags);
		spin_unlock_bh(&ap_dev->lock);
	} while ((flags & 1) || (flags & 2));

	spin_lock_bh(&ap_dev->lock);
	ap_dev->unregistered = 1;
	spin_unlock_bh(&ap_dev->lock);

	return 0;
}

static int ap_bus_resume(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	if (ap_suspend_flag) {
		ap_suspend_flag = 0;
		if (ap_interrupts_available()) {
			if (!ap_using_interrupts()) {
				rc = register_adapter_interrupt(&ap_airq);
				ap_airq_flag = (rc == 0);
			}
		} else {
			if (ap_using_interrupts()) {
				unregister_adapter_interrupt(&ap_airq);
				ap_airq_flag = 0;
			}
		}
		ap_query_configuration();
		if (!user_set_domain) {
			ap_domain_index = -1;
			ap_select_domain();
		}
		init_timer(&ap_config_timer);
		ap_config_timer.function = ap_config_timeout;
		ap_config_timer.data = 0;
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
		ap_work_queue = create_singlethread_workqueue("kapwork");
		if (!ap_work_queue)
			return -ENOMEM;
		tasklet_enable(&ap_tasklet);
		if (!ap_using_interrupts())
			ap_schedule_poll_timer();
		else
			tasklet_schedule(&ap_tasklet);
		if (ap_thread_flag)
			rc = ap_poll_thread_start();
		else
			rc = 0;
	} else
		rc = 0;
	if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
		spin_lock_bh(&ap_dev->lock);
		ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
				       ap_domain_index);
		spin_unlock_bh(&ap_dev->lock);
	}
	queue_work(ap_work_queue, &ap_config_work);

	return rc;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
	.suspend = ap_bus_suspend,
	.resume = ap_bus_resume
};

static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	if (!rc) {
		spin_lock_bh(&ap_device_list_lock);
		list_add(&ap_dev->list, &ap_device_list);
		spin_unlock_bh(&ap_device_list_lock);
	}
	return rc;
}

/**
 * __ap_flush_queue(): Flush requests.
 * @ap_dev: Pointer to the AP device
 *
 * Flush all requests from the request/pending queue of an AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	spin_lock_bh(&ap_device_list_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_list_lock);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);

void ap_bus_force_rescan(void)
{
	/* Reconfigure the AP bus rescan timer. */
	mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
	/* Process an asynchronous bus rescan. */
	queue_work(ap_work_queue, &ap_config_work);
	flush_work(&ap_config_work);
}
EXPORT_SYMBOL(ap_bus_force_rescan);

/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n",
			ap_using_interrupts() ? 1 : 0);
}

static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL);

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	} else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static ssize_t poll_timeout_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout);
}

static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
				  size_t count)
{
	unsigned long long time;
	ktime_t hr_time;

	/* 120 seconds = maximum poll interval */
	if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 ||
	    time > 120000000000ULL)
		return -EINVAL;
	poll_timeout = time;
	hr_time = ktime_set(0, poll_timeout);

	if (!hrtimer_is_queued(&ap_poll_timer) ||
	    !hrtimer_forward(&ap_poll_timer,
			     hrtimer_get_expires(&ap_poll_timer), hr_time)) {
		hrtimer_set_expires(&ap_poll_timer, hr_time);
		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
	}
	return count;
}

static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	&bus_attr_ap_interrupts,
	&bus_attr_poll_timeout,
	NULL,
};

static inline int ap_test_config(unsigned int *field, unsigned int nr)
{
	if (nr > 0xFFu)
		return 0;
	return ap_test_bit((field + (nr >> 5)), (nr & 0x1f));
}

/*
 * ap_test_config_card_id(): Test whether an AP card ID is configured.
 * @id AP card ID
 *
 * Returns 0 if the card is not configured
 *	   1 if the card is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_card_id(unsigned int id)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->apm, id);
}

/*
 * ap_test_config_domain(): Test whether an AP usage domain is configured.
 * @domain AP usage domain ID
 *
 * Returns 0 if the usage domain is not configured
 *	   1 if the usage domain is configured or
 *	     if the configuration information is not available
 */
static inline int ap_test_config_domain(unsigned int domain)
{
	if (!ap_configuration)
		return 1;
	return ap_test_config(ap_configuration->aqm, domain);
}

/**
 * ap_query_configuration(): Query AP configuration information.
 *
 * Query information of installed cards and configured domains from AP.
 */
static void ap_query_configuration(void)
{
#ifdef CONFIG_64BIT
	if (ap_configuration_available()) {
		if (!ap_configuration)
			ap_configuration =
				kzalloc(sizeof(struct ap_config_info),
					GFP_KERNEL);
		if (ap_configuration)
			__ap_query_configuration(ap_configuration);
	} else
		ap_configuration = NULL;
#else
	ap_configuration = NULL;
#endif
}

/**
 * ap_select_domain(): Select an AP domain.
 *
 * Pick one of the 16 AP domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	ap_qid_t qid;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		if (!ap_test_config_domain(i))
			continue;
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			if (!ap_test_config_card_id(j))
				continue;
			qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * ap_probe_device_type(): Find the device type of an AP.
 * @ap_dev: pointer to the AP device.
 *
 * Find the device type if query queue returned a device type of 0.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg), 0);
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}
	if (i < 6) {
		/* Got an answer. */
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

static void ap_interrupt_handler(struct airq_struct *airq)
{
	inc_irq_stat(IRQIO_APB);
	tasklet_schedule(&ap_tasklet);
}

/**
 * __ap_scan_bus(): Scan the AP bus.
 * @dev: Pointer to device
 * @data: Pointer to data
 *
 * Scan the AP bus for new devices.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}

static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	unsigned int device_functions;
	int rc, i;

	ap_query_configuration();
	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		if (ap_test_config_card_id(i))
			rc = ap_query_queue(qid, &queue_depth, &device_type);
		else
			rc = -ENODEV;
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				if (ap_dev->unregistered)
					i--;
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		switch (device_type) {
		case 0:
			/* device type probing for old cards */
			if (ap_probe_device_type(ap_dev)) {
				kfree(ap_dev);
				continue;
			}
			break;
		default:
			ap_dev->device_type = device_type;
		}

		rc = ap_query_functions(qid, &device_functions);
		if (!rc)
			ap_dev->functions = device_functions;
		else
			ap_dev->functions = 0u;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		if (dev_set_name(&ap_dev->device, "card%02x",
				 AP_QID_DEVICE(ap_dev->qid))) {
			kfree(ap_dev);
			continue;
		}
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			put_device(&ap_dev->device);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		} else
			device_unregister(&ap_dev->device);
	}
}

static void ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

/**
 * __ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
static inline void __ap_schedule_poll_timer(void)
{
	ktime_t hr_time;

	spin_lock_bh(&ap_poll_timer_lock);
	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
		goto out;
	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
		hr_time = ktime_set(0, poll_timeout);
		hrtimer_forward_now(&ap_poll_timer, hr_time);
		hrtimer_restart(&ap_poll_timer);
	}
out:
	spin_unlock_bh(&ap_poll_timer_lock);
}

/**
 * ap_schedule_poll_timer(): Schedule poll timer.
 *
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
{
	if (ap_using_interrupts())
		return;
	__ap_schedule_poll_timer();
}

/**
 * ap_poll_read(): Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_msg->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_write(): Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length, ap_msg->special);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		__ap_schedule_poll_timer();
		/* fall through */
	case AP_RESPONSE_Q_FULL:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
	case AP_RESPONSE_REQ_FAC_NOT_INST:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * ap_poll_queue(): Poll AP device for pending replies and send new messages.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 *
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * Returns 0.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

/**
 * __ap_queue_message(): Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 *
 * Queue a message to a device. Returns 0 if successful.
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length,
				   ap_msg->special);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_REQ_FAC_NOT_INST:
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	/* For asynchronous message handling a valid receive-callback
	 * is required. */
	BUG_ON(!ap_msg->receive);

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);

/**
 * ap_cancel_message(): Cancel a crypto request.
 * @ap_dev: The AP device that has the message queued
 * @ap_msg: The message that is to be removed
 *
 * Cancel a crypto request. This is done by removing the request
 * from the device pending or request queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * ap_poll_timeout(): AP receive polling for finished AP requests.
 * @unused: Unused pointer.
 *
 * Schedules the AP tasklet using a high resolution timer.
 */
static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused)
{
	tasklet_schedule(&ap_tasklet);
	return HRTIMER_NORESTART;
}

/**
 * ap_reset(): Reset a non-responding AP device.
 * @ap_dev: Pointer to the AP device
 *
 * Reset a non-responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
	else
		__ap_schedule_poll_timer();
}

static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
{
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	return 0;
}

/**
 * ap_poll_all(): Poll all AP devices.
 * @dummy: Unused variable
 *
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	/* Reset the indicator if interrupts are used. Thus new interrupts can
	 * be received. Doing this at the beginning of the tasklet is
	 * important so that no requests on any AP get lost.
	 */
	if (ap_using_interrupts())
		xchg(ap_airq.lsi_ptr, 0);
	do {
		flags = 0;
		spin_lock(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock(&ap_device_list_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}

/**
 * ap_poll_thread(): Thread that polls for finished requests.
 * @data: Unused pointer
 *
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (ap_suspend_flag)
			return 0;
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_list_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			spin_lock(&ap_dev->lock);
			__ap_poll_device(ap_dev, &flags);
			spin_unlock(&ap_dev->lock);
		}
		spin_unlock_bh(&ap_device_list_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

	if (ap_using_interrupts() || ap_suspend_flag)
		return 0;
	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = PTR_RET(ap_poll_kthread);
		if (rc)
			ap_poll_kthread = NULL;
	} else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

/**
 * ap_request_timeout(): Handling of request timeouts
 * @data: Holds the AP device.
 *
 * Handles request timeouts.
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED) {
		ap_dev->reset = AP_RESET_DO;

		if (ap_using_interrupts())
			tasklet_schedule(&ap_tasklet);
	}
}

static void ap_reset_domain(void)
{
	int i;

	if (ap_domain_index != -1)
		for (i = 0; i < AP_DEVICES; i++)
			ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

/**
 * ap_module_init(): The module initialization code.
 *
 * Initializes the module.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		pr_warning("%d is not a valid cryptographic domain\n",
			   ap_domain_index);
		return -EINVAL;
	}
	/* In the resume callback we need to know if the user had set the
	 * domain. If so, we cannot just reset it.
	 */
	if (ap_domain_index >= 0)
		user_set_domain = 1;

	if (ap_instructions_available() != 0) {
		pr_warning("The hardware system does not support "
			   "AP instructions\n");
		return -ENODEV;
	}
	if (ap_interrupts_available()) {
		rc = register_adapter_interrupt(&ap_airq);
		ap_airq_flag = (rc == 0);
	}

	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = root_device_register("ap");
	rc = PTR_RET(ap_root_device);
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	ap_query_configuration();
	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the AP bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Setup the high resolution poll timer.
	 * If we are running under z/VM adjust polling to the z/VM polling
	 * rate.
	 */
	if (MACHINE_IS_VM)
		poll_timeout = 1500000;
	spin_lock_init(&ap_poll_timer_lock);
	hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	ap_poll_timer.function = ap_poll_timeout;

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	root_device_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * ap_module_exit(): The module termination code
 *
 * Terminates the module.
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	hrtimer_cancel(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	root_device_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
				      __ap_match_all))) {
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
	if (ap_using_interrupts())
		unregister_adapter_interrupt(&ap_airq);
}

module_init(ap_module_init);
module_exit(ap_module_exit);