1 /* 2 * Copyright IBM Corp. 2006, 2012 3 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> 4 * Martin Schwidefsky <schwidefsky@de.ibm.com> 5 * Ralph Wuerthner <rwuerthn@de.ibm.com> 6 * Felix Beck <felix.beck@de.ibm.com> 7 * Holger Dengler <hd@linux.vnet.ibm.com> 8 * 9 * Adjunct processor bus. 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * This program is distributed in the hope that it will be useful, 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * GNU General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 24 */ 25 26 #define KMSG_COMPONENT "ap" 27 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 28 29 #include <linux/kernel_stat.h> 30 #include <linux/module.h> 31 #include <linux/init.h> 32 #include <linux/delay.h> 33 #include <linux/err.h> 34 #include <linux/interrupt.h> 35 #include <linux/workqueue.h> 36 #include <linux/slab.h> 37 #include <linux/notifier.h> 38 #include <linux/kthread.h> 39 #include <linux/mutex.h> 40 #include <asm/reset.h> 41 #include <asm/airq.h> 42 #include <linux/atomic.h> 43 #include <asm/isc.h> 44 #include <linux/hrtimer.h> 45 #include <linux/ktime.h> 46 #include <asm/facility.h> 47 48 #include "ap_bus.h" 49 50 /* Some prototypes. */ 51 static void ap_scan_bus(struct work_struct *); 52 static void ap_poll_all(unsigned long); 53 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); 54 static int ap_poll_thread_start(void); 55 static void ap_poll_thread_stop(void); 56 static void ap_request_timeout(unsigned long); 57 static inline void ap_schedule_poll_timer(void); 58 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags); 59 static int ap_device_remove(struct device *dev); 60 static int ap_device_probe(struct device *dev); 61 static void ap_interrupt_handler(struct airq_struct *airq); 62 static void ap_reset(struct ap_device *ap_dev); 63 static void ap_config_timeout(unsigned long ptr); 64 static int ap_select_domain(void); 65 static void ap_query_configuration(void); 66 67 /* 68 * Module description. 69 */ 70 MODULE_AUTHOR("IBM Corporation"); 71 MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \ 72 "Copyright IBM Corp. 2006, 2012"); 73 MODULE_LICENSE("GPL"); 74 MODULE_ALIAS("z90crypt"); 75 76 /* 77 * Module parameter 78 */ 79 int ap_domain_index = -1; /* Adjunct Processor Domain Index */ 80 module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP); 81 MODULE_PARM_DESC(domain, "domain index for ap devices"); 82 EXPORT_SYMBOL(ap_domain_index); 83 84 static int ap_thread_flag = 0; 85 module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP); 86 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); 87 88 static struct device *ap_root_device = NULL; 89 static struct ap_config_info *ap_configuration; 90 static DEFINE_SPINLOCK(ap_device_list_lock); 91 static LIST_HEAD(ap_device_list); 92 93 /* 94 * Workqueue & timer for bus rescan. 
95 */ 96 static struct workqueue_struct *ap_work_queue; 97 static struct timer_list ap_config_timer; 98 static int ap_config_time = AP_CONFIG_TIME; 99 static DECLARE_WORK(ap_config_work, ap_scan_bus); 100 101 /* 102 * Tasklet & timer for AP request polling and interrupts 103 */ 104 static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); 105 static atomic_t ap_poll_requests = ATOMIC_INIT(0); 106 static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); 107 static struct task_struct *ap_poll_kthread = NULL; 108 static DEFINE_MUTEX(ap_poll_thread_mutex); 109 static DEFINE_SPINLOCK(ap_poll_timer_lock); 110 static struct hrtimer ap_poll_timer; 111 /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. 112 * If running under z/VM, change to 1500000 nanoseconds to adjust to z/VM polling. */ 113 static unsigned long long poll_timeout = 250000; 114 115 /* Suspend flag */ 116 static int ap_suspend_flag; 117 /* Flag to check if domain was set through module parameter domain=. This is 118 * important when suspend and resume are done in a z/VM environment where the 119 * domain might change. */ 120 static int user_set_domain = 0; 121 static struct bus_type ap_bus_type; 122 123 /* Adapter interrupt definitions */ 124 static int ap_airq_flag; 125 126 static struct airq_struct ap_airq = { 127 .handler = ap_interrupt_handler, 128 .isc = AP_ISC, 129 }; 130 131 /** 132 * ap_using_interrupts() - Returns non-zero if interrupt support is 133 * available. 134 */ 135 static inline int ap_using_interrupts(void) 136 { 137 return ap_airq_flag; 138 } 139 140 /** 141 * ap_instructions_available() - Test if AP instructions are available. 142 * 143 * Returns 0 if the AP instructions are installed. 144 */ 145 static inline int ap_instructions_available(void) 146 { 147 register unsigned long reg0 asm ("0") = AP_MKQID(0,0); 148 register unsigned long reg1 asm ("1") = -ENODEV; 149 register unsigned long reg2 asm ("2") = 0UL; 150 151 asm volatile( 152 " .long 0xb2af0000\n" /* PQAP(TAPQ) */ 153 "0: la %1,0\n" 154 "1:\n" 155 EX_TABLE(0b, 1b) 156 : "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" ); 157 return reg1; 158 } 159 160 /** 161 * ap_interrupts_available(): Test if AP interrupts are available. 162 * 163 * Returns 1 if AP interrupts are available. 164 */ 165 static int ap_interrupts_available(void) 166 { 167 return test_facility(2) && test_facility(65); 168 } 169 170 /** 171 * ap_configuration_available(): Test if AP configuration 172 * information is available. 173 * 174 * Returns 1 if AP configuration information is available. 175 */ 176 #ifdef CONFIG_64BIT 177 static int ap_configuration_available(void) 178 { 179 return test_facility(2) && test_facility(12); 180 } 181 #endif 182 183 /** 184 * ap_test_queue(): Test adjunct processor queue. 185 * @qid: The AP queue number 186 * @queue_depth: Pointer to queue depth value 187 * @device_type: Pointer to device type value 188 * 189 * Returns AP queue status structure. 190 */ 191 static inline struct ap_queue_status 192 ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type) 193 { 194 register unsigned long reg0 asm ("0") = qid; 195 register struct ap_queue_status reg1 asm ("1"); 196 register unsigned long reg2 asm ("2") = 0UL; 197 198 asm volatile(".long 0xb2af0000" /* PQAP(TAPQ) */ 199 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 200 *device_type = (int) (reg2 >> 24); 201 *queue_depth = (int) (reg2 & 0xff); 202 return reg1; 203 } 204 205 /** 206 * ap_reset_queue(): Reset adjunct processor queue. 207 * @qid: The AP queue number 208 * 209 * Returns AP queue status structure.
210 */ 211 static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) 212 { 213 register unsigned long reg0 asm ("0") = qid | 0x01000000UL; 214 register struct ap_queue_status reg1 asm ("1"); 215 register unsigned long reg2 asm ("2") = 0UL; 216 217 asm volatile( 218 ".long 0xb2af0000" /* PQAP(RAPQ) */ 219 : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc"); 220 return reg1; 221 } 222 223 #ifdef CONFIG_64BIT 224 /** 225 * ap_queue_interruption_control(): Enable interruption for a specific AP. 226 * @qid: The AP queue number 227 * @ind: The notification indicator byte 228 * 229 * Returns AP queue status. 230 */ 231 static inline struct ap_queue_status 232 ap_queue_interruption_control(ap_qid_t qid, void *ind) 233 { 234 register unsigned long reg0 asm ("0") = qid | 0x03000000UL; 235 register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; 236 register struct ap_queue_status reg1_out asm ("1"); 237 register void *reg2 asm ("2") = ind; 238 asm volatile( 239 ".long 0xb2af0000" /* PQAP(AQIC) */ 240 : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) 241 : 242 : "cc" ); 243 return reg1_out; 244 } 245 #endif 246 247 #ifdef CONFIG_64BIT 248 static inline struct ap_queue_status 249 __ap_query_functions(ap_qid_t qid, unsigned int *functions) 250 { 251 register unsigned long reg0 asm ("0") = 0UL | qid | (1UL << 23); 252 register struct ap_queue_status reg1 asm ("1") = AP_QUEUE_STATUS_INVALID; 253 register unsigned long reg2 asm ("2"); 254 255 asm volatile( 256 ".long 0xb2af0000\n" /* PQAP(TAPQ) */ 257 "0:\n" 258 EX_TABLE(0b, 0b) 259 : "+d" (reg0), "+d" (reg1), "=d" (reg2) 260 : 261 : "cc"); 262 263 *functions = (unsigned int)(reg2 >> 32); 264 return reg1; 265 } 266 #endif 267 268 #ifdef CONFIG_64BIT 269 static inline int __ap_query_configuration(struct ap_config_info *config) 270 { 271 register unsigned long reg0 asm ("0") = 0x04000000UL; 272 register unsigned long reg1 asm ("1") = -EINVAL; 273 register unsigned char *reg2 asm ("2") = (unsigned char *)config; 274 275 asm volatile( 276 ".long 0xb2af0000\n" /* PQAP(QCI) */ 277 "0: la %1,0\n" 278 "1:\n" 279 EX_TABLE(0b, 1b) 280 : "+d" (reg0), "+d" (reg1), "+d" (reg2) 281 : 282 : "cc"); 283 284 return reg1; 285 } 286 #endif 287 288 /** 289 * ap_query_functions(): Query supported functions. 290 * @qid: The AP queue number 291 * @functions: Pointer to functions field. 292 * 293 * Returns 294 * 0 on success. 295 * -ENODEV if queue not valid. 296 * -EBUSY if device busy. 297 * -EINVAL if query function is not supported 298 */ 299 static int ap_query_functions(ap_qid_t qid, unsigned int *functions) 300 { 301 #ifdef CONFIG_64BIT 302 struct ap_queue_status status; 303 int i; 304 status = __ap_query_functions(qid, functions); 305 306 for (i = 0; i < AP_MAX_RESET; i++) { 307 if (ap_queue_status_invalid_test(&status)) 308 return -ENODEV; 309 310 switch (status.response_code) { 311 case AP_RESPONSE_NORMAL: 312 return 0; 313 case AP_RESPONSE_RESET_IN_PROGRESS: 314 case AP_RESPONSE_BUSY: 315 break; 316 case AP_RESPONSE_Q_NOT_AVAIL: 317 case AP_RESPONSE_DECONFIGURED: 318 case AP_RESPONSE_CHECKSTOPPED: 319 case AP_RESPONSE_INVALID_ADDRESS: 320 return -ENODEV; 321 case AP_RESPONSE_OTHERWISE_CHANGED: 322 break; 323 default: 324 break; 325 } 326 if (i < AP_MAX_RESET - 1) { 327 udelay(5); 328 status = __ap_query_functions(qid, functions); 329 } 330 } 331 return -EBUSY; 332 #else 333 return -EINVAL; 334 #endif 335 } 336 337 /** 338 * ap_queue_enable_interruption(): Enable interruption on an AP. 
339 * @qid: The AP queue number 340 * @ind: the notification indicator byte 341 * 342 * Enables interruption on AP queue via ap_queue_interruption_control(). Based 343 * on the return value it waits a while and tests the AP queue if interrupts 344 * have been switched on using ap_test_queue(). 345 */ 346 static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) 347 { 348 #ifdef CONFIG_64BIT 349 struct ap_queue_status status; 350 int t_depth, t_device_type, rc, i; 351 352 rc = -EBUSY; 353 status = ap_queue_interruption_control(qid, ind); 354 355 for (i = 0; i < AP_MAX_RESET; i++) { 356 switch (status.response_code) { 357 case AP_RESPONSE_NORMAL: 358 if (status.int_enabled) 359 return 0; 360 break; 361 case AP_RESPONSE_RESET_IN_PROGRESS: 362 case AP_RESPONSE_BUSY: 363 if (i < AP_MAX_RESET - 1) { 364 udelay(5); 365 status = ap_queue_interruption_control(qid, 366 ind); 367 continue; 368 } 369 break; 370 case AP_RESPONSE_Q_NOT_AVAIL: 371 case AP_RESPONSE_DECONFIGURED: 372 case AP_RESPONSE_CHECKSTOPPED: 373 case AP_RESPONSE_INVALID_ADDRESS: 374 return -ENODEV; 375 case AP_RESPONSE_OTHERWISE_CHANGED: 376 if (status.int_enabled) 377 return 0; 378 break; 379 default: 380 break; 381 } 382 if (i < AP_MAX_RESET - 1) { 383 udelay(5); 384 status = ap_test_queue(qid, &t_depth, &t_device_type); 385 } 386 } 387 return rc; 388 #else 389 return -EINVAL; 390 #endif 391 } 392 393 /** 394 * __ap_send(): Send message to adjunct processor queue. 395 * @qid: The AP queue number 396 * @psmid: The program supplied message identifier 397 * @msg: The message text 398 * @length: The message length 399 * @special: Special Bit 400 * 401 * Returns AP queue status structure. 402 * Condition code 1 on NQAP can't happen because the L bit is 1. 403 * Condition code 2 on NQAP also means the send is incomplete, 404 * because a segment boundary was reached. The NQAP is repeated. 405 */ 406 static inline struct ap_queue_status 407 __ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length, 408 unsigned int special) 409 { 410 typedef struct { char _[length]; } msgblock; 411 register unsigned long reg0 asm ("0") = qid | 0x40000000UL; 412 register struct ap_queue_status reg1 asm ("1"); 413 register unsigned long reg2 asm ("2") = (unsigned long) msg; 414 register unsigned long reg3 asm ("3") = (unsigned long) length; 415 register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32); 416 register unsigned long reg5 asm ("5") = psmid & 0xffffffff; 417 418 if (special == 1) 419 reg0 |= 0x400000UL; 420 421 asm volatile ( 422 "0: .long 0xb2ad0042\n" /* NQAP */ 423 " brc 2,0b" 424 : "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3) 425 : "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg) 426 : "cc" ); 427 return reg1; 428 } 429 430 int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length) 431 { 432 struct ap_queue_status status; 433 434 status = __ap_send(qid, psmid, msg, length, 0); 435 switch (status.response_code) { 436 case AP_RESPONSE_NORMAL: 437 return 0; 438 case AP_RESPONSE_Q_FULL: 439 case AP_RESPONSE_RESET_IN_PROGRESS: 440 return -EBUSY; 441 case AP_RESPONSE_REQ_FAC_NOT_INST: 442 return -EINVAL; 443 default: /* Device is gone. */ 444 return -ENODEV; 445 } 446 } 447 EXPORT_SYMBOL(ap_send); 448 449 /** 450 * __ap_recv(): Receive message from adjunct processor queue. 451 * @qid: The AP queue number 452 * @psmid: Pointer to program supplied message identifier 453 * @msg: The message text 454 * @length: The message length 455 * 456 * Returns AP queue status structure. 
457 * Condition code 1 on DQAP means the receive has taken place 458 * but only partially. The response is incomplete, hence the 459 * DQAP is repeated. 460 * Condition code 2 on DQAP also means the receive is incomplete, 461 * this time because a segment boundary was reached. Again, the 462 * DQAP is repeated. 463 * Note that gpr2 is used by the DQAP instruction to keep track of 464 * any 'residual' length, in case the instruction gets interrupted. 465 * Hence it gets zeroed before the instruction. 466 */ 467 static inline struct ap_queue_status 468 __ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 469 { 470 typedef struct { char _[length]; } msgblock; 471 register unsigned long reg0 asm("0") = qid | 0x80000000UL; 472 register struct ap_queue_status reg1 asm ("1"); 473 register unsigned long reg2 asm("2") = 0UL; 474 register unsigned long reg4 asm("4") = (unsigned long) msg; 475 register unsigned long reg5 asm("5") = (unsigned long) length; 476 register unsigned long reg6 asm("6") = 0UL; 477 register unsigned long reg7 asm("7") = 0UL; 478 479 480 asm volatile( 481 "0: .long 0xb2ae0064\n" /* DQAP */ 482 " brc 6,0b\n" 483 : "+d" (reg0), "=d" (reg1), "+d" (reg2), 484 "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7), 485 "=m" (*(msgblock *) msg) : : "cc" ); 486 *psmid = (((unsigned long long) reg6) << 32) + reg7; 487 return reg1; 488 } 489 490 int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length) 491 { 492 struct ap_queue_status status; 493 494 status = __ap_recv(qid, psmid, msg, length); 495 switch (status.response_code) { 496 case AP_RESPONSE_NORMAL: 497 return 0; 498 case AP_RESPONSE_NO_PENDING_REPLY: 499 if (status.queue_empty) 500 return -ENOENT; 501 return -EBUSY; 502 case AP_RESPONSE_RESET_IN_PROGRESS: 503 return -EBUSY; 504 default: 505 return -ENODEV; 506 } 507 } 508 EXPORT_SYMBOL(ap_recv); 509 510 /** 511 * ap_query_queue(): Check if an AP queue is available. 512 * @qid: The AP queue number 513 * @queue_depth: Pointer to queue depth value 514 * @device_type: Pointer to device type value 515 * 516 * The test is repeated for AP_MAX_RESET times. 517 */ 518 static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) 519 { 520 struct ap_queue_status status; 521 int t_depth, t_device_type, rc, i; 522 523 rc = -EBUSY; 524 for (i = 0; i < AP_MAX_RESET; i++) { 525 status = ap_test_queue(qid, &t_depth, &t_device_type); 526 switch (status.response_code) { 527 case AP_RESPONSE_NORMAL: 528 *queue_depth = t_depth + 1; 529 *device_type = t_device_type; 530 rc = 0; 531 break; 532 case AP_RESPONSE_Q_NOT_AVAIL: 533 rc = -ENODEV; 534 break; 535 case AP_RESPONSE_RESET_IN_PROGRESS: 536 break; 537 case AP_RESPONSE_DECONFIGURED: 538 rc = -ENODEV; 539 break; 540 case AP_RESPONSE_CHECKSTOPPED: 541 rc = -ENODEV; 542 break; 543 case AP_RESPONSE_INVALID_ADDRESS: 544 rc = -ENODEV; 545 break; 546 case AP_RESPONSE_OTHERWISE_CHANGED: 547 break; 548 case AP_RESPONSE_BUSY: 549 break; 550 default: 551 BUG(); 552 } 553 if (rc != -EBUSY) 554 break; 555 if (i < AP_MAX_RESET - 1) 556 udelay(5); 557 } 558 return rc; 559 } 560 561 /** 562 * ap_init_queue(): Reset an AP queue. 563 * @qid: The AP queue number 564 * 565 * Reset an AP queue and wait for it to become available again. 
566 */ 567 static int ap_init_queue(ap_qid_t qid) 568 { 569 struct ap_queue_status status; 570 int rc, dummy, i; 571 572 rc = -ENODEV; 573 status = ap_reset_queue(qid); 574 for (i = 0; i < AP_MAX_RESET; i++) { 575 switch (status.response_code) { 576 case AP_RESPONSE_NORMAL: 577 if (status.queue_empty) 578 rc = 0; 579 break; 580 case AP_RESPONSE_Q_NOT_AVAIL: 581 case AP_RESPONSE_DECONFIGURED: 582 case AP_RESPONSE_CHECKSTOPPED: 583 i = AP_MAX_RESET; /* return with -ENODEV */ 584 break; 585 case AP_RESPONSE_RESET_IN_PROGRESS: 586 rc = -EBUSY; 587 case AP_RESPONSE_BUSY: 588 default: 589 break; 590 } 591 if (rc != -ENODEV && rc != -EBUSY) 592 break; 593 if (i < AP_MAX_RESET - 1) { 594 /* Time we are waiting until we give up (0.7sec * 90). 595 * Since the actual request (in progress) will not be 596 * interrupted immediately by the reset command, 597 * we have to be patient. In the worst case we have to 598 * wait 60sec + reset time (some msec). 599 */ 600 schedule_timeout(AP_RESET_TIMEOUT); 601 status = ap_test_queue(qid, &dummy, &dummy); 602 } 603 } 604 if (rc == 0 && ap_using_interrupts()) { 605 rc = ap_queue_enable_interruption(qid, ap_airq.lsi_ptr); 606 /* If interruption mode is supported by the machine, 607 * but an AP cannot be enabled for interruption, then 608 * the AP will be discarded. */ 609 if (rc) 610 pr_err("Registering adapter interrupts for " 611 "AP %d failed\n", AP_QID_DEVICE(qid)); 612 } 613 return rc; 614 } 615 616 /** 617 * ap_increase_queue_count(): Arm request timeout. 618 * @ap_dev: Pointer to an AP device. 619 * 620 * Arm request timeout if an AP device was idle and a new request is submitted. 621 */ 622 static void ap_increase_queue_count(struct ap_device *ap_dev) 623 { 624 int timeout = ap_dev->drv->request_timeout; 625 626 ap_dev->queue_count++; 627 if (ap_dev->queue_count == 1) { 628 mod_timer(&ap_dev->timeout, jiffies + timeout); 629 ap_dev->reset = AP_RESET_ARMED; 630 } 631 } 632 633 /** 634 * ap_decrease_queue_count(): Decrease queue count. 635 * @ap_dev: Pointer to an AP device. 636 * 637 * If the AP device is still alive, re-schedule the request timeout if there are still 638 * pending requests. 639 */ 640 static void ap_decrease_queue_count(struct ap_device *ap_dev) 641 { 642 int timeout = ap_dev->drv->request_timeout; 643 644 ap_dev->queue_count--; 645 if (ap_dev->queue_count > 0) 646 mod_timer(&ap_dev->timeout, jiffies + timeout); 647 else 648 /* 649 * The timeout timer should be disabled now - since 650 * del_timer_sync() is very expensive, we just tell via the 651 * reset flag to ignore the pending timeout timer. 652 */ 653 ap_dev->reset = AP_RESET_IGNORE; 654 } 655 656 /* 657 * AP device related attributes.
658 */ 659 static ssize_t ap_hwtype_show(struct device *dev, 660 struct device_attribute *attr, char *buf) 661 { 662 struct ap_device *ap_dev = to_ap_dev(dev); 663 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); 664 } 665 666 static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); 667 static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, 668 char *buf) 669 { 670 struct ap_device *ap_dev = to_ap_dev(dev); 671 return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); 672 } 673 674 static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); 675 static ssize_t ap_request_count_show(struct device *dev, 676 struct device_attribute *attr, 677 char *buf) 678 { 679 struct ap_device *ap_dev = to_ap_dev(dev); 680 int rc; 681 682 spin_lock_bh(&ap_dev->lock); 683 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count); 684 spin_unlock_bh(&ap_dev->lock); 685 return rc; 686 } 687 688 static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL); 689 690 static ssize_t ap_requestq_count_show(struct device *dev, 691 struct device_attribute *attr, char *buf) 692 { 693 struct ap_device *ap_dev = to_ap_dev(dev); 694 int rc; 695 696 spin_lock_bh(&ap_dev->lock); 697 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->requestq_count); 698 spin_unlock_bh(&ap_dev->lock); 699 return rc; 700 } 701 702 static DEVICE_ATTR(requestq_count, 0444, ap_requestq_count_show, NULL); 703 704 static ssize_t ap_pendingq_count_show(struct device *dev, 705 struct device_attribute *attr, char *buf) 706 { 707 struct ap_device *ap_dev = to_ap_dev(dev); 708 int rc; 709 710 spin_lock_bh(&ap_dev->lock); 711 rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->pendingq_count); 712 spin_unlock_bh(&ap_dev->lock); 713 return rc; 714 } 715 716 static DEVICE_ATTR(pendingq_count, 0444, ap_pendingq_count_show, NULL); 717 718 static ssize_t ap_modalias_show(struct device *dev, 719 struct device_attribute *attr, char *buf) 720 { 721 return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type); 722 } 723 724 static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL); 725 726 static ssize_t ap_functions_show(struct device *dev, 727 struct device_attribute *attr, char *buf) 728 { 729 struct ap_device *ap_dev = to_ap_dev(dev); 730 return snprintf(buf, PAGE_SIZE, "0x%08X\n", ap_dev->functions); 731 } 732 733 static DEVICE_ATTR(ap_functions, 0444, ap_functions_show, NULL); 734 735 static struct attribute *ap_dev_attrs[] = { 736 &dev_attr_hwtype.attr, 737 &dev_attr_depth.attr, 738 &dev_attr_request_count.attr, 739 &dev_attr_requestq_count.attr, 740 &dev_attr_pendingq_count.attr, 741 &dev_attr_modalias.attr, 742 &dev_attr_ap_functions.attr, 743 NULL 744 }; 745 static struct attribute_group ap_dev_attr_group = { 746 .attrs = ap_dev_attrs 747 }; 748 749 /** 750 * ap_bus_match() 751 * @dev: Pointer to device 752 * @drv: Pointer to device_driver 753 * 754 * AP bus driver registration/unregistration. 755 */ 756 static int ap_bus_match(struct device *dev, struct device_driver *drv) 757 { 758 struct ap_device *ap_dev = to_ap_dev(dev); 759 struct ap_driver *ap_drv = to_ap_drv(drv); 760 struct ap_device_id *id; 761 762 /* 763 * Compare device type of the device with the list of 764 * supported types of the device_driver. 765 */ 766 for (id = ap_drv->ids; id->match_flags; id++) { 767 if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) && 768 (id->dev_type != ap_dev->device_type)) 769 continue; 770 return 1; 771 } 772 return 0; 773 } 774 775 /** 776 * ap_uevent(): Uevent function for AP devices. 
777 * @dev: Pointer to device 778 * @env: Pointer to kobj_uevent_env 779 * 780 * It sets up a single environment variable DEV_TYPE which contains the 781 * hardware device type. 782 */ 783 static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) 784 { 785 struct ap_device *ap_dev = to_ap_dev(dev); 786 int retval = 0; 787 788 if (!ap_dev) 789 return -ENODEV; 790 791 /* Set up DEV_TYPE environment variable. */ 792 retval = add_uevent_var(env, "DEV_TYPE=%04X", ap_dev->device_type); 793 if (retval) 794 return retval; 795 796 /* Add MODALIAS= */ 797 retval = add_uevent_var(env, "MODALIAS=ap:t%02X", ap_dev->device_type); 798 799 return retval; 800 } 801 802 static int ap_bus_suspend(struct device *dev, pm_message_t state) 803 { 804 struct ap_device *ap_dev = to_ap_dev(dev); 805 unsigned long flags; 806 807 if (!ap_suspend_flag) { 808 ap_suspend_flag = 1; 809 810 /* Disable scanning for devices, thus we do not want to scan 811 * for them after removing. 812 */ 813 del_timer_sync(&ap_config_timer); 814 if (ap_work_queue != NULL) { 815 destroy_workqueue(ap_work_queue); 816 ap_work_queue = NULL; 817 } 818 819 tasklet_disable(&ap_tasklet); 820 } 821 /* Poll on the device until all requests are finished. */ 822 do { 823 flags = 0; 824 spin_lock_bh(&ap_dev->lock); 825 __ap_poll_device(ap_dev, &flags); 826 spin_unlock_bh(&ap_dev->lock); 827 } while ((flags & 1) || (flags & 2)); 828 829 spin_lock_bh(&ap_dev->lock); 830 ap_dev->unregistered = 1; 831 spin_unlock_bh(&ap_dev->lock); 832 833 return 0; 834 } 835 836 static int ap_bus_resume(struct device *dev) 837 { 838 struct ap_device *ap_dev = to_ap_dev(dev); 839 int rc; 840 841 if (ap_suspend_flag) { 842 ap_suspend_flag = 0; 843 if (ap_interrupts_available()) { 844 if (!ap_using_interrupts()) { 845 rc = register_adapter_interrupt(&ap_airq); 846 ap_airq_flag = (rc == 0); 847 } 848 } else { 849 if (ap_using_interrupts()) { 850 unregister_adapter_interrupt(&ap_airq); 851 ap_airq_flag = 0; 852 } 853 } 854 ap_query_configuration(); 855 if (!user_set_domain) { 856 ap_domain_index = -1; 857 ap_select_domain(); 858 } 859 init_timer(&ap_config_timer); 860 ap_config_timer.function = ap_config_timeout; 861 ap_config_timer.data = 0; 862 ap_config_timer.expires = jiffies + ap_config_time * HZ; 863 add_timer(&ap_config_timer); 864 ap_work_queue = create_singlethread_workqueue("kapwork"); 865 if (!ap_work_queue) 866 return -ENOMEM; 867 tasklet_enable(&ap_tasklet); 868 if (!ap_using_interrupts()) 869 ap_schedule_poll_timer(); 870 else 871 tasklet_schedule(&ap_tasklet); 872 if (ap_thread_flag) 873 rc = ap_poll_thread_start(); 874 else 875 rc = 0; 876 } else 877 rc = 0; 878 if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) { 879 spin_lock_bh(&ap_dev->lock); 880 ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid), 881 ap_domain_index); 882 spin_unlock_bh(&ap_dev->lock); 883 } 884 queue_work(ap_work_queue, &ap_config_work); 885 886 return rc; 887 } 888 889 static struct bus_type ap_bus_type = { 890 .name = "ap", 891 .match = &ap_bus_match, 892 .uevent = &ap_uevent, 893 .suspend = ap_bus_suspend, 894 .resume = ap_bus_resume 895 }; 896 897 static int ap_device_probe(struct device *dev) 898 { 899 struct ap_device *ap_dev = to_ap_dev(dev); 900 struct ap_driver *ap_drv = to_ap_drv(dev->driver); 901 int rc; 902 903 ap_dev->drv = ap_drv; 904 905 spin_lock_bh(&ap_device_list_lock); 906 list_add(&ap_dev->list, &ap_device_list); 907 spin_unlock_bh(&ap_device_list_lock); 908 909 rc = ap_drv->probe ? 
ap_drv->probe(ap_dev) : -ENODEV; 910 if (rc) { 911 spin_lock_bh(&ap_device_list_lock); 912 list_del_init(&ap_dev->list); 913 spin_unlock_bh(&ap_device_list_lock); 914 } 915 return rc; 916 } 917 918 /** 919 * __ap_flush_queue(): Flush requests. 920 * @ap_dev: Pointer to the AP device 921 * 922 * Flush all requests from the request/pending queue of an AP device. 923 */ 924 static void __ap_flush_queue(struct ap_device *ap_dev) 925 { 926 struct ap_message *ap_msg, *next; 927 928 list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) { 929 list_del_init(&ap_msg->list); 930 ap_dev->pendingq_count--; 931 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 932 } 933 list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) { 934 list_del_init(&ap_msg->list); 935 ap_dev->requestq_count--; 936 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 937 } 938 } 939 940 void ap_flush_queue(struct ap_device *ap_dev) 941 { 942 spin_lock_bh(&ap_dev->lock); 943 __ap_flush_queue(ap_dev); 944 spin_unlock_bh(&ap_dev->lock); 945 } 946 EXPORT_SYMBOL(ap_flush_queue); 947 948 static int ap_device_remove(struct device *dev) 949 { 950 struct ap_device *ap_dev = to_ap_dev(dev); 951 struct ap_driver *ap_drv = ap_dev->drv; 952 953 ap_flush_queue(ap_dev); 954 del_timer_sync(&ap_dev->timeout); 955 spin_lock_bh(&ap_device_list_lock); 956 list_del_init(&ap_dev->list); 957 spin_unlock_bh(&ap_device_list_lock); 958 if (ap_drv->remove) 959 ap_drv->remove(ap_dev); 960 spin_lock_bh(&ap_dev->lock); 961 atomic_sub(ap_dev->queue_count, &ap_poll_requests); 962 spin_unlock_bh(&ap_dev->lock); 963 return 0; 964 } 965 966 int ap_driver_register(struct ap_driver *ap_drv, struct module *owner, 967 char *name) 968 { 969 struct device_driver *drv = &ap_drv->driver; 970 971 drv->bus = &ap_bus_type; 972 drv->probe = ap_device_probe; 973 drv->remove = ap_device_remove; 974 drv->owner = owner; 975 drv->name = name; 976 return driver_register(drv); 977 } 978 EXPORT_SYMBOL(ap_driver_register); 979 980 void ap_driver_unregister(struct ap_driver *ap_drv) 981 { 982 driver_unregister(&ap_drv->driver); 983 } 984 EXPORT_SYMBOL(ap_driver_unregister); 985 986 void ap_bus_force_rescan(void) 987 { 988 /* reconfigure the AP bus rescan timer. */ 989 mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ); 990 /* process an asynchronous bus rescan */ 991 queue_work(ap_work_queue, &ap_config_work); 992 flush_work(&ap_config_work); 993 } 994 EXPORT_SYMBOL(ap_bus_force_rescan); 995 996 /* 997 * AP bus attributes.
998 */ 999 static ssize_t ap_domain_show(struct bus_type *bus, char *buf) 1000 { 1001 return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index); 1002 } 1003 1004 static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL); 1005 1006 static ssize_t ap_control_domain_mask_show(struct bus_type *bus, char *buf) 1007 { 1008 if (ap_configuration != NULL) { /* QCI not supported */ 1009 if (test_facility(76)) { /* format 1 - 256 bit domain field */ 1010 return snprintf(buf, PAGE_SIZE, 1011 "0x%08x%08x%08x%08x%08x%08x%08x%08x\n", 1012 ap_configuration->adm[0], ap_configuration->adm[1], 1013 ap_configuration->adm[2], ap_configuration->adm[3], 1014 ap_configuration->adm[4], ap_configuration->adm[5], 1015 ap_configuration->adm[6], ap_configuration->adm[7]); 1016 } else { /* format 0 - 16 bit domain field */ 1017 return snprintf(buf, PAGE_SIZE, "%08x%08x\n", 1018 ap_configuration->adm[0], ap_configuration->adm[1]); 1019 } 1020 } else { 1021 return snprintf(buf, PAGE_SIZE, "not supported\n"); 1022 } 1023 } 1024 1025 static BUS_ATTR(ap_control_domain_mask, 0444, 1026 ap_control_domain_mask_show, NULL); 1027 1028 static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) 1029 { 1030 return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); 1031 } 1032 1033 static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) 1034 { 1035 return snprintf(buf, PAGE_SIZE, "%d\n", 1036 ap_using_interrupts() ? 1 : 0); 1037 } 1038 1039 static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL); 1040 1041 static ssize_t ap_config_time_store(struct bus_type *bus, 1042 const char *buf, size_t count) 1043 { 1044 int time; 1045 1046 if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120) 1047 return -EINVAL; 1048 ap_config_time = time; 1049 if (!timer_pending(&ap_config_timer) || 1050 !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) { 1051 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1052 add_timer(&ap_config_timer); 1053 } 1054 return count; 1055 } 1056 1057 static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store); 1058 1059 static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf) 1060 { 1061 return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 
1 : 0); 1062 } 1063 1064 static ssize_t ap_poll_thread_store(struct bus_type *bus, 1065 const char *buf, size_t count) 1066 { 1067 int flag, rc; 1068 1069 if (sscanf(buf, "%d\n", &flag) != 1) 1070 return -EINVAL; 1071 if (flag) { 1072 rc = ap_poll_thread_start(); 1073 if (rc) 1074 return rc; 1075 } 1076 else 1077 ap_poll_thread_stop(); 1078 return count; 1079 } 1080 1081 static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store); 1082 1083 static ssize_t poll_timeout_show(struct bus_type *bus, char *buf) 1084 { 1085 return snprintf(buf, PAGE_SIZE, "%llu\n", poll_timeout); 1086 } 1087 1088 static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, 1089 size_t count) 1090 { 1091 unsigned long long time; 1092 ktime_t hr_time; 1093 1094 /* 120 seconds = maximum poll interval */ 1095 if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || 1096 time > 120000000000ULL) 1097 return -EINVAL; 1098 poll_timeout = time; 1099 hr_time = ktime_set(0, poll_timeout); 1100 1101 if (!hrtimer_is_queued(&ap_poll_timer) || 1102 !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) { 1103 hrtimer_set_expires(&ap_poll_timer, hr_time); 1104 hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS); 1105 } 1106 return count; 1107 } 1108 1109 static BUS_ATTR(poll_timeout, 0644, poll_timeout_show, poll_timeout_store); 1110 1111 static struct bus_attribute *const ap_bus_attrs[] = { 1112 &bus_attr_ap_domain, 1113 &bus_attr_ap_control_domain_mask, 1114 &bus_attr_config_time, 1115 &bus_attr_poll_thread, 1116 &bus_attr_ap_interrupts, 1117 &bus_attr_poll_timeout, 1118 NULL, 1119 }; 1120 1121 static inline int ap_test_config(unsigned int *field, unsigned int nr) 1122 { 1123 if (nr > 0xFFu) 1124 return 0; 1125 return ap_test_bit((field + (nr >> 5)), (nr & 0x1f)); 1126 } 1127 1128 /* 1129 * ap_test_config_card_id(): Test, whether an AP card ID is configured. 1130 * @id AP card ID 1131 * 1132 * Returns 0 if the card is not configured 1133 * 1 if the card is configured or 1134 * if the configuration information is not available 1135 */ 1136 static inline int ap_test_config_card_id(unsigned int id) 1137 { 1138 if (!ap_configuration) 1139 return 1; 1140 return ap_test_config(ap_configuration->apm, id); 1141 } 1142 1143 /* 1144 * ap_test_config_domain(): Test, whether an AP usage domain is configured. 1145 * @domain AP usage domain ID 1146 * 1147 * Returns 0 if the usage domain is not configured 1148 * 1 if the usage domain is configured or 1149 * if the configuration information is not available 1150 */ 1151 static inline int ap_test_config_domain(unsigned int domain) 1152 { 1153 if (!ap_configuration) 1154 return 1; 1155 return ap_test_config(ap_configuration->aqm, domain); 1156 } 1157 1158 /** 1159 * ap_query_configuration(): Query AP configuration information. 1160 * 1161 * Query information of installed cards and configured domains from AP. 1162 */ 1163 static void ap_query_configuration(void) 1164 { 1165 #ifdef CONFIG_64BIT 1166 if (ap_configuration_available()) { 1167 if (!ap_configuration) 1168 ap_configuration = 1169 kzalloc(sizeof(struct ap_config_info), 1170 GFP_KERNEL); 1171 if (ap_configuration) 1172 __ap_query_configuration(ap_configuration); 1173 } else 1174 ap_configuration = NULL; 1175 #else 1176 ap_configuration = NULL; 1177 #endif 1178 } 1179 1180 /** 1181 * ap_select_domain(): Select an AP domain. 1182 * 1183 * Pick one of the 16 AP domains. 
1184 */ 1185 static int ap_select_domain(void) 1186 { 1187 int queue_depth, device_type, count, max_count, best_domain; 1188 ap_qid_t qid; 1189 int rc, i, j; 1190 1191 /* 1192 * We want to use a single domain. Either the one specified with 1193 * the "domain=" parameter or the domain with the maximum number 1194 * of devices. 1195 */ 1196 if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS) 1197 /* Domain has already been selected. */ 1198 return 0; 1199 best_domain = -1; 1200 max_count = 0; 1201 for (i = 0; i < AP_DOMAINS; i++) { 1202 if (!ap_test_config_domain(i)) 1203 continue; 1204 count = 0; 1205 for (j = 0; j < AP_DEVICES; j++) { 1206 if (!ap_test_config_card_id(j)) 1207 continue; 1208 qid = AP_MKQID(j, i); 1209 rc = ap_query_queue(qid, &queue_depth, &device_type); 1210 if (rc) 1211 continue; 1212 count++; 1213 } 1214 if (count > max_count) { 1215 max_count = count; 1216 best_domain = i; 1217 } 1218 } 1219 if (best_domain >= 0){ 1220 ap_domain_index = best_domain; 1221 return 0; 1222 } 1223 return -ENODEV; 1224 } 1225 1226 /** 1227 * ap_probe_device_type(): Find the device type of an AP. 1228 * @ap_dev: pointer to the AP device. 1229 * 1230 * Find the device type if query queue returned a device type of 0. 1231 */ 1232 static int ap_probe_device_type(struct ap_device *ap_dev) 1233 { 1234 static unsigned char msg[] = { 1235 0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00, 1236 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1237 0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00, 1238 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1239 0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50, 1240 0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01, 1241 0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00, 1242 0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00, 1243 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1244 0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00, 1245 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1246 0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00, 1247 0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00, 1248 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1249 0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00, 1250 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1251 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1252 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1253 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1254 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1255 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1256 0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00, 1257 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 1258 0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00, 1259 0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20, 1260 0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53, 1261 0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22, 1262 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, 1263 0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88, 1264 0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66, 1265 0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44, 1266 0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22, 1267 0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00, 1268 0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77, 1269 0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00, 1270 0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00, 1271 0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01, 1272 0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c, 1273 0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68, 1274 0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66, 1275 0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0, 1276 0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8, 1277 0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04, 1278 0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57, 1279 0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d, 1280 }; 1281 struct ap_queue_status status; 1282 unsigned long long psmid; 1283 char *reply; 1284 int rc, i; 1285 1286 reply = (void 
*) get_zeroed_page(GFP_KERNEL); 1287 if (!reply) { 1288 rc = -ENOMEM; 1289 goto out; 1290 } 1291 1292 status = __ap_send(ap_dev->qid, 0x0102030405060708ULL, 1293 msg, sizeof(msg), 0); 1294 if (status.response_code != AP_RESPONSE_NORMAL) { 1295 rc = -ENODEV; 1296 goto out_free; 1297 } 1298 1299 /* Wait for the test message to complete. */ 1300 for (i = 0; i < 6; i++) { 1301 mdelay(300); 1302 status = __ap_recv(ap_dev->qid, &psmid, reply, 4096); 1303 if (status.response_code == AP_RESPONSE_NORMAL && 1304 psmid == 0x0102030405060708ULL) 1305 break; 1306 } 1307 if (i < 6) { 1308 /* Got an answer. */ 1309 if (reply[0] == 0x00 && reply[1] == 0x86) 1310 ap_dev->device_type = AP_DEVICE_TYPE_PCICC; 1311 else 1312 ap_dev->device_type = AP_DEVICE_TYPE_PCICA; 1313 rc = 0; 1314 } else 1315 rc = -ENODEV; 1316 1317 out_free: 1318 free_page((unsigned long) reply); 1319 out: 1320 return rc; 1321 } 1322 1323 static void ap_interrupt_handler(struct airq_struct *airq) 1324 { 1325 inc_irq_stat(IRQIO_APB); 1326 tasklet_schedule(&ap_tasklet); 1327 } 1328 1329 /** 1330 * __ap_scan_bus(): Scan the AP bus. 1331 * @dev: Pointer to device 1332 * @data: Pointer to data 1333 * 1334 * Scan the AP bus for new devices. 1335 */ 1336 static int __ap_scan_bus(struct device *dev, void *data) 1337 { 1338 return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data; 1339 } 1340 1341 static void ap_device_release(struct device *dev) 1342 { 1343 struct ap_device *ap_dev = to_ap_dev(dev); 1344 1345 kfree(ap_dev); 1346 } 1347 1348 static void ap_scan_bus(struct work_struct *unused) 1349 { 1350 struct ap_device *ap_dev; 1351 struct device *dev; 1352 ap_qid_t qid; 1353 int queue_depth, device_type; 1354 unsigned int device_functions; 1355 int rc, i; 1356 1357 ap_query_configuration(); 1358 if (ap_select_domain() != 0) { 1359 return; 1360 } 1361 for (i = 0; i < AP_DEVICES; i++) { 1362 qid = AP_MKQID(i, ap_domain_index); 1363 dev = bus_find_device(&ap_bus_type, NULL, 1364 (void *)(unsigned long)qid, 1365 __ap_scan_bus); 1366 if (ap_test_config_card_id(i)) 1367 rc = ap_query_queue(qid, &queue_depth, &device_type); 1368 else 1369 rc = -ENODEV; 1370 if (dev) { 1371 if (rc == -EBUSY) { 1372 set_current_state(TASK_UNINTERRUPTIBLE); 1373 schedule_timeout(AP_RESET_TIMEOUT); 1374 rc = ap_query_queue(qid, &queue_depth, 1375 &device_type); 1376 } 1377 ap_dev = to_ap_dev(dev); 1378 spin_lock_bh(&ap_dev->lock); 1379 if (rc || ap_dev->unregistered) { 1380 spin_unlock_bh(&ap_dev->lock); 1381 if (ap_dev->unregistered) 1382 i--; 1383 device_unregister(dev); 1384 put_device(dev); 1385 continue; 1386 } 1387 spin_unlock_bh(&ap_dev->lock); 1388 put_device(dev); 1389 continue; 1390 } 1391 if (rc) 1392 continue; 1393 rc = ap_init_queue(qid); 1394 if (rc) 1395 continue; 1396 ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL); 1397 if (!ap_dev) 1398 break; 1399 ap_dev->qid = qid; 1400 ap_dev->queue_depth = queue_depth; 1401 ap_dev->unregistered = 1; 1402 spin_lock_init(&ap_dev->lock); 1403 INIT_LIST_HEAD(&ap_dev->pendingq); 1404 INIT_LIST_HEAD(&ap_dev->requestq); 1405 INIT_LIST_HEAD(&ap_dev->list); 1406 setup_timer(&ap_dev->timeout, ap_request_timeout, 1407 (unsigned long) ap_dev); 1408 switch (device_type) { 1409 case 0: 1410 /* device type probing for old cards */ 1411 if (ap_probe_device_type(ap_dev)) { 1412 kfree(ap_dev); 1413 continue; 1414 } 1415 break; 1416 default: 1417 ap_dev->device_type = device_type; 1418 } 1419 1420 rc = ap_query_functions(qid, &device_functions); 1421 if (!rc) 1422 ap_dev->functions = device_functions; 1423 else 1424 
ap_dev->functions = 0u; 1425 1426 ap_dev->device.bus = &ap_bus_type; 1427 ap_dev->device.parent = ap_root_device; 1428 if (dev_set_name(&ap_dev->device, "card%02x", 1429 AP_QID_DEVICE(ap_dev->qid))) { 1430 kfree(ap_dev); 1431 continue; 1432 } 1433 ap_dev->device.release = ap_device_release; 1434 rc = device_register(&ap_dev->device); 1435 if (rc) { 1436 put_device(&ap_dev->device); 1437 continue; 1438 } 1439 /* Add device attributes. */ 1440 rc = sysfs_create_group(&ap_dev->device.kobj, 1441 &ap_dev_attr_group); 1442 if (!rc) { 1443 spin_lock_bh(&ap_dev->lock); 1444 ap_dev->unregistered = 0; 1445 spin_unlock_bh(&ap_dev->lock); 1446 } 1447 else 1448 device_unregister(&ap_dev->device); 1449 } 1450 } 1451 1452 static void 1453 ap_config_timeout(unsigned long ptr) 1454 { 1455 queue_work(ap_work_queue, &ap_config_work); 1456 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1457 add_timer(&ap_config_timer); 1458 } 1459 1460 /** 1461 * __ap_schedule_poll_timer(): Schedule poll timer. 1462 * 1463 * Set up the timer to run the poll tasklet 1464 */ 1465 static inline void __ap_schedule_poll_timer(void) 1466 { 1467 ktime_t hr_time; 1468 1469 spin_lock_bh(&ap_poll_timer_lock); 1470 if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag) 1471 goto out; 1472 if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) { 1473 hr_time = ktime_set(0, poll_timeout); 1474 hrtimer_forward_now(&ap_poll_timer, hr_time); 1475 hrtimer_restart(&ap_poll_timer); 1476 } 1477 out: 1478 spin_unlock_bh(&ap_poll_timer_lock); 1479 } 1480 1481 /** 1482 * ap_schedule_poll_timer(): Schedule poll timer. 1483 * 1484 * Set up the timer to run the poll tasklet 1485 */ 1486 static inline void ap_schedule_poll_timer(void) 1487 { 1488 if (ap_using_interrupts()) 1489 return; 1490 __ap_schedule_poll_timer(); 1491 } 1492 1493 /** 1494 * ap_poll_read(): Receive pending reply messages from an AP device. 1495 * @ap_dev: pointer to the AP device 1496 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1497 * required, bit 2^1 is set if the poll timer needs to get armed 1498 * 1499 * Returns 0 if the device is still present, -ENODEV if not. 1500 */ 1501 static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags) 1502 { 1503 struct ap_queue_status status; 1504 struct ap_message *ap_msg; 1505 1506 if (ap_dev->queue_count <= 0) 1507 return 0; 1508 status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid, 1509 ap_dev->reply->message, ap_dev->reply->length); 1510 switch (status.response_code) { 1511 case AP_RESPONSE_NORMAL: 1512 atomic_dec(&ap_poll_requests); 1513 ap_decrease_queue_count(ap_dev); 1514 list_for_each_entry(ap_msg, &ap_dev->pendingq, list) { 1515 if (ap_msg->psmid != ap_dev->reply->psmid) 1516 continue; 1517 list_del_init(&ap_msg->list); 1518 ap_dev->pendingq_count--; 1519 ap_msg->receive(ap_dev, ap_msg, ap_dev->reply); 1520 break; 1521 } 1522 if (ap_dev->queue_count > 0) 1523 *flags |= 1; 1524 break; 1525 case AP_RESPONSE_NO_PENDING_REPLY: 1526 if (status.queue_empty) { 1527 /* The card shouldn't forget requests but who knows. */ 1528 atomic_sub(ap_dev->queue_count, &ap_poll_requests); 1529 ap_dev->queue_count = 0; 1530 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 1531 ap_dev->requestq_count += ap_dev->pendingq_count; 1532 ap_dev->pendingq_count = 0; 1533 } else 1534 *flags |= 2; 1535 break; 1536 default: 1537 return -ENODEV; 1538 } 1539 return 0; 1540 } 1541 1542 /** 1543 * ap_poll_write(): Send messages from the request queue to an AP device. 
1544 * @ap_dev: pointer to the AP device 1545 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1546 * required, bit 2^1 is set if the poll timer needs to get armed 1547 * 1548 * Returns 0 if the device is still present, -ENODEV if not. 1549 */ 1550 static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags) 1551 { 1552 struct ap_queue_status status; 1553 struct ap_message *ap_msg; 1554 1555 if (ap_dev->requestq_count <= 0 || 1556 ap_dev->queue_count >= ap_dev->queue_depth) 1557 return 0; 1558 /* Start the next request on the queue. */ 1559 ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list); 1560 status = __ap_send(ap_dev->qid, ap_msg->psmid, 1561 ap_msg->message, ap_msg->length, ap_msg->special); 1562 switch (status.response_code) { 1563 case AP_RESPONSE_NORMAL: 1564 atomic_inc(&ap_poll_requests); 1565 ap_increase_queue_count(ap_dev); 1566 list_move_tail(&ap_msg->list, &ap_dev->pendingq); 1567 ap_dev->requestq_count--; 1568 ap_dev->pendingq_count++; 1569 if (ap_dev->queue_count < ap_dev->queue_depth && 1570 ap_dev->requestq_count > 0) 1571 *flags |= 1; 1572 *flags |= 2; 1573 break; 1574 case AP_RESPONSE_RESET_IN_PROGRESS: 1575 __ap_schedule_poll_timer(); 1576 case AP_RESPONSE_Q_FULL: 1577 *flags |= 2; 1578 break; 1579 case AP_RESPONSE_MESSAGE_TOO_BIG: 1580 case AP_RESPONSE_REQ_FAC_NOT_INST: 1581 return -EINVAL; 1582 default: 1583 return -ENODEV; 1584 } 1585 return 0; 1586 } 1587 1588 /** 1589 * ap_poll_queue(): Poll AP device for pending replies and send new messages. 1590 * @ap_dev: pointer to the bus device 1591 * @flags: pointer to control flags, bit 2^0 is set if another poll is 1592 * required, bit 2^1 is set if the poll timer needs to get armed 1593 * 1594 * Poll AP device for pending replies and send new messages. If either 1595 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device. 1596 * Returns 0. 1597 */ 1598 static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags) 1599 { 1600 int rc; 1601 1602 rc = ap_poll_read(ap_dev, flags); 1603 if (rc) 1604 return rc; 1605 return ap_poll_write(ap_dev, flags); 1606 } 1607 1608 /** 1609 * __ap_queue_message(): Queue a message to a device. 1610 * @ap_dev: pointer to the AP device 1611 * @ap_msg: the message to be queued 1612 * 1613 * Queue a message to a device. Returns 0 if successful. 1614 */ 1615 static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1616 { 1617 struct ap_queue_status status; 1618 1619 if (list_empty(&ap_dev->requestq) && 1620 ap_dev->queue_count < ap_dev->queue_depth) { 1621 status = __ap_send(ap_dev->qid, ap_msg->psmid, 1622 ap_msg->message, ap_msg->length, 1623 ap_msg->special); 1624 switch (status.response_code) { 1625 case AP_RESPONSE_NORMAL: 1626 list_add_tail(&ap_msg->list, &ap_dev->pendingq); 1627 atomic_inc(&ap_poll_requests); 1628 ap_dev->pendingq_count++; 1629 ap_increase_queue_count(ap_dev); 1630 ap_dev->total_request_count++; 1631 break; 1632 case AP_RESPONSE_Q_FULL: 1633 case AP_RESPONSE_RESET_IN_PROGRESS: 1634 list_add_tail(&ap_msg->list, &ap_dev->requestq); 1635 ap_dev->requestq_count++; 1636 ap_dev->total_request_count++; 1637 return -EBUSY; 1638 case AP_RESPONSE_REQ_FAC_NOT_INST: 1639 case AP_RESPONSE_MESSAGE_TOO_BIG: 1640 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL)); 1641 return -EINVAL; 1642 default: /* Device is gone. 
*/ 1643 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1644 return -ENODEV; 1645 } 1646 } else { 1647 list_add_tail(&ap_msg->list, &ap_dev->requestq); 1648 ap_dev->requestq_count++; 1649 ap_dev->total_request_count++; 1650 return -EBUSY; 1651 } 1652 ap_schedule_poll_timer(); 1653 return 0; 1654 } 1655 1656 void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1657 { 1658 unsigned long flags; 1659 int rc; 1660 1661 /* For asynchronous message handling a valid receive-callback 1662 * is required. */ 1663 BUG_ON(!ap_msg->receive); 1664 1665 spin_lock_bh(&ap_dev->lock); 1666 if (!ap_dev->unregistered) { 1667 /* Make room on the queue by polling for finished requests. */ 1668 rc = ap_poll_queue(ap_dev, &flags); 1669 if (!rc) 1670 rc = __ap_queue_message(ap_dev, ap_msg); 1671 if (!rc) 1672 wake_up(&ap_poll_wait); 1673 if (rc == -ENODEV) 1674 ap_dev->unregistered = 1; 1675 } else { 1676 ap_msg->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV)); 1677 rc = -ENODEV; 1678 } 1679 spin_unlock_bh(&ap_dev->lock); 1680 if (rc == -ENODEV) 1681 device_unregister(&ap_dev->device); 1682 } 1683 EXPORT_SYMBOL(ap_queue_message); 1684 1685 /** 1686 * ap_cancel_message(): Cancel a crypto request. 1687 * @ap_dev: The AP device that has the message queued 1688 * @ap_msg: The message that is to be removed 1689 * 1690 * Cancel a crypto request. This is done by removing the request 1691 * from the device pending or request queue. Note that the 1692 * request stays on the AP queue. When it finishes the message 1693 * reply will be discarded because the psmid can't be found. 1694 */ 1695 void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg) 1696 { 1697 struct ap_message *tmp; 1698 1699 spin_lock_bh(&ap_dev->lock); 1700 if (!list_empty(&ap_msg->list)) { 1701 list_for_each_entry(tmp, &ap_dev->pendingq, list) 1702 if (tmp->psmid == ap_msg->psmid) { 1703 ap_dev->pendingq_count--; 1704 goto found; 1705 } 1706 ap_dev->requestq_count--; 1707 found: 1708 list_del_init(&ap_msg->list); 1709 } 1710 spin_unlock_bh(&ap_dev->lock); 1711 } 1712 EXPORT_SYMBOL(ap_cancel_message); 1713 1714 /** 1715 * ap_poll_timeout(): AP receive polling for finished AP requests. 1716 * @unused: Unused pointer. 1717 * 1718 * Schedules the AP tasklet using a high resolution timer. 1719 */ 1720 static enum hrtimer_restart ap_poll_timeout(struct hrtimer *unused) 1721 { 1722 tasklet_schedule(&ap_tasklet); 1723 return HRTIMER_NORESTART; 1724 } 1725 1726 /** 1727 * ap_reset(): Reset a not responding AP device. 1728 * @ap_dev: Pointer to the AP device 1729 * 1730 * Reset a not responding AP device and move all requests from the 1731 * pending queue to the request queue. 1732 */ 1733 static void ap_reset(struct ap_device *ap_dev) 1734 { 1735 int rc; 1736 1737 ap_dev->reset = AP_RESET_IGNORE; 1738 atomic_sub(ap_dev->queue_count, &ap_poll_requests); 1739 ap_dev->queue_count = 0; 1740 list_splice_init(&ap_dev->pendingq, &ap_dev->requestq); 1741 ap_dev->requestq_count += ap_dev->pendingq_count; 1742 ap_dev->pendingq_count = 0; 1743 rc = ap_init_queue(ap_dev->qid); 1744 if (rc == -ENODEV) 1745 ap_dev->unregistered = 1; 1746 else 1747 __ap_schedule_poll_timer(); 1748 } 1749 1750 static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) 1751 { 1752 if (!ap_dev->unregistered) { 1753 if (ap_poll_queue(ap_dev, flags)) 1754 ap_dev->unregistered = 1; 1755 if (ap_dev->reset == AP_RESET_DO) 1756 ap_reset(ap_dev); 1757 } 1758 return 0; 1759 } 1760 1761 /** 1762 * ap_poll_all(): Poll all AP devices. 
1763 * @dummy: Unused variable 1764 * 1765 * Poll all AP devices on the bus in a round robin fashion. Continue 1766 * polling until bit 2^0 of the control flags is not set. If bit 2^1 1767 * of the control flags has been set, arm the poll timer. 1768 */ 1769 static void ap_poll_all(unsigned long dummy) 1770 { 1771 unsigned long flags; 1772 struct ap_device *ap_dev; 1773 1774 /* Reset the indicator if interrupts are used so that new interrupts 1775 * can be received. Doing this at the beginning of the tasklet is 1776 * important so that no requests on any AP get lost. 1777 */ 1778 if (ap_using_interrupts()) 1779 xchg(ap_airq.lsi_ptr, 0); 1780 do { 1781 flags = 0; 1782 spin_lock(&ap_device_list_lock); 1783 list_for_each_entry(ap_dev, &ap_device_list, list) { 1784 spin_lock(&ap_dev->lock); 1785 __ap_poll_device(ap_dev, &flags); 1786 spin_unlock(&ap_dev->lock); 1787 } 1788 spin_unlock(&ap_device_list_lock); 1789 } while (flags & 1); 1790 if (flags & 2) 1791 ap_schedule_poll_timer(); 1792 } 1793 1794 /** 1795 * ap_poll_thread(): Thread that polls for finished requests. 1796 * @data: Unused pointer 1797 * 1798 * AP bus poll thread. The purpose of this thread is to poll for 1799 * finished requests in a loop if there is a "free" cpu - that is 1800 * a cpu that doesn't have anything better to do. The polling stops 1801 * as soon as there is another task or if all messages have been 1802 * delivered. 1803 */ 1804 static int ap_poll_thread(void *data) 1805 { 1806 DECLARE_WAITQUEUE(wait, current); 1807 unsigned long flags; 1808 int requests; 1809 struct ap_device *ap_dev; 1810 1811 set_user_nice(current, MAX_NICE); 1812 while (1) { 1813 if (ap_suspend_flag) 1814 return 0; 1815 if (need_resched()) { 1816 schedule(); 1817 continue; 1818 } 1819 add_wait_queue(&ap_poll_wait, &wait); 1820 set_current_state(TASK_INTERRUPTIBLE); 1821 if (kthread_should_stop()) 1822 break; 1823 requests = atomic_read(&ap_poll_requests); 1824 if (requests <= 0) 1825 schedule(); 1826 set_current_state(TASK_RUNNING); 1827 remove_wait_queue(&ap_poll_wait, &wait); 1828 1829 flags = 0; 1830 spin_lock_bh(&ap_device_list_lock); 1831 list_for_each_entry(ap_dev, &ap_device_list, list) { 1832 spin_lock(&ap_dev->lock); 1833 __ap_poll_device(ap_dev, &flags); 1834 spin_unlock(&ap_dev->lock); 1835 } 1836 spin_unlock_bh(&ap_device_list_lock); 1837 } 1838 set_current_state(TASK_RUNNING); 1839 remove_wait_queue(&ap_poll_wait, &wait); 1840 return 0; 1841 } 1842 1843 static int ap_poll_thread_start(void) 1844 { 1845 int rc; 1846 1847 if (ap_using_interrupts() || ap_suspend_flag) 1848 return 0; 1849 mutex_lock(&ap_poll_thread_mutex); 1850 if (!ap_poll_kthread) { 1851 ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); 1852 rc = PTR_RET(ap_poll_kthread); 1853 if (rc) 1854 ap_poll_kthread = NULL; 1855 } 1856 else 1857 rc = 0; 1858 mutex_unlock(&ap_poll_thread_mutex); 1859 return rc; 1860 } 1861 1862 static void ap_poll_thread_stop(void) 1863 { 1864 mutex_lock(&ap_poll_thread_mutex); 1865 if (ap_poll_kthread) { 1866 kthread_stop(ap_poll_kthread); 1867 ap_poll_kthread = NULL; 1868 } 1869 mutex_unlock(&ap_poll_thread_mutex); 1870 } 1871 1872 /** 1873 * ap_request_timeout(): Handling of request timeouts 1874 * @data: Holds the AP device. 1875 * 1876 * Handles request timeouts.
1877 */ 1878 static void ap_request_timeout(unsigned long data) 1879 { 1880 struct ap_device *ap_dev = (struct ap_device *) data; 1881 1882 if (ap_dev->reset == AP_RESET_ARMED) { 1883 ap_dev->reset = AP_RESET_DO; 1884 1885 if (ap_using_interrupts()) 1886 tasklet_schedule(&ap_tasklet); 1887 } 1888 } 1889 1890 static void ap_reset_domain(void) 1891 { 1892 int i; 1893 1894 if (ap_domain_index != -1) 1895 for (i = 0; i < AP_DEVICES; i++) 1896 ap_reset_queue(AP_MKQID(i, ap_domain_index)); 1897 } 1898 1899 static void ap_reset_all(void) 1900 { 1901 int i, j; 1902 1903 for (i = 0; i < AP_DOMAINS; i++) 1904 for (j = 0; j < AP_DEVICES; j++) 1905 ap_reset_queue(AP_MKQID(j, i)); 1906 } 1907 1908 static struct reset_call ap_reset_call = { 1909 .fn = ap_reset_all, 1910 }; 1911 1912 /** 1913 * ap_module_init(): The module initialization code. 1914 * 1915 * Initializes the module. 1916 */ 1917 int __init ap_module_init(void) 1918 { 1919 int rc, i; 1920 1921 if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { 1922 pr_warning("%d is not a valid cryptographic domain\n", 1923 ap_domain_index); 1924 return -EINVAL; 1925 } 1926 /* In the resume callback we need to know if the user had set the domain. 1927 * If so, we cannot just reset it. 1928 */ 1929 if (ap_domain_index >= 0) 1930 user_set_domain = 1; 1931 1932 if (ap_instructions_available() != 0) { 1933 pr_warning("The hardware system does not support " 1934 "AP instructions\n"); 1935 return -ENODEV; 1936 } 1937 if (ap_interrupts_available()) { 1938 rc = register_adapter_interrupt(&ap_airq); 1939 ap_airq_flag = (rc == 0); 1940 } 1941 1942 register_reset_call(&ap_reset_call); 1943 1944 /* Create /sys/bus/ap. */ 1945 rc = bus_register(&ap_bus_type); 1946 if (rc) 1947 goto out; 1948 for (i = 0; ap_bus_attrs[i]; i++) { 1949 rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]); 1950 if (rc) 1951 goto out_bus; 1952 } 1953 1954 /* Create /sys/devices/ap. */ 1955 ap_root_device = root_device_register("ap"); 1956 rc = PTR_RET(ap_root_device); 1957 if (rc) 1958 goto out_bus; 1959 1960 ap_work_queue = create_singlethread_workqueue("kapwork"); 1961 if (!ap_work_queue) { 1962 rc = -ENOMEM; 1963 goto out_root; 1964 } 1965 1966 ap_query_configuration(); 1967 if (ap_select_domain() == 0) 1968 ap_scan_bus(NULL); 1969 1970 /* Setup the AP bus rescan timer. */ 1971 init_timer(&ap_config_timer); 1972 ap_config_timer.function = ap_config_timeout; 1973 ap_config_timer.data = 0; 1974 ap_config_timer.expires = jiffies + ap_config_time * HZ; 1975 add_timer(&ap_config_timer); 1976 1977 /* Setup the high resolution poll timer. 1978 * If we are running under z/VM, adjust polling to the z/VM polling rate. 1979 */ 1980 if (MACHINE_IS_VM) 1981 poll_timeout = 1500000; 1982 spin_lock_init(&ap_poll_timer_lock); 1983 hrtimer_init(&ap_poll_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); 1984 ap_poll_timer.function = ap_poll_timeout; 1985 1986 /* Start the low priority AP bus poll thread.
*/ 1987 if (ap_thread_flag) { 1988 rc = ap_poll_thread_start(); 1989 if (rc) 1990 goto out_work; 1991 } 1992 1993 return 0; 1994 1995 out_work: 1996 del_timer_sync(&ap_config_timer); 1997 hrtimer_cancel(&ap_poll_timer); 1998 destroy_workqueue(ap_work_queue); 1999 out_root: 2000 root_device_unregister(ap_root_device); 2001 out_bus: 2002 while (i--) 2003 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 2004 bus_unregister(&ap_bus_type); 2005 out: 2006 unregister_reset_call(&ap_reset_call); 2007 if (ap_using_interrupts()) 2008 unregister_adapter_interrupt(&ap_airq); 2009 return rc; 2010 } 2011 2012 static int __ap_match_all(struct device *dev, void *data) 2013 { 2014 return 1; 2015 } 2016 2017 /** 2018 * ap_modules_exit(): The module termination code 2019 * 2020 * Terminates the module. 2021 */ 2022 void ap_module_exit(void) 2023 { 2024 int i; 2025 struct device *dev; 2026 2027 ap_reset_domain(); 2028 ap_poll_thread_stop(); 2029 del_timer_sync(&ap_config_timer); 2030 hrtimer_cancel(&ap_poll_timer); 2031 destroy_workqueue(ap_work_queue); 2032 tasklet_kill(&ap_tasklet); 2033 root_device_unregister(ap_root_device); 2034 while ((dev = bus_find_device(&ap_bus_type, NULL, NULL, 2035 __ap_match_all))) 2036 { 2037 device_unregister(dev); 2038 put_device(dev); 2039 } 2040 for (i = 0; ap_bus_attrs[i]; i++) 2041 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 2042 bus_unregister(&ap_bus_type); 2043 unregister_reset_call(&ap_reset_call); 2044 if (ap_using_interrupts()) 2045 unregister_adapter_interrupt(&ap_airq); 2046 } 2047 2048 module_init(ap_module_init); 2049 module_exit(ap_module_exit); 2050
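/*
 * Illustrative sketch only (not part of this driver): how a client driver
 * might register with the AP bus through the interfaces exported above,
 * ap_driver_register() and ap_driver_unregister(). The fields used for
 * struct ap_driver and struct ap_device_id (ids, probe, remove,
 * request_timeout, match_flags, dev_type) are assumed from ap_bus.h;
 * the device type value and the timeout are hypothetical placeholders.
 * A real probe callback would also set up ap_dev->reply before messages
 * are submitted with ap_queue_message().
 *
 *	static int example_probe(struct ap_device *ap_dev)
 *	{
 *		return 0;
 *	}
 *
 *	static void example_remove(struct ap_device *ap_dev)
 *	{
 *	}
 *
 *	static struct ap_device_id example_ids[] = {
 *		{ .dev_type = 0x0a,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },
 *	};
 *
 *	static struct ap_driver example_driver = {
 *		.ids = example_ids,
 *		.probe = example_probe,
 *		.remove = example_remove,
 *		.request_timeout = HZ,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return ap_driver_register(&example_driver, THIS_MODULE,
 *					  "example");
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		ap_driver_unregister(&example_driver);
 *	}
 */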