/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER             "sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 0;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
        complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
        int pages, rc;

        rc = kstrtoint(str, 0, &pages);
        if (!rc && pages >= SCLP_CONSOLE_PAGES)
                sclp_console_pages = pages;
        return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
        int drop, rc;

        rc = kstrtoint(str, 0, &drop);
        if (!rc && drop)
                sclp_console_drop = 1;
        return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);
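
/*
 * Usage note (illustrative, not part of the original source): the two __setup
 * handlers above accept kernel command-line parameters of the form
 *
 *      sclp_con_pages=<n>      number of console buffer pages; values below
 *                              SCLP_CONSOLE_PAGES are ignored
 *      sclp_con_drop=1         allow the console to drop buffer pages on a
 *                              buffer-full condition (see sclp_console_drop)
 *
 * e.g. "sclp_con_pages=8 sclp_con_drop=1" - the example values are arbitrary.
 */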

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
        sclp_init_state_uninitialized,
        sclp_init_state_initializing,
        sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
        sclp_running_state_idle,
        sclp_running_state_running,
        sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
        sclp_reading_state_idle,
        sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
        sclp_activation_state_active,
        sclp_activation_state_deactivating,
        sclp_activation_state_inactive,
        sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
        sclp_mask_state_idle,
        sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
        sclp_suspend_state_running,
        sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY         3
#define SCLP_MASK_RETRY         3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL      10
#define SCLP_RETRY_INTERVAL     30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
        int cc = 4; /* Initialize for program check handling */

        asm volatile(
                "0:     .insn   rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
                "1:     ipm     %0\n"
                "       srl     %0,28\n"
                "2:\n"
                EX_TABLE(0b, 2b)
                EX_TABLE(1b, 2b)
                : "+&d" (cc) : "d" (command), "a" (__pa(sccb))
                : "cc", "memory");
        if (cc == 4)
                return -EINVAL;
        if (cc == 3)
                return -EIO;
        if (cc == 2)
                return -EBUSY;
        return 0;
}

static void
__sclp_queue_read_req(void)
{
        if (sclp_reading_state == sclp_reading_state_idle) {
                sclp_reading_state = sclp_reading_state_reading;
                __sclp_make_read_req();
                /* Add request to head of queue */
                list_add(&sclp_read_req.list, &sclp_req_queue);
        }
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
                         unsigned long data)
{
        del_timer(&sclp_request_timer);
        sclp_request_timer.function = function;
        sclp_request_timer.data = data;
        sclp_request_timer.expires = jiffies + time;
        add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (data) {
                if (sclp_running_state == sclp_running_state_running) {
                        /* Break running state and queue NOP read event request
                         * to get a defined interface state. */
                        __sclp_queue_read_req();
                        sclp_running_state = sclp_running_state_idle;
                }
        } else {
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_process_queue();
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
        int rc;

        if (sclp_running_state != sclp_running_state_idle)
                return 0;
        del_timer(&sclp_request_timer);
        rc = sclp_service_call(req->command, req->sccb);
        req->start_count++;

        if (rc == 0) {
                /* Successfully started request */
                req->status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_request_timeout, 1);
                return 0;
        } else if (rc == -EBUSY) {
                /* Try again later */
                __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                         sclp_request_timeout, 0);
                return 0;
        }
        /* Request failed */
        req->status = SCLP_REQ_FAILED;
        return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
        struct sclp_req *req;
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state != sclp_running_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
        }
        del_timer(&sclp_request_timer);
        while (!list_empty(&sclp_req_queue)) {
                req = list_entry(sclp_req_queue.next, struct sclp_req, list);
                if (!req->sccb)
                        goto do_post;
                rc = __sclp_start_request(req);
                if (rc == 0)
                        break;
                /* Request failed */
                if (req->start_count > 1) {
                        /* Cannot abort already submitted request - could still
                         * be active at the SCLP */
                        __sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
                                                 sclp_request_timeout, 0);
                        break;
                }
do_post:
                /* Post-processing for aborted request */
                list_del(&req->list);
                if (req->callback) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        req->callback(req, req->callback_data);
                        spin_lock_irqsave(&sclp_lock, flags);
                }
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
        if (req == &sclp_suspend_req || req == &sclp_init_req)
                return 1;
        if (sclp_suspend_state != sclp_suspend_state_running)
                return 0;
        if (sclp_init_state != sclp_init_state_initialized)
                return 0;
        if (sclp_activation_state != sclp_activation_state_active)
                return 0;
        return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        if (!__sclp_can_add_request(req)) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EIO;
        }
        req->status = SCLP_REQ_QUEUED;
        req->start_count = 0;
        list_add_tail(&req->list, &sclp_req_queue);
        rc = 0;
        /* Start if request is first in list */
        if (sclp_running_state == sclp_running_state_idle &&
            req->list.prev == &sclp_req_queue) {
                if (!req->sccb) {
                        list_del(&req->list);
                        rc = -ENODATA;
                        goto out;
                }
                rc = __sclp_start_request(req);
                if (rc)
                        list_del(&req->list);
        }
out:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_add_request);
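
/*
 * Illustrative sketch (not part of the original source): a client embeds a
 * struct sclp_req, points it at a page-sized SCCB and queues it much like the
 * init and read requests built elsewhere in this file. The names my_cb,
 * my_sccb and my_req are hypothetical; the command word is assumed to come
 * from sclp.h.
 *
 *      static void my_cb(struct sclp_req *req, void *data)
 *      {
 *              complete((struct completion *) data);
 *      }
 *
 *      my_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
 *      my_req.sccb = my_sccb;
 *      my_req.status = SCLP_REQ_FILLED;
 *      my_req.callback = my_cb;
 *      my_req.callback_data = &done;
 *      rc = sclp_add_request(&my_req);
 *
 * The callback runs once, either when the request completes (interrupt
 * handler) or when it is aborted (queue processing).
 */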

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
        unsigned long flags;
        struct evbuf_header *evbuf;
        struct list_head *l;
        struct sclp_register *reg;
        int offset;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        rc = 0;
        for (offset = sizeof(struct sccb_header); offset < sccb->length;
             offset += evbuf->length) {
                evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
                /* Check for malformed hardware response */
                if (evbuf->length == 0)
                        break;
                /* Search for event handler */
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        if (reg->receive_mask & (1 << (32 - evbuf->type)))
                                break;
                        else
                                reg = NULL;
                }
                if (reg && reg->receiver_fn) {
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        reg->receiver_fn(evbuf);
                        spin_lock_irqsave(&sclp_lock, flags);
                } else if (reg == NULL)
                        rc = -EOPNOTSUPP;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
        unsigned long flags;
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) req->sccb;
        if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
            sccb->response_code == 0x220))
                sclp_dispatch_evbufs(sccb);
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_reading_state = sclp_reading_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
        struct sccb_header *sccb;

        sccb = (struct sccb_header *) sclp_read_sccb;
        clear_page(sccb);
        memset(&sclp_read_req, 0, sizeof(struct sclp_req));
        sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
        sclp_read_req.status = SCLP_REQ_QUEUED;
        sclp_read_req.start_count = 0;
        sclp_read_req.callback = sclp_read_cb;
        sclp_read_req.sccb = sccb;
        sccb->length = PAGE_SIZE;
        sccb->function_code = 0;
        sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
        struct list_head *l;
        struct sclp_req *req;

        list_for_each(l, &sclp_req_queue) {
                req = list_entry(l, struct sclp_req, list);
                if (sccb == (u32) (addr_t) req->sccb)
                        return req;
        }
        return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
                                   unsigned int param32, unsigned long param64)
{
        struct sclp_req *req;
        u32 finished_sccb;
        u32 evbuf_pending;

        inc_irq_stat(IRQEXT_SCP);
        spin_lock(&sclp_lock);
        finished_sccb = param32 & 0xfffffff8;
        evbuf_pending = param32 & 0x3;
        if (finished_sccb) {
                del_timer(&sclp_request_timer);
                sclp_running_state = sclp_running_state_reset_pending;
                req = __sclp_find_req(finished_sccb);
                if (req) {
                        /* Request post-processing */
                        list_del(&req->list);
                        req->status = SCLP_REQ_DONE;
                        if (req->callback) {
                                spin_unlock(&sclp_lock);
                                req->callback(req, req->callback_data);
                                spin_lock(&sclp_lock);
                        }
                }
                sclp_running_state = sclp_running_state_idle;
        }
        if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
        sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
        return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
        unsigned long long old_tick;
        unsigned long flags;
        unsigned long cr0, cr0_sync;
        u64 timeout;
        int irq_context;

        /* We'll be disabling timer interrupts, so we need a custom timeout
         * mechanism */
        timeout = 0;
        if (timer_pending(&sclp_request_timer)) {
                /* Get timeout TOD value */
                timeout = get_tod_clock_fast() +
                          sclp_tod_from_jiffies(sclp_request_timer.expires -
                                                jiffies);
        }
        local_irq_save(flags);
        /* Prevent bottom half from executing once we force interrupts open */
        irq_context = in_interrupt();
        if (!irq_context)
                local_bh_disable();
        /* Enable service-signal interruption, disable timer interrupts */
        old_tick = local_tick_disable();
        trace_hardirqs_on();
        __ctl_store(cr0, 0, 0);
        cr0_sync = cr0;
        cr0_sync &= 0xffff00a0;
        cr0_sync |= 0x00000200;
        __ctl_load(cr0_sync, 0, 0);
        __arch_local_irq_stosm(0x01);
        /* Loop until driver state indicates finished request */
        while (sclp_running_state != sclp_running_state_idle) {
                /* Check for expired request timer */
                if (timer_pending(&sclp_request_timer) &&
                    get_tod_clock_fast() > timeout &&
                    del_timer(&sclp_request_timer))
                        sclp_request_timer.function(sclp_request_timer.data);
                cpu_relax();
        }
        local_irq_disable();
        __ctl_load(cr0, 0, 0);
        if (!irq_context)
                _local_bh_enable();
        local_tick_enable(old_tick);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
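
/*
 * Usage note (illustrative, not part of the original source): callers that
 * must complete a request without sleeping combine sclp_add_request() with
 * sclp_sync_wait(), exactly as sclp_init_mask() does below:
 *
 *      rc = sclp_add_request(&req);
 *      if (rc == 0)
 *              while (req.status != SCLP_REQ_DONE &&
 *                     req.status != SCLP_REQ_FAILED)
 *                      sclp_sync_wait();
 *
 * The busy loop is tolerable because sclp_sync_wait() keeps the service-signal
 * interruption enabled and fires the request timer by hand once it expires.
 */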

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
        struct list_head *l;
        struct sclp_register *reg;
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;

        do {
                spin_lock_irqsave(&sclp_lock, flags);
                reg = NULL;
                list_for_each(l, &sclp_reg_list) {
                        reg = list_entry(l, struct sclp_register, list);
                        receive_mask = reg->send_mask & sclp_receive_mask;
                        send_mask = reg->receive_mask & sclp_send_mask;
                        if (reg->sclp_receive_mask != receive_mask ||
                            reg->sclp_send_mask != send_mask) {
                                reg->sclp_receive_mask = receive_mask;
                                reg->sclp_send_mask = send_mask;
                                break;
                        } else
                                reg = NULL;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg && reg->state_change_fn)
                        reg->state_change_fn(reg);
        } while (reg);
}

struct sclp_statechangebuf {
        struct evbuf_header     header;
        u8              validity_sclp_active_facility_mask : 1;
        u8              validity_sclp_receive_mask : 1;
        u8              validity_sclp_send_mask : 1;
        u8              validity_read_data_function_mask : 1;
        u16             _zeros : 12;
        u16             mask_length;
        u64             sclp_active_facility_mask;
        sccb_mask_t     sclp_receive_mask;
        sccb_mask_t     sclp_send_mask;
        u32             read_data_function_mask;
} __attribute__((packed));

/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
        unsigned long flags;
        struct sclp_statechangebuf *scbuf;

        scbuf = (struct sclp_statechangebuf *) evbuf;
        if (scbuf->mask_length != sizeof(sccb_mask_t))
                return;
        spin_lock_irqsave(&sclp_lock, flags);
        if (scbuf->validity_sclp_receive_mask)
                sclp_receive_mask = scbuf->sclp_receive_mask;
        if (scbuf->validity_sclp_send_mask)
                sclp_send_mask = scbuf->sclp_send_mask;
        spin_unlock_irqrestore(&sclp_lock, flags);
        if (scbuf->validity_sclp_active_facility_mask)
                sclp_facilities = scbuf->sclp_active_facility_mask;
        sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
        .receive_mask = EVTYP_STATECHANGE_MASK,
        .receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
        struct list_head *l;
        struct sclp_register *t;

        *receive_mask = 0;
        *send_mask = 0;
        list_for_each(l, &sclp_reg_list) {
                t = list_entry(l, struct sclp_register, list);
                *receive_mask |= t->receive_mask;
                *send_mask |= t->send_mask;
        }
}
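
/*
 * Illustrative sketch (not part of the original source): an event consumer
 * registers in the same way as sclp_state_change_event above. An event of
 * type t corresponds to receive_mask bit 1 << (32 - t) in
 * sclp_dispatch_evbufs(), so e.g. type 8 maps to mask bit 0x01000000. The
 * names my_receiver_fn and my_event are hypothetical; EVTYP_MSG_MASK is one
 * of the mask constants from sclp.h.
 *
 *      static void my_receiver_fn(struct evbuf_header *evbuf)
 *      {
 *              ... consume the event buffer ...
 *      }
 *
 *      static struct sclp_register my_event = {
 *              .receive_mask = EVTYP_MSG_MASK,
 *              .receiver_fn  = my_receiver_fn,
 *      };
 *
 * sclp_register(&my_event) below returns -EBUSY if another listener already
 * claimed one of the requested mask bits.
 */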

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
        unsigned long flags;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int rc;

        rc = sclp_init();
        if (rc)
                return rc;
        spin_lock_irqsave(&sclp_lock, flags);
        /* Check event mask for collisions */
        __sclp_get_mask(&receive_mask, &send_mask);
        if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        /* Trigger initial state change callback */
        reg->sclp_receive_mask = 0;
        reg->sclp_send_mask = 0;
        reg->pm_event_posted = 0;
        list_add(&reg->list, &sclp_reg_list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        if (rc) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_del(&reg->list);
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        list_del(&reg->list);
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
        struct evbuf_header *evbuf;
        int unprocessed;
        u16 remaining;

        evbuf = (struct evbuf_header *) (sccb + 1);
        unprocessed = 0;
        remaining = sccb->length - sizeof(struct sccb_header);
        while (remaining > 0) {
                remaining -= evbuf->length;
                if (evbuf->flags & 0x80) {
                        sccb->length -= evbuf->length;
                        memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
                               remaining);
                } else {
                        unprocessed++;
                        evbuf = (struct evbuf_header *)
                                        ((addr_t) evbuf + evbuf->length);
                }
        }
        return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
        struct init_sccb *sccb;

        sccb = (struct init_sccb *) sclp_init_sccb;
        clear_page(sccb);
        memset(&sclp_init_req, 0, sizeof(struct sclp_req));
        sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
        sclp_init_req.status = SCLP_REQ_FILLED;
        sclp_init_req.start_count = 0;
        sclp_init_req.callback = NULL;
        sclp_init_req.callback_data = NULL;
        sclp_init_req.sccb = sccb;
        sccb->header.length = sizeof(struct init_sccb);
        sccb->mask_length = sizeof(sccb_mask_t);
        sccb->receive_mask = receive_mask;
        sccb->send_mask = send_mask;
        sccb->sclp_receive_mask = 0;
        sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
        unsigned long flags;
        struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
        int retry;
        int rc;
        unsigned long wait;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check if interface is in appropriate state */
        if (sclp_mask_state != sclp_mask_state_idle) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EBUSY;
        }
        if (sclp_activation_state == sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_mask_state = sclp_mask_state_initializing;
        /* Determine mask */
        if (calculate)
                __sclp_get_mask(&receive_mask, &send_mask);
        else {
                receive_mask = 0;
                send_mask = 0;
        }
        rc = -EIO;
        for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
                /* Prepare request */
                __sclp_make_init_req(receive_mask, send_mask);
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (sclp_add_request(&sclp_init_req)) {
                        /* Try again later */
                        wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
                        while (time_before(jiffies, wait))
                                sclp_sync_wait();
                        spin_lock_irqsave(&sclp_lock, flags);
                        continue;
                }
                while (sclp_init_req.status != SCLP_REQ_DONE &&
                       sclp_init_req.status != SCLP_REQ_FAILED)
                        sclp_sync_wait();
                spin_lock_irqsave(&sclp_lock, flags);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        /* Successful request */
                        if (calculate) {
                                sclp_receive_mask = sccb->sclp_receive_mask;
                                sclp_send_mask = sccb->sclp_send_mask;
                        } else {
                                sclp_receive_mask = 0;
                                sclp_send_mask = 0;
                        }
                        spin_unlock_irqrestore(&sclp_lock, flags);
                        sclp_dispatch_state_change();
                        spin_lock_irqsave(&sclp_lock, flags);
                        rc = 0;
                        break;
                }
        }
        sclp_mask_state = sclp_mask_state_idle;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Deactivate can only be called when active */
        if (sclp_activation_state != sclp_activation_state_active) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_deactivating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(0);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_inactive;
        else
                sclp_activation_state = sclp_activation_state_active;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
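
/*
 * Usage note (illustrative, not part of the original source): sclp_deactivate()
 * and sclp_reactivate() below are used as a pair around a window in which the
 * interface has to stay quiet:
 *
 *      rc = sclp_deactivate();         (masks cleared, new requests rejected)
 *      if (rc == 0) {
 *              ...
 *              rc = sclp_reactivate(); (masks recalculated from listeners)
 *      }
 *
 * The suspend path below uses this pairing; the reboot notifier only
 * deactivates, since the system is going down anyway.
 */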

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
        unsigned long flags;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Reactivate can only be called when inactive */
        if (sclp_activation_state != sclp_activation_state_inactive) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return -EINVAL;
        }
        sclp_activation_state = sclp_activation_state_activating;
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_init_mask(1);
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc == 0)
                sclp_activation_state = sclp_activation_state_active;
        else
                sclp_activation_state = sclp_activation_state_inactive;
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
                               unsigned int param32, unsigned long param64)
{
        u32 finished_sccb;

        inc_irq_stat(IRQEXT_SCP);
        finished_sccb = param32 & 0xfffffff8;
        /* Is this the interrupt we are waiting for? */
        if (finished_sccb == 0)
                return;
        if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
                panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
                      finished_sccb);
        spin_lock(&sclp_lock);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_DONE;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
        unsigned long flags;

        spin_lock_irqsave(&sclp_lock, flags);
        if (sclp_running_state == sclp_running_state_running) {
                sclp_init_req.status = SCLP_REQ_FAILED;
                sclp_running_state = sclp_running_state_idle;
        }
        spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
        struct init_sccb *sccb;
        unsigned long flags;
        int retry;
        int rc;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Prepare init mask command */
        rc = register_external_interrupt(0x2401, sclp_check_handler);
        if (rc) {
                spin_unlock_irqrestore(&sclp_lock, flags);
                return rc;
        }
        for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
                __sclp_make_init_req(0, 0);
                sccb = (struct init_sccb *) sclp_init_req.sccb;
                rc = sclp_service_call(sclp_init_req.command, sccb);
                if (rc == -EIO)
                        break;
                sclp_init_req.status = SCLP_REQ_RUNNING;
                sclp_running_state = sclp_running_state_running;
                __sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
                                         sclp_check_timeout, 0);
                spin_unlock_irqrestore(&sclp_lock, flags);
                /* Enable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
                /* Wait for signal from interrupt or timeout */
                sclp_sync_wait();
                /* Disable service-signal interruption - needs to happen
                 * with IRQs enabled. */
                irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
                spin_lock_irqsave(&sclp_lock, flags);
                del_timer(&sclp_request_timer);
                if (sclp_init_req.status == SCLP_REQ_DONE &&
                    sccb->header.response_code == 0x20) {
                        rc = 0;
                        break;
                } else
                        rc = -EBUSY;
        }
        unregister_external_interrupt(0x2401, sclp_check_handler);
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        sclp_deactivate();
        return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
        .notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
        struct sclp_register *reg;
        unsigned long flags;

        if (!rollback) {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list)
                        reg->pm_event_posted = 0;
                spin_unlock_irqrestore(&sclp_lock, flags);
        }
        do {
                spin_lock_irqsave(&sclp_lock, flags);
                list_for_each_entry(reg, &sclp_reg_list, list) {
                        if (rollback && reg->pm_event_posted)
                                goto found;
                        if (!rollback && !reg->pm_event_posted)
                                goto found;
                }
                spin_unlock_irqrestore(&sclp_lock, flags);
                return;
found:
                spin_unlock_irqrestore(&sclp_lock, flags);
                if (reg->pm_event_fn)
                        reg->pm_event_fn(reg, sclp_pm_event);
                reg->pm_event_posted = rollback ? 0 : 1;
        } while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
        unsigned long flags;
        int rc;

        sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_suspended;
        spin_unlock_irqrestore(&sclp_lock, flags);

        /* Init suspend data */
        memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
        sclp_suspend_req.callback = sclp_suspend_req_cb;
        sclp_suspend_req.status = SCLP_REQ_FILLED;
        init_completion(&sclp_request_queue_flushed);

        rc = sclp_add_request(&sclp_suspend_req);
        if (rc == 0)
                wait_for_completion(&sclp_request_queue_flushed);
        else if (rc != -ENODATA)
                goto fail_thaw;

        rc = sclp_deactivate();
        if (rc)
                goto fail_thaw;
        return 0;

fail_thaw:
        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);
        sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
        return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
        unsigned long flags;
        int rc;

        rc = sclp_reactivate();
        if (rc)
                return rc;

        spin_lock_irqsave(&sclp_lock, flags);
        sclp_suspend_state = sclp_suspend_state_running;
        spin_unlock_irqrestore(&sclp_lock, flags);

        sclp_pm_event(event, 0);
        return 0;
}

static int sclp_thaw(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
        return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
        .freeze         = sclp_freeze,
        .thaw           = sclp_thaw,
        .restore        = sclp_restore,
};

static ssize_t sclp_show_console_pages(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR(con_pages, S_IRUSR, sclp_show_console_pages, NULL);

static ssize_t sclp_show_con_drop(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR(con_drop, S_IRUSR, sclp_show_con_drop, NULL);

static ssize_t sclp_show_console_full(struct device_driver *dev, char *buf)
{
        return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR(con_full, S_IRUSR, sclp_show_console_full, NULL);

static struct attribute *sclp_drv_attrs[] = {
        &driver_attr_con_pages.attr,
        &driver_attr_con_drop.attr,
        &driver_attr_con_full.attr,
        NULL,
};
static struct attribute_group sclp_drv_attr_group = {
        .attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
        &sclp_drv_attr_group,
        NULL,
};

static struct platform_driver sclp_pdrv = {
        .driver = {
                .name   = "sclp",
                .owner  = THIS_MODULE,
                .pm     = &sclp_pm_ops,
                .groups = sclp_drv_attr_groups,
        },
};
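
/*
 * Usage note (illustrative, not part of the original source): since sclp_pdrv
 * is a platform driver named "sclp", the read-only attributes above are
 * expected to appear under the driver's sysfs directory, typically
 *
 *      /sys/bus/platform/drivers/sclp/con_pages
 *      /sys/bus/platform/drivers/sclp/con_drop
 *      /sys/bus/platform/drivers/sclp/con_full
 *
 * e.g. reading con_full reports how many times the console dropped buffer
 * pages.
 */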

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&sclp_lock, flags);
        /* Check for previous or running initialization */
        if (sclp_init_state != sclp_init_state_uninitialized)
                goto fail_unlock;
        sclp_init_state = sclp_init_state_initializing;
        /* Set up variables */
        INIT_LIST_HEAD(&sclp_req_queue);
        INIT_LIST_HEAD(&sclp_reg_list);
        list_add(&sclp_state_change_event.list, &sclp_reg_list);
        init_timer(&sclp_request_timer);
        /* Check interface */
        spin_unlock_irqrestore(&sclp_lock, flags);
        rc = sclp_check_interface();
        spin_lock_irqsave(&sclp_lock, flags);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register reboot handler */
        rc = register_reboot_notifier(&sclp_reboot_notifier);
        if (rc)
                goto fail_init_state_uninitialized;
        /* Register interrupt handler */
        rc = register_external_interrupt(0x2401, sclp_interrupt_handler);
        if (rc)
                goto fail_unregister_reboot_notifier;
        sclp_init_state = sclp_init_state_initialized;
        spin_unlock_irqrestore(&sclp_lock, flags);
        /* Enable service-signal external interruption - needs to happen with
         * IRQs enabled. */
        irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
        sclp_init_mask(1);
        return 0;

fail_unregister_reboot_notifier:
        unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
        sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
        spin_unlock_irqrestore(&sclp_lock, flags);
        return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
                             unsigned long event, void *data)
{
        if (sclp_suspend_state == sclp_suspend_state_suspended)
                sclp_undo_suspend(SCLP_PM_EVENT_THAW);
        return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
        .notifier_call = sclp_panic_notify,
        .priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
        int rc;

        rc = platform_driver_register(&sclp_pdrv);
        if (rc)
                return rc;

        sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
        rc = PTR_RET(sclp_pdev);
        if (rc)
                goto fail_platform_driver_unregister;

        rc = atomic_notifier_chain_register(&panic_notifier_list,
                                            &sclp_on_panic_nb);
        if (rc)
                goto fail_platform_device_unregister;

        return sclp_init();

fail_platform_device_unregister:
        platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
        platform_driver_unregister(&sclp_pdrv);
        return rc;
}

arch_initcall(sclp_initcall);