// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;
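/*
 * Note on the state variables: the running/reading states above and the
 * activation/mask/suspend states below are all updated under sclp_lock.
 * The volatile qualifier matters in particular for sclp_running_state,
 * which sclp_sync_wait() polls in a busy loop without holding the lock.
 */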
/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (data) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}
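/*
 * Illustrative example of the per-request queue timeout: a caller that
 * sets req->queue_timeout = 5 before sclp_add_request() gets
 * req->queue_expires = jiffies + 5 * HZ. If the request is still
 * SCLP_REQ_QUEUED when sclp_queue_timer fires, it is removed from the
 * queue with status SCLP_REQ_QUEUED_TIMEOUT and its callback is invoked.
 */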
/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(unsigned long data)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}
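/*
 * Summary of __sclp_start_request() outcomes:
 *   rc == 0, status SCLP_REQ_RUNNING - started; watchdog timer armed with
 *	SCLP_RETRY_INTERVAL and forced-restart semantics (data != 0)
 *   rc == 0 after -EBUSY - left queued; retried after SCLP_BUSY_INTERVAL
 *	via sclp_request_timeout()
 *   rc != 0, status SCLP_REQ_FAILED - hard error from sclp_service_call()
 */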
/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout, 0);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
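/*
 * Minimal usage sketch for sclp_add_request() (illustrative only - real
 * callers, e.g. the event drivers, fill in a complete SCCB first):
 *
 *	static void my_callback(struct sclp_req *req, void *data)
 *	{
 *		complete((struct completion *) data);
 *	}
 *
 *	req->command = ...;		// SCLP command word
 *	req->sccb = ...;		// prepared SCCB
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_callback;
 *	req->callback_data = &done;
 *	if (sclp_add_request(req))
 *		...;	// rejected: suspended, inactive or not initialized
 */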
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}
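/*
 * The service-signal interruption parameter (param32) is decoded by the
 * handler below: masking with 0xfffffff8 yields the address of a finished
 * SCCB, if any, while the two low-order bits flag pending event buffers
 * that should be collected with a read event data request.
 */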
/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
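/*
 * Implementation note for sclp_sync_wait(): CR0 bit 54 (set via
 * 1UL << (63 - 54)) is the service-signal external interruption subclass
 * mask. All other external interruption subclasses and the timer tick are
 * disabled, so completion is detected solely through the service-signal
 * interrupt; an expired request timer is emulated by comparing the TOD
 * clock against the precomputed timeout value.
 */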
/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}
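/*
 * Typical registration of an event listener (illustrative sketch; compare
 * sclp_state_change_event above for a real in-file example):
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EVTYP_..._MASK,	// event type(s) to receive
 *		.receiver_fn = my_receiver_fn,
 *		.state_change_fn = my_state_change_fn,
 *	};
 *
 *	rc = sclp_register(&my_listener);
 *
 * sclp_register() returns -EBUSY if a mask bit is already claimed by
 * another listener, and re-runs the init mask request so the SCLP is
 * informed about the new masks.
 */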
/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}
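/*
 * The init SCCB prepared above is dual-purpose: receive_mask/send_mask
 * carry the masks we request, and on completion the SCLP returns the masks
 * it actually granted in sclp_receive_mask/sclp_send_mask, which
 * sclp_init_mask() below copies into the driver-global mask variables.
 */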
/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
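/*
 * sclp_deactivate() and sclp_reactivate() form a bracket: deactivation
 * writes an all-zero event mask (sclp_init_mask(0)) so the SCLP stops
 * presenting events, reactivation restores the masks calculated from the
 * registered listeners (sclp_init_mask(1)).
 */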
/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	init_timer(&sclp_queue_timer);
	sclp_queue_timer.function = sclp_req_queue_timeout;
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}
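/*
 * Note: sclp_init() is called both from sclp_register() and from the
 * initcall below; the sclp_init_state check under sclp_lock ensures the
 * interface is only brought up once, and a second call simply returns 0.
 */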
/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be
 * able to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);