// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/panic_notifier.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static LIST_HEAD(sclp_reg_list);

/* List of queued requests. */
static LIST_HEAD(sclp_req_queue);

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static void *sclp_read_sccb;
static struct init_sccb *sclp_init_sccb;

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    (time_before(req->queue_expires, expires_next)))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_init_req)
		return 1;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);
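
/*
 * Illustrative only: a minimal sketch of how a caller might submit a request
 * and wait for its completion, mirroring the pattern used by sclp_init_mask()
 * below. The function name and the SCCB contents are assumptions, not part of
 * this driver; real users (sclp_rw.c, sclp_vt220.c, ...) build their SCCBs
 * according to the event type they implement. If req.queue_timeout (seconds)
 * is set to bound the time spent on the queue, SCLP_REQ_QUEUED_TIMEOUT must
 * also be checked in the wait loop.
 */
#if 0	/* example, not compiled */
static int sclp_example_sync_request(sclp_cmdw_t command, void *sccb)
{
	struct sclp_req req;	/* must stay valid until completion */
	int rc;

	memset(&req, 0, sizeof(req));
	req.command = command;	/* e.g. SCLP_CMDW_WRITE_EVENT_DATA */
	req.sccb = sccb;	/* page-aligned buffer below 2G (GFP_DMA) */
	req.status = SCLP_REQ_FILLED;

	rc = sclp_add_request(&req);
	if (rc)
		return rc;
	/* Busy-wait; no timer interrupts are served on this CPU meanwhile. */
	while (req.status != SCLP_REQ_DONE && req.status != SCLP_REQ_FAILED)
		sclp_sync_wait();
	return (req.status == SCLP_REQ_DONE) ? 0 : -EIO;
}
#endif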

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8 sclp_receive_mask[mask_length];
	 * u8 sclp_send_mask[mask_length];
	 * u32 read_data_function_mask;
	 */
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);
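
/*
 * Illustrative only: a minimal sketch of how an event driver might register
 * with this core, in the style of sclp_state_change_event above. The event
 * mask and callback bodies are assumptions; real consumers such as sclp_con.c
 * or sclp_tty.c pick the EVTYP_* masks matching the event types they handle.
 */
#if 0	/* example, not compiled */
static void example_receiver_fn(struct evbuf_header *evbuf)
{
	/* Called (without sclp_lock held) for each received event buffer
	 * whose type is covered by receive_mask. */
}

static void example_state_change_fn(struct sclp_register *reg)
{
	/* reg->sclp_receive_mask / reg->sclp_send_mask now reflect what the
	 * SCLP actually granted; (re)start I/O accordingly. */
}

static struct sclp_register example_sclp_event = {
	.receive_mask	 = EVTYP_MSG_MASK,	/* illustrative event type */
	.receiver_fn	 = example_receiver_fn,
	.state_change_fn = example_state_change_fn,
};

static int __init example_register(void)
{
	/* Fails with -EBUSY if another listener already claimed the mask. */
	return sclp_register(&example_sclp_event);
}
#endif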

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb = sclp_init_sccb;

	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);
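
/*
 * Illustrative only: a minimal sketch of quiescing and resuming the SCLP
 * interface around work that must not be disturbed by event traffic (the
 * reboot notifier below uses sclp_deactivate() for exactly this reason).
 * The function name is hypothetical; see sclp_reactivate() below.
 */
#if 0	/* example, not compiled */
static int example_quiesced_operation(void)
{
	int rc;

	rc = sclp_deactivate();		/* reject new requests, zero all masks */
	if (rc)
		return rc;
	/* ... perform the disruption-sensitive work here ... */
	return sclp_reactivate();	/* restore the listeners' masks */
}
#endif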

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name = "sclp",
		.groups = sclp_drv_attr_groups,
	},
};
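
/*
 * Usage note: once the platform driver is registered (see sclp_initcall()
 * below), the read-only attributes defined above are typically exposed via
 * driver sysfs, e.g.
 *
 *	cat /sys/bus/platform/drivers/sclp/con_pages
 *	cat /sys/bus/platform/drivers/sclp/con_drop
 *	cat /sys/bus/platform/drivers/sclp/con_full
 *
 * sclp_console_pages and sclp_console_drop themselves can be set at boot time
 * via the sclp_con_pages= and sclp_con_drop= parameters handled near the top
 * of this file.
 */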

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	sclp_read_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	sclp_init_sccb = (void *) __get_free_page(GFP_ATOMIC | GFP_DMA);
	BUG_ON(!sclp_read_sccb || !sclp_init_sccb);
	/* Set up variables */
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
	free_page((unsigned long) sclp_read_sccb);
	free_page((unsigned long) sclp_init_sccb);
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	return sclp_init();
}

arch_initcall(sclp_initcall);