// SPDX-License-Identifier: GPL-2.0
/*
 * core function to access sclp interface
 *
 * Copyright IBM Corp. 1999, 2009
 *
 * Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/completion.h>
#include <linux/platform_device.h>
#include <asm/types.h>
#include <asm/irq.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Suspend request */
static DECLARE_COMPLETION(sclp_request_queue_flushed);

/* Number of console pages to allocate, used by sclp_con.c and sclp_vt220.c */
int sclp_console_pages = SCLP_CONSOLE_PAGES;
/* Flag to indicate if buffer pages are dropped on buffer full condition */
int sclp_console_drop = 1;
/* Number of times the console dropped buffer pages */
unsigned long sclp_console_full;

static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
{
	complete(&sclp_request_queue_flushed);
}

static int __init sclp_setup_console_pages(char *str)
{
	int pages, rc;

	rc = kstrtoint(str, 0, &pages);
	if (!rc && pages >= SCLP_CONSOLE_PAGES)
		sclp_console_pages = pages;
	return 1;
}

__setup("sclp_con_pages=", sclp_setup_console_pages);
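
/* Illustrative note (not part of the original source): booting with
 * "sclp_con_pages=9" on the kernel command line raises the console
 * buffer pool to nine pages; values below SCLP_CONSOLE_PAGES are
 * silently ignored by the parser above. */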

static int __init sclp_setup_console_drop(char *str)
{
	int drop, rc;

	rc = kstrtoint(str, 0, &drop);
	if (!rc)
		sclp_console_drop = drop;
	return 1;
}

__setup("sclp_con_drop=", sclp_setup_console_drop);
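
/* Illustrative note (interpretation based on the flag's description
 * above): "sclp_con_drop=0" disables dropping of buffered console
 * pages when the pool is full. */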

static struct sclp_req sclp_suspend_req;

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running,
	sclp_running_state_reset_pending
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Internal state: is the driver suspended? */
static enum sclp_suspend_state_t {
	sclp_suspend_state_running,
	sclp_suspend_state_suspended,
} sclp_suspend_state = sclp_suspend_state_running;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	30

static void sclp_request_timeout(bool force_restart);
static void sclp_process_queue(void);
static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

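/* Queue the (single, statically allocated) read event data request
 * unless one is already pending. The request is added at the head of
 * the queue so that event data is fetched before further queued
 * requests are started. Called while sclp_lock is locked. */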
static void
__sclp_queue_read_req(void)
{
	if (sclp_reading_state == sclp_reading_state_idle) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = cb;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

static void sclp_request_timeout_restart(struct timer_list *unused)
{
	sclp_request_timeout(true);
}

static void sclp_request_timeout_normal(struct timer_list *unused)
{
	sclp_request_timeout(false);
}

/* Request timeout handler. Restart the request queue. If force_restart,
 * force restart of running request. */
static void sclp_request_timeout(bool force_restart)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (force_restart) {
		if (sclp_running_state == sclp_running_state_running) {
			/* Break running state and queue NOP read event request
			 * to get a defined interface state. */
			__sclp_queue_read_req();
			sclp_running_state = sclp_running_state_idle;
		}
	} else {
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_process_queue();
}

/*
 * Returns the expire value in jiffies of the next pending request timeout,
 * if any. Needs to be called with sclp_lock.
 */
static unsigned long __sclp_req_queue_find_next_timeout(void)
{
	unsigned long expires_next = 0;
	struct sclp_req *req;

	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (!expires_next ||
		    time_before(req->queue_expires, expires_next))
			expires_next = req->queue_expires;
	}
	return expires_next;
}

/*
 * Returns expired request, if any, and removes it from the list.
 */
static struct sclp_req *__sclp_req_queue_remove_expired_req(void)
{
	unsigned long flags, now;
	struct sclp_req *req;

	spin_lock_irqsave(&sclp_lock, flags);
	now = jiffies;
	/* Don't need list_for_each_safe because we break out after list_del */
	list_for_each_entry(req, &sclp_req_queue, list) {
		if (!req->queue_expires)
			continue;
		if (time_before_eq(req->queue_expires, now)) {
			if (req->status == SCLP_REQ_QUEUED) {
				req->status = SCLP_REQ_QUEUED_TIMEOUT;
				list_del(&req->list);
				goto out;
			}
		}
	}
	req = NULL;
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return req;
}

/*
 * Timeout handler for queued requests. Removes request from list and
 * invokes callback. This timer can be set per request in situations where
 * waiting too long would be harmful to the system, e.g. during SE reboot.
 */
static void sclp_req_queue_timeout(struct timer_list *unused)
{
	unsigned long flags, expires_next;
	struct sclp_req *req;

	do {
		req = __sclp_req_queue_remove_expired_req();
		if (req && req->callback)
			req->callback(req, req->callback_data);
	} while (req);

	spin_lock_irqsave(&sclp_lock, flags);
	expires_next = __sclp_req_queue_find_next_timeout();
	if (expires_next)
		mod_timer(&sclp_queue_timer, expires_next);
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = sclp_service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout_restart);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout_normal);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		if (!req->sccb)
			goto do_post;
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed */
		if (req->start_count > 1) {
			/* Cannot abort already submitted request - could still
			 * be active at the SCLP */
			__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
						 sclp_request_timeout_normal);
			break;
		}
do_post:
		/* Post-processing for aborted request */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

static int __sclp_can_add_request(struct sclp_req *req)
{
	if (req == &sclp_suspend_req || req == &sclp_init_req)
		return 1;
	if (sclp_suspend_state != sclp_suspend_state_running)
		return 0;
	if (sclp_init_state != sclp_init_state_initialized)
		return 0;
	if (sclp_activation_state != sclp_activation_state_active)
		return 0;
	return 1;
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if (!__sclp_can_add_request(req)) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	if (req->queue_timeout) {
		req->queue_expires = jiffies + req->queue_timeout * HZ;
		if (!timer_pending(&sclp_queue_timer) ||
		    time_after(sclp_queue_timer.expires, req->queue_expires))
			mod_timer(&sclp_queue_timer, req->queue_expires);
	} else
		req->queue_expires = 0;
	/* Start if request is first in list */
	if (sclp_running_state == sclp_running_state_idle &&
	    req->list.prev == &sclp_req_queue) {
		if (!req->sccb) {
			list_del(&req->list);
			rc = -ENODATA;
			goto out;
		}
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
out:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);
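
/*
 * Minimal usage sketch (illustrative only; the "my_" names are
 * hypothetical and not part of this file). A caller fills in the
 * command word, an SCCB and an optional callback, then queues the
 * request:
 *
 *	static void my_cb(struct sclp_req *req, void *data)
 *	{
 *		// inspect req->status and the SCCB response code here
 *	}
 *
 *	my_req.command = SCLP_CMDW_READ_EVENT_DATA;
 *	my_req.sccb = my_sccb;
 *	my_req.callback = my_cb;
 *	rc = sclp_add_request(&my_req);	// non-zero: rejected, e.g. suspended
 */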

/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		/* Check for malformed hardware response */
		if (evbuf->length == 0)
			break;
		/* Search for event handler */
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			if (reg->receive_mask & SCLP_EVTYP_MASK(evbuf->type))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -EOPNOTSUPP;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static void __sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READ_EVENT_DATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void sclp_interrupt_handler(struct ext_code ext_code,
				   unsigned int param32, unsigned long param64)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	inc_irq_stat(IRQEXT_SCP);
	spin_lock(&sclp_lock);
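	/* The interruption parameter carries the address of the finished
	 * SCCB in its upper bits (SCCBs are aligned, so the low three
	 * bits are free) and "event buffer pending" flags in the low
	 * bits. */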
	finished_sccb = param32 & 0xfffffff8;
	evbuf_pending = param32 & 0x3;
	if (finished_sccb) {
		del_timer(&sclp_request_timer);
		sclp_running_state = sclp_running_state_reset_pending;
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending &&
	    sclp_activation_state == sclp_activation_state_active)
		__sclp_queue_read_req();
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
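/* Rough conversion: one second is taken as 2^32 TOD units. Since TOD
 * clock bit 51 increments every microsecond, 2^32 units are slightly
 * more than a second, so the resulting timeout errs on the long side,
 * which is harmless for the busy-wait bound in sclp_sync_wait(). */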
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this
 * function is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long long old_tick;
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;
	int irq_context;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_tod_clock_fast() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	irq_context = in_interrupt();
	if (!irq_context)
		local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	old_tick = local_tick_disable();
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
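	/* Mask all external interrupt subclasses except service signal;
	 * CR0 bit 54 is the service-signal subclass mask. */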
	cr0_sync = cr0 & ~CR0_IRQ_SUBCLASS_MASK;
	cr0_sync |= 1UL << (63 - 54);
	__ctl_load(cr0_sync, 0, 0);
	__arch_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_tod_clock_fast() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(&sclp_request_timer);
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	if (!irq_context)
		_local_bh_enable();
	local_tick_enable(old_tick);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->send_mask & sclp_receive_mask;
			send_mask = reg->receive_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	u8		masks[2 * 1021 + 4];	/* variable length */
	/*
	 * u8		sclp_receive_mask[mask_length];
	 * u8		sclp_send_mask[mask_length];
	 * u32		read_data_function_mask;
	 */
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	BUILD_BUG_ON(sizeof(struct sclp_statechangebuf) > PAGE_SIZE);

	scbuf = (struct sclp_statechangebuf *) evbuf;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = sccb_get_recv_mask(scbuf);
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = sccb_get_send_mask(scbuf);
	spin_unlock_irqrestore(&sclp_lock, flags);
	if (scbuf->validity_sclp_active_facility_mask)
		sclp.facilities = scbuf->sclp_active_facility_mask;
	sclp_dispatch_state_change();
}

static struct sclp_register sclp_state_change_event = {
	.receive_mask = EVTYP_STATECHANGE_MASK,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	reg->pm_event_posted = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);
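
/*
 * Minimal registration sketch (illustrative only; "my_receiver" and the
 * chosen event type are hypothetical). Masks must not collide with an
 * already registered listener, or sclp_register() returns -EBUSY:
 *
 *	static void my_receiver(struct evbuf_header *evbuf)
 *	{
 *		// consume the event buffer
 *	}
 *
 *	static struct sclp_register my_event = {
 *		.receive_mask = EVTYP_MSG_MASK,
 *		.receiver_fn  = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_event);
 */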

/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
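		/* Bit 0x80 in the event buffer flags marks the buffer as
		 * processed; compact the SCCB by copying the remaining
		 * buffers over it. */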
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(sccb_mask_t receive_mask, sccb_mask_t send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITE_EVENT_MASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(*sccb);
	if (sclp_mask_compat_mode)
		sccb->mask_length = SCLP_MASK_SIZE_COMPAT;
	else
		sccb->mask_length = sizeof(sccb_mask_t);
	sccb_set_recv_mask(sccb, receive_mask);
	sccb_set_send_mask(sccb, send_mask);
	sccb_set_sclp_recv_mask(sccb, 0);
	sccb_set_sclp_send_mask(sccb, 0);
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb_get_sclp_recv_mask(sccb);
				sclp_send_mask = sccb_get_sclp_send_mask(sccb);
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void sclp_check_handler(struct ext_code ext_code,
			       unsigned int param32, unsigned long param64)
{
	u32 finished_sccb;

	inc_irq_stat(IRQEXT_SCP);
	finished_sccb = param32 & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb)
		panic("sclp: unsolicited interrupt for buffer at 0x%x\n",
		      finished_sccb);
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(struct timer_list *unused)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = sclp_service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		irq_subclass_unregister(IRQ_SUBCLASS_SERVICE_SIGNAL);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		rc = -EBUSY;
		if (sclp_init_req.status == SCLP_REQ_DONE) {
			if (sccb->header.response_code == 0x20) {
				rc = 0;
				break;
			} else if (sccb->header.response_code == 0x74f0) {
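				/* Response 0x74f0 apparently means the
				 * extended mask length was rejected; fall
				 * back to the compat mask size and restart
				 * the retry loop (interpretation inferred
				 * from the code, not documented here). */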
				if (!sclp_mask_compat_mode) {
					sclp_mask_compat_mode = true;
					retry = 0;
				}
			}
		}
	}
	unregister_external_irq(EXT_IRQ_SERVICE_SIG, sclp_check_handler);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/*
 * Suspend/resume SCLP notifier implementation
 */

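/* Post the given power-management event to every registered listener
 * exactly once. The list is re-scanned from the start after each
 * callback because sclp_lock has to be dropped while the callback
 * runs. On rollback, listeners that already saw the event are notified
 * again and their posted flag is cleared. */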
static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
{
	struct sclp_register *reg;
	unsigned long flags;

	if (!rollback) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list)
			reg->pm_event_posted = 0;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	do {
		spin_lock_irqsave(&sclp_lock, flags);
		list_for_each_entry(reg, &sclp_reg_list, list) {
			if (rollback && reg->pm_event_posted)
				goto found;
			if (!rollback && !reg->pm_event_posted)
				goto found;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
found:
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg->pm_event_fn)
			reg->pm_event_fn(reg, sclp_pm_event);
		reg->pm_event_posted = rollback ? 0 : 1;
	} while (1);
}

/*
 * Suspend/resume callbacks for platform device
 */

static int sclp_freeze(struct device *dev)
{
	unsigned long flags;
	int rc;

	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_suspended;
	spin_unlock_irqrestore(&sclp_lock, flags);

	/* Init suspend data */
	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
	sclp_suspend_req.callback = sclp_suspend_req_cb;
	sclp_suspend_req.status = SCLP_REQ_FILLED;
	init_completion(&sclp_request_queue_flushed);

	rc = sclp_add_request(&sclp_suspend_req);
	if (rc == 0)
		wait_for_completion(&sclp_request_queue_flushed);
	else if (rc != -ENODATA)
		goto fail_thaw;

	rc = sclp_deactivate();
	if (rc)
		goto fail_thaw;
	return 0;

fail_thaw:
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
	return rc;
}

static int sclp_undo_suspend(enum sclp_pm_event event)
{
	unsigned long flags;
	int rc;

	rc = sclp_reactivate();
	if (rc)
		return rc;

	spin_lock_irqsave(&sclp_lock, flags);
	sclp_suspend_state = sclp_suspend_state_running;
	spin_unlock_irqrestore(&sclp_lock, flags);

	sclp_pm_event(event, 0);
	return 0;
}

static int sclp_thaw(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
}

static int sclp_restore(struct device *dev)
{
	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
}

static const struct dev_pm_ops sclp_pm_ops = {
	.freeze		= sclp_freeze,
	.thaw		= sclp_thaw,
	.restore	= sclp_restore,
};

static ssize_t con_pages_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_pages);
}

static DRIVER_ATTR_RO(con_pages);

static ssize_t con_drop_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%i\n", sclp_console_drop);
}

static DRIVER_ATTR_RO(con_drop);

static ssize_t con_full_show(struct device_driver *dev, char *buf)
{
	return sprintf(buf, "%lu\n", sclp_console_full);
}

static DRIVER_ATTR_RO(con_full);

static struct attribute *sclp_drv_attrs[] = {
	&driver_attr_con_pages.attr,
	&driver_attr_con_drop.attr,
	&driver_attr_con_full.attr,
	NULL,
};
static struct attribute_group sclp_drv_attr_group = {
	.attrs = sclp_drv_attrs,
};
static const struct attribute_group *sclp_drv_attr_groups[] = {
	&sclp_drv_attr_group,
	NULL,
};

static struct platform_driver sclp_pdrv = {
	.driver = {
		.name	= "sclp",
		.pm	= &sclp_pm_ops,
		.groups = sclp_drv_attr_groups,
	},
};

static struct platform_device *sclp_pdev;

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized)
		goto fail_unlock;
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	timer_setup(&sclp_request_timer, NULL, 0);
	timer_setup(&sclp_queue_timer, sclp_req_queue_timeout, 0);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc)
		goto fail_init_state_uninitialized;
	/* Register interrupt handler */
	rc = register_external_irq(EXT_IRQ_SERVICE_SIG, sclp_interrupt_handler);
	if (rc)
		goto fail_unregister_reboot_notifier;
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL);
	sclp_init_mask(1);
	return 0;

fail_unregister_reboot_notifier:
	unregister_reboot_notifier(&sclp_reboot_notifier);
fail_init_state_uninitialized:
	sclp_init_state = sclp_init_state_uninitialized;
fail_unlock:
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/*
 * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
 * to print the panic message.
 */
static int sclp_panic_notify(struct notifier_block *self,
			     unsigned long event, void *data)
{
	if (sclp_suspend_state == sclp_suspend_state_suspended)
		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
	return NOTIFY_OK;
}

static struct notifier_block sclp_on_panic_nb = {
	.notifier_call = sclp_panic_notify,
	.priority = SCLP_PANIC_PRIO,
};

static __init int sclp_initcall(void)
{
	int rc;

	rc = platform_driver_register(&sclp_pdrv);
	if (rc)
		return rc;

	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
	rc = PTR_ERR_OR_ZERO(sclp_pdev);
	if (rc)
		goto fail_platform_driver_unregister;

	rc = atomic_notifier_chain_register(&panic_notifier_list,
					    &sclp_on_panic_nb);
	if (rc)
		goto fail_platform_device_unregister;

	return sclp_init();

fail_platform_device_unregister:
	platform_device_unregister(sclp_pdev);
fail_platform_driver_unregister:
	platform_driver_unregister(&sclp_pdrv);
	return rc;
}

arch_initcall(sclp_initcall);