xref: /openbmc/linux/drivers/s390/char/sclp_vt220.c (revision 7b6d864b)
1 /*
2  * SCLP VT220 terminal driver.
3  *
4  * Copyright IBM Corp. 2003, 2009
5  *
6  * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com>
7  */
8 
9 #include <linux/module.h>
10 #include <linux/spinlock.h>
11 #include <linux/list.h>
12 #include <linux/wait.h>
13 #include <linux/timer.h>
14 #include <linux/kernel.h>
15 #include <linux/tty.h>
16 #include <linux/tty_driver.h>
17 #include <linux/tty_flip.h>
18 #include <linux/errno.h>
19 #include <linux/mm.h>
20 #include <linux/major.h>
21 #include <linux/console.h>
22 #include <linux/kdev_t.h>
23 #include <linux/interrupt.h>
24 #include <linux/init.h>
25 #include <linux/reboot.h>
26 #include <linux/slab.h>
27 
28 #include <asm/uaccess.h>
29 #include "sclp.h"
30 
31 #define SCLP_VT220_MAJOR		TTY_MAJOR
32 #define SCLP_VT220_MINOR		65
33 #define SCLP_VT220_DRIVER_NAME		"sclp_vt220"
34 #define SCLP_VT220_DEVICE_NAME		"ttysclp"
35 #define SCLP_VT220_CONSOLE_NAME		"ttyS"
36 #define SCLP_VT220_CONSOLE_INDEX	1	/* console=ttyS1 */
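/*
 * The VT220 console registers as ttyS1 (console=ttyS1); index 0 is left to
 * the line-mode SCLP console, so both console flavours can be selected
 * independently on the kernel command line.
 */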
37 
38 /* Representation of a single write request */
39 struct sclp_vt220_request {
40 	struct list_head list;
41 	struct sclp_req sclp_req;
42 	int retry_count;
43 };
44 
45 /* VT220 SCCB */
46 struct sclp_vt220_sccb {
47 	struct sccb_header header;
48 	struct evbuf_header evbuf;
49 };
50 
51 #define SCLP_VT220_MAX_CHARS_PER_BUFFER	(PAGE_SIZE - \
52 					 sizeof(struct sclp_vt220_request) - \
53 					 sizeof(struct sclp_vt220_sccb))
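/*
 * Each write buffer is a single page: sclp_vt220_initialize_page() places
 * the SCCB (SCCB header plus event buffer header) at the start of the page
 * and the struct sclp_vt220_request bookkeeping at the very end, leaving the
 * space in between for message text - which is what the macro above computes:
 *
 *	+------------------------------+ <- start of page (SCCB)
 *	| struct sclp_vt220_sccb       |
 *	+------------------------------+
 *	| message text                 |  SCLP_VT220_MAX_CHARS_PER_BUFFER
 *	+------------------------------+
 *	| struct sclp_vt220_request    |
 *	+------------------------------+ <- end of page
 */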
54 
55 /* Structures and data needed to register tty driver */
56 static struct tty_driver *sclp_vt220_driver;
57 
58 static struct tty_port sclp_vt220_port;
59 
60 /* Lock to protect internal data from concurrent access */
61 static spinlock_t sclp_vt220_lock;
62 
63 /* List of empty pages to be used as write request buffers */
64 static struct list_head sclp_vt220_empty;
65 
66 /* List of pending requests */
67 static struct list_head sclp_vt220_outqueue;
68 
69 /* Suspend mode flag */
70 static int sclp_vt220_suspended;
71 
72 /* Flag that output queue is currently running */
73 static int sclp_vt220_queue_running;
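/* Only one write request is in flight at any time: sclp_vt220_emit_current()
 * sets this flag when it starts the queue, and the request completion
 * callback keeps feeding the next queued buffer until the out queue is
 * empty. */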
74 
75 /* Timer used for delaying write requests to merge subsequent messages into
76  * a single buffer */
77 static struct timer_list sclp_vt220_timer;
78 
79 /* Pointer to current request buffer which has been partially filled but not
80  * yet sent */
81 static struct sclp_vt220_request *sclp_vt220_current_request;
82 
83 /* Number of characters in current request buffer */
84 static int sclp_vt220_buffered_chars;
85 
86 /* Counter controlling core driver initialization. */
87 static int __initdata sclp_vt220_init_count;
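/* Both the console setup (console_initcall) and the tty setup (__initcall)
 * call __sclp_vt220_init(); the counter ensures that buffers and the SCLP
 * registration are set up only once and torn down only by the last user. */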
88 
89 /* Flag indicating that sclp_vt220_current_request should already have
90  * been queued but wasn't because the SCLP was still processing another
91  * buffer */
92 static int sclp_vt220_flush_later;
93 
94 static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf);
95 static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
96 				   enum sclp_pm_event sclp_pm_event);
97 static int __sclp_vt220_emit(struct sclp_vt220_request *request);
98 static void sclp_vt220_emit_current(void);
99 
100 /* Registration structure for our interest in SCLP event buffers */
101 static struct sclp_register sclp_vt220_register = {
102 	.send_mask		= EVTYP_VT220MSG_MASK,
103 	.receive_mask		= EVTYP_VT220MSG_MASK,
104 	.state_change_fn	= NULL,
105 	.receiver_fn		= sclp_vt220_receiver_fn,
106 	.pm_event_fn		= sclp_vt220_pm_event_fn,
107 };
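/*
 * A single event type, EVTYP_VT220MSG, is used in both directions: the send
 * mask covers the write requests emitted by this driver, while the receive
 * mask routes incoming terminal input to sclp_vt220_receiver_fn().
 */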
108 
109 
110 /*
111  * Return the provided request buffer to the list of empty buffers and
112  * emit pending buffers if necessary.
113  */
114 static void
115 sclp_vt220_process_queue(struct sclp_vt220_request *request)
116 {
117 	unsigned long flags;
118 	void *page;
119 
120 	do {
121 		/* Put buffer back to list of empty buffers */
122 		page = request->sclp_req.sccb;
123 		spin_lock_irqsave(&sclp_vt220_lock, flags);
124 		/* Move request from outqueue to empty queue */
125 		list_del(&request->list);
126 		list_add_tail((struct list_head *) page, &sclp_vt220_empty);
127 		/* Check if there is a pending buffer on the out queue. */
128 		request = NULL;
129 		if (!list_empty(&sclp_vt220_outqueue))
130 			request = list_entry(sclp_vt220_outqueue.next,
131 					     struct sclp_vt220_request, list);
132 		if (!request || sclp_vt220_suspended) {
133 			sclp_vt220_queue_running = 0;
134 			spin_unlock_irqrestore(&sclp_vt220_lock, flags);
135 			break;
136 		}
137 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
138 	} while (__sclp_vt220_emit(request));
139 	if (request == NULL && sclp_vt220_flush_later)
140 		sclp_vt220_emit_current();
141 	tty_port_tty_wakeup(&sclp_vt220_port);
142 }
143 
144 #define SCLP_BUFFER_MAX_RETRY		1
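/* A buffer whose write request fails with an SCLP equipment check is retried
 * at most once before it is dropped and its page is recycled. */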
145 
146 /*
147  * Callback through which the result of a write request is reported by the
148  * SCLP.
149  */
150 static void
151 sclp_vt220_callback(struct sclp_req *request, void *data)
152 {
153 	struct sclp_vt220_request *vt220_request;
154 	struct sclp_vt220_sccb *sccb;
155 
156 	vt220_request = (struct sclp_vt220_request *) data;
157 	if (request->status == SCLP_REQ_FAILED) {
158 		sclp_vt220_process_queue(vt220_request);
159 		return;
160 	}
161 	sccb = (struct sclp_vt220_sccb *) vt220_request->sclp_req.sccb;
162 
163 	/* Check SCLP response code and choose suitable action	*/
164 	switch (sccb->header.response_code) {
165 	case 0x0020 :
166 		break;
167 
168 	case 0x05f0: /* Target resource in improper state */
169 		break;
170 
171 	case 0x0340: /* Contained SCLP equipment check */
172 		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
173 			break;
174 		/* Remove processed buffers and requeue rest */
175 		if (sclp_remove_processed((struct sccb_header *) sccb) > 0) {
176 			/* Not all buffers were processed */
177 			sccb->header.response_code = 0x0000;
178 			vt220_request->sclp_req.status = SCLP_REQ_FILLED;
179 			if (sclp_add_request(request) == 0)
180 				return;
181 		}
182 		break;
183 
184 	case 0x0040: /* SCLP equipment check */
185 		if (++vt220_request->retry_count > SCLP_BUFFER_MAX_RETRY)
186 			break;
187 		sccb->header.response_code = 0x0000;
188 		vt220_request->sclp_req.status = SCLP_REQ_FILLED;
189 		if (sclp_add_request(request) == 0)
190 			return;
191 		break;
192 
193 	default:
194 		break;
195 	}
196 	sclp_vt220_process_queue(vt220_request);
197 }
198 
199 /*
200  * Emit vt220 request buffer to SCLP. Return zero on success, non-zero
201  * otherwise.
202  */
203 static int
204 __sclp_vt220_emit(struct sclp_vt220_request *request)
205 {
206 	if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
207 		request->sclp_req.status = SCLP_REQ_FAILED;
208 		return -EIO;
209 	}
210 	request->sclp_req.command = SCLP_CMDW_WRITE_EVENT_DATA;
211 	request->sclp_req.status = SCLP_REQ_FILLED;
212 	request->sclp_req.callback = sclp_vt220_callback;
213 	request->sclp_req.callback_data = (void *) request;
214 
215 	return sclp_add_request(&request->sclp_req);
216 }
217 
218 /*
219  * Queue and emit current request.
220  */
221 static void
222 sclp_vt220_emit_current(void)
223 {
224 	unsigned long flags;
225 	struct sclp_vt220_request *request;
226 	struct sclp_vt220_sccb *sccb;
227 
228 	spin_lock_irqsave(&sclp_vt220_lock, flags);
229 	if (sclp_vt220_current_request) {
230 		sccb = (struct sclp_vt220_sccb *)
231 				sclp_vt220_current_request->sclp_req.sccb;
232 		/* Only emit buffers with content */
233 		if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) {
234 			list_add_tail(&sclp_vt220_current_request->list,
235 				      &sclp_vt220_outqueue);
236 			sclp_vt220_current_request = NULL;
237 			if (timer_pending(&sclp_vt220_timer))
238 				del_timer(&sclp_vt220_timer);
239 		}
240 		sclp_vt220_flush_later = 0;
241 	}
242 	if (sclp_vt220_queue_running || sclp_vt220_suspended)
243 		goto out_unlock;
244 	if (list_empty(&sclp_vt220_outqueue))
245 		goto out_unlock;
246 	request = list_first_entry(&sclp_vt220_outqueue,
247 				   struct sclp_vt220_request, list);
248 	sclp_vt220_queue_running = 1;
249 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
250 
251 	if (__sclp_vt220_emit(request))
252 		sclp_vt220_process_queue(request);
253 	return;
254 out_unlock:
255 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
256 }
257 
258 #define SCLP_NORMAL_WRITE	0x00
259 
260 /*
261  * Helper function to initialize a page with the sclp request structure.
262  */
263 static struct sclp_vt220_request *
264 sclp_vt220_initialize_page(void *page)
265 {
266 	struct sclp_vt220_request *request;
267 	struct sclp_vt220_sccb *sccb;
268 
269 	/* Place request structure at end of page */
270 	request = ((struct sclp_vt220_request *)
271 			((addr_t) page + PAGE_SIZE)) - 1;
272 	request->retry_count = 0;
273 	request->sclp_req.sccb = page;
274 	/* SCCB goes at start of page */
275 	sccb = (struct sclp_vt220_sccb *) page;
276 	memset((void *) sccb, 0, sizeof(struct sclp_vt220_sccb));
277 	sccb->header.length = sizeof(struct sclp_vt220_sccb);
278 	sccb->header.function_code = SCLP_NORMAL_WRITE;
279 	sccb->header.response_code = 0x0000;
280 	sccb->evbuf.type = EVTYP_VT220MSG;
281 	sccb->evbuf.length = sizeof(struct evbuf_header);
282 
283 	return request;
284 }
285 
286 static inline unsigned int
287 sclp_vt220_space_left(struct sclp_vt220_request *request)
288 {
289 	struct sclp_vt220_sccb *sccb;
290 	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
291 	return PAGE_SIZE - sizeof(struct sclp_vt220_request) -
292 	       sccb->header.length;
293 }
294 
295 static inline unsigned int
296 sclp_vt220_chars_stored(struct sclp_vt220_request *request)
297 {
298 	struct sclp_vt220_sccb *sccb;
299 	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
300 	return sccb->evbuf.length - sizeof(struct evbuf_header);
301 }
302 
303 /*
304  * Add msg to the buffer associated with request. Return the number of input
305  * characters consumed, which may be less than count if the buffer fills up.
306  */
307 static int
308 sclp_vt220_add_msg(struct sclp_vt220_request *request,
309 		   const unsigned char *msg, int count, int convertlf)
310 {
311 	struct sclp_vt220_sccb *sccb;
312 	void *buffer;
313 	unsigned char c;
314 	int from;
315 	int to;
316 
317 	if (count > sclp_vt220_space_left(request))
318 		count = sclp_vt220_space_left(request);
319 	if (count <= 0)
320 		return 0;
321 
322 	sccb = (struct sclp_vt220_sccb *) request->sclp_req.sccb;
323 	buffer = (void *) ((addr_t) sccb + sccb->header.length);
324 
325 	if (convertlf) {
326 		/* Perform linefeed conversion (0x0a -> 0x0a 0x0d) */
327 		for (from=0, to=0;
328 		     (from < count) && (to < sclp_vt220_space_left(request));
329 		     from++) {
330 			/* Retrieve character */
331 			c = msg[from];
332 			/* Perform conversion */
333 			if (c == 0x0a) {
334 				if (to + 1 < sclp_vt220_space_left(request)) {
335 					((unsigned char *) buffer)[to++] = c;
336 					((unsigned char *) buffer)[to++] = 0x0d;
337 				} else
338 					break;
339 
340 			} else
341 				((unsigned char *) buffer)[to++] = c;
342 		}
343 		sccb->header.length += to;
344 		sccb->evbuf.length += to;
345 		return from;
346 	} else {
347 		memcpy(buffer, (const void *) msg, count);
348 		sccb->header.length += count;
349 		sccb->evbuf.length += count;
350 		return count;
351 	}
352 }
353 
354 /*
355  * Emit buffer after having waited long enough for more data to arrive.
356  */
357 static void
358 sclp_vt220_timeout(unsigned long data)
359 {
360 	sclp_vt220_emit_current();
361 }
362 
363 #define BUFFER_MAX_DELAY	HZ/20
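/* Output is batched: a partially filled buffer is held back for up to HZ/20
 * jiffies (50ms) so that subsequent messages can be merged into the same
 * SCLP request before sclp_vt220_timeout() finally emits it. */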
364 
365 /*
366  * Drop the oldest queued console buffer if sclp_console_drop is set
367  */
368 static int
369 sclp_vt220_drop_buffer(void)
370 {
371 	struct list_head *list;
372 	struct sclp_vt220_request *request;
373 	void *page;
374 
375 	if (!sclp_console_drop)
376 		return 0;
377 	list = sclp_vt220_outqueue.next;
378 	if (sclp_vt220_queue_running)
379 		/* The first element is in I/O */
380 		list = list->next;
381 	if (list == &sclp_vt220_outqueue)
382 		return 0;
383 	list_del(list);
384 	request = list_entry(list, struct sclp_vt220_request, list);
385 	page = request->sclp_req.sccb;
386 	list_add_tail((struct list_head *) page, &sclp_vt220_empty);
387 	return 1;
388 }
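/* Note: sclp_console_drop is provided by the SCLP core driver; when it is
 * set (it can be enabled at boot time), the oldest queued page is sacrificed
 * above rather than stalling console output until a buffer drains. */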
389 
390 /*
391  * Internal implementation of the write function. Write COUNT bytes of data
392  * from memory at BUF to the SCLP interface. If the data does not fit into
393  * the current write buffer, emit the current one and allocate a new one. If
394  * no empty buffer is available, drop the oldest queued buffer or wait until
395  * one gets emptied; if MAY_FAIL is non-zero, give up instead and return the
396  * number of bytes written so far. If DO_SCHEDULE is non-zero, the buffer
397  * will be scheduled for emitting after a timeout - otherwise the caller has
398  * to explicitly call the flush function. A non-zero CONVERTLF parameter
399  * indicates that 0x0a characters in the message should be converted to
400  * 0x0a 0x0d. After completion, return the number of bytes written.
401  */
402 static int
403 __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
404 		   int convertlf, int may_fail)
405 {
406 	unsigned long flags;
407 	void *page;
408 	int written;
409 	int overall_written;
410 
411 	if (count <= 0)
412 		return 0;
413 	overall_written = 0;
414 	spin_lock_irqsave(&sclp_vt220_lock, flags);
415 	do {
416 		/* Create an sclp output buffer if none exists yet */
417 		if (sclp_vt220_current_request == NULL) {
418 			if (list_empty(&sclp_vt220_empty))
419 				sclp_console_full++;
420 			while (list_empty(&sclp_vt220_empty)) {
421 				if (may_fail || sclp_vt220_suspended)
422 					goto out;
423 				if (sclp_vt220_drop_buffer())
424 					break;
425 				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
426 
427 				sclp_sync_wait();
428 				spin_lock_irqsave(&sclp_vt220_lock, flags);
429 			}
430 			page = (void *) sclp_vt220_empty.next;
431 			list_del((struct list_head *) page);
432 			sclp_vt220_current_request =
433 				sclp_vt220_initialize_page(page);
434 		}
435 		/* Try to write the string to the current request buffer */
436 		written = sclp_vt220_add_msg(sclp_vt220_current_request,
437 					     buf, count, convertlf);
438 		overall_written += written;
439 		if (written == count)
440 			break;
441 		/*
442 		 * Not all characters could be written to the current
443 		 * output buffer. Emit the buffer, create a new buffer
444 		 * and then output the rest of the string.
445 		 */
446 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
447 		sclp_vt220_emit_current();
448 		spin_lock_irqsave(&sclp_vt220_lock, flags);
449 		buf += written;
450 		count -= written;
451 	} while (count > 0);
452 	/* Setup timer to output current console buffer after some time */
453 	if (sclp_vt220_current_request != NULL &&
454 	    !timer_pending(&sclp_vt220_timer) && do_schedule) {
455 		sclp_vt220_timer.function = sclp_vt220_timeout;
456 		sclp_vt220_timer.data = 0UL;
457 		sclp_vt220_timer.expires = jiffies + BUFFER_MAX_DELAY;
458 		add_timer(&sclp_vt220_timer);
459 	}
460 out:
461 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
462 	return overall_written;
463 }
464 
465 /*
466  * This routine is called by the kernel to write a series of
467  * characters to the tty device.  The characters may come from
468  * user space or kernel space.  This routine will return the
469  * number of characters actually accepted for writing.
470  */
471 static int
472 sclp_vt220_write(struct tty_struct *tty, const unsigned char *buf, int count)
473 {
474 	return __sclp_vt220_write(buf, count, 1, 0, 1);
475 }
476 
477 #define SCLP_VT220_SESSION_ENDED	0x01
478 #define	SCLP_VT220_SESSION_STARTED	0x80
479 #define SCLP_VT220_SESSION_DATA		0x00
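/* The first byte of an incoming VT220 event buffer is a session control
 * indicator; only SCLP_VT220_SESSION_DATA packets carry terminal input that
 * is forwarded to the tty; session start/end notifications are ignored. */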
480 
481 /*
482  * Called by the SCLP to report incoming event buffers.
483  */
484 static void
485 sclp_vt220_receiver_fn(struct evbuf_header *evbuf)
486 {
487 	char *buffer;
488 	unsigned int count;
489 
490 	buffer = (char *) ((addr_t) evbuf + sizeof(struct evbuf_header));
491 	count = evbuf->length - sizeof(struct evbuf_header);
492 
493 	switch (*buffer) {
494 	case SCLP_VT220_SESSION_ENDED:
495 	case SCLP_VT220_SESSION_STARTED:
496 		break;
497 	case SCLP_VT220_SESSION_DATA:
498 		/* Send input to line discipline */
499 		buffer++;
500 		count--;
501 		tty_insert_flip_string(&sclp_vt220_port, buffer, count);
502 		tty_flip_buffer_push(&sclp_vt220_port);
503 		break;
504 	}
505 }
506 
507 /*
508  * This routine is called when a particular tty device is opened.
509  */
510 static int
511 sclp_vt220_open(struct tty_struct *tty, struct file *filp)
512 {
513 	if (tty->count == 1) {
514 		tty_port_tty_set(&sclp_vt220_port, tty);
515 		sclp_vt220_port.low_latency = 0;
516 		if (!tty->winsize.ws_row && !tty->winsize.ws_col) {
517 			tty->winsize.ws_row = 24;
518 			tty->winsize.ws_col = 80;
519 		}
520 	}
521 	return 0;
522 }
523 
524 /*
525  * This routine is called when a particular tty device is closed.
526  */
527 static void
528 sclp_vt220_close(struct tty_struct *tty, struct file *filp)
529 {
530 	if (tty->count == 1)
531 		tty_port_tty_set(&sclp_vt220_port, NULL);
532 }
533 
534 /*
535  * This routine is called by the kernel to write a single
536  * character to the tty device.  If the kernel uses this routine,
537  * it must call the flush_chars() routine (if defined) when it is
538  * done stuffing characters into the driver.
539  */
540 static int
541 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
542 {
543 	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
544 }
545 
546 /*
547  * This routine is called by the kernel after it has written a
548  * series of characters to the tty device using put_char().
549  */
550 static void
551 sclp_vt220_flush_chars(struct tty_struct *tty)
552 {
553 	if (!sclp_vt220_queue_running)
554 		sclp_vt220_emit_current();
555 	else
556 		sclp_vt220_flush_later = 1;
557 }
558 
559 /*
560  * This routine returns the number of characters the tty driver
561  * will accept for queuing to be written.  This number is subject
562  * to change as output buffers get emptied, or if output flow
563  * control is asserted.
564  */
565 static int
566 sclp_vt220_write_room(struct tty_struct *tty)
567 {
568 	unsigned long flags;
569 	struct list_head *l;
570 	int count;
571 
572 	spin_lock_irqsave(&sclp_vt220_lock, flags);
573 	count = 0;
574 	if (sclp_vt220_current_request != NULL)
575 		count = sclp_vt220_space_left(sclp_vt220_current_request);
576 	list_for_each(l, &sclp_vt220_empty)
577 		count += SCLP_VT220_MAX_CHARS_PER_BUFFER;
578 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
579 	return count;
580 }
581 
582 /*
583  * Return number of buffered chars.
584  */
585 static int
586 sclp_vt220_chars_in_buffer(struct tty_struct *tty)
587 {
588 	unsigned long flags;
589 	struct list_head *l;
590 	struct sclp_vt220_request *r;
591 	int count;
592 
593 	spin_lock_irqsave(&sclp_vt220_lock, flags);
594 	count = 0;
595 	if (sclp_vt220_current_request != NULL)
596 		count = sclp_vt220_chars_stored(sclp_vt220_current_request);
597 	list_for_each(l, &sclp_vt220_outqueue) {
598 		r = list_entry(l, struct sclp_vt220_request, list);
599 		count += sclp_vt220_chars_stored(r);
600 	}
601 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
602 	return count;
603 }
604 
605 /*
606  * Hand the current buffer over to the hardware; queued buffers are then
607  * transmitted asynchronously via the request completion callbacks.
608  */
609 static void
610 sclp_vt220_flush_buffer(struct tty_struct *tty)
611 {
612 	sclp_vt220_emit_current();
613 }
614 
615 /* Release allocated pages. */
616 static void __init __sclp_vt220_free_pages(void)
617 {
618 	struct list_head *page, *p;
619 
620 	list_for_each_safe(page, p, &sclp_vt220_empty) {
621 		list_del(page);
622 		free_page((unsigned long) page);
623 	}
624 }
625 
626 /* Release memory and unregister from sclp core. Controlled by init counting -
627  * only the last invoker will actually perform these actions. */
628 static void __init __sclp_vt220_cleanup(void)
629 {
630 	sclp_vt220_init_count--;
631 	if (sclp_vt220_init_count != 0)
632 		return;
633 	sclp_unregister(&sclp_vt220_register);
634 	__sclp_vt220_free_pages();
635 	tty_port_destroy(&sclp_vt220_port);
636 }
637 
638 /* Allocate buffer pages and register with sclp core. Controlled by init
639  * counting - only the first invoker will actually perform these actions. */
640 static int __init __sclp_vt220_init(int num_pages)
641 {
642 	void *page;
643 	int i;
644 	int rc;
645 
646 	sclp_vt220_init_count++;
647 	if (sclp_vt220_init_count != 1)
648 		return 0;
649 	spin_lock_init(&sclp_vt220_lock);
650 	INIT_LIST_HEAD(&sclp_vt220_empty);
651 	INIT_LIST_HEAD(&sclp_vt220_outqueue);
652 	init_timer(&sclp_vt220_timer);
653 	tty_port_init(&sclp_vt220_port);
654 	sclp_vt220_current_request = NULL;
655 	sclp_vt220_buffered_chars = 0;
656 	sclp_vt220_flush_later = 0;
657 
658 	/* Allocate pages for output buffering */
659 	rc = -ENOMEM;
660 	for (i = 0; i < num_pages; i++) {
661 		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
662 		if (!page)
663 			goto out;
664 		list_add_tail(page, &sclp_vt220_empty);
665 	}
666 	rc = sclp_register(&sclp_vt220_register);
667 out:
668 	if (rc) {
669 		__sclp_vt220_free_pages();
670 		sclp_vt220_init_count--;
671 		tty_port_destroy(&sclp_vt220_port);
672 	}
673 	return rc;
674 }
675 
676 static const struct tty_operations sclp_vt220_ops = {
677 	.open = sclp_vt220_open,
678 	.close = sclp_vt220_close,
679 	.write = sclp_vt220_write,
680 	.put_char = sclp_vt220_put_char,
681 	.flush_chars = sclp_vt220_flush_chars,
682 	.write_room = sclp_vt220_write_room,
683 	.chars_in_buffer = sclp_vt220_chars_in_buffer,
684 	.flush_buffer = sclp_vt220_flush_buffer,
685 };
686 
687 /*
688  * Register driver with SCLP and Linux and initialize internal tty structures.
689  */
690 static int __init sclp_vt220_tty_init(void)
691 {
692 	struct tty_driver *driver;
693 	int rc;
694 
695 	/* Note: we're not testing for CONSOLE_IS_SCLP here to preserve
696 	 * symmetry between VM and LPAR systems regarding ttyS1. */
697 	driver = alloc_tty_driver(1);
698 	if (!driver)
699 		return -ENOMEM;
700 	rc = __sclp_vt220_init(MAX_KMEM_PAGES);
701 	if (rc)
702 		goto out_driver;
703 
704 	driver->driver_name = SCLP_VT220_DRIVER_NAME;
705 	driver->name = SCLP_VT220_DEVICE_NAME;
706 	driver->major = SCLP_VT220_MAJOR;
707 	driver->minor_start = SCLP_VT220_MINOR;
708 	driver->type = TTY_DRIVER_TYPE_SYSTEM;
709 	driver->subtype = SYSTEM_TYPE_TTY;
710 	driver->init_termios = tty_std_termios;
711 	driver->flags = TTY_DRIVER_REAL_RAW;
712 	tty_set_operations(driver, &sclp_vt220_ops);
713 	tty_port_link_device(&sclp_vt220_port, driver, 0);
714 
715 	rc = tty_register_driver(driver);
716 	if (rc)
717 		goto out_init;
718 	sclp_vt220_driver = driver;
719 	return 0;
720 
721 out_init:
722 	__sclp_vt220_cleanup();
723 out_driver:
724 	put_tty_driver(driver);
725 	return rc;
726 }
727 __initcall(sclp_vt220_tty_init);
728 
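/*
 * Emit the current buffer, cancel the batching timer and busy-wait (via
 * sclp_sync_wait()) until all queued buffers have been handed over to the
 * SCLP. Used on suspend and from the panic/reboot notifiers, where sleeping
 * is not an option.
 */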
729 static void __sclp_vt220_flush_buffer(void)
730 {
731 	unsigned long flags;
732 
733 	sclp_vt220_emit_current();
734 	spin_lock_irqsave(&sclp_vt220_lock, flags);
735 	if (timer_pending(&sclp_vt220_timer))
736 		del_timer(&sclp_vt220_timer);
737 	while (sclp_vt220_queue_running) {
738 		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
739 		sclp_sync_wait();
740 		spin_lock_irqsave(&sclp_vt220_lock, flags);
741 	}
742 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
743 }
744 
745 /*
746  * Resume console: If there are cached messages, emit them.
747  */
748 static void sclp_vt220_resume(void)
749 {
750 	unsigned long flags;
751 
752 	spin_lock_irqsave(&sclp_vt220_lock, flags);
753 	sclp_vt220_suspended = 0;
754 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
755 	sclp_vt220_emit_current();
756 }
757 
758 /*
759  * Suspend console: Set suspend flag and flush console
760  */
761 static void sclp_vt220_suspend(void)
762 {
763 	unsigned long flags;
764 
765 	spin_lock_irqsave(&sclp_vt220_lock, flags);
766 	sclp_vt220_suspended = 1;
767 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
768 	__sclp_vt220_flush_buffer();
769 }
770 
771 static void sclp_vt220_pm_event_fn(struct sclp_register *reg,
772 				   enum sclp_pm_event sclp_pm_event)
773 {
774 	switch (sclp_pm_event) {
775 	case SCLP_PM_EVENT_FREEZE:
776 		sclp_vt220_suspend();
777 		break;
778 	case SCLP_PM_EVENT_RESTORE:
779 	case SCLP_PM_EVENT_THAW:
780 		sclp_vt220_resume();
781 		break;
782 	}
783 }
784 
785 #ifdef CONFIG_SCLP_VT220_CONSOLE
786 
787 static void
788 sclp_vt220_con_write(struct console *con, const char *buf, unsigned int count)
789 {
790 	__sclp_vt220_write((const unsigned char *) buf, count, 1, 1, 0);
791 }
792 
793 static struct tty_driver *
794 sclp_vt220_con_device(struct console *c, int *index)
795 {
796 	*index = 0;
797 	return sclp_vt220_driver;
798 }
799 
800 static int
801 sclp_vt220_notify(struct notifier_block *self,
802 			  unsigned long event, void *data)
803 {
804 	__sclp_vt220_flush_buffer();
805 	return NOTIFY_OK;
806 }
807 
808 static struct notifier_block on_panic_nb = {
809 	.notifier_call = sclp_vt220_notify,
810 	.priority = 1,
811 };
812 
813 static struct notifier_block on_reboot_nb = {
814 	.notifier_call = sclp_vt220_notify,
815 	.priority = 1,
816 };
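/* Both notifiers flush any buffered console output so that the last messages
 * are pushed out to the SCLP before the system goes down. */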
817 
818 /* Structure needed to register with printk */
819 static struct console sclp_vt220_console =
820 {
821 	.name = SCLP_VT220_CONSOLE_NAME,
822 	.write = sclp_vt220_con_write,
823 	.device = sclp_vt220_con_device,
824 	.flags = CON_PRINTBUFFER,
825 	.index = SCLP_VT220_CONSOLE_INDEX
826 };
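/* CON_PRINTBUFFER causes the messages already accumulated in the kernel log
 * buffer to be replayed on this console once it is registered. */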
827 
828 static int __init
829 sclp_vt220_con_init(void)
830 {
831 	int rc;
832 
833 	if (!CONSOLE_IS_SCLP)
834 		return 0;
835 	rc = __sclp_vt220_init(sclp_console_pages);
836 	if (rc)
837 		return rc;
838 	/* Attach linux console */
839 	atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb);
840 	register_reboot_notifier(&on_reboot_nb);
841 	register_console(&sclp_vt220_console);
842 	return 0;
843 }
844 
845 console_initcall(sclp_vt220_con_init);
846 #endif /* CONFIG_SCLP_VT220_CONSOLE */
847 
848