xref: /openbmc/linux/drivers/tty/tty_buffer.c (revision 861e10be)
1 /*
2  * Tty buffer allocation management
3  */
4 
5 #include <linux/types.h>
6 #include <linux/errno.h>
7 #include <linux/tty.h>
8 #include <linux/tty_driver.h>
9 #include <linux/tty_flip.h>
10 #include <linux/timer.h>
11 #include <linux/string.h>
12 #include <linux/slab.h>
13 #include <linux/sched.h>
14 #include <linux/init.h>
15 #include <linux/wait.h>
16 #include <linux/bitops.h>
17 #include <linux/delay.h>
18 #include <linux/module.h>
19 
20 /**
21  *	tty_buffer_free_all		-	free buffers used by a tty
22  *	@port: tty port to free from
23  *
24  *	Remove all the buffers pending on a tty whether queued with data
25  *	or in the free ring. Must be called when the tty is no longer in use.
26  *
27  *	Locking: none
28  */
29 
30 void tty_buffer_free_all(struct tty_port *port)
31 {
32 	struct tty_bufhead *buf = &port->buf;
33 	struct tty_buffer *thead;
34 
35 	while ((thead = buf->head) != NULL) {
36 		buf->head = thead->next;
37 		kfree(thead);
38 	}
39 	while ((thead = buf->free) != NULL) {
40 		buf->free = thead->next;
41 		kfree(thead);
42 	}
43 	buf->tail = NULL;
44 	buf->memory_used = 0;
45 }
46 
47 /**
48  *	tty_buffer_alloc	-	allocate a tty buffer
49  *	@port: tty port
50  *	@size: desired size (characters)
51  *
52  *	Allocate a new tty buffer to hold the desired number of characters.
53  *	Return NULL if out of memory or the allocation would exceed the
54  *	per-device queue limit.
55  *
56  *	Locking: Caller must hold port->buf.lock
57  */
58 
59 static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
60 {
61 	struct tty_buffer *p;
62 
63 	if (port->buf.memory_used + size > 65536)
64 		return NULL;
65 	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
66 	if (p == NULL)
67 		return NULL;
68 	p->used = 0;
69 	p->size = size;
70 	p->next = NULL;
71 	p->commit = 0;
72 	p->read = 0;
73 	p->char_buf_ptr = (char *)(p->data);
74 	p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
75 	port->buf.memory_used += size;
76 	return p;
77 }
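
/*
 * Buffer layout note: the single kmalloc() above provides the struct
 * tty_buffer header followed by 2 * size bytes of payload.  The first
 * size bytes (char_buf_ptr) hold the received characters and the second
 * size bytes (flag_buf_ptr) hold the matching per-character flag bytes:
 *
 *	[ struct tty_buffer | size data bytes | size flag bytes ]
 */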
78 
79 /**
80  *	tty_buffer_free		-	free a tty buffer
81  *	@port: tty port owning the buffer
82  *	@b: the buffer to free
83  *
84  *	Free a tty buffer, or add it to the free list according to our
85  *	internal strategy.
86  *
87  *	Locking: Caller must hold port->buf.lock
88  */
89 
90 static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
91 {
92 	struct tty_bufhead *buf = &port->buf;
93 
94 	/* Dumb strategy for now - should keep some stats */
95 	buf->memory_used -= b->size;
96 	WARN_ON(buf->memory_used < 0);
97 
98 	if (b->size >= 512)
99 		kfree(b);
100 	else {
101 		b->next = buf->free;
102 		buf->free = b;
103 	}
104 }
105 
106 /**
107  *	__tty_buffer_flush		-	flush full tty buffers
108  *	@port: tty port to flush
109  *
110  *	Flush all the buffers containing receive data. Caller must
111  *	hold the buffer lock and must have ensured no parallel flush to
112  *	ldisc is running.
113  *
114  *	Locking: Caller must hold port->buf.lock
115  */
116 
117 static void __tty_buffer_flush(struct tty_port *port)
118 {
119 	struct tty_bufhead *buf = &port->buf;
120 	struct tty_buffer *thead;
121 
122 	while ((thead = buf->head) != NULL) {
123 		buf->head = thead->next;
124 		tty_buffer_free(port, thead);
125 	}
126 	buf->tail = NULL;
127 }
128 
129 /**
130  *	tty_buffer_flush		-	flush full tty buffers
131  *	@tty: tty to flush
132  *
133  *	Flush all the buffers containing receive data. If the buffer is
134  *	being processed by flush_to_ldisc then we defer the processing
135  *	to that function.
136  *
137  *	Locking: none required from the caller; takes port->buf.lock internally
138  */
139 
140 void tty_buffer_flush(struct tty_struct *tty)
141 {
142 	struct tty_port *port = tty->port;
143 	struct tty_bufhead *buf = &port->buf;
144 	unsigned long flags;
145 
146 	spin_lock_irqsave(&buf->lock, flags);
147 
148 	/* If the data is being pushed to the tty layer then we can't
149 	   process it here. Instead set a flag and the flush_to_ldisc
150 	   path will process the flush request before it exits */
151 	if (test_bit(TTYP_FLUSHING, &port->iflags)) {
152 		set_bit(TTYP_FLUSHPENDING, &port->iflags);
153 		spin_unlock_irqrestore(&buf->lock, flags);
154 		wait_event(tty->read_wait,
155 				test_bit(TTYP_FLUSHPENDING, &port->iflags) == 0);
156 		return;
157 	} else
158 		__tty_buffer_flush(port);
159 	spin_unlock_irqrestore(&buf->lock, flags);
160 }
161 
162 /**
163  *	tty_buffer_find		-	find a free tty buffer
164  *	@port: tty port owning the buffer
165  *	@size: characters wanted
166  *
167  *	Locate an existing suitable tty buffer or if we are lacking one then
168  *	allocate a new one. We round our buffers off in 256 character chunks
169  *	to get better allocation behaviour.
170  *
171  *	Locking: Caller must hold port->buf.lock
172  */
173 
174 static struct tty_buffer *tty_buffer_find(struct tty_port *port, size_t size)
175 {
176 	struct tty_buffer **tbh = &port->buf.free;
177 	while ((*tbh) != NULL) {
178 		struct tty_buffer *t = *tbh;
179 		if (t->size >= size) {
180 			*tbh = t->next;
181 			t->next = NULL;
182 			t->used = 0;
183 			t->commit = 0;
184 			t->read = 0;
185 			port->buf.memory_used += t->size;
186 			return t;
187 		}
188 		tbh = &((*tbh)->next);
189 	}
190 	/* Round the buffer size up to the next multiple of 256 bytes */
191 	size = (size + 0xFF) & ~0xFF;
192 	return tty_buffer_alloc(port, size);
193 	/* Should possibly check if this fails for the largest buffer we
194 	   have queued and recycle that? */
195 }
196 /**
197  *	__tty_buffer_request_room		-	grow tty buffer if needed
198  *	@port: tty port structure
199  *	@size: size desired
200  *
201  *	Make at least size bytes of linear space available for the tty
202  *	buffer. If we fail, return the size we managed to find.
203  *	Locking: Caller must hold port->buf.lock
204  */
205 static int __tty_buffer_request_room(struct tty_port *port, size_t size)
206 {
207 	struct tty_bufhead *buf = &port->buf;
208 	struct tty_buffer *b, *n;
209 	int left;
210 	/* OPTIMISATION: We could keep a per tty "zero" sized buffer to
211 	   remove this conditional if it's worth it. This would be invisible
212 	   to the callers */
213 	b = buf->tail;
214 	if (b != NULL)
215 		left = b->size - b->used;
216 	else
217 		left = 0;
218 
219 	if (left < size) {
220 		/* This is the slow path - looking for new buffers to use */
221 		if ((n = tty_buffer_find(port, size)) != NULL) {
222 			if (b != NULL) {
223 				b->next = n;
224 				b->commit = b->used;
225 			} else
226 				buf->head = n;
227 			buf->tail = n;
228 		} else
229 			size = left;
230 	}
231 
232 	return size;
233 }
234 
235 
236 /**
237  *	tty_buffer_request_room		-	grow tty buffer if needed
238  *	@tty: tty structure
239  *	@size: size desired
240  *
241  *	Make at least size bytes of linear space available for the tty
242  *	buffer. If we fail, return the size we managed to find.
243  *
244  *	Locking: Takes port->buf.lock
245  */
246 int tty_buffer_request_room(struct tty_struct *tty, size_t size)
247 {
248 	struct tty_port *port = tty->port;
249 	unsigned long flags;
250 	int length;
251 
252 	spin_lock_irqsave(&port->buf.lock, flags);
253 	length = __tty_buffer_request_room(port, size);
254 	spin_unlock_irqrestore(&port->buf.lock, flags);
255 	return length;
256 }
257 EXPORT_SYMBOL_GPL(tty_buffer_request_room);
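
/*
 * Usage sketch (illustrative only, not part of the original file): a driver
 * pre-growing the flip buffer before queueing characters one at a time.
 * example_rx_chars() is a hypothetical name; tty_insert_flip_char() is the
 * inline helper assumed to be provided by <linux/tty_flip.h>.
 */
static void example_rx_chars(struct tty_struct *tty,
			     const unsigned char *data, size_t pending)
{
	/* May be granted less than requested if the 64KB limit is near */
	int room = tty_buffer_request_room(tty, pending);
	int i;

	for (i = 0; i < room; i++)
		tty_insert_flip_char(tty, data[i], TTY_NORMAL);

	/* Commit the queued data and schedule the ldisc worker */
	tty_flip_buffer_push(tty);
}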
258 
259 /**
260  *	tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
261  *	@tty: tty structure
262  *	@chars: characters
263  *	@flag: flag value for each character
264  *	@size: size
265  *
266  *	Queue a series of bytes to the tty buffering. All the characters
267  *	passed are marked with the supplied flag. Returns the number added.
268  *
269  *	Locking: Called functions may take port->buf.lock
270  */
271 
272 int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
273 		const unsigned char *chars, char flag, size_t size)
274 {
275 	struct tty_bufhead *buf = &tty->port->buf;
276 	int copied = 0;
277 	do {
278 		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
279 		int space;
280 		unsigned long flags;
281 		struct tty_buffer *tb;
282 
283 		spin_lock_irqsave(&buf->lock, flags);
284 		space = __tty_buffer_request_room(tty->port, goal);
285 		tb = buf->tail;
286 		/* If there is no space then tb may be NULL */
287 		if (unlikely(space == 0)) {
288 			spin_unlock_irqrestore(&buf->lock, flags);
289 			break;
290 		}
291 		memcpy(tb->char_buf_ptr + tb->used, chars, space);
292 		memset(tb->flag_buf_ptr + tb->used, flag, space);
293 		tb->used += space;
294 		spin_unlock_irqrestore(&buf->lock, flags);
295 		copied += space;
296 		chars += space;
297 		/* There is a small chance that we need to split the data over
298 		   several buffers. If this is the case we must loop */
299 	} while (unlikely(size > copied));
300 	return copied;
301 }
302 EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
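
/*
 * Usage sketch (illustrative only): queueing a block of received bytes that
 * all carry TTY_NORMAL and pushing them to the line discipline.
 * example_rx_block() is a hypothetical name; the tty_insert_flip_string()
 * inline in <linux/tty_flip.h> is assumed to wrap this call with
 * flag == TTY_NORMAL.
 */
static void example_rx_block(struct tty_struct *tty,
			     const unsigned char *data, size_t len)
{
	/* Returns the number of bytes actually queued; the per-port
	   memory limit can make this a short count */
	int queued = tty_insert_flip_string_fixed_flag(tty, data,
						       TTY_NORMAL, len);

	if (queued)
		tty_flip_buffer_push(tty);
}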
303 
304 /**
305  *	tty_insert_flip_string_flags	-	Add characters to the tty buffer
306  *	@tty: tty structure
307  *	@chars: characters
308  *	@flags: flag bytes
309  *	@size: size
310  *
311  *	Queue a series of bytes to the tty buffering. For each character
312  *	the flags array indicates the status of the character. Returns the
313  *	number added.
314  *
315  *	Locking: Called functions may take port->buf.lock
316  */
317 
318 int tty_insert_flip_string_flags(struct tty_struct *tty,
319 		const unsigned char *chars, const char *flags, size_t size)
320 {
321 	struct tty_bufhead *buf = &tty->port->buf;
322 	int copied = 0;
323 	do {
324 		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
325 		int space;
326 		unsigned long __flags;
327 		struct tty_buffer *tb;
328 
329 		spin_lock_irqsave(&buf->lock, __flags);
330 		space = __tty_buffer_request_room(tty->port, goal);
331 		tb = buf->tail;
332 		/* If there is no space then tb may be NULL */
333 		if (unlikely(space == 0)) {
334 			spin_unlock_irqrestore(&buf->lock, __flags);
335 			break;
336 		}
337 		memcpy(tb->char_buf_ptr + tb->used, chars, space);
338 		memcpy(tb->flag_buf_ptr + tb->used, flags, space);
339 		tb->used += space;
340 		spin_unlock_irqrestore(&buf->lock, __flags);
341 		copied += space;
342 		chars += space;
343 		flags += space;
344 		/* There is a small chance that we need to split the data over
345 		   several buffers. If this is the case we must loop */
346 	} while (unlikely(size > copied));
347 	return copied;
348 }
349 EXPORT_SYMBOL(tty_insert_flip_string_flags);
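
/*
 * Usage sketch (illustrative only): queueing bytes with a matching
 * per-character flag array, here marking bytes that arrived with a parity
 * error.  example_rx_with_errors() and the parity_err array are
 * hypothetical.
 */
static void example_rx_with_errors(struct tty_struct *tty,
				   const unsigned char *data,
				   const bool *parity_err, size_t len)
{
	char flags[64];
	size_t i, n = min_t(size_t, len, sizeof(flags));

	for (i = 0; i < n; i++)
		flags[i] = parity_err[i] ? TTY_PARITY : TTY_NORMAL;

	if (tty_insert_flip_string_flags(tty, data, flags, n))
		tty_flip_buffer_push(tty);
}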
350 
351 /**
352  *	tty_schedule_flip	-	push characters to ldisc
353  *	@tty: tty to push from
354  *
355  *	Takes any pending buffers and transfers their ownership to the
356  *	ldisc side of the queue. It then schedules those characters for
357  *	processing by the line discipline.
358  *	Note that this function can only be used when the low_latency flag
359  *	is unset. Otherwise the workqueue won't be flushed.
360  *
361  *	Locking: Takes port->buf.lock
362  */
363 
364 void tty_schedule_flip(struct tty_struct *tty)
365 {
366 	struct tty_bufhead *buf = &tty->port->buf;
367 	unsigned long flags;
368 	WARN_ON(tty->low_latency);
369 
370 	spin_lock_irqsave(&buf->lock, flags);
371 	if (buf->tail != NULL)
372 		buf->tail->commit = buf->tail->used;
373 	spin_unlock_irqrestore(&buf->lock, flags);
374 	schedule_work(&buf->work);
375 }
376 EXPORT_SYMBOL(tty_schedule_flip);
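
/*
 * Note: as the code above and tty_flip_buffer_push() below show, this is
 * the same commit-and-schedule sequence that tty_flip_buffer_push()
 * performs in its !low_latency branch; drivers that never set low_latency
 * may use either call after queueing data.
 */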
377 
378 /**
379  *	tty_prepare_flip_string		-	make room for characters
380  *	@tty: tty
381  *	@chars: return pointer for character write area
382  *	@size: desired size
383  *
384  *	Prepare a block of space in the buffer for data. Returns the length
385  *	available and buffer pointer to the space which is now allocated and
386  *	accounted for as ready for normal characters. This is used for drivers
387  *	that need their own block copy routines into the buffer. There is no
388  *	guarantee the buffer is a DMA target!
389  *
390  *	Locking: May call functions taking port->buf.lock
391  */
392 
393 int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars,
394 		size_t size)
395 {
396 	struct tty_bufhead *buf = &tty->port->buf;
397 	int space;
398 	unsigned long flags;
399 	struct tty_buffer *tb;
400 
401 	spin_lock_irqsave(&buf->lock, flags);
402 	space = __tty_buffer_request_room(tty->port, size);
403 
404 	tb = buf->tail;
405 	if (likely(space)) {
406 		*chars = tb->char_buf_ptr + tb->used;
407 		memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
408 		tb->used += space;
409 	}
410 	spin_unlock_irqrestore(&buf->lock, flags);
411 	return space;
412 }
413 EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
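
/*
 * Usage sketch (illustrative only): reserving a block in the flip buffer
 * and filling it with a driver-side copy.  example_rx_blockcopy() is a
 * hypothetical name; the reserved space has already been accounted as used
 * and pre-flagged TTY_NORMAL by tty_prepare_flip_string().
 */
static void example_rx_blockcopy(struct tty_struct *tty,
				 const unsigned char *src, size_t len)
{
	unsigned char *dst;
	int space = tty_prepare_flip_string(tty, &dst, len);

	if (space) {
		memcpy(dst, src, space);
		tty_flip_buffer_push(tty);
	}
}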
414 
415 /**
416  *	tty_prepare_flip_string_flags	-	make room for characters
417  *	@tty: tty
418  *	@chars: return pointer for character write area
419  *	@flags: return pointer for status flag write area
420  *	@size: desired size
421  *
422  *	Prepare a block of space in the buffer for data. Returns the length
423  *	available and buffer pointer to the space which is now allocated and
424  *	accounted for as ready for characters. This is used for drivers
425  *	that need their own block copy routines into the buffer. There is no
426  *	guarantee the buffer is a DMA target!
427  *
428  *	Locking: May call functions taking port->buf.lock
429  */
430 
431 int tty_prepare_flip_string_flags(struct tty_struct *tty,
432 			unsigned char **chars, char **flags, size_t size)
433 {
434 	struct tty_bufhead *buf = &tty->port->buf;
435 	int space;
436 	unsigned long __flags;
437 	struct tty_buffer *tb;
438 
439 	spin_lock_irqsave(&buf->lock, __flags);
440 	space = __tty_buffer_request_room(tty->port, size);
441 
442 	tb = buf->tail;
443 	if (likely(space)) {
444 		*chars = tb->char_buf_ptr + tb->used;
445 		*flags = tb->flag_buf_ptr + tb->used;
446 		tb->used += space;
447 	}
448 	spin_unlock_irqrestore(&buf->lock, __flags);
449 	return space;
450 }
451 EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
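
/*
 * Usage sketch (illustrative only): as above, but the caller must also fill
 * in the per-character flag bytes itself, since this variant does not
 * pre-set them.  example_rx_blockcopy_flags() is a hypothetical name.
 */
static void example_rx_blockcopy_flags(struct tty_struct *tty,
				       const unsigned char *src, size_t len)
{
	unsigned char *dst;
	char *fl;
	int space = tty_prepare_flip_string_flags(tty, &dst, &fl, len);

	if (space) {
		memcpy(dst, src, space);
		memset(fl, TTY_NORMAL, space);
		tty_flip_buffer_push(tty);
	}
}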
452 
453 
454 
455 /**
456  *	flush_to_ldisc		-	flush data from the buffer chain to the ldisc
457  *	@work: tty structure passed from work queue.
458  *
459  *	This routine is called from the flip buffer work item to flush data
460  *	from the buffer chain to the line discipline.
461  *
462  *	Locking: holds port->buf.lock to guard buffer list. Drops the lock
463  *	while invoking the line discipline receive_buf method. The
464  *	receive_buf method is single threaded for each tty instance.
465  */
466 
467 static void flush_to_ldisc(struct work_struct *work)
468 {
469 	struct tty_port *port = container_of(work, struct tty_port, buf.work);
470 	struct tty_bufhead *buf = &port->buf;
471 	struct tty_struct *tty;
472 	unsigned long flags;
473 	struct tty_ldisc *disc;
474 
475 	tty = port->itty;
476 	if (WARN_RATELIMIT(tty == NULL, "tty is NULL\n"))
477 		return;
478 
479 	disc = tty_ldisc_ref(tty);
480 	if (disc == NULL)	/*  !TTY_LDISC */
481 		return;
482 
483 	spin_lock_irqsave(&buf->lock, flags);
484 
485 	if (!test_and_set_bit(TTYP_FLUSHING, &port->iflags)) {
486 		struct tty_buffer *head;
487 		while ((head = buf->head) != NULL) {
488 			int count;
489 			char *char_buf;
490 			unsigned char *flag_buf;
491 
492 			count = head->commit - head->read;
493 			if (!count) {
494 				if (head->next == NULL)
495 					break;
496 				buf->head = head->next;
497 				tty_buffer_free(port, head);
498 				continue;
499 			}
500 			/* Ldisc or user is trying to flush the buffers
501 			   we are feeding to the ldisc, stop feeding the
502 			   line discipline as we want to empty the queue */
503 			if (test_bit(TTYP_FLUSHPENDING, &port->iflags))
504 				break;
505 			if (!tty->receive_room)
506 				break;
507 			if (count > tty->receive_room)
508 				count = tty->receive_room;
509 			char_buf = head->char_buf_ptr + head->read;
510 			flag_buf = head->flag_buf_ptr + head->read;
511 			head->read += count;
512 			spin_unlock_irqrestore(&buf->lock, flags);
513 			disc->ops->receive_buf(tty, char_buf,
514 							flag_buf, count);
515 			spin_lock_irqsave(&buf->lock, flags);
516 		}
517 		clear_bit(TTYP_FLUSHING, &port->iflags);
518 	}
519 
520 	/* We may have a deferred request to flush the input buffer,
521 	   if so pull the chain under the lock and empty the queue */
522 	if (test_bit(TTYP_FLUSHPENDING, &port->iflags)) {
523 		__tty_buffer_flush(port);
524 		clear_bit(TTYP_FLUSHPENDING, &port->iflags);
525 		wake_up(&tty->read_wait);
526 	}
527 	spin_unlock_irqrestore(&buf->lock, flags);
528 
529 	tty_ldisc_deref(disc);
530 }
531 
532 /**
533  *	tty_flush_to_ldisc	-	flush buffered data to the line discipline
534  *	@tty: tty to push
535  *
536  *	Push the terminal flip buffers to the line discipline.
537  *
538  *	Must not be called from IRQ context.
539  */
540 void tty_flush_to_ldisc(struct tty_struct *tty)
541 {
542 	if (!tty->low_latency)
543 		flush_work(&tty->port->buf.work);
544 }
545 
546 /**
547  *	tty_flip_buffer_push	-	push terminal flip buffers to the ldisc
548  *	@tty: tty to push
549  *
550  *	Queue a push of the terminal flip buffers to the line discipline. This
551  *	function must not be called from IRQ context if tty->low_latency is set.
552  *
553  *	In the event of the queue being busy for flipping the work will be
554  *	held off and retried later.
555  *
556  *	Locking: tty buffer lock. Driver locks in low latency mode.
557  */
558 
559 void tty_flip_buffer_push(struct tty_struct *tty)
560 {
561 	struct tty_bufhead *buf = &tty->port->buf;
562 	unsigned long flags;
563 
564 	spin_lock_irqsave(&buf->lock, flags);
565 	if (buf->tail != NULL)
566 		buf->tail->commit = buf->tail->used;
567 	spin_unlock_irqrestore(&buf->lock, flags);
568 
569 	if (tty->low_latency)
570 		flush_to_ldisc(&buf->work);
571 	else
572 		schedule_work(&buf->work);
573 }
574 EXPORT_SYMBOL(tty_flip_buffer_push);
575 
576 /**
577  *	tty_buffer_init		-	prepare a tty buffer structure
578  *	@port: tty port to initialise
579  *
580  *	Set up the initial state of the buffer management for a tty device.
581  *	Must be called before the other tty buffer functions are used.
582  *
583  *	Locking: none
584  */
585 
586 void tty_buffer_init(struct tty_port *port)
587 {
588 	struct tty_bufhead *buf = &port->buf;
589 
590 	spin_lock_init(&buf->lock);
591 	buf->head = NULL;
592 	buf->tail = NULL;
593 	buf->free = NULL;
594 	buf->memory_used = 0;
595 	INIT_WORK(&buf->work, flush_to_ldisc);
596 }
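
/*
 * Usage sketch (illustrative only): drivers normally do not call
 * tty_buffer_init() directly; it is expected to run during port setup,
 * typically from tty_port_init().  struct example_uart and
 * example_uart_setup() are hypothetical.
 */
struct example_uart {
	struct tty_port port;
};

static void example_uart_setup(struct example_uart *uart)
{
	/* Sets up uart->port, including its flip buffer state
	   (head/tail/free lists, lock and flush_to_ldisc work) */
	tty_port_init(&uart->port);
}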
597 
598