// SPDX-License-Identifier: GPL-2.0
/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include "tty.h"

#define MIN_TTYB_SIZE	256
#define TTYB_ALIGN_MASK	255

/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT	(640 * 1024UL)

/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */

#define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)

/**
 * tty_buffer_lock_exclusive - gain exclusive access to buffer
 * @port: tty port owning the flip buffer
 *
 * Guarantees safe use of the &tty_ldisc_ops.receive_buf() method by excluding
 * the buffer work and any pending flush from using the flip buffer. Data can
 * continue to be added concurrently to the flip buffer from the driver side.
 *
 * See also tty_buffer_unlock_exclusive().
 */
void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);

/**
 * tty_buffer_unlock_exclusive - release exclusive access
 * @port: tty port owning the flip buffer
 *
 * The buffer work is restarted if there is data in the flip buffer.
 *
 * See also tty_buffer_lock_exclusive().
 */
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	int restart;

	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);

/**
 * tty_buffer_space_avail - return unused buffer space
 * @port: tty port owning the flip buffer
 *
 * Returns: the # of bytes which can be written by the driver without reaching
 * the buffer limit.
 *
 * Note: this does not guarantee that memory is available to write the returned
 * # of bytes (use tty_prepare_flip_string() to pre-allocate if memory
 * guarantee is required).
 */
unsigned int tty_buffer_space_avail(struct tty_port *port)
{
	int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);

	return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);
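
/*
 * Example (illustrative sketch, not part of this file's build): the in-tree
 * user of the exclusive lock is paste_selection(), which feeds data straight
 * to the line discipline and must keep the buffer work from running
 * concurrently. Condensed, the pattern looks like this, where "port", "ld",
 * "data" and "n" are the caller's own variables:
 *
 *	tty_buffer_lock_exclusive(port);
 *	...
 *	n = tty_ldisc_receive_buf(ld, data, NULL, n);
 *	...
 *	tty_buffer_unlock_exclusive(port);
 */
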
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->flags = 0;
}

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @port: tty port to free from
 *
 * Remove all the buffers pending on a tty whether queued with data or in the
 * free ring. Must be called when the tty is no longer in use.
 */
void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;
	unsigned int freed = 0;
	int still_used;

	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		freed += p->size;
		if (p->size > 0)
			kfree(p);
	}
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	still_used = atomic_xchg(&buf->mem_used, 0);
	WARN(still_used != freed, "we still have not freed %d bytes!",
	     still_used - freed);
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @port: tty port
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters. We
 * round our buffers off in 256 character chunks to get better allocation
 * behaviour.
 *
 * Returns: %NULL if out of memory or the allocation would exceed the per
 * device queue.
 */
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	 * have queued and recycle that ?
	 */
	if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	atomic_add(size, &port->buf.mem_used);
	return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @port: tty port owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our internal
 * strategy.
 */
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
	struct tty_bufhead *buf = &port->buf;

	/* Dumb strategy for now - should keep some stats */
	WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

	if (b->size > MIN_TTYB_SIZE)
		kfree(b);
	else if (b->size > 0)
		llist_add(&b->free, &buf->free);
}
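
/*
 * Worked example (illustrative): tty_buffer_alloc() rounds each request up to
 * the next 256-byte boundary, so asking for 300 characters yields
 * __ALIGN_MASK(300, 255) == 512. The kmalloc() then reserves twice that on
 * top of the header - 512 bytes of character data plus 512 flag bytes - while
 * mem_used is charged only the rounded size. That is why the comment on
 * TTYB_DEFAULT_MEM_LIMIT says the actual memory consumed is more than 2x the
 * configured byte limit.
 */
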
/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 * @ld: optional ldisc ptr (must be referenced)
 *
 * Flush all the buffers containing receive data. If @ld != %NULL, flush the
 * ldisc input buffer.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
 */
void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	/* paired w/ release in __tty_buffer_request_room; ensures there are
	 * no pending memory accesses to the freed buffer
	 */
	while ((next = smp_load_acquire(&buf->head->next)) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	buf->head->read = buf->head->commit;

	if (ld && ld->ops->flush_buffer)
		ld->ops->flush_buffer(tty);

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}

/**
 * __tty_buffer_request_room - grow tty buffer if needed
 * @port: tty port
 * @size: size desired
 * @flags: buffer flags if new buffer allocated (default = 0)
 *
 * Make at least @size bytes of linear space available for the tty buffer.
 *
 * Will change over to a new buffer if the current buffer is encoded as
 * %TTYB_NORMAL (so has no flags buffer) and the new buffer requires a flags
 * buffer.
 *
 * Returns: the size we managed to find.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
				     int flags)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left, change;

	b = buf->tail;
	if (b->flags & TTYB_NORMAL)
		left = 2 * b->size - b->used;
	else
		left = b->size - b->used;

	change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
	if (change || left < size) {
		/* This is the slow path - looking for new buffers to use */
		n = tty_buffer_alloc(port, size);
		if (n != NULL) {
			n->flags = flags;
			buf->tail = n;
			/* paired w/ acquire in flush_to_ldisc(); ensures
			 * flush_to_ldisc() sees buffer data.
			 */
			smp_store_release(&b->commit, b->used);
			/* paired w/ acquire in flush_to_ldisc(); ensures the
			 * latest commit value can be read before the head is
			 * advanced to the next buffer
			 */
			smp_store_release(&b->next, n);
		} else if (change)
			size = 0;
		else
			size = left;
	}
	return size;
}

int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
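
/*
 * Example (illustrative sketch): the return value may be smaller than the
 * request once the memory limit is near, so a driver that must not overrun
 * the flip buffer sizes its copy against what was actually granted. "want",
 * "have" and "data" below are hypothetical driver-side variables:
 *
 *	len = tty_buffer_request_room(port, want);
 *	len = min(len, have);
 *	tty_insert_flip_string(port, data, len);
 */
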
/**
 * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters passed are
 * marked with the supplied flag.
 *
 * Returns: the number added.
 */
int tty_insert_flip_string_fixed_flag(struct tty_port *port,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;

	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
		int space = __tty_buffer_request_room(port, goal, flags);
		struct tty_buffer *tb = port->buf.tail;

		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		 * several buffers. If this is the case we must loop.
		 */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

/**
 * tty_insert_flip_string_flags - add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character the flags
 * array indicates the status of the character.
 *
 * Returns: the number added.
 */
int tty_insert_flip_string_flags(struct tty_port *port,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;

	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(port, goal);
		struct tty_buffer *tb = port->buf.tail;

		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		 * several buffers. If this is the case we must loop.
		 */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

/**
 * __tty_insert_flip_char - add one character to the tty buffer
 * @port: tty port
 * @ch: character
 * @flag: flag byte
 *
 * Queue a single byte @ch to the tty buffering, with an optional flag. This is
 * the slow path of tty_insert_flip_char().
 */
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
	struct tty_buffer *tb;
	int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;

	if (!__tty_buffer_request_room(port, 1, flags))
		return 0;

	tb = port->buf.tail;
	if (~tb->flags & TTYB_NORMAL)
		*flag_buf_ptr(tb, tb->used) = flag;
	*char_buf_ptr(tb, tb->used++) = ch;

	return 1;
}
EXPORT_SYMBOL(__tty_insert_flip_char);

/**
 * tty_prepare_flip_string - make room for characters
 * @port: tty port
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data.
 *
 * This is used for drivers that need their own block copy routines into the
 * buffer. There is no guarantee the buffer is a DMA target!
 *
 * Returns: the length available and buffer pointer (@chars) to the space which
 * is now allocated and accounted for as ready for normal characters.
 */
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
			    size_t size)
{
	int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);

	if (likely(space)) {
		struct tty_buffer *tb = port->buf.tail;

		*chars = char_buf_ptr(tb, tb->used);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
		tb->used += space;
	}
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
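
/*
 * Example (illustrative sketch): tty_prepare_flip_string() suits drivers that
 * block-copy out of device memory themselves. The space is accounted for as
 * soon as the call returns, so write only up to the returned length.
 * "fifo_level" and "fifo_base" are hypothetical device-specific values; only
 * the tty_* calls are real, and per the kerneldoc above the returned area is
 * not guaranteed to be usable as a DMA target:
 *
 *	unsigned char *dst;
 *	int len = tty_prepare_flip_string(port, &dst, fifo_level);
 *
 *	if (len > 0) {
 *		memcpy_fromio(dst, fifo_base, len);
 *		tty_flip_buffer_push(port);
 *	}
 */
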
/**
 * tty_ldisc_receive_buf - forward data to line discipline
 * @ld: line discipline to process input
 * @p: char buffer
 * @f: %TTY_NORMAL, %TTY_BREAK, etc. flags buffer
 * @count: number of bytes to process
 *
 * Callers other than flush_to_ldisc() need to exclude the kworker from
 * concurrent use of the line discipline, see paste_selection().
 *
 * Returns: the number of bytes processed.
 */
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
			  const char *f, int count)
{
	if (ld->ops->receive_buf2)
		count = ld->ops->receive_buf2(ld->tty, p, f, count);
	else {
		count = min_t(int, count, ld->tty->receive_room);
		if (count && ld->ops->receive_buf)
			ld->ops->receive_buf(ld->tty, p, f, count);
	}
	return count;
}
EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf);

static int
receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
{
	unsigned char *p = char_buf_ptr(head, head->read);
	const char *f = NULL;
	int n;

	if (~head->flags & TTYB_NORMAL)
		f = flag_buf_ptr(head, head->read);

	n = port->client_ops->receive_buf(port, p, f, count);
	if (n > 0)
		memset(p, 0, n);
	return n;
}

/**
 * flush_to_ldisc - flush data from buffer to ldisc
 * @work: tty structure passed from work queue.
 *
 * This routine is called out of the software interrupt to flush data from the
 * buffer chain to the line discipline.
 *
 * The receive_buf() method is single threaded for each tty instance.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
 */
static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_port *port = container_of(work, struct tty_port, buf.work);
	struct tty_bufhead *buf = &port->buf;

	mutex_lock(&buf->lock);

	while (1) {
		struct tty_buffer *head = buf->head;
		struct tty_buffer *next;
		int count;

		/* Ldisc or user is trying to gain exclusive access */
		if (atomic_read(&buf->priority))
			break;

		/* paired w/ release in __tty_buffer_request_room();
		 * ensures commit value read is not stale if the head
		 * is advancing to the next buffer
		 */
		next = smp_load_acquire(&head->next);
		/* paired w/ release in __tty_buffer_request_room() or in
		 * tty_buffer_flush(); ensures we see the committed buffer data
		 */
		count = smp_load_acquire(&head->commit) - head->read;
		if (!count) {
			if (next == NULL)
				break;
			buf->head = next;
			tty_buffer_free(port, head);
			continue;
		}

		count = receive_buf(port, head, count);
		if (!count)
			break;
		head->read += count;

		if (need_resched())
			cond_resched();
	}

	mutex_unlock(&buf->lock);
}

/**
 * tty_flip_buffer_push - push terminal buffers
 * @port: tty port to push
 *
 * Queue a push of the terminal flip buffers to the line discipline. Can be
 * called from IRQ/atomic context.
 *
 * In the event of the queue being busy for flipping the work will be held off
 * and retried later.
 */
void tty_flip_buffer_push(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	/*
	 * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
	 * buffer data.
	 */
	smp_store_release(&buf->tail->commit, buf->tail->used);
	queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
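
/*
 * Example (illustrative sketch): the canonical producer for this file is a
 * driver's receive interrupt, which queues bytes and then schedules the flush
 * work in one go. "rx_data_ready()" and "rx_read_byte()" stand in for
 * hypothetical device-specific register accessors; the two tty_* calls are
 * the real API:
 *
 *	while (rx_data_ready(uart))
 *		tty_insert_flip_char(port, rx_read_byte(uart), TTY_NORMAL);
 *	tty_flip_buffer_push(port);
 */
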
/**
 * tty_buffer_init - prepare a tty buffer structure
 * @port: tty port to initialise
 *
 * Set up the initial state of the buffer management for a tty device. Must be
 * called before the other tty buffer functions are used.
 */
void tty_buffer_init(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	mutex_init(&buf->lock);
	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;
	init_llist_head(&buf->free);
	atomic_set(&buf->mem_used, 0);
	atomic_set(&buf->priority, 0);
	INIT_WORK(&buf->work, flush_to_ldisc);
	buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}

/**
 * tty_buffer_set_limit - change the tty buffer memory limit
 * @port: tty port to change
 * @limit: memory limit to set
 *
 * Change the tty buffer memory limit.
 *
 * Must be called before the other tty buffer functions are used.
 */
int tty_buffer_set_limit(struct tty_port *port, int limit)
{
	if (limit < MIN_TTYB_SIZE)
		return -EINVAL;
	port->buf.mem_limit = limit;
	return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);

/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass(struct tty_port *port)
{
	lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
}

bool tty_buffer_restart_work(struct tty_port *port)
{
	return queue_work(system_unbound_wq, &port->buf.work);
}

bool tty_buffer_cancel_work(struct tty_port *port)
{
	return cancel_work_sync(&port->buf.work);
}

void tty_buffer_flush_work(struct tty_port *port)
{
	flush_work(&port->buf.work);
}
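
/*
 * Example (illustrative): tty_buffer_init() is normally run on a driver's
 * behalf by tty_port_init(), so raising the limit for a bursty high-rate
 * device is a one-liner afterwards. The doubled limit below is an arbitrary
 * example value, not a recommendation:
 *
 *	tty_port_init(port);
 *	tty_buffer_set_limit(port, 2 * TTYB_DEFAULT_MEM_LIMIT);
 */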