/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>


#define MIN_TTYB_SIZE	256
#define TTYB_ALIGN_MASK	255

/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT	65536

/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc() for the allocation
 * logic this must match.
 */

#define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)


/**
 * tty_buffer_lock_exclusive	-	gain exclusive access to buffer
 * tty_buffer_unlock_exclusive	-	release exclusive access
 *
 * @port - tty_port owning the flip buffer
 *
 * Guarantees safe use of the line discipline's receive_buf() method by
 * excluding the buffer work and any pending flush from using the flip
 * buffer. Data can continue to be added concurrently to the flip buffer
 * from the driver side.
 *
 * On release, the buffer work is restarted if there is data in the
 * flip buffer.
 */

void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);

void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	int restart;

	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);

/**
 * tty_buffer_space_avail	-	return unused buffer space
 * @port - tty_port owning the flip buffer
 *
 * Returns the # of bytes which can be written by the driver without
 * reaching the buffer limit.
 *
 * Note: this does not guarantee that memory is available to write
 * the returned # of bytes (use tty_prepare_flip_string_xxx() to
 * pre-allocate if memory guarantee is required).
 */

int tty_buffer_space_avail(struct tty_port *port)
{
	int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
	return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);

static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->flags = 0;
}
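/*
 * Illustrative sketch (not part of this file): a driver's receive path
 * might use tty_buffer_space_avail() for flow control before queueing
 * data, as hinted in the kernel-doc above. The demo_* names and the
 * DEMO_RX_CHUNK threshold are hypothetical.
 *
 *	static void demo_rx_work(struct tty_port *port, struct demo_hw *hw)
 *	{
 *		if (tty_buffer_space_avail(port) < DEMO_RX_CHUNK) {
 *			demo_throttle_sender(hw);	// e.g. deassert RTS
 *			return;
 *		}
 *		demo_queue_rx(port, hw);
 *	}
 */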
/**
 * tty_buffer_free_all	-	free buffers used by a tty
 * @port: tty port to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring. Must be called when the tty is no longer in use.
 */

void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;

	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		if (p->size > 0)
			kfree(p);
	}
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	atomic_set(&buf->mem_used, 0);
}

/**
 * tty_buffer_alloc	-	allocate a tty buffer
 * @port: tty port owning the flip buffer
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * We round our buffers off in 256 character chunks to get better
 * allocation behaviour.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue.
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ? */
	if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
		return NULL;
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	atomic_add(size, &port->buf.mem_used);
	return p;
}

/**
 * tty_buffer_free	-	free a tty buffer
 * @port: tty port owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy.
 */

static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
	struct tty_bufhead *buf = &port->buf;

	/* Dumb strategy for now - should keep some stats */
	WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

	if (b->size > MIN_TTYB_SIZE)
		kfree(b);
	else if (b->size > 0)
		llist_add(&b->free, &buf->free);
}
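/*
 * Worked example of the accounting above: a tty_buffer_alloc() request
 * for 300 bytes rounds up to 512 via __ALIGN_MASK(300, 255), so kmalloc
 * is asked for sizeof(struct tty_buffer) + 1024 bytes - 512 for the
 * character data plus 512 for the per-character flags - and mem_used is
 * charged 512. tty_buffer_free() later credits the same 512 back.
 */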
/**
 * tty_buffer_flush	-	flush full tty buffers
 * @tty: tty to flush
 * @ld: optional ldisc ptr (must be referenced)
 *
 * Flush all the buffers containing receive data. If ld != NULL,
 * flush the ldisc input buffer.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */

void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	while ((next = buf->head->next) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	buf->head->read = buf->head->commit;

	if (ld && ld->ops->flush_buffer)
		ld->ops->flush_buffer(tty);

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}

/**
 * tty_buffer_request_room	-	grow tty buffer if needed
 * @port: tty port structure
 * @size: size desired
 * @flags: buffer flags if new buffer allocated (default = 0)
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Will change over to a new buffer if the current buffer is encoded as
 * TTY_NORMAL (so has no flags buffer) and the new buffer requires
 * a flags buffer.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
				     int flags)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left, change;

	b = buf->tail;
	if (b->flags & TTYB_NORMAL)
		left = 2 * b->size - b->used;
	else
		left = b->size - b->used;

	change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
	if (change || left < size) {
		/* This is the slow path - looking for new buffers to use */
		n = tty_buffer_alloc(port, size);
		if (n != NULL) {
			n->flags = flags;
			buf->tail = n;
			b->commit = b->used;
			/* paired w/ barrier in flush_to_ldisc(); ensures the
			 * latest commit value can be read before the head is
			 * advanced to the next buffer
			 */
			smp_wmb();
			b->next = n;
		} else if (change)
			size = 0;
		else
			size = left;
	}
	return size;
}

int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
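/*
 * Illustrative sketch (not part of this file): a driver can size its
 * read against tty_buffer_request_room() and then add characters with
 * the tty_insert_flip_*() helpers from <linux/tty_flip.h>. The data[]
 * array and len below are hypothetical.
 *
 *	int i, space = tty_buffer_request_room(port, len);
 *
 *	for (i = 0; i < space; i++)
 *		tty_insert_flip_char(port, data[i], TTY_NORMAL);
 *	tty_flip_buffer_push(port);
 */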
/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 */

int tty_insert_flip_string_fixed_flag(struct tty_port *port,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
		int space = __tty_buffer_request_room(port, goal, flags);
		struct tty_buffer *tb = port->buf.tail;
		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 */

int tty_insert_flip_string_flags(struct tty_port *port,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(port, goal);
		struct tty_buffer *tb = port->buf.tail;
		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

/**
 * tty_schedule_flip	-	push characters to ldisc
 * @port: tty port to push from
 *
 * Takes any pending buffers and transfers their ownership to the
 * ldisc side of the queue. It then schedules those characters for
 * processing by the line discipline.
 */

void tty_schedule_flip(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	buf->tail->commit = buf->tail->used;
	schedule_work(&buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);

/**
 * tty_prepare_flip_string	-	make room for characters
 * @port: tty port
 * @chars: return pointer for character write area
 * @size: desired size
 *
 * Prepare a block of space in the buffer for data. Returns the length
 * available and buffer pointer to the space which is now allocated and
 * accounted for as ready for normal characters. This is used for drivers
 * that need their own block copy routines into the buffer. There is no
 * guarantee the buffer is a DMA target!
 */

int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
		size_t size)
{
	int space = __tty_buffer_request_room(port, size, TTYB_NORMAL);
	if (likely(space)) {
		struct tty_buffer *tb = port->buf.tail;
		*chars = char_buf_ptr(tb, tb->used);
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
		tb->used += space;
	}
	return space;
}
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);


static int
receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
{
	struct tty_ldisc *disc = tty->ldisc;
	unsigned char *p = char_buf_ptr(head, head->read);
	char *f = NULL;

	if (~head->flags & TTYB_NORMAL)
		f = flag_buf_ptr(head, head->read);

	if (disc->ops->receive_buf2)
		count = disc->ops->receive_buf2(tty, p, f, count);
	else {
		count = min_t(int, count, tty->receive_room);
		if (count)
			disc->ops->receive_buf(tty, p, f, count);
	}
	head->read += count;
	return count;
}
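/*
 * Illustrative sketch (not part of this file): a block-copy driver
 * would pair tty_prepare_flip_string() with its own copy routine and
 * then push the result to the ldisc. demo_copy_from_fifo(), hw and
 * want are hypothetical.
 *
 *	unsigned char *dst;
 *	int len = tty_prepare_flip_string(port, &dst, want);
 *
 *	if (len > 0) {
 *		demo_copy_from_fifo(hw, dst, len);
 *		tty_flip_buffer_push(port);
 *	}
 */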
/**
 * flush_to_ldisc
 * @work: tty port buffer work from the workqueue
 *
 * This routine is called out of the software interrupt to flush data
 * from the buffer chain to the line discipline.
 *
 * The receive_buf method is single threaded for each tty instance.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */

static void flush_to_ldisc(struct work_struct *work)
{
	struct tty_port *port = container_of(work, struct tty_port, buf.work);
	struct tty_bufhead *buf = &port->buf;
	struct tty_struct *tty;
	struct tty_ldisc *disc;

	tty = port->itty;
	if (tty == NULL)
		return;

	disc = tty_ldisc_ref(tty);
	if (disc == NULL)
		return;

	mutex_lock(&buf->lock);

	while (1) {
		struct tty_buffer *head = buf->head;
		struct tty_buffer *next;
		int count;

		/* Ldisc or user is trying to gain exclusive access */
		if (atomic_read(&buf->priority))
			break;

		next = head->next;
		/* paired w/ barrier in __tty_buffer_request_room();
		 * ensures commit value read is not stale if the head
		 * is advancing to the next buffer
		 */
		smp_rmb();
		count = head->commit - head->read;
		if (!count) {
			if (next == NULL)
				break;
			buf->head = next;
			tty_buffer_free(port, head);
			continue;
		}

		count = receive_buf(tty, head, count);
		if (!count)
			break;
	}

	mutex_unlock(&buf->lock);

	tty_ldisc_deref(disc);
}

/**
 * tty_flush_to_ldisc
 * @tty: tty to push
 *
 * Push the terminal flip buffers to the line discipline.
 *
 * Must not be called from IRQ context.
 */
void tty_flush_to_ldisc(struct tty_struct *tty)
{
	flush_work(&tty->port->buf.work);
}

/**
 * tty_flip_buffer_push	-	push terminal flip buffers
 * @port: tty port to push
 *
 * Queue a push of the terminal flip buffers to the line discipline.
 * Can be called from IRQ/atomic context.
 *
 * In the event of the queue being busy for flipping the work will be
 * held off and retried later.
 */

void tty_flip_buffer_push(struct tty_port *port)
{
	tty_schedule_flip(port);
}
EXPORT_SYMBOL(tty_flip_buffer_push);

/**
 * tty_buffer_init	-	prepare a tty buffer structure
 * @port: tty port to initialise
 *
 * Set up the initial state of the buffer management for a tty device.
 * Must be called before the other tty buffer functions are used.
 */

void tty_buffer_init(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	mutex_init(&buf->lock);
	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;
	init_llist_head(&buf->free);
	atomic_set(&buf->mem_used, 0);
	atomic_set(&buf->priority, 0);
	INIT_WORK(&buf->work, flush_to_ldisc);
	buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT;
}

/**
 * tty_buffer_set_limit	-	change the tty buffer memory limit
 * @port: tty port to change
 * @limit: new memory limit in bytes
 *
 * Change the tty buffer memory limit.
 * Must be called before the other tty buffer functions are used.
 */

int tty_buffer_set_limit(struct tty_port *port, int limit)
{
	if (limit < MIN_TTYB_SIZE)
		return -EINVAL;
	port->buf.mem_limit = limit;
	return 0;
}
EXPORT_SYMBOL_GPL(tty_buffer_set_limit);

/* slave ptys can claim nested buffer lock when handling BRK and INTR */
void tty_buffer_set_lock_subclass(struct tty_port *port)
{
	lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE);
}
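/*
 * Illustrative sketch (not part of this file): tty_buffer_init() is
 * called from tty_port_init(), so a driver that needs more than the
 * default 64KiB of flip-buffer memory can raise the limit right after
 * setting the port up, before any data flows. demo is hypothetical.
 *
 *	tty_port_init(&demo->port);
 *	tty_buffer_set_limit(&demo->port, 128 * 1024);
 */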