// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static int dbc_tty_init(void);
static void dbc_tty_exit(void);

static struct tty_driver *dbc_tty_driver;

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
	return dbc->priv;
}

static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
	unsigned int len;

	len = kfifo_len(&port->write_fifo);
	if (len < size)
		size = len;
	if (size != 0)
		size = kfifo_out(&port->write_fifo, packet, size);
	return size;
}

static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int len;
	struct dbc_request *req;
	int status = 0;
	bool do_tty_wake = false;
	struct list_head *pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request *req;
	int status;
	struct list_head *pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long flags;
	struct dbc_port *port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			 req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int i;
	struct dbc_request *req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request *req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port *port = driver->driver_state;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port *port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

static int dbc_tty_write(struct tty_struct *tty,
			 const unsigned char *buf,
			 int count)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->write_fifo, buf, count);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->write_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;
	int chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port *port = tty->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

static void dbc_rx_push(struct tasklet_struct *t)
{
	struct dbc_request *req;
	struct tty_struct *tty;
	unsigned long flags;
	bool do_push = false;
	bool disconnect = false;
	struct dbc_port *port = from_tasklet(port, t, push);
	struct list_head *queue = &port->read_queue;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char *packet = req->buf;
			unsigned int n, size = req->actual;
			int count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long flags;
	struct dbc_port *port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate = dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_setup(&port->push, dbc_rx_push);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port.ops = &dbc_port_ops;
	port->n_read = 0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int ret;
	struct device *tty_dev;
	struct dbc_port *port = dbc_to_port(dbc);

	if (port->registered)
		return -EBUSY;

	xhci_dbc_tty_init_port(dbc, port);
	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, 0, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto register_fail;
	}

	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
	if (ret)
		goto buf_alloc_fail;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto request_fail;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto request_fail;

	port->registered = true;

	return 0;

request_fail:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
	kfifo_free(&port->write_fifo);

buf_alloc_fail:
	tty_unregister_device(dbc_tty_driver, 0);

register_fail:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	if (!port->registered)
		return;
	tty_unregister_device(dbc_tty_driver, 0);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	kfifo_free(&port->write_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
	.configure		= xhci_dbc_tty_register_device,
	.disconnect		= xhci_dbc_tty_unregister_device,
};

int xhci_dbc_tty_probe(struct xhci_hcd *xhci)
{
	struct xhci_dbc *dbc = xhci->dbc;
	struct dbc_port *port;
	int status;

	/* dbc_tty_init will be called by module init() in the future */
	status = dbc_tty_init();
	if (status)
		return status;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port) {
		status = -ENOMEM;
		goto out;
	}

	dbc->driver = &dbc_driver;
	dbc->priv = port;

	dbc_tty_driver->driver_state = port;

	return 0;
out:
	/* dbc_tty_exit will be called by module_exit() in the future */
	dbc_tty_exit();
	return status;
}

/*
 * undo what probe did, assume dbc is stopped already.
 * we also assume tty_unregister_device() is called before this
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port *port = dbc_to_port(dbc);

	dbc->driver = NULL;
	dbc->priv = NULL;
	kfree(port);

	/* dbc_tty_exit will be called by module_exit() in the future */
	dbc_tty_exit();
}

static int dbc_tty_init(void)
{
	int ret;

	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver))
		return PTR_ERR(dbc_tty_driver);

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	ret = tty_register_driver(dbc_tty_driver);
	if (ret) {
		pr_err("Can't register dbc tty driver\n");
		put_tty_driver(dbc_tty_driver);
	}
	return ret;
}

static void dbc_tty_exit(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		put_tty_driver(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}
}