xref: /openbmc/linux/drivers/usb/host/xhci-dbgtty.c (revision d4c52c6a)
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/idr.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

static struct tty_driver *dbc_tty_driver;
static struct idr dbc_tty_minors;
static DEFINE_MUTEX(dbc_tty_minors_lock);

static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
{
	return dbc->priv;
}

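/*
 * Copy at most one max-packet worth of data from the tty xmit kfifo into the
 * request buffer. If tx_boundary is set, the copy is also capped so that a
 * request never crosses the boundary of the tty write that filled the fifo.
 * Returns the number of bytes actually copied.
 */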
static unsigned int
dbc_kfifo_to_req(struct dbc_port *port, char *packet)
{
	unsigned int	len;

	len = kfifo_len(&port->port.xmit_fifo);

	if (len == 0)
		return 0;

	len = min(len, DBC_MAX_PACKET);

	if (port->tx_boundary)
		len = min(port->tx_boundary, len);

	len = kfifo_out(&port->port.xmit_fifo, packet, len);

	if (port->tx_boundary)
		port->tx_boundary -= len;

	return len;
}

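/*
 * Fill free requests from the write pool with data from the xmit kfifo and
 * queue them. port_lock is dropped around dbc_ep_queue(); on a queueing
 * error the request goes back to the pool and the loop stops. The tty is
 * woken up if any data was consumed from the fifo.
 */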
static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int			len;
	struct dbc_request	*req;
	int			status = 0;
	bool			do_tty_wake = false;
	struct list_head	*pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_kfifo_to_req(port, req->buf);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

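/*
 * Queue all requests sitting in the read pool so that incoming data can be
 * received. Stops early if the port has no tty or a request fails to queue.
 */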
static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request	*req;
	int			status;
	struct list_head	*pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(req);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

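/*
 * Request completion callbacks, invoked by the dbgcap core. Completed reads
 * are put on the read queue and handed to the RX tasklet; completed writes
 * are returned to the write pool and the next transmit is kicked off.
 */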
static void
dbc_read_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long		flags;
	struct dbc_port		*port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void dbc_write_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
	unsigned long		flags;
	struct dbc_port		*port = dbc_to_port(dbc);

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		dev_warn(dbc->dev, "unexpected write complete status %d\n",
			  req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(req);
}

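/*
 * Allocate up to DBC_QUEUE_SIZE requests, each with a DBC_MAX_PACKET buffer,
 * and add them to the given pool. This is treated as a success as long as at
 * least one request could be allocated.
 */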
static int
xhci_dbc_alloc_requests(struct xhci_dbc *dbc, unsigned int direction,
			struct list_head *head,
			void (*fn)(struct xhci_dbc *, struct dbc_request *))
{
	int			i;
	struct dbc_request	*req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dbc, direction, GFP_KERNEL);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			dbc_free_request(req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct list_head *head)
{
	struct dbc_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(req);
	}
}

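/*
 * tty operations: look up the dbc_port for the requested minor number and
 * bind it to the tty on install; open and close are plain tty_port wrappers.
 */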
static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port		*port;

	mutex_lock(&dbc_tty_minors_lock);
	port = idr_find(&dbc_tty_minors, tty->index);
	mutex_unlock(&dbc_tty_minors_lock);

	if (!port)
		return -ENXIO;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
			     size_t count)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		written = 0;

	spin_lock_irqsave(&port->port_lock, flags);

	/*
	 * Treat each tty write as one USB transfer. Make sure writes are
	 * turned into TRB requests with the same size boundaries as the tty
	 * writes. Don't add data to the kfifo before the previous write has
	 * been turned into TRBs.
	 */
	if (port->tx_boundary) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return 0;
	}

	if (count) {
		written = kfifo_in(&port->port.xmit_fifo, buf, count);

		if (written == count)
			port->tx_boundary = kfifo_len(&port->port.xmit_fifo);

		dbc_start_tx(port);
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return written;
}

static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->port.xmit_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

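/*
 * Report the free space in the xmit fifo. While a previous tty write is
 * still being turned into requests (tx_boundary is nonzero), report no room
 * so that separate writes are not merged into one transfer.
 */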
static unsigned int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		room;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->port.xmit_fifo);

	if (port->tx_boundary)
		room = 0;

	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	unsigned int		chars;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->port.xmit_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

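/*
 * RX tasklet: push completed read requests into the tty flip buffer. If the
 * flip buffer cannot take a whole request, the request stays on the read
 * queue and n_read remembers how much of it has already been consumed.
 * Fully consumed requests are moved back to the read pool, and new reads are
 * queued unless the device has disconnected.
 */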
static void dbc_rx_push(struct tasklet_struct *t)
{
	struct dbc_request	*req;
	struct tty_struct	*tty;
	unsigned long		flags;
	bool			do_push = false;
	bool			disconnect = false;
	struct dbc_port		*port = from_tasklet(port, t, push);
	struct list_head	*queue = &port->read_queue;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char		*packet = req->buf;
			unsigned int	n, size = req->actual;
			int		count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

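/*
 * Start receiving as soon as the tty port is activated (first open).
 */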
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long	flags;
	struct dbc_port	*port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate =	dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_setup(&port->push, dbc_rx_push);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->port.ops =	&dbc_port_ops;
	port->n_read =		0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

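/*
 * Called through dbc_driver->configure: initialize the port, allocate a
 * minor number, the xmit fifo and the read and write request pools, then
 * register the tty device.
 */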
static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
{
	int			ret;
	struct device		*tty_dev;
	struct dbc_port		*port = dbc_to_port(dbc);

	if (port->registered)
		return -EBUSY;

	xhci_dbc_tty_init_port(dbc, port);

	mutex_lock(&dbc_tty_minors_lock);
	port->minor = idr_alloc(&dbc_tty_minors, port, 0, 64, GFP_KERNEL);
	mutex_unlock(&dbc_tty_minors_lock);

	if (port->minor < 0) {
		ret = port->minor;
		goto err_idr;
	}

	ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
			  GFP_KERNEL);
	if (ret)
		goto err_exit_port;

	ret = xhci_dbc_alloc_requests(dbc, BULK_IN, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto err_free_fifo;

	ret = xhci_dbc_alloc_requests(dbc, BULK_OUT, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto err_free_requests;

	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, port->minor, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto err_free_requests;
	}

	port->registered = true;

	return 0;

err_free_requests:
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->write_pool);
err_free_fifo:
	kfifo_free(&port->port.xmit_fifo);
err_exit_port:
	idr_remove(&dbc_tty_minors, port->minor);
err_idr:
	xhci_dbc_tty_exit_port(port);

	dev_err(dbc->dev, "can't register tty port, err %d\n", ret);

	return ret;
}

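/*
 * Called through dbc_driver->disconnect: undo xhci_dbc_tty_register_device()
 * by unregistering the tty device, releasing the minor number and freeing
 * the xmit fifo and all request pools.
 */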
static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
{
	struct dbc_port		*port = dbc_to_port(dbc);

	if (!port->registered)
		return;
	tty_unregister_device(dbc_tty_driver, port->minor);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	mutex_lock(&dbc_tty_minors_lock);
	idr_remove(&dbc_tty_minors, port->minor);
	mutex_unlock(&dbc_tty_minors_lock);

	kfifo_free(&port->port.xmit_fifo);
	xhci_dbc_free_requests(&port->read_pool);
	xhci_dbc_free_requests(&port->read_queue);
	xhci_dbc_free_requests(&port->write_pool);
}

static const struct dbc_driver dbc_driver = {
	.configure		= xhci_dbc_tty_register_device,
	.disconnect		= xhci_dbc_tty_unregister_device,
};

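/*
 * Allocate the tty glue (struct dbc_port) and the DbC itself, and attach the
 * port as the DbC private data. Requires dbc_tty_init() to have registered
 * the tty driver.
 */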
int xhci_dbc_tty_probe(struct device *dev, void __iomem *base, struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc;
	struct dbc_port		*port;
	int			status;

	if (!dbc_tty_driver)
		return -ENODEV;

	port = kzalloc(sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	dbc = xhci_alloc_dbc(dev, base, &dbc_driver);

	if (!dbc) {
		status = -ENOMEM;
		goto out2;
	}

	dbc->priv = port;

	/* get rid of xhci once this is a real driver binding to a device */
	xhci->dbc = dbc;

	return 0;
out2:
	kfree(port);

	return status;
}

/*
 * Undo what probe did; assume the DbC is already stopped.
 * We also assume tty_unregister_device() was called before this.
 */
void xhci_dbc_tty_remove(struct xhci_dbc *dbc)
{
	struct dbc_port		*port = dbc_to_port(dbc);

	xhci_dbc_remove(dbc);
	kfree(port);
}

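/*
 * Allocate and register the shared tty driver (up to 64 ttyDBC minors).
 * Must run before any xhci_dbc_tty_probe() call.
 */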
int dbc_tty_init(void)
{
	int		ret;

	idr_init(&dbc_tty_minors);

	dbc_tty_driver = tty_alloc_driver(64, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		idr_destroy(&dbc_tty_minors);
		return PTR_ERR(dbc_tty_driver);
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	ret = tty_register_driver(dbc_tty_driver);
	if (ret) {
		pr_err("Can't register dbc tty driver\n");
		tty_driver_kref_put(dbc_tty_driver);
		idr_destroy(&dbc_tty_minors);
	}

	return ret;
}

void dbc_tty_exit(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		tty_driver_kref_put(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	idr_destroy(&dbc_tty_minors);
}
615