xref: /openbmc/linux/drivers/usb/host/xhci-dbgtty.c (revision 160b8e75)
/**
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

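/*
 * Copy at most 'size' bytes from the port's write FIFO into 'packet' and
 * return the number of bytes actually copied.
 */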
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
	unsigned int		len;

	len = kfifo_len(&port->write_fifo);
	if (len < size)
		size = len;
	if (size != 0)
		size = kfifo_out(&port->write_fifo, packet, size);
	return size;
}

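/*
 * Fill idle requests from the write pool with data buffered in the write
 * FIFO and queue them on port->out.  The port lock is dropped around
 * dbc_ep_queue(); the tty is woken up if any data was handed over.
 */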
static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int			len;
	struct dbc_request	*req;
	int			status = 0;
	bool			do_tty_wake = false;
	struct list_head	*pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

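/*
 * Queue all idle requests from the read pool on port->in so that incoming
 * data can be received.  Stops early if the tty is gone or a request fails
 * to queue.
 */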
static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request	*req;
	int			status;
	struct list_head	*pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

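/*
 * Completion handler for read requests: move the completed request to the
 * read queue and let the RX tasklet push its data into the tty layer.
 */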
static void
dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	spin_lock(&port->port_lock);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock(&port->port_lock);
}

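/*
 * Completion handler for write requests: return the request to the write
 * pool and, on success, restart transmission of any remaining FIFO data.
 */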
static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	spin_lock(&port->port_lock);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		xhci_warn(xhci, "unexpected write complete status %d\n",
			  req->status);
		break;
	}
	spin_unlock(&port->port_lock);
}

static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(dep, req);
}

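/*
 * Pre-allocate DBC_QUEUE_SIZE requests, each with a DBC_MAX_PACKET sized
 * buffer, and add them to the given pool.  Returns 0 as long as at least
 * one request ended up on the list, -ENOMEM otherwise.
 */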
static int
xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
			void (*fn)(struct xhci_hcd *, struct dbc_request *))
{
	int			i;
	struct dbc_request	*req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dep, GFP_ATOMIC);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			xhci_dbc_free_req(dep, req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
{
	struct dbc_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(dep, req);
	}
}

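/*
 * Standard tty operations.  The dbc_port is resolved from the driver state
 * at install time and from tty->driver_data afterwards; the write path is
 * backed by the port's write FIFO under port_lock.
 */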
static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port		*port = driver->driver_state;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

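/*
 * Buffer user data in the write FIFO and kick the transmitter.  Returns
 * the number of bytes actually accepted, which may be less than 'count'
 * when the FIFO is nearly full.
 */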
static int dbc_tty_write(struct tty_struct *tty,
			 const unsigned char *buf,
			 int count)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->write_fifo, buf, count);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->write_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

static struct tty_driver *dbc_tty_driver;

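/*
 * Allocate and register a single-port tty driver ("ttyDBC") whose default
 * termios is 9600 baud, 8 data bits, no parity (CS8, CREAD, HUPCL, CLOCAL).
 */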
int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
{
	int			status;
	struct xhci_dbc		*dbc = xhci->dbc;

	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		status = PTR_ERR(dbc_tty_driver);
		dbc_tty_driver = NULL;
		return status;
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;
	dbc_tty_driver->driver_state = &dbc->port;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	status = tty_register_driver(dbc_tty_driver);
	if (status) {
		xhci_err(xhci,
			 "can't register dbc tty driver, err %d\n", status);
		put_tty_driver(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	return status;
}

void xhci_dbc_tty_unregister_driver(void)
{
	tty_unregister_driver(dbc_tty_driver);
	put_tty_driver(dbc_tty_driver);
	dbc_tty_driver = NULL;
}

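/*
 * RX tasklet: push data from completed read requests into the tty flip
 * buffer.  A partially inserted request is left at the head of the read
 * queue with port->n_read recording how much of it was consumed, so the
 * remainder can be pushed once the tty is unthrottled.  Unless the device
 * has been disconnected (-ESHUTDOWN), reception is restarted at the end.
 */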
static void dbc_rx_push(unsigned long _port)
{
	struct dbc_request	*req;
	struct tty_struct	*tty;
	bool			do_push = false;
	bool			disconnect = false;
	struct dbc_port		*port = (void *)_port;
	struct list_head	*queue = &port->read_queue;

	spin_lock_irq(&port->port_lock);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char		*packet = req->buf;
			unsigned int	n, size = req->actual;
			int		count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irq(&port->port_lock);
}

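/*
 * tty_port activate callback, invoked when the port is first opened:
 * start queuing read requests so incoming data can flow.
 */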
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	struct dbc_port	*port = container_of(_port, struct dbc_port, port);

	spin_lock_irq(&port->port_lock);
	dbc_start_rx(port);
	spin_unlock_irq(&port->port_lock);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate =	dbc_port_activate,
};

static void
xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->in =		get_in_ep(xhci);
	port->out =		get_out_ep(xhci);
	port->port.ops =	&dbc_port_ops;
	port->n_read =		0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

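/*
 * Register the DbC tty device: initialize the port, create the tty device
 * node, then allocate the write FIFO and the read/write request pools.
 * On failure everything allocated so far is unwound before returning.
 */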
int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*tty_dev;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	xhci_dbc_tty_init_port(xhci, port);
	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, 0, NULL);
	ret = IS_ERR_OR_NULL(tty_dev);
	if (ret)
		goto register_fail;

	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
	if (ret)
		goto buf_alloc_fail;

	ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto request_fail;

	ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto request_fail;

	port->registered = true;

	return 0;

request_fail:
	xhci_dbc_free_requests(port->in, &port->read_pool);
	xhci_dbc_free_requests(port->out, &port->write_pool);
	kfifo_free(&port->write_fifo);

buf_alloc_fail:
	tty_unregister_device(dbc_tty_driver, 0);

register_fail:
	xhci_dbc_tty_exit_port(port);

	xhci_err(xhci, "can't register tty port, err %d\n", ret);

	return ret;
}

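/*
 * Tear down the DbC tty device in the reverse order of registration and
 * release the write FIFO and all pre-allocated requests.
 */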
void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	tty_unregister_device(dbc_tty_driver, 0);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	kfifo_free(&port->write_fifo);
	xhci_dbc_free_requests(get_out_ep(xhci), &port->read_pool);
	xhci_dbc_free_requests(get_out_ep(xhci), &port->read_queue);
	xhci_dbc_free_requests(get_in_ep(xhci), &port->write_pool);
}
498