xref: /openbmc/linux/drivers/usb/host/xhci-dbgtty.c (revision 2d972b6a)
// SPDX-License-Identifier: GPL-2.0
/*
 * xhci-dbgtty.c - tty glue for xHCI debug capability
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

#include "xhci.h"
#include "xhci-dbgcap.h"

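/*
 * Copy up to @size bytes from the port's write FIFO into @packet.
 * Called with port_lock held; returns the number of bytes actually
 * copied, which may be zero.
 */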
static unsigned int
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
{
	unsigned int		len;

	len = kfifo_len(&port->write_fifo);
	if (len < size)
		size = len;
	if (size != 0)
		size = kfifo_out(&port->write_fifo, packet, size);
	return size;
}

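/*
 * Fill requests from the write pool with data from the write FIFO and
 * queue them on port->out.  Called with port_lock held; the lock is
 * dropped and re-acquired around dbc_ep_queue().  Wakes up the tty
 * once data has been consumed from the FIFO.
 */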
static int dbc_start_tx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	int			len;
	struct dbc_request	*req;
	int			status = 0;
	bool			do_tty_wake = false;
	struct list_head	*pool = &port->write_pool;

	while (!list_empty(pool)) {
		req = list_entry(pool->next, struct dbc_request, list_pool);
		len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
		if (len == 0)
			break;
		do_tty_wake = true;

		req->length = len;
		list_del(&req->list_pool);

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}

	if (do_tty_wake && port->port.tty)
		tty_wakeup(port->port.tty);

	return status;
}

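/*
 * Queue all requests from the read pool on port->in so the debug host
 * can send us data.  Called with port_lock held; the lock is dropped
 * and re-acquired around dbc_ep_queue().  Stops early if the port has
 * no tty or a request fails to queue.
 */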
static void dbc_start_rx(struct dbc_port *port)
	__releases(&port->port_lock)
	__acquires(&port->port_lock)
{
	struct dbc_request	*req;
	int			status;
	struct list_head	*pool = &port->read_pool;

	while (!list_empty(pool)) {
		if (!port->port.tty)
			break;

		req = list_entry(pool->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		req->length = DBC_MAX_PACKET;

		spin_unlock(&port->port_lock);
		status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
		spin_lock(&port->port_lock);

		if (status) {
			list_add(&req->list_pool, pool);
			break;
		}
	}
}

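/*
 * Completion callback for read requests: park the finished request on
 * the read queue and schedule the RX tasklet to push its payload to
 * the tty layer.
 */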
static void
dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add_tail(&req->list_pool, &port->read_queue);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

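/*
 * Completion callback for write requests: return the request to the
 * write pool and, unless the endpoint is shutting down, keep draining
 * any data still sitting in the write FIFO.
 */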
static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
{
	unsigned long		flags;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list_pool, &port->write_pool);
	switch (req->status) {
	case 0:
		dbc_start_tx(port);
		break;
	case -ESHUTDOWN:
		break;
	default:
		xhci_warn(xhci, "unexpected write complete status %d\n",
			  req->status);
		break;
	}
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
{
	kfree(req->buf);
	dbc_free_request(dep, req);
}

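/*
 * Pre-allocate DBC_QUEUE_SIZE requests, each with a DBC_MAX_PACKET
 * buffer, for endpoint @dep and add them to @head.  Returns -ENOMEM
 * only if nothing at all ended up on the list.
 */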
static int
xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
			void (*fn)(struct xhci_hcd *, struct dbc_request *))
{
	int			i;
	struct dbc_request	*req;

	for (i = 0; i < DBC_QUEUE_SIZE; i++) {
		req = dbc_alloc_request(dep, GFP_ATOMIC);
		if (!req)
			break;

		req->length = DBC_MAX_PACKET;
		req->buf = kmalloc(req->length, GFP_KERNEL);
		if (!req->buf) {
			xhci_dbc_free_req(dep, req);
			break;
		}

		req->complete = fn;
		list_add_tail(&req->list_pool, head);
	}

	return list_empty(head) ? -ENOMEM : 0;
}

static void
xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
{
	struct dbc_request	*req;

	while (!list_empty(head)) {
		req = list_entry(head->next, struct dbc_request, list_pool);
		list_del(&req->list_pool);
		xhci_dbc_free_req(dep, req);
	}
}

static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
	struct dbc_port		*port = driver->driver_state;

	tty->driver_data = port;

	return tty_port_install(&port->port, driver, tty);
}

static int dbc_tty_open(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	return tty_port_open(&port->port, tty, file);
}

static void dbc_tty_close(struct tty_struct *tty, struct file *file)
{
	struct dbc_port		*port = tty->driver_data;

	tty_port_close(&port->port, tty, file);
}

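/*
 * tty write path: copy the data into the write FIFO and kick the
 * transmitter.  Returns the number of bytes accepted by the FIFO.
 */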
static int dbc_tty_write(struct tty_struct *tty,
			 const unsigned char *buf,
			 int count)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	if (count)
		count = kfifo_in(&port->write_fifo, buf, count);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return count;
}

static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			status;

	spin_lock_irqsave(&port->port_lock, flags);
	status = kfifo_put(&port->write_fifo, ch);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return status;
}

static void dbc_tty_flush_chars(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_tx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static int dbc_tty_write_room(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			room = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	room = kfifo_avail(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return room;
}

static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;
	int			chars = 0;

	spin_lock_irqsave(&port->port_lock, flags);
	chars = kfifo_len(&port->write_fifo);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return chars;
}

static void dbc_tty_unthrottle(struct tty_struct *tty)
{
	struct dbc_port		*port = tty->driver_data;
	unsigned long		flags;

	spin_lock_irqsave(&port->port_lock, flags);
	tasklet_schedule(&port->push);
	spin_unlock_irqrestore(&port->port_lock, flags);
}

static const struct tty_operations dbc_tty_ops = {
	.install		= dbc_tty_install,
	.open			= dbc_tty_open,
	.close			= dbc_tty_close,
	.write			= dbc_tty_write,
	.put_char		= dbc_tty_put_char,
	.flush_chars		= dbc_tty_flush_chars,
	.write_room		= dbc_tty_write_room,
	.chars_in_buffer	= dbc_tty_chars_in_buffer,
	.unthrottle		= dbc_tty_unthrottle,
};

static struct tty_driver *dbc_tty_driver;

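/*
 * Allocate and register the single-port "ttyDBC" tty driver.  The DbC
 * port is stashed in driver_state so dbc_tty_install() can find it
 * when the device node is opened.
 */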
int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
{
	int			status;
	struct xhci_dbc		*dbc = xhci->dbc;

	dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(dbc_tty_driver)) {
		status = PTR_ERR(dbc_tty_driver);
		dbc_tty_driver = NULL;
		return status;
	}

	dbc_tty_driver->driver_name = "dbc_serial";
	dbc_tty_driver->name = "ttyDBC";

	dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
	dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
	dbc_tty_driver->init_termios = tty_std_termios;
	dbc_tty_driver->init_termios.c_cflag =
			B9600 | CS8 | CREAD | HUPCL | CLOCAL;
	dbc_tty_driver->init_termios.c_ispeed = 9600;
	dbc_tty_driver->init_termios.c_ospeed = 9600;
	dbc_tty_driver->driver_state = &dbc->port;

	tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

	status = tty_register_driver(dbc_tty_driver);
	if (status) {
		xhci_err(xhci,
			 "can't register dbc tty driver, err %d\n", status);
		put_tty_driver(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}

	return status;
}

void xhci_dbc_tty_unregister_driver(void)
{
	if (dbc_tty_driver) {
		tty_unregister_driver(dbc_tty_driver);
		put_tty_driver(dbc_tty_driver);
		dbc_tty_driver = NULL;
	}
}

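/*
 * RX tasklet: push completed read requests to the tty flip buffer.
 * A payload that the tty layer only partially accepted is remembered
 * via n_read and resumed when the tty is unthrottled.  Fully consumed
 * requests go back to the read pool and, unless the endpoint reported
 * a disconnect, new reads are queued.
 */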
static void dbc_rx_push(unsigned long _port)
{
	struct dbc_request	*req;
	struct tty_struct	*tty;
	unsigned long		flags;
	bool			do_push = false;
	bool			disconnect = false;
	struct dbc_port		*port = (void *)_port;
	struct list_head	*queue = &port->read_queue;

	spin_lock_irqsave(&port->port_lock, flags);
	tty = port->port.tty;
	while (!list_empty(queue)) {
		req = list_first_entry(queue, struct dbc_request, list_pool);

		if (tty && tty_throttled(tty))
			break;

		switch (req->status) {
		case 0:
			break;
		case -ESHUTDOWN:
			disconnect = true;
			break;
		default:
			pr_warn("ttyDBC0: unexpected RX status %d\n",
				req->status);
			break;
		}

		if (req->actual) {
			char		*packet = req->buf;
			unsigned int	n, size = req->actual;
			int		count;

			n = port->n_read;
			if (n) {
				packet += n;
				size -= n;
			}

			count = tty_insert_flip_string(&port->port, packet,
						       size);
			if (count)
				do_push = true;
			if (count != size) {
				port->n_read += count;
				break;
			}
			port->n_read = 0;
		}

		list_move(&req->list_pool, &port->read_pool);
	}

	if (do_push)
		tty_flip_buffer_push(&port->port);

	if (!list_empty(queue) && tty) {
		if (!tty_throttled(tty)) {
			if (do_push)
				tasklet_schedule(&port->push);
			else
				pr_warn("ttyDBC0: RX not scheduled?\n");
		}
	}

	if (!disconnect)
		dbc_start_rx(port);

	spin_unlock_irqrestore(&port->port_lock, flags);
}

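/*
 * tty_port activate callback: start queueing read requests as soon as
 * the port is opened.
 */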
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
{
	unsigned long	flags;
	struct dbc_port	*port = container_of(_port, struct dbc_port, port);

	spin_lock_irqsave(&port->port_lock, flags);
	dbc_start_rx(port);
	spin_unlock_irqrestore(&port->port_lock, flags);

	return 0;
}

static const struct tty_port_operations dbc_port_ops = {
	.activate =	dbc_port_activate,
};

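/*
 * One-time setup of the dbc_port structure: tty port, spinlock,
 * RX tasklet, request lists and the endpoint pointers.
 */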
static void
xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
{
	tty_port_init(&port->port);
	spin_lock_init(&port->port_lock);
	tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
	INIT_LIST_HEAD(&port->read_pool);
	INIT_LIST_HEAD(&port->read_queue);
	INIT_LIST_HEAD(&port->write_pool);

	port->in =		get_in_ep(xhci);
	port->out =		get_out_ep(xhci);
	port->port.ops =	&dbc_port_ops;
	port->n_read =		0;
}

static void
xhci_dbc_tty_exit_port(struct dbc_port *port)
{
	tasklet_kill(&port->push);
	tty_port_destroy(&port->port);
}

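/*
 * Register the DbC tty device and allocate its resources: the write
 * FIFO and the read/write request pools.  On failure everything set up
 * so far is torn down again in reverse order.
 */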
int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
{
	int			ret;
	struct device		*tty_dev;
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	xhci_dbc_tty_init_port(xhci, port);
	tty_dev = tty_port_register_device(&port->port,
					   dbc_tty_driver, 0, NULL);
	if (IS_ERR(tty_dev)) {
		ret = PTR_ERR(tty_dev);
		goto register_fail;
	}

	ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
	if (ret)
		goto buf_alloc_fail;

	ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
				      dbc_read_complete);
	if (ret)
		goto request_fail;

	ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,
				      dbc_write_complete);
	if (ret)
		goto request_fail;

	port->registered = true;

	return 0;

request_fail:
	xhci_dbc_free_requests(port->in, &port->read_pool);
	xhci_dbc_free_requests(port->out, &port->write_pool);
	kfifo_free(&port->write_fifo);

buf_alloc_fail:
	tty_unregister_device(dbc_tty_driver, 0);

register_fail:
	xhci_dbc_tty_exit_port(port);

	xhci_err(xhci, "can't register tty port, err %d\n", ret);

	return ret;
}

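/*
 * Tear down the DbC tty device and release the write FIFO and all
 * outstanding requests.
 */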
void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
{
	struct xhci_dbc		*dbc = xhci->dbc;
	struct dbc_port		*port = &dbc->port;

	tty_unregister_device(dbc_tty_driver, 0);
	xhci_dbc_tty_exit_port(port);
	port->registered = false;

	kfifo_free(&port->write_fifo);
	xhci_dbc_free_requests(get_in_ep(xhci), &port->read_pool);
	xhci_dbc_free_requests(get_in_ep(xhci), &port->read_queue);
	xhci_dbc_free_requests(get_out_ep(xhci), &port->write_pool);
}