xref: /openbmc/linux/drivers/thunderbolt/debugfs.c (revision 6f6573a4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Debugfs interface
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Gil Fine <gil.fine@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/pm_runtime.h>

#include "tb.h"

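/*
 * Sizes of the structures dumped below, in config space dwords: adapter
 * (port) capabilities, router (switch) capabilities, one path entry and one
 * counter set.
 */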
#define PORT_CAP_PCIE_LEN	1
#define PORT_CAP_POWER_LEN	2
#define PORT_CAP_LANE_LEN	3
#define PORT_CAP_USB3_LEN	5
#define PORT_CAP_DP_LEN		8
#define PORT_CAP_TMU_LEN	8
#define PORT_CAP_BASIC_LEN	9
#define PORT_CAP_USB4_LEN	20

#define SWITCH_CAP_TMU_LEN	26
#define SWITCH_CAP_BASIC_LEN	27

#define PATH_LEN		2

#define COUNTER_SET_LEN		3

#define DEBUGFS_ATTR(__space, __write)					\
static int __space ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __space ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __space ## _fops = {		\
	.owner = THIS_MODULE,						\
	.open = __space ## _open,					\
	.release = single_release,					\
	.read = seq_read,						\
	.write = __write,						\
	.llseek = seq_lseek,						\
}

#define DEBUGFS_ATTR_RO(__space)					\
	DEBUGFS_ATTR(__space, NULL)

#define DEBUGFS_ATTR_RW(__space)					\
	DEBUGFS_ATTR(__space, __space ## _write)

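/* Top-level "thunderbolt" directory; all other entries are created under it */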
static struct dentry *tb_debugfs_root;

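/*
 * Copy at most one page of user data into a freshly zeroed page. On success
 * *count is updated to the number of bytes actually copied and the caller is
 * responsible for releasing the buffer with free_page(). Errors are returned
 * as ERR_PTR() values.
 */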
static void *validate_and_copy_from_user(const void __user *user_buf,
					 size_t *count)
{
	size_t nbytes;
	void *buf;

	if (!*count)
		return ERR_PTR(-EINVAL);

	if (!access_ok(user_buf, *count))
		return ERR_PTR(-EFAULT);

	buf = (void *)get_zeroed_page(GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	nbytes = min_t(size_t, *count, PAGE_SIZE);
	if (copy_from_user(buf, user_buf, nbytes)) {
		free_page((unsigned long)buf);
		return ERR_PTR(-EFAULT);
	}

	*count = nbytes;
	return buf;
}

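/*
 * Pull the next newline-terminated line from *line and parse it in either the
 * short or the long format described below. Returns true with *offs and *val
 * filled in when a line matched, false when there is nothing more to parse.
 */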
static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len,
		       int long_fmt_len)
{
	char *token;
	u32 v[5];
	int ret;

	token = strsep(line, "\n");
	if (!token)
		return false;

	/*
	 * For Adapter/Router configuration space:
	 * Short format is: offset value\n
	 *		    v[0]   v[1]
	 * Long format, as produced by the read side:
	 * offset relative_offset cap_id vs_cap_id value\n
	 * v[0]   v[1]            v[2]   v[3]      v[4]
	 *
	 * For Counter configuration space:
	 * Short format is: offset\n
	 *		    v[0]
	 * Long format, as produced by the read side:
	 * offset relative_offset counter_id value\n
	 * v[0]   v[1]            v[2]       v[3]
	 */
	ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]);
	/*
	 * In case of counters the short format carries no value; a write
	 * clears the counter, so the "val" content does not matter.
	 */
	if (ret == short_fmt_len) {
		*offs = v[0];
		*val = v[short_fmt_len - 1];
		return true;
	} else if (ret == long_fmt_len) {
		*offs = v[0];
		*val = v[long_fmt_len - 1];
		return true;
	}

	return false;
}

#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
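/*
 * Write handler shared by the router and adapter "regs" files, only built
 * when CONFIG_USB4_DEBUGFS_WRITE is enabled. Each input line is written as a
 * single dword to the router config space (when port is NULL) or to the
 * config space of the given adapter.
 *
 * Illustrative example: writing the short format line "0x5 0x42" stores the
 * value 0x42 into config space dword offset 0x5.
 */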
static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port,
			  const char __user *user_buf, size_t count,
			  loff_t *ppos)
{
	struct tb *tb = sw->tb;
	char *line, *buf;
	u32 val, offset;
	int ret = 0;

	buf = validate_and_copy_from_user(user_buf, &count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* The user is changing hardware registers behind the driver's back */
	add_taint(TAINT_USER, LOCKDEP_STILL_OK);

	line = buf;
	while (parse_line(&line, &offset, &val, 2, 5)) {
		if (port)
			ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1);
		else
			ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
		if (ret)
			break;
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	free_page((unsigned long)buf);

	return ret < 0 ? ret : count;
}

static ssize_t port_regs_write(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_port *port = s->private;

	return regs_write(port->sw, port, user_buf, count, ppos);
}

static ssize_t switch_regs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_switch *sw = s->private;

	return regs_write(sw, NULL, user_buf, count, ppos);
}
#define DEBUGFS_MODE		0600
#else
#define port_regs_write		NULL
#define switch_regs_write	NULL
#define DEBUGFS_MODE		0400
#endif

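/*
 * Zero the whole counters config space of the adapter, which clears every
 * counter set.
 */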
static int port_clear_all_counters(struct tb_port *port)
{
	u32 *buf;
	int ret;

	buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32),
		      GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0,
			    COUNTER_SET_LEN * port->config.max_counters);
	kfree(buf);

	return ret;
}

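/*
 * Write handler for the per-adapter "counters" file. Writing a lone newline
 * clears all counters at once; otherwise each line is written back to the
 * given counters config space offset, which clears that counter.
 */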
static ssize_t counters_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct seq_file *s = file->private_data;
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = port->sw->tb;
	char *buf;
	int ret;

	buf = validate_and_copy_from_user(user_buf, &count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	/* If only the delimiter was written, clear all counters in one shot */
	if (buf[0] == '\n') {
		ret = port_clear_all_counters(port);
	} else {
		char *line = buf;
		u32 val, offset;

		ret = -EINVAL;
		while (parse_line(&line, &offset, &val, 1, 4)) {
			ret = tb_port_write(port, &val, TB_CFG_COUNTERS,
					    offset, 1);
			if (ret)
				break;
		}
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);
	free_page((unsigned long)buf);

	return ret < 0 ? ret : count;
}

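/*
 * Dump length dwords of a capability starting at offset cap, reading at most
 * TB_MAX_CONFIG_RW_LENGTH dwords at a time. Each dword goes on its own line
 * in the long format that the write side also accepts.
 */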
static void cap_show(struct seq_file *s, struct tb_switch *sw,
		     struct tb_port *port, unsigned int cap, u8 cap_id,
		     u8 vsec_id, int length)
{
	int ret, offset = 0;

	while (length > 0) {
		int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH);
		u32 data[TB_MAX_CONFIG_RW_LENGTH];

		if (port)
			ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset,
					   dwords);
		else
			ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords);
		if (ret) {
			seq_printf(s, "0x%04x <not accessible>\n",
				   cap + offset);
			if (dwords > 1)
				seq_printf(s, "0x%04x ...\n", cap + offset + 1);
			return;
		}

		for (i = 0; i < dwords; i++) {
			seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n",
				   cap + offset + i, offset + i,
				   cap_id, vsec_id, data[i]);
		}

		length -= dwords;
		offset += dwords;
	}
}

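/*
 * Print one capability of an adapter: read its header, work out how many
 * dwords it spans (vendor specific capabilities carry their own length) and
 * hand it off to cap_show().
 */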
static void port_cap_show(struct tb_port *port, struct seq_file *s,
			  unsigned int cap)
{
	struct tb_cap_any header;
	u8 vsec_id = 0;
	size_t length;
	int ret;

	ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1);
	if (ret) {
		seq_printf(s, "0x%04x <capability read failed>\n", cap);
		return;
	}

	switch (header.basic.cap) {
	case TB_PORT_CAP_PHY:
		length = PORT_CAP_LANE_LEN;
		break;

	case TB_PORT_CAP_TIME1:
		length = PORT_CAP_TMU_LEN;
		break;

	case TB_PORT_CAP_POWER:
		length = PORT_CAP_POWER_LEN;
		break;

	case TB_PORT_CAP_ADAP:
		if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) {
			length = PORT_CAP_PCIE_LEN;
		} else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) {
			length = PORT_CAP_DP_LEN;
		} else if (tb_port_is_usb3_down(port) ||
			   tb_port_is_usb3_up(port)) {
			length = PORT_CAP_USB3_LEN;
		} else {
			seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
				   cap, header.basic.cap);
			return;
		}
		break;

	case TB_PORT_CAP_VSE:
		if (!header.extended_short.length) {
			ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT,
					   cap + 1, 1);
			if (ret) {
				seq_printf(s, "0x%04x <capability read failed>\n",
					   cap + 1);
				return;
			}
			length = header.extended_long.length;
			vsec_id = header.extended_short.vsec_id;
		} else {
			length = header.extended_short.length;
			vsec_id = header.extended_short.vsec_id;
			/*
			 * Ice Lake and Tiger Lake do not implement the
			 * full length of the capability, only the first
			 * 32 dwords, so hard-code it here.
			 */
			if (!vsec_id &&
			    (tb_switch_is_ice_lake(port->sw) ||
			     tb_switch_is_tiger_lake(port->sw)))
				length = 32;
		}
		break;

	case TB_PORT_CAP_USB4:
		length = PORT_CAP_USB4_LEN;
		break;

	default:
		seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n",
			   cap, header.basic.cap);
		return;
	}

	cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length);
}

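/* Walk every capability of the adapter and dump each one in turn */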
static void port_caps_show(struct tb_port *port, struct seq_file *s)
{
	int cap;

	cap = tb_port_next_cap(port, 0);
	while (cap > 0) {
		port_cap_show(port, s, cap);
		cap = tb_port_next_cap(port, cap);
	}
}

static int port_basic_regs_show(struct tb_port *port, struct seq_file *s)
{
	u32 data[PORT_CAP_BASIC_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data));
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(data); i++)
		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);

	return 0;
}

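/*
 * Show callback behind the per-adapter "regs" file: resumes the router, takes
 * the domain lock and dumps the basic adapter config space followed by all of
 * its capabilities.
 */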
static int port_regs_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out_rpm_put;
	}

	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");

	ret = port_basic_regs_show(port, s);
	if (ret)
		goto out_unlock;

	port_caps_show(port, s);

out_unlock:
	mutex_unlock(&tb->lock);
out_rpm_put:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(port_regs);

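/*
 * Print one capability of a router: only the TMU and vendor specific
 * capabilities are recognized, anything else is reported as unknown.
 */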
static void switch_cap_show(struct tb_switch *sw, struct seq_file *s,
			    unsigned int cap)
{
	struct tb_cap_any header;
	int ret, length;
	u8 vsec_id = 0;

	ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1);
	if (ret) {
		seq_printf(s, "0x%04x <capability read failed>\n", cap);
		return;
	}

	if (header.basic.cap == TB_SWITCH_CAP_VSE) {
		if (!header.extended_short.length) {
			ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH,
					 cap + 1, 1);
			if (ret) {
				seq_printf(s, "0x%04x <capability read failed>\n",
					   cap + 1);
				return;
			}
			length = header.extended_long.length;
		} else {
			length = header.extended_short.length;
		}
		vsec_id = header.extended_short.vsec_id;
	} else {
		if (header.basic.cap == TB_SWITCH_CAP_TMU) {
			length = SWITCH_CAP_TMU_LEN;
		} else {
			seq_printf(s, "0x%04x <unknown capability 0x%02x>\n",
				   cap, header.basic.cap);
			return;
		}
	}

	cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length);
}

static void switch_caps_show(struct tb_switch *sw, struct seq_file *s)
{
	int cap;

	cap = tb_switch_next_cap(sw, 0);
	while (cap > 0) {
		switch_cap_show(sw, s, cap);
		cap = tb_switch_next_cap(sw, cap);
	}
}

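/*
 * Dump the basic router config space. Pre-USB4 routers only implement the
 * first seven dwords, so only those are read for them.
 */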
static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s)
{
	u32 data[SWITCH_CAP_BASIC_LEN];
	size_t dwords;
	int ret, i;

	/* Only USB4 has the additional registers */
	if (tb_switch_is_usb4(sw))
		dwords = ARRAY_SIZE(data);
	else
		dwords = 7;

	ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]);

	return 0;
}

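/* Show callback for the router "regs" file; mirrors port_regs_show() above */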
static int switch_regs_show(struct seq_file *s, void *not_used)
{
	struct tb_switch *sw = s->private;
	struct tb *tb = sw->tb;
	int ret;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out_rpm_put;
	}

	seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n");

	ret = switch_basic_regs_show(sw, s);
	if (ret)
		goto out_unlock;

	switch_caps_show(sw, s);

out_unlock:
	mutex_unlock(&tb->lock);
out_rpm_put:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(switch_regs);

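/* Dump the PATH_LEN dwords of a single hop entry in the path config space */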
static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid)
{
	u32 data[PATH_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN,
			   ARRAY_SIZE(data));
	if (ret) {
		seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(data); i++) {
		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
			   hopid * PATH_LEN + i, i, hopid, data[i]);
	}

	return 0;
}

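/*
 * Show callback for the read-only "path" file: dumps the path config space
 * entry for every possible HopID of the adapter. Lane and NHI adapters also
 * have an entry for path 0, which is dumped first.
 */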
static int path_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int start, i, ret = 0;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out_rpm_put;
	}

	seq_puts(s, "# offset relative_offset in_hop_id value\n");

	/* NHI and lane adapters have an entry for path 0 */
	if (tb_port_is_null(port) || tb_port_is_nhi(port)) {
		ret = path_show_one(port, s, 0);
		if (ret)
			goto out_unlock;
	}

	start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID;

	for (i = start; i <= port->config.max_in_hop_id; i++) {
		ret = path_show_one(port, s, i);
		if (ret)
			break;
	}

out_unlock:
	mutex_unlock(&tb->lock);
out_rpm_put:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RO(path);

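/* Dump the COUNTER_SET_LEN dwords that make up a single counter set */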
static int counter_set_regs_show(struct tb_port *port, struct seq_file *s,
				 int counter)
{
	u32 data[COUNTER_SET_LEN];
	int ret, i;

	ret = tb_port_read(port, data, TB_CFG_COUNTERS,
			   counter * COUNTER_SET_LEN, ARRAY_SIZE(data));
	if (ret) {
		seq_printf(s, "0x%04x <not accessible>\n",
			   counter * COUNTER_SET_LEN);
		return ret;
	}

	for (i = 0; i < ARRAY_SIZE(data); i++) {
		seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n",
			   counter * COUNTER_SET_LEN + i, i, counter, data[i]);
	}

	return 0;
}

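/* Show callback for the "counters" file: dumps every counter set of the adapter */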
static int counters_show(struct seq_file *s, void *not_used)
{
	struct tb_port *port = s->private;
	struct tb_switch *sw = port->sw;
	struct tb *tb = sw->tb;
	int i, ret = 0;

	pm_runtime_get_sync(&sw->dev);

	if (mutex_lock_interruptible(&tb->lock)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	seq_puts(s, "# offset relative_offset counter_id value\n");

	for (i = 0; i < port->config.max_counters; i++) {
		ret = counter_set_regs_show(port, s, i);
		if (ret)
			break;
	}

	mutex_unlock(&tb->lock);

out:
	pm_runtime_mark_last_busy(&sw->dev);
	pm_runtime_put_autosuspend(&sw->dev);

	return ret;
}
DEBUGFS_ATTR_RW(counters);

/**
 * tb_switch_debugfs_init() - Add debugfs entries for router
 * @sw: Pointer to the router
 *
 * Adds debugfs directories and files for the given router.
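 *
 * With debugfs mounted in the usual place, the entries show up under
 * /sys/kernel/debug/thunderbolt/<device>/: a "regs" file for the router
 * itself and a port<N>/ directory with "regs", "path" and, when the adapter
 * supports counters, "counters" files for each enabled adapter.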
 */
void tb_switch_debugfs_init(struct tb_switch *sw)
{
	struct dentry *debugfs_dir;
	struct tb_port *port;

	debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root);
	sw->debugfs_dir = debugfs_dir;
	debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw,
			    &switch_regs_fops);

	tb_switch_for_each_port(sw, port) {
		struct dentry *debugfs_dir;
		char dir_name[10];

		if (port->disabled)
			continue;
		if (port->config.type == TB_TYPE_INACTIVE)
			continue;

		snprintf(dir_name, sizeof(dir_name), "port%d", port->port);
		debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir);
		debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir,
				    port, &port_regs_fops);
		debugfs_create_file("path", 0400, debugfs_dir, port,
				    &path_fops);
		if (port->config.counters_support)
			debugfs_create_file("counters", 0600, debugfs_dir, port,
					    &counters_fops);
	}
}

/**
 * tb_switch_debugfs_remove() - Remove all router debugfs entries
 * @sw: Pointer to the router
 *
 * Removes all previously added debugfs entries under this router.
 */
void tb_switch_debugfs_remove(struct tb_switch *sw)
{
	debugfs_remove_recursive(sw->debugfs_dir);
}

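/* Create and remove the top-level "thunderbolt" directory at the debugfs root */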
void tb_debugfs_init(void)
{
	tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL);
}

void tb_debugfs_exit(void)
{
	debugfs_remove_recursive(tb_debugfs_root);
}