xref: /openbmc/linux/drivers/thunderbolt/retimer.c (revision ac73d4bf)
// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

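/*
 * Retimers are addressed over the USB4 port sideband with indices
 * starting at 1; the scan loops in this file probe at most this many
 * retimer indices per port.
 */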
#define TB_MAX_RETIMER_INDEX	6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

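/*
 * nvm_read() and nvm_write() are the NVMem access callbacks that
 * tb_retimer_nvm_add() registers for the active and non-active NVM
 * areas. If the domain lock cannot be taken immediately they return
 * restart_syscall() so the access is retried instead of blocking on
 * tb->lock. The read path additionally holds a runtime PM reference
 * for the duration of the sideband transaction.
 */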
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

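/*
 * Registers the retimer NVM with the Thunderbolt NVM core: the active
 * (read-only) and non-active (writable) areas become NVMem devices
 * backed by nvm_read() and nvm_write() above. If tb_nvm_alloc() fails
 * with -EOPNOTSUPP the retimer is still registered, just with NVM
 * upgrade disabled.
 */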
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

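/*
 * Validates the image userspace wrote to the non-active NVM buffer and
 * then flushes it to the retimer over the sideband, starting at offset
 * 0. The flushed flag lets a later authenticate request skip writing
 * the same image again.
 */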
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

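/*
 * Triggers authentication of the new NVM image. With @auth_only the
 * image is expected to be in the retimer NVM already, so the NVM write
 * offset is just reset to 0 before the authenticate command is issued.
 * The retimer typically drops off the sideband while it authenticates,
 * which is why a failing status read right after the command is
 * treated as success here.
 */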
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. It
	 * is expected that the read below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

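/*
 * sysfs attributes exposed for each registered retimer. Their ABI is
 * documented together with the other Thunderbolt/USB4 attributes in
 * Documentation/ABI/testing/sysfs-bus-thunderbolt.
 */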
static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else if (rt->no_nvm_upgrade)
		ret = -EOPNOTSUPP;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

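/*
 * Writing to nvm_authenticate drives the NVM upgrade flow:
 * AUTHENTICATE_ONLY authenticates an image that is already in the
 * retimer NVM, WRITE_ONLY only flushes the buffered image, and
 * WRITE_AND_AUTHENTICATE flushes (unless already flushed) and then
 * authenticates. A hypothetical userspace sequence, with the retimer
 * name and NVMem path purely illustrative and "1" assumed to mean
 * WRITE_AND_AUTHENTICATE, would be roughly:
 *
 *	# cat retimer_nvm.bin > /sys/bus/thunderbolt/devices/0-0:1.1/nvm_non_active0/nvmem
 *	# echo 1 > /sys/bus/thunderbolt/devices/0-0:1.1/nvm_authenticate
 */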
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

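/*
 * Creates and registers a new retimer device for @index on @port. The
 * vendor and product IDs are read over the sideband first and only
 * retimers with the Intel NVM format are accepted for now. The device
 * is added as a child of the USB4 port and runtime PM is enabled
 * without callbacks of its own.
 */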
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

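	/*
	 * Only the Intel NVM image format is supported for now; 0x8087
	 * is Intel's USB vendor ID, accepted here in addition to
	 * PCI_VENDOR_ID_INTEL.
	 */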
	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

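/*
 * Helpers for finding an already registered retimer by (port, index).
 * Note that device_find_child() takes a reference to the child device,
 * so callers of tb_port_find_retimer() must drop it with put_device()
 * when they are done (as tb_retimer_scan() does).
 */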
struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

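/*
 * The scan below works roughly as follows: a broadcast enumeration
 * command assigns sideband indices to the retimers, the inbound
 * sideband channel is enabled for every possible index, any pending
 * authentication status is captured, the index of the last on-board
 * retimer is determined, and finally retimer devices are added for the
 * indices that do not have one yet.
 */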
/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (!usb4)
		return 0;

	pm_runtime_get_sync(&usb4->dev);

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		goto out;

	/*
	 * Enable the sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	if (!last_idx) {
		ret = 0;
		goto out;
	}

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

out:
	pm_runtime_mark_last_busy(&usb4->dev);
	pm_runtime_put_autosuspend(&usb4->dev);

	return ret;
}

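/* device_for_each_child_reverse() callback used by tb_retimer_remove_all() */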
static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}