// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *	    Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

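/*
 * On-board retimers are addressed over the USB4 port sideband by
 * index; this driver probes indices 1..TB_MAX_RETIMER_INDEX on each
 * port.
 */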
#define TB_MAX_RETIMER_INDEX	6

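/*
 * NVMem read callback for the active NVM region. The domain lock is
 * taken with mutex_trylock(): if it is already held elsewhere,
 * restart_syscall() makes the calling syscall be retried instead of
 * blocking here.
 */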
static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val,
			       size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

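/*
 * NVMem write callback for the non-active NVM region. Writes are only
 * buffered in the tb_nvm structure here; the buffer is validated and
 * flushed to the retimer by tb_retimer_nvm_validate_and_write() when
 * userspace triggers it through the nvm_authenticate attribute.
 */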
static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val,
				size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	u32 val, nvm_size;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
					 sizeof(val));
	if (ret)
		goto err_nvm;

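	/*
	 * NVM_VERSION packs the image version as major in bits 23:16
	 * and minor in bits 15:8; for example val == 0x00360100 shows
	 * up as "36.1" in the nvm_version attribute. The plain shifts
	 * below assume nvm->major and nvm->minor are u8, so the
	 * unrelated high bits are truncated away.
	 */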
	nvm->major = val >> 16;
	nvm->minor = val >> 8;

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
					 &val, sizeof(val));
	if (ret)
		goto err_nvm;

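	/*
	 * NVM_FLASH_SIZE appears to encode the flash size as a power of
	 * two in megabits: SZ_1M << (val & 7) is the size in bits and
	 * dividing by 8 gives bytes, so e.g. (val & 7) == 3 means
	 * 8 Mbit == 1 MiB of flash. The first 16 KiB presumably hold
	 * headers and the rest is split evenly between the active and
	 * non-active halves, which is why only half of it is exposed as
	 * the active NVMem device below.
	 */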
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;

	ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}

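/*
 * The image layout expected by the checks below: the low 24 bits of
 * the first dword give hdr_size, the digital section starts at
 * hdr_size with its own 16-bit size, and the device ID sits at offset
 * hdr_size + NVM_DEVID. Only the part of the image starting at
 * hdr_size is sent to the retimer.
 */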
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = rt->nvm->buf;
	u16 ds_size, device;
	int ret;

	image_size = rt->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * The FARB pointer must point inside the image, and the image
	 * must contain at least the parts of the digital section that
	 * we read here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	buf += hdr_size;
	image_size -= hdr_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					 image_size);
	if (!ret)
		rt->nvm->flushed = true;

	return ret;
}

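/*
 * Starts NVM authentication in the retimer. With @auth_only the image
 * already sitting in the non-active NVM is authenticated (only the
 * write offset is reset to 0 first); otherwise the image previously
 * flushed by tb_retimer_nvm_validate_and_write() is used. Once
 * authentication has been triggered the retimer is expected to stop
 * responding for a while, which is why a failing status read below is
 * not treated as an error.
 */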
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * If we can still access the retimer, check the status now. It
	 * is expected that the read below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

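/*
 * Userspace drives the NVM upgrade through this attribute: writing a
 * non-zero value first clears the previous status and then, depending
 * on the value (AUTHENTICATE_ONLY, WRITE_ONLY or WRITE_AND_AUTHENTICATE),
 * either only authenticates the image already in the non-active NVM,
 * only flushes the buffered image to the retimer, or does both.
 */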
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sprintf(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that the retimer supports NVM operations. If not, then
	 * don't add the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

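	/*
	 * The retimer is registered as a child of the USB4 port device
	 * and named after the parent switch, port number and retimer
	 * index, e.g. something like "0-0:1.1" for the first retimer on
	 * port 1 of the host router.
	 */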
	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

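	/*
	 * Enable runtime PM for the retimer. It has no PM callbacks of
	 * its own, so taking a runtime PM reference on it (as the NVM
	 * read and nvm_authenticate paths do) mainly serves to keep the
	 * parent USB4 port resumed while the sideband is in use.
	 */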
	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

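/*
 * Looks up the retimer at @index below @port. Note that a successful
 * device_find_child() returns the device with a reference held, so the
 * caller must drop it with put_device() when done (as tb_retimer_scan()
 * does).
 */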
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed and
 * then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (!usb4)
		return 0;

	pm_runtime_get_sync(&usb4->dev);

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		goto out;

	/*
	 * Enable the sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	if (!last_idx) {
		ret = 0;
		goto out;
	}

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

out:
	pm_runtime_mark_last_busy(&usb4->dev);
	pm_runtime_put_autosuspend(&usb4->dev);

	return ret;
}

static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}
546