// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX    6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
                        size_t size)
{
        return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

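/*
 * Read callback for the active NVM (passed to tb_nvm_add_active()
 * below). Reads go to the retimer over the USB4 port sideband, so the
 * retimer is kept runtime resumed for the duration of the access and
 * the domain lock is taken with mutex_trylock(), restarting the
 * syscall instead of blocking if the lock is contended.
 */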
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto out;
        }

        ret = tb_retimer_nvm_read(rt, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

out:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        return ret;
}

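/*
 * Write callback for the non-active NVM (passed to
 * tb_nvm_add_non_active() below). Writes are only buffered here with
 * tb_nvm_write_buf(); the buffered image is validated and flushed to
 * the retimer when the user writes to the nvm_authenticate attribute.
 */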
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret = 0;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

        return ret;
}

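/*
 * Sets up the NVM for the retimer: reads the current NVM version and
 * registers the active NVM, plus the non-active one when upgrade is
 * supported. On failure the retimer is marked as not supporting NVM
 * upgrade and the error, if any, is propagated to the caller.
 */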
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
        struct tb_nvm *nvm;
        int ret;

        nvm = tb_nvm_alloc(&rt->dev);
        if (IS_ERR(nvm)) {
                ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
                goto err_nvm;
        }

        ret = tb_nvm_read_version(nvm);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_active(nvm, nvm_read);
        if (ret)
                goto err_nvm;

        if (!rt->no_nvm_upgrade) {
                ret = tb_nvm_add_non_active(nvm, nvm_write);
                if (ret)
                        goto err_nvm;
        }

        rt->nvm = nvm;
        return 0;

err_nvm:
        dev_dbg(&rt->dev, "NVM upgrade disabled\n");
        rt->no_nvm_upgrade = true;
        if (!IS_ERR(nvm))
                tb_nvm_free(nvm);

        return ret;
}

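/*
 * Validates the image buffered through the non-active NVM and writes
 * it to the retimer NVM starting at offset 0. On success the buffer is
 * marked flushed so the same image is not written again when
 * authentication is triggered later.
 */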
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
        unsigned int image_size;
        const u8 *buf;
        int ret;

        ret = tb_nvm_validate(rt->nvm);
        if (ret)
                return ret;

        buf = rt->nvm->buf_data_start;
        image_size = rt->nvm->buf_data_size;

        ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
                                          image_size);
        if (ret)
                return ret;

        rt->nvm->flushed = true;
        return 0;
}

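/*
 * Starts NVM authentication on the retimer. With @auth_only the NVM
 * write offset is reset to 0 before triggering authentication (used
 * when only authenticating an image that is already in the NVM,
 * without flushing a new one). Once authentication starts the retimer
 * typically drops off the sideband; if the status read still succeeds,
 * the result is stored and reported through nvm_authenticate.
 */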
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
        u32 status;
        int ret;

        if (auth_only) {
                ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
                if (ret)
                        return ret;
        }

        ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
        if (ret)
                return ret;

        usleep_range(100, 150);

        /*
         * Check the status now if we can still access the retimer. It
         * is expected that the read below fails.
         */
        ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
                                                        &status);
        if (!ret) {
                rt->auth_status = status;
                return status ? -EINVAL : 0;
        }

        return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else
                ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

        mutex_unlock(&rt->tb->lock);

        return ret;
}

static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
        int i;

        tb_port_dbg(port, "reading NVM authentication status of retimers\n");

        /*
         * Before doing anything else, read the authentication status.
         * If the retimer has it set, store it for the new retimer
         * device instance.
         */
        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}

static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
        int i;

        /*
         * When the USB4 port is online, sideband communications are
         * already up.
         */
        if (!usb4_port_device_is_offline(port->usb4))
                return;

        tb_port_dbg(port, "enabling sideband transactions\n");

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
        int i;

        /*
         * When the USB4 port is offline we need to keep the sideband
         * communications up to make it possible to communicate with
         * the connected retimers.
         */
        if (usb4_port_device_is_offline(port->usb4))
                return;

        tb_port_dbg(port, "disabling sideband transactions\n");

        for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
                usb4_port_retimer_unset_inbound_sbtx(port, i);
}

static ssize_t nvm_authenticate_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int val, ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto exit_rpm;
        }

        if (!rt->nvm) {
                ret = -EAGAIN;
                goto exit_unlock;
        }

        ret = kstrtoint(buf, 10, &val);
        if (ret)
                goto exit_unlock;

        /* Always clear status */
        rt->auth_status = 0;

        if (val) {
                /*
                 * When NVM authentication starts the retimer is no
                 * longer accessible, so calling
                 * tb_retimer_unset_inbound_sbtx() would fail and
                 * therefore we do not call it. The exceptions are when
                 * the validation fails or when we only write the new
                 * NVM image without authentication.
                 */
                tb_retimer_set_inbound_sbtx(rt->port);
                if (val == AUTHENTICATE_ONLY) {
                        ret = tb_retimer_nvm_authenticate(rt, true);
                } else {
                        if (!rt->nvm->flushed) {
                                if (!rt->nvm->buf) {
                                        ret = -EINVAL;
                                        goto exit_unlock;
                                }

                                ret = tb_retimer_nvm_validate_and_write(rt);
                                if (ret || val == WRITE_ONLY)
                                        goto exit_unlock;
                        }
                        if (val == WRITE_AND_AUTHENTICATE)
                                ret = tb_retimer_nvm_authenticate(rt, false);
                }
        }

exit_unlock:
        if (ret || val == WRITE_ONLY)
                tb_retimer_unset_inbound_sbtx(rt->port);
        mutex_unlock(&rt->tb->lock);
exit_rpm:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else
                ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

        mutex_unlock(&rt->tb->lock);
        return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
                                  int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct tb_retimer *rt = tb_to_retimer(dev);

        if (attr == &dev_attr_nvm_authenticate.attr ||
            attr == &dev_attr_nvm_version.attr)
                return rt->no_nvm_upgrade ? 0 : attr->mode;

        return attr->mode;
}

static struct attribute *retimer_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_nvm_authenticate.attr,
        &dev_attr_nvm_version.attr,
        &dev_attr_vendor.attr,
        NULL
};

static const struct attribute_group retimer_group = {
        .is_visible = retimer_is_visible,
        .attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
        &retimer_group,
        NULL
};

static void tb_retimer_release(struct device *dev)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        kfree(rt);
}

struct device_type tb_retimer_type = {
        .name = "thunderbolt_retimer",
        .groups = retimer_groups,
        .release = tb_retimer_release,
};

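/*
 * Probes the retimer at @index behind @port: reads the vendor and
 * product IDs over the sideband, makes sure NVM operations are
 * supported, and then registers a new retimer device under the USB4
 * port with runtime PM enabled.
 */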
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
        struct tb_retimer *rt;
        u32 vendor, device;
        int ret;

        ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
                                     sizeof(vendor));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
                return ret;
        }

        ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
                                     sizeof(device));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
                return ret;
        }

        /*
         * Check that it supports NVM operations. If not then don't add
         * the device at all.
         */
        ret = usb4_port_retimer_nvm_sector_size(port, index);
        if (ret < 0)
                return ret;

        rt = kzalloc(sizeof(*rt), GFP_KERNEL);
        if (!rt)
                return -ENOMEM;

        rt->index = index;
        rt->vendor = vendor;
        rt->device = device;
        rt->auth_status = auth_status;
        rt->port = port;
        rt->tb = port->sw->tb;

        rt->dev.parent = &port->usb4->dev;
        rt->dev.bus = &tb_bus_type;
        rt->dev.type = &tb_retimer_type;
        dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
                     port->port, index);

        ret = device_register(&rt->dev);
        if (ret) {
                dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
                put_device(&rt->dev);
                return ret;
        }

        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
                device_unregister(&rt->dev);
                return ret;
        }

        dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
                 rt->vendor, rt->device);

        pm_runtime_no_callbacks(&rt->dev);
        pm_runtime_set_active(&rt->dev);
        pm_runtime_enable(&rt->dev);
        pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_use_autosuspend(&rt->dev);

        return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
        dev_info(&rt->dev, "retimer disconnected\n");
        tb_nvm_free(rt->nvm);
        device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
        const struct tb_port *port;
        u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
        const struct tb_retimer_lookup *lookup = data;
        struct tb_retimer *rt = tb_to_retimer(dev);

        return rt && rt->port == lookup->port && rt->index == lookup->index;
}

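/*
 * Finds an already registered retimer at @index under @port. Note that
 * device_find_child() takes a reference to the returned device, so the
 * caller is expected to drop it with put_device() when done (as
 * tb_retimer_scan() does).
 */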
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
        struct tb_retimer_lookup lookup = { .port = port, .index = index };
        struct device *dev;

        dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
        if (dev)
                return tb_to_retimer(dev);

        return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
        u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;

        /*
         * Send broadcast RT to make sure retimer indices facing this
         * port are set.
         */
        ret = usb4_port_enumerate_retimers(port);
        if (ret)
                return ret;

        /*
         * Immediately after sending enumerate retimers read the
         * authentication status of each retimer.
         */
        tb_retimer_nvm_authenticate_status(port, status);

        /*
         * Enable sideband channel for each retimer. We can do this
         * regardless of whether there is a device connected or not.
         */
        tb_retimer_set_inbound_sbtx(port);

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
                /*
                 * Last retimer is true only for the last on-board
                 * retimer (the one connected directly to the Type-C
                 * port).
                 */
                ret = usb4_port_retimer_is_last(port, i);
                if (ret > 0)
                        last_idx = i;
                else if (ret < 0)
                        break;
        }

        tb_retimer_unset_inbound_sbtx(port);

        if (!last_idx)
                return 0;

        /* Add on-board retimers if they do not exist already */
        ret = 0;
        for (i = 1; i <= last_idx; i++) {
                struct tb_retimer *rt;

                rt = tb_port_find_retimer(port, i);
                if (rt) {
                        put_device(&rt->dev);
                } else if (add) {
                        ret = tb_retimer_add(port, i, status[i]);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                }
        }

        return ret;
}

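/*
 * Callback for device_for_each_child_reverse(): removes the retimer if
 * it belongs to @port.
 */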
static int remove_retimer(struct device *dev, void *data)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        struct tb_port *port = data;

        if (rt && rt->port == port)
                tb_retimer_remove(rt);
        return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
        struct usb4_port *usb4;

        usb4 = port->usb4;
        if (usb4)
                device_for_each_child_reverse(&usb4->dev, port,
                                              remove_retimer);
}