// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
                        size_t size)
{
        return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}
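/*
 * NVMem access callbacks for the retimer NVM devices. Both run in user
 * context and take the domain lock with mutex_trylock(), returning
 * restart_syscall() instead of blocking so that a concurrent NVM
 * operation cannot deadlock the caller. Reads go over the port sideband
 * and therefore hold a runtime PM reference; writes only buffer the new
 * image, which is flushed to the retimer from nvm_authenticate_store().
 */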
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto out;
        }

        ret = tb_retimer_nvm_read(rt, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

out:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        return ret;
}

static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret = 0;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

        return ret;
}

static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
        struct tb_nvm *nvm;
        int ret;

        nvm = tb_nvm_alloc(&rt->dev);
        if (IS_ERR(nvm)) {
                ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
                goto err_nvm;
        }

        ret = tb_nvm_read_version(nvm);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_active(nvm, nvm_read);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_non_active(nvm, nvm_write);
        if (ret)
                goto err_nvm;

        rt->nvm = nvm;
        return 0;

err_nvm:
        dev_dbg(&rt->dev, "NVM upgrade disabled\n");
        if (!IS_ERR(nvm))
                tb_nvm_free(nvm);

        return ret;
}

static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
        unsigned int image_size;
        const u8 *buf;
        int ret;

        ret = tb_nvm_validate(rt->nvm);
        if (ret)
                return ret;

        buf = rt->nvm->buf_data_start;
        image_size = rt->nvm->buf_data_size;

        ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
                                          image_size);
        if (ret)
                return ret;

        rt->nvm->flushed = true;
        return 0;
}

static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
        u32 status;
        int ret;

        if (auth_only) {
                ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
                if (ret)
                        return ret;
        }

        ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
        if (ret)
                return ret;

        usleep_range(100, 150);

        /*
         * Check the status now if we can still access the retimer. It
         * is expected that the below fails.
         */
        ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
                                                        &status);
        if (!ret) {
                rt->auth_status = status;
                return status ? -EINVAL : 0;
        }

        return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else if (rt->no_nvm_upgrade)
                ret = -EOPNOTSUPP;
        else
                ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

        mutex_unlock(&rt->tb->lock);

        return ret;
}

static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
        int i;

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
        int i;

        for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
                usb4_port_retimer_unset_inbound_sbtx(port, i);
}
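/*
 * The nvm_authenticate attribute drives the retimer NVM upgrade. The
 * accepted values follow the router NVM upgrade interface:
 * WRITE_AND_AUTHENTICATE flushes the image buffered through the
 * non-active NVMem device and starts authentication, WRITE_ONLY only
 * flushes the image, and AUTHENTICATE_ONLY authenticates a previously
 * written image. Sideband transactions are enabled around the operation
 * and disabled again before the domain lock is dropped.
 *
 * A typical upgrade from user space looks roughly like this (paths are
 * illustrative):
 *
 *   # dd if=retimer_nvm.bin of=.../<retimer>/nvm_non_active0/nvmem
 *   # echo 1 > .../<retimer>/nvm_authenticate
 */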
static ssize_t nvm_authenticate_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int val, ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto exit_rpm;
        }

        if (!rt->nvm) {
                ret = -EAGAIN;
                goto exit_unlock;
        }

        ret = kstrtoint(buf, 10, &val);
        if (ret)
                goto exit_unlock;

        /* Always clear status */
        rt->auth_status = 0;

        if (val) {
                tb_retimer_set_inbound_sbtx(rt->port);
                if (val == AUTHENTICATE_ONLY) {
                        ret = tb_retimer_nvm_authenticate(rt, true);
                } else {
                        if (!rt->nvm->flushed) {
                                if (!rt->nvm->buf) {
                                        ret = -EINVAL;
                                        goto exit_unlock;
                                }

                                ret = tb_retimer_nvm_validate_and_write(rt);
                                if (ret || val == WRITE_ONLY)
                                        goto exit_unlock;
                        }
                        if (val == WRITE_AND_AUTHENTICATE)
                                ret = tb_retimer_nvm_authenticate(rt, false);
                }
        }

exit_unlock:
        tb_retimer_unset_inbound_sbtx(rt->port);
        mutex_unlock(&rt->tb->lock);
exit_rpm:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else
                ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

        mutex_unlock(&rt->tb->lock);
        return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_nvm_authenticate.attr,
        &dev_attr_nvm_version.attr,
        &dev_attr_vendor.attr,
        NULL
};

static const struct attribute_group retimer_group = {
        .attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
        &retimer_group,
        NULL
};

static void tb_retimer_release(struct device *dev)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        kfree(rt);
}

struct device_type tb_retimer_type = {
        .name = "thunderbolt_retimer",
        .groups = retimer_groups,
        .release = tb_retimer_release,
};
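/*
 * Each on-board retimer is represented as a device below the USB4 port
 * it sits behind, named "<router>:<port>.<index>". Only retimers whose
 * NVM format is understood (Intel vendor IDs) and that answer the NVM
 * sector size query are registered. Runtime PM is enabled without
 * callbacks so that accessing the retimer resumes the parent port.
 */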
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
        struct tb_retimer *rt;
        u32 vendor, device;
        int ret;

        ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
                                     sizeof(vendor));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
                return ret;
        }

        ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
                                     sizeof(device));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
                return ret;
        }

        if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
                tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
                             vendor);
                return -EOPNOTSUPP;
        }

        /*
         * Check that it supports NVM operations. If not then don't add
         * the device at all.
         */
        ret = usb4_port_retimer_nvm_sector_size(port, index);
        if (ret < 0)
                return ret;

        rt = kzalloc(sizeof(*rt), GFP_KERNEL);
        if (!rt)
                return -ENOMEM;

        rt->index = index;
        rt->vendor = vendor;
        rt->device = device;
        rt->auth_status = auth_status;
        rt->port = port;
        rt->tb = port->sw->tb;

        rt->dev.parent = &port->usb4->dev;
        rt->dev.bus = &tb_bus_type;
        rt->dev.type = &tb_retimer_type;
        dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
                     port->port, index);

        ret = device_register(&rt->dev);
        if (ret) {
                dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
                put_device(&rt->dev);
                return ret;
        }

        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
                device_unregister(&rt->dev);
                return ret;
        }

        dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
                 rt->vendor, rt->device);

        pm_runtime_no_callbacks(&rt->dev);
        pm_runtime_set_active(&rt->dev);
        pm_runtime_enable(&rt->dev);
        pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_use_autosuspend(&rt->dev);

        return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
        dev_info(&rt->dev, "retimer disconnected\n");
        tb_nvm_free(rt->nvm);
        device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
        const struct tb_port *port;
        u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
        const struct tb_retimer_lookup *lookup = data;
        struct tb_retimer *rt = tb_to_retimer(dev);

        return rt && rt->port == lookup->port && rt->index == lookup->index;
}

static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
        struct tb_retimer_lookup lookup = { .port = port, .index = index };
        struct device *dev;

        dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
        if (dev)
                return tb_to_retimer(dev);

        return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
        u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;

        /*
         * Send broadcast RT to make sure retimer indices facing this
         * port are set.
         */
        ret = usb4_port_enumerate_retimers(port);
        if (ret)
                return ret;

        /*
         * Enable sideband channel for each retimer. We can do this
         * regardless of whether there is a device connected or not.
         */
        tb_retimer_set_inbound_sbtx(port);

        /*
         * Before doing anything else, read the authentication status.
         * If the retimer has it set, store it for the new retimer
         * device instance.
         */
        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
                /*
                 * Last retimer is true only for the last on-board
                 * retimer (the one connected directly to the Type-C
                 * port).
                 */
                ret = usb4_port_retimer_is_last(port, i);
                if (ret > 0)
                        last_idx = i;
                else if (ret < 0)
                        break;
        }

        tb_retimer_unset_inbound_sbtx(port);

        if (!last_idx)
                return 0;

        /* Add on-board retimers if they do not exist already */
        ret = 0;
        for (i = 1; i <= last_idx; i++) {
                struct tb_retimer *rt;

                rt = tb_port_find_retimer(port, i);
                if (rt) {
                        put_device(&rt->dev);
                } else if (add) {
                        ret = tb_retimer_add(port, i, status[i]);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                }
        }

        return ret;
}

static int remove_retimer(struct device *dev, void *data)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        struct tb_port *port = data;

        if (rt && rt->port == port)
                tb_retimer_remove(rt);
        return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
        struct usb4_port *usb4;

        usb4 = port->usb4;
        if (usb4)
                device_for_each_child_reverse(&usb4->dev, port,
                                              remove_retimer);
}