1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * NVM helpers 4 * 5 * Copyright (C) 2020, Intel Corporation 6 * Author: Mika Westerberg <mika.westerberg@linux.intel.com> 7 */ 8 9 #include <linux/idr.h> 10 #include <linux/slab.h> 11 #include <linux/vmalloc.h> 12 13 #include "tb.h" 14 15 /* Intel specific NVM offsets */ 16 #define INTEL_NVM_DEVID 0x05 17 #define INTEL_NVM_VERSION 0x08 18 #define INTEL_NVM_CSS 0x10 19 #define INTEL_NVM_FLASH_SIZE 0x45 20 21 /* ASMedia specific NVM offsets */ 22 #define ASMEDIA_NVM_DATE 0x1c 23 #define ASMEDIA_NVM_VERSION 0x28 24 25 static DEFINE_IDA(nvm_ida); 26 27 /** 28 * struct tb_nvm_vendor_ops - Vendor specific NVM operations 29 * @read_version: Reads out NVM version from the flash 30 * @validate: Validates the NVM image before update (optional) 31 * @write_headers: Writes headers before the rest of the image (optional) 32 */ 33 struct tb_nvm_vendor_ops { 34 int (*read_version)(struct tb_nvm *nvm); 35 int (*validate)(struct tb_nvm *nvm); 36 int (*write_headers)(struct tb_nvm *nvm); 37 }; 38 39 /** 40 * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping 41 * @vendor: Vendor ID 42 * @vops: Vendor specific NVM operations 43 * 44 * Maps vendor ID to NVM vendor operations. If there is no mapping then 45 * NVM firmware upgrade is disabled for the device. 46 */ 47 struct tb_nvm_vendor { 48 u16 vendor; 49 const struct tb_nvm_vendor_ops *vops; 50 }; 51 52 static int intel_switch_nvm_version(struct tb_nvm *nvm) 53 { 54 struct tb_switch *sw = tb_to_switch(nvm->dev); 55 u32 val, nvm_size, hdr_size; 56 int ret; 57 58 /* 59 * If the switch is in safe-mode the only accessible portion of 60 * the NVM is the non-active one where userspace is expected to 61 * write new functional NVM. 62 */ 63 if (sw->safe_mode) 64 return 0; 65 66 ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val)); 67 if (ret) 68 return ret; 69 70 hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; 71 nvm_size = (SZ_1M << (val & 7)) / 8; 72 nvm_size = (nvm_size - hdr_size) / 2; 73 74 ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val)); 75 if (ret) 76 return ret; 77 78 nvm->major = (val >> 16) & 0xff; 79 nvm->minor = (val >> 8) & 0xff; 80 nvm->active_size = nvm_size; 81 82 return 0; 83 } 84 85 static int intel_switch_nvm_validate(struct tb_nvm *nvm) 86 { 87 struct tb_switch *sw = tb_to_switch(nvm->dev); 88 unsigned int image_size, hdr_size; 89 u16 ds_size, device_id; 90 u8 *buf = nvm->buf; 91 92 image_size = nvm->buf_data_size; 93 94 /* 95 * FARB pointer must point inside the image and must at least 96 * contain parts of the digital section we will be reading here. 97 */ 98 hdr_size = (*(u32 *)buf) & 0xffffff; 99 if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size) 100 return -EINVAL; 101 102 /* Digital section start should be aligned to 4k page */ 103 if (!IS_ALIGNED(hdr_size, SZ_4K)) 104 return -EINVAL; 105 106 /* 107 * Read digital section size and check that it also fits inside 108 * the image. 109 */ 110 ds_size = *(u16 *)(buf + hdr_size); 111 if (ds_size >= image_size) 112 return -EINVAL; 113 114 if (sw->safe_mode) 115 return 0; 116 117 /* 118 * Make sure the device ID in the image matches the one 119 * we read from the switch config space. 120 */ 121 device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID); 122 if (device_id != sw->config.device_id) 123 return -EINVAL; 124 125 /* Skip headers in the image */ 126 nvm->buf_data_start = buf + hdr_size; 127 nvm->buf_data_size = image_size - hdr_size; 128 129 return 0; 130 } 131 132 static int intel_switch_nvm_write_headers(struct tb_nvm *nvm) 133 { 134 struct tb_switch *sw = tb_to_switch(nvm->dev); 135 136 if (sw->generation < 3) { 137 int ret; 138 139 /* Write CSS headers first */ 140 ret = dma_port_flash_write(sw->dma_port, 141 DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS, 142 DMA_PORT_CSS_MAX_SIZE); 143 if (ret) 144 return ret; 145 } 146 147 return 0; 148 } 149 150 static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = { 151 .read_version = intel_switch_nvm_version, 152 .validate = intel_switch_nvm_validate, 153 .write_headers = intel_switch_nvm_write_headers, 154 }; 155 156 static int asmedia_switch_nvm_version(struct tb_nvm *nvm) 157 { 158 struct tb_switch *sw = tb_to_switch(nvm->dev); 159 u32 val; 160 int ret; 161 162 ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val)); 163 if (ret) 164 return ret; 165 166 nvm->major = (val << 16) & 0xff0000; 167 nvm->major |= val & 0x00ff00; 168 nvm->major |= (val >> 16) & 0x0000ff; 169 170 ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val)); 171 if (ret) 172 return ret; 173 174 nvm->minor = (val << 16) & 0xff0000; 175 nvm->minor |= val & 0x00ff00; 176 nvm->minor |= (val >> 16) & 0x0000ff; 177 178 /* ASMedia NVM size is fixed to 512k */ 179 nvm->active_size = SZ_512K; 180 181 return 0; 182 } 183 184 static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = { 185 .read_version = asmedia_switch_nvm_version, 186 }; 187 188 /* Router vendor NVM support table */ 189 static const struct tb_nvm_vendor switch_nvm_vendors[] = { 190 { 0x174c, &asmedia_switch_nvm_ops }, 191 { PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops }, 192 { 0x8087, &intel_switch_nvm_ops }, 193 }; 194 195 static int intel_retimer_nvm_version(struct tb_nvm *nvm) 196 { 197 struct tb_retimer *rt = tb_to_retimer(nvm->dev); 198 u32 val, nvm_size; 199 int ret; 200 201 ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val)); 202 if (ret) 203 return ret; 204 205 nvm->major = (val >> 16) & 0xff; 206 nvm->minor = (val >> 8) & 0xff; 207 208 ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val)); 209 if (ret) 210 return ret; 211 212 nvm_size = (SZ_1M << (val & 7)) / 8; 213 nvm_size = (nvm_size - SZ_16K) / 2; 214 nvm->active_size = nvm_size; 215 216 return 0; 217 } 218 219 static int intel_retimer_nvm_validate(struct tb_nvm *nvm) 220 { 221 struct tb_retimer *rt = tb_to_retimer(nvm->dev); 222 unsigned int image_size, hdr_size; 223 u8 *buf = nvm->buf; 224 u16 ds_size, device; 225 226 image_size = nvm->buf_data_size; 227 228 /* 229 * FARB pointer must point inside the image and must at least 230 * contain parts of the digital section we will be reading here. 231 */ 232 hdr_size = (*(u32 *)buf) & 0xffffff; 233 if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size) 234 return -EINVAL; 235 236 /* Digital section start should be aligned to 4k page */ 237 if (!IS_ALIGNED(hdr_size, SZ_4K)) 238 return -EINVAL; 239 240 /* 241 * Read digital section size and check that it also fits inside 242 * the image. 243 */ 244 ds_size = *(u16 *)(buf + hdr_size); 245 if (ds_size >= image_size) 246 return -EINVAL; 247 248 /* 249 * Make sure the device ID in the image matches the retimer 250 * hardware. 251 */ 252 device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID); 253 if (device != rt->device) 254 return -EINVAL; 255 256 /* Skip headers in the image */ 257 nvm->buf_data_start = buf + hdr_size; 258 nvm->buf_data_size = image_size - hdr_size; 259 260 return 0; 261 } 262 263 static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = { 264 .read_version = intel_retimer_nvm_version, 265 .validate = intel_retimer_nvm_validate, 266 }; 267 268 /* Retimer vendor NVM support table */ 269 static const struct tb_nvm_vendor retimer_nvm_vendors[] = { 270 { 0x8087, &intel_retimer_nvm_ops }, 271 }; 272 273 /** 274 * tb_nvm_alloc() - Allocate new NVM structure 275 * @dev: Device owning the NVM 276 * 277 * Allocates new NVM structure with unique @id and returns it. In case 278 * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the 279 * NVM format of the @dev is not known by the kernel. 280 */ 281 struct tb_nvm *tb_nvm_alloc(struct device *dev) 282 { 283 const struct tb_nvm_vendor_ops *vops = NULL; 284 struct tb_nvm *nvm; 285 int ret, i; 286 287 if (tb_is_switch(dev)) { 288 const struct tb_switch *sw = tb_to_switch(dev); 289 290 for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) { 291 const struct tb_nvm_vendor *v = &switch_nvm_vendors[i]; 292 293 if (v->vendor == sw->config.vendor_id) { 294 vops = v->vops; 295 break; 296 } 297 } 298 299 if (!vops) { 300 tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n", 301 sw->config.vendor_id); 302 return ERR_PTR(-EOPNOTSUPP); 303 } 304 } else if (tb_is_retimer(dev)) { 305 const struct tb_retimer *rt = tb_to_retimer(dev); 306 307 for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) { 308 const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i]; 309 310 if (v->vendor == rt->vendor) { 311 vops = v->vops; 312 break; 313 } 314 } 315 316 if (!vops) { 317 dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n", 318 rt->vendor); 319 return ERR_PTR(-EOPNOTSUPP); 320 } 321 } else { 322 return ERR_PTR(-EOPNOTSUPP); 323 } 324 325 nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); 326 if (!nvm) 327 return ERR_PTR(-ENOMEM); 328 329 ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); 330 if (ret < 0) { 331 kfree(nvm); 332 return ERR_PTR(ret); 333 } 334 335 nvm->id = ret; 336 nvm->dev = dev; 337 nvm->vops = vops; 338 339 return nvm; 340 } 341 342 /** 343 * tb_nvm_read_version() - Read and populate NVM version 344 * @nvm: NVM structure 345 * 346 * Uses vendor specific means to read out and fill in the existing 347 * active NVM version. Returns %0 in case of success and negative errno 348 * otherwise. 349 */ 350 int tb_nvm_read_version(struct tb_nvm *nvm) 351 { 352 const struct tb_nvm_vendor_ops *vops = nvm->vops; 353 354 if (vops && vops->read_version) 355 return vops->read_version(nvm); 356 357 return -EOPNOTSUPP; 358 } 359 360 /** 361 * tb_nvm_validate() - Validate new NVM image 362 * @nvm: NVM structure 363 * 364 * Runs vendor specific validation over the new NVM image and if all 365 * checks pass returns %0. As side effect updates @nvm->buf_data_start 366 * and @nvm->buf_data_size fields to match the actual data to be written 367 * to the NVM. 368 * 369 * If the validation does not pass then returns negative errno. 370 */ 371 int tb_nvm_validate(struct tb_nvm *nvm) 372 { 373 const struct tb_nvm_vendor_ops *vops = nvm->vops; 374 unsigned int image_size; 375 u8 *buf = nvm->buf; 376 377 if (!buf) 378 return -EINVAL; 379 if (!vops) 380 return -EOPNOTSUPP; 381 382 /* Just do basic image size checks */ 383 image_size = nvm->buf_data_size; 384 if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) 385 return -EINVAL; 386 387 /* 388 * Set the default data start in the buffer. The validate method 389 * below can change this if needed. 390 */ 391 nvm->buf_data_start = buf; 392 393 return vops->validate ? vops->validate(nvm) : 0; 394 } 395 396 /** 397 * tb_nvm_write_headers() - Write headers before the rest of the image 398 * @nvm: NVM structure 399 * 400 * If the vendor NVM format requires writing headers before the rest of 401 * the image, this function does that. Can be called even if the device 402 * does not need this. 403 * 404 * Returns %0 in case of success and negative errno otherwise. 405 */ 406 int tb_nvm_write_headers(struct tb_nvm *nvm) 407 { 408 const struct tb_nvm_vendor_ops *vops = nvm->vops; 409 410 return vops->write_headers ? vops->write_headers(nvm) : 0; 411 } 412 413 /** 414 * tb_nvm_add_active() - Adds active NVMem device to NVM 415 * @nvm: NVM structure 416 * @reg_read: Pointer to the function to read the NVM (passed directly to the 417 * NVMem device) 418 * 419 * Registers new active NVmem device for @nvm. The @reg_read is called 420 * directly from NVMem so it must handle possible concurrent access if 421 * needed. The first parameter passed to @reg_read is @nvm structure. 422 * Returns %0 in success and negative errno otherwise. 423 */ 424 int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read) 425 { 426 struct nvmem_config config; 427 struct nvmem_device *nvmem; 428 429 memset(&config, 0, sizeof(config)); 430 431 config.name = "nvm_active"; 432 config.reg_read = reg_read; 433 config.read_only = true; 434 config.id = nvm->id; 435 config.stride = 4; 436 config.word_size = 4; 437 config.size = nvm->active_size; 438 config.dev = nvm->dev; 439 config.owner = THIS_MODULE; 440 config.priv = nvm; 441 442 nvmem = nvmem_register(&config); 443 if (IS_ERR(nvmem)) 444 return PTR_ERR(nvmem); 445 446 nvm->active = nvmem; 447 return 0; 448 } 449 450 /** 451 * tb_nvm_write_buf() - Write data to @nvm buffer 452 * @nvm: NVM structure 453 * @offset: Offset where to write the data 454 * @val: Data buffer to write 455 * @bytes: Number of bytes to write 456 * 457 * Helper function to cache the new NVM image before it is actually 458 * written to the flash. Copies @bytes from @val to @nvm->buf starting 459 * from @offset. 460 */ 461 int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val, 462 size_t bytes) 463 { 464 if (!nvm->buf) { 465 nvm->buf = vmalloc(NVM_MAX_SIZE); 466 if (!nvm->buf) 467 return -ENOMEM; 468 } 469 470 nvm->flushed = false; 471 nvm->buf_data_size = offset + bytes; 472 memcpy(nvm->buf + offset, val, bytes); 473 return 0; 474 } 475 476 /** 477 * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM 478 * @nvm: NVM structure 479 * @reg_write: Pointer to the function to write the NVM (passed directly 480 * to the NVMem device) 481 * 482 * Registers new non-active NVmem device for @nvm. The @reg_write is called 483 * directly from NVMem so it must handle possible concurrent access if 484 * needed. The first parameter passed to @reg_write is @nvm structure. 485 * The size of the NVMem device is set to %NVM_MAX_SIZE. 486 * 487 * Returns %0 in success and negative errno otherwise. 488 */ 489 int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write) 490 { 491 struct nvmem_config config; 492 struct nvmem_device *nvmem; 493 494 memset(&config, 0, sizeof(config)); 495 496 config.name = "nvm_non_active"; 497 config.reg_write = reg_write; 498 config.root_only = true; 499 config.id = nvm->id; 500 config.stride = 4; 501 config.word_size = 4; 502 config.size = NVM_MAX_SIZE; 503 config.dev = nvm->dev; 504 config.owner = THIS_MODULE; 505 config.priv = nvm; 506 507 nvmem = nvmem_register(&config); 508 if (IS_ERR(nvmem)) 509 return PTR_ERR(nvmem); 510 511 nvm->non_active = nvmem; 512 return 0; 513 } 514 515 /** 516 * tb_nvm_free() - Release NVM and its resources 517 * @nvm: NVM structure to release 518 * 519 * Releases NVM and the NVMem devices if they were registered. 520 */ 521 void tb_nvm_free(struct tb_nvm *nvm) 522 { 523 if (nvm) { 524 nvmem_unregister(nvm->non_active); 525 nvmem_unregister(nvm->active); 526 vfree(nvm->buf); 527 ida_simple_remove(&nvm_ida, nvm->id); 528 } 529 kfree(nvm); 530 } 531 532 /** 533 * tb_nvm_read_data() - Read data from NVM 534 * @address: Start address on the flash 535 * @buf: Buffer where the read data is copied 536 * @size: Size of the buffer in bytes 537 * @retries: Number of retries if block read fails 538 * @read_block: Function that reads block from the flash 539 * @read_block_data: Data passsed to @read_block 540 * 541 * This is a generic function that reads data from NVM or NVM like 542 * device. 543 * 544 * Returns %0 on success and negative errno otherwise. 545 */ 546 int tb_nvm_read_data(unsigned int address, void *buf, size_t size, 547 unsigned int retries, read_block_fn read_block, 548 void *read_block_data) 549 { 550 do { 551 unsigned int dwaddress, dwords, offset; 552 u8 data[NVM_DATA_DWORDS * 4]; 553 size_t nbytes; 554 int ret; 555 556 offset = address & 3; 557 nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4); 558 559 dwaddress = address / 4; 560 dwords = ALIGN(nbytes, 4) / 4; 561 562 ret = read_block(read_block_data, dwaddress, data, dwords); 563 if (ret) { 564 if (ret != -ENODEV && retries--) 565 continue; 566 return ret; 567 } 568 569 nbytes -= offset; 570 memcpy(buf, data + offset, nbytes); 571 572 size -= nbytes; 573 address += nbytes; 574 buf += nbytes; 575 } while (size > 0); 576 577 return 0; 578 } 579 580 /** 581 * tb_nvm_write_data() - Write data to NVM 582 * @address: Start address on the flash 583 * @buf: Buffer where the data is copied from 584 * @size: Size of the buffer in bytes 585 * @retries: Number of retries if the block write fails 586 * @write_block: Function that writes block to the flash 587 * @write_block_data: Data passwd to @write_block 588 * 589 * This is generic function that writes data to NVM or NVM like device. 590 * 591 * Returns %0 on success and negative errno otherwise. 592 */ 593 int tb_nvm_write_data(unsigned int address, const void *buf, size_t size, 594 unsigned int retries, write_block_fn write_block, 595 void *write_block_data) 596 { 597 do { 598 unsigned int offset, dwaddress; 599 u8 data[NVM_DATA_DWORDS * 4]; 600 size_t nbytes; 601 int ret; 602 603 offset = address & 3; 604 nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4); 605 606 memcpy(data + offset, buf, nbytes); 607 608 dwaddress = address / 4; 609 ret = write_block(write_block_data, dwaddress, data, nbytes / 4); 610 if (ret) { 611 if (ret == -ETIMEDOUT) { 612 if (retries--) 613 continue; 614 ret = -EIO; 615 } 616 return ret; 617 } 618 619 size -= nbytes; 620 address += nbytes; 621 buf += nbytes; 622 } while (size > 0); 623 624 return 0; 625 } 626 627 void tb_nvm_exit(void) 628 { 629 ida_destroy(&nvm_ida); 630 } 631