// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 */

#include <linux/firmware.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/crc32.h>

#include "hfi.h"
#include "trace.h"

/*
 * Make it easy to toggle firmware file names and whether they get loaded by
 * editing the following. This may be something we do while in development
 * but not necessarily something a user would ever need to use.
 */
#define DEFAULT_FW_8051_NAME_FPGA "hfi_dc8051.bin"
#define DEFAULT_FW_8051_NAME_ASIC "hfi1_dc8051.fw"
#define DEFAULT_FW_FABRIC_NAME "hfi1_fabric.fw"
#define DEFAULT_FW_SBUS_NAME "hfi1_sbus.fw"
#define DEFAULT_FW_PCIE_NAME "hfi1_pcie.fw"
#define ALT_FW_8051_NAME_ASIC "hfi1_dc8051_d.fw"
#define ALT_FW_FABRIC_NAME "hfi1_fabric_d.fw"
#define ALT_FW_SBUS_NAME "hfi1_sbus_d.fw"
#define ALT_FW_PCIE_NAME "hfi1_pcie_d.fw"

MODULE_FIRMWARE(DEFAULT_FW_8051_NAME_ASIC);
MODULE_FIRMWARE(DEFAULT_FW_FABRIC_NAME);
MODULE_FIRMWARE(DEFAULT_FW_SBUS_NAME);
MODULE_FIRMWARE(DEFAULT_FW_PCIE_NAME);

static uint fw_8051_load = 1;
static uint fw_fabric_serdes_load = 1;
static uint fw_pcie_serdes_load = 1;
static uint fw_sbus_load = 1;

/* Firmware file names get set in hfi1_firmware_init() based on the above */
static char *fw_8051_name;
static char *fw_fabric_serdes_name;
static char *fw_sbus_name;
static char *fw_pcie_serdes_name;

#define SBUS_MAX_POLL_COUNT 100
#define SBUS_COUNTER(reg, name) \
        (((reg) >> ASIC_STS_SBUS_COUNTERS_##name##_CNT_SHIFT) & \
         ASIC_STS_SBUS_COUNTERS_##name##_CNT_MASK)

/*
 * Firmware security header.
 */
struct css_header {
        u32 module_type;
        u32 header_len;
        u32 header_version;
        u32 module_id;
        u32 module_vendor;
        u32 date;               /* BCD yyyymmdd */
        u32 size;               /* in DWORDs */
        u32 key_size;           /* in DWORDs */
        u32 modulus_size;       /* in DWORDs */
        u32 exponent_size;      /* in DWORDs */
        u32 reserved[22];
};

/* expected field values */
#define CSS_MODULE_TYPE 0x00000006
#define CSS_HEADER_LEN 0x000000a1
#define CSS_HEADER_VERSION 0x00010000
#define CSS_MODULE_VENDOR 0x00008086

#define KEY_SIZE 256
#define MU_SIZE 8
#define EXPONENT_SIZE 4

/* size of platform configuration partition */
#define MAX_PLATFORM_CONFIG_FILE_SIZE 4096

/* size of the platform configuration file encoded in format version 4 */
#define PLATFORM_CONFIG_FORMAT_4_FILE_SIZE 528

/* the file itself */
struct firmware_file {
        struct css_header css_header;
        u8 modulus[KEY_SIZE];
        u8 exponent[EXPONENT_SIZE];
        u8 signature[KEY_SIZE];
        u8 firmware[];
};

struct augmented_firmware_file {
        struct css_header css_header;
        u8 modulus[KEY_SIZE];
        u8 exponent[EXPONENT_SIZE];
        u8 signature[KEY_SIZE];
        u8 r2[KEY_SIZE];
        u8 mu[MU_SIZE];
        u8 firmware[];
};

/* augmented file size difference */
#define AUGMENT_SIZE (sizeof(struct augmented_firmware_file) - \
                      sizeof(struct firmware_file))

struct firmware_details {
        /* Linux core piece */
        const struct firmware *fw;

        struct css_header *css_header;
        u8 *firmware_ptr;               /* pointer to binary data */
        u32 firmware_len;               /* length in bytes */
        u8 *modulus;                    /* pointer to the modulus */
        u8 *exponent;                   /* pointer to the exponent */
        u8 *signature;                  /* pointer to the signature */
        u8 *r2;                         /* pointer to r2 */
        u8 *mu;                         /* pointer to mu */
        struct augmented_firmware_file dummy_header;
};

/*
 * The mutex protects fw_state, fw_err, and all of the firmware_details
 * variables.
 */
static DEFINE_MUTEX(fw_mutex);
enum fw_state {
        FW_EMPTY,
        FW_TRY,
        FW_FINAL,
        FW_ERR
};

static enum fw_state fw_state = FW_EMPTY;
static int fw_err;
static struct firmware_details fw_8051;
static struct firmware_details fw_fabric;
static struct firmware_details fw_pcie;
static struct firmware_details fw_sbus;

/* flags for turn_off_spicos() */
#define SPICO_SBUS 0x1
#define SPICO_FABRIC 0x2
#define ENABLE_SPICO_SMASK 0x1

/* security block commands */
#define RSA_CMD_INIT 0x1
#define RSA_CMD_START 0x2

/* security block status */
#define RSA_STATUS_IDLE 0x0
#define RSA_STATUS_ACTIVE 0x1
#define RSA_STATUS_DONE 0x2
#define RSA_STATUS_FAILED 0x3

/* RSA engine timeout, in ms */
#define RSA_ENGINE_TIMEOUT 100 /* ms */

/* hardware mutex timeout, in ms */
#define HM_TIMEOUT 10 /* ms */

/* 8051 memory access timeout, in us */
#define DC8051_ACCESS_TIMEOUT 100 /* us */

/* the number of fabric SerDes on the SBus */
#define NUM_FABRIC_SERDES 4

/* ASIC_STS_SBUS_RESULT.RESULT_CODE value */
#define SBUS_READ_COMPLETE 0x4

/* SBus fabric SerDes addresses, one set per HFI */
static const u8 fabric_serdes_addrs[2][NUM_FABRIC_SERDES] = {
        { 0x01, 0x02, 0x03, 0x04 },
        { 0x28, 0x29, 0x2a, 0x2b }
};

/* SBus PCIe SerDes addresses, one set per HFI */
static const u8 pcie_serdes_addrs[2][NUM_PCIE_SERDES] = {
        { 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16,
          0x18, 0x1a, 0x1c, 0x1e, 0x20, 0x22, 0x24, 0x26 },
        { 0x2f, 0x31, 0x33, 0x35, 0x37, 0x39, 0x3b, 0x3d,
          0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d }
};

/* SBus PCIe PCS addresses, one set per HFI */
const u8 pcie_pcs_addrs[2][NUM_PCIE_SERDES] = {
        { 0x09, 0x0b, 0x0d, 0x0f, 0x11, 0x13, 0x15, 0x17,
          0x19, 0x1b, 0x1d, 0x1f, 0x21, 0x23, 0x25, 0x27 },
        { 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
          0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e }
};

/* SBus fabric SerDes broadcast addresses, one per HFI */
static const u8 fabric_serdes_broadcast[2] = { 0xe4, 0xe5 };
static const u8 all_fabric_serdes_broadcast = 0xe1;

/* SBus PCIe SerDes broadcast addresses, one per HFI */
const u8 pcie_serdes_broadcast[2] = { 0xe2, 0xe3 };
static const u8 all_pcie_serdes_broadcast = 0xe0;

static const u32 platform_config_table_limits[PLATFORM_CONFIG_TABLE_MAX] = {
        0,
        SYSTEM_TABLE_MAX,
        PORT_TABLE_MAX,
        RX_PRESET_TABLE_MAX,
        TX_PRESET_TABLE_MAX,
        QSFP_ATTEN_TABLE_MAX,
        VARIABLE_SETTINGS_TABLE_MAX
};

/* forwards */
static void dispose_one_firmware(struct firmware_details *fdet);
static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
                                       struct firmware_details *fdet);
static void dump_fw_version(struct hfi1_devdata *dd);

/*
 * Read a single 64-bit value from 8051 data memory.
 *
 * Expects:
 * o caller to have already set up data read, no auto increment
 * o caller to turn off read enable when finished
 *
 * The address argument is a byte offset. Bits 0:2 in the address are
 * ignored - i.e. the hardware will always do aligned 8-byte reads as if
 * the lower bits are zero.
 *
 * Return 0 on success, -ENXIO on a read error (timeout).
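 *
 * For example, a read at byte offset 0x1005 returns the same aligned
 * 64-bit word as a read at offset 0x1000, because the low three address
 * bits are ignored by the hardware.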
 */
static int __read_8051_data(struct hfi1_devdata *dd, u32 addr, u64 *result)
{
        u64 reg;
        int count;

        /* step 1: set the address, clear enable */
        reg = (addr & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
                        << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT;
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);
        /* step 2: enable */
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL,
                  reg | DC_DC8051_CFG_RAM_ACCESS_CTRL_READ_ENA_SMASK);

        /* wait until ACCESS_COMPLETED is set */
        count = 0;
        while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
                    & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
                    == 0) {
                count++;
                if (count > DC8051_ACCESS_TIMEOUT) {
                        dd_dev_err(dd, "timeout reading 8051 data\n");
                        return -ENXIO;
                }
                ndelay(10);
        }

        /* gather the data */
        *result = read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_RD_DATA);

        return 0;
}

/*
 * Read 8051 data starting at addr, for len bytes. Will read in 8-byte chunks.
 * Return 0 on success, -errno on error.
 */
int read_8051_data(struct hfi1_devdata *dd, u32 addr, u32 len, u64 *result)
{
        unsigned long flags;
        u32 done;
        int ret = 0;

        spin_lock_irqsave(&dd->dc8051_memlock, flags);

        /* data read set-up, no auto-increment */
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);

        for (done = 0; done < len; addr += 8, done += 8, result++) {
                ret = __read_8051_data(dd, addr, result);
                if (ret)
                        break;
        }

        /* turn off read enable */
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);

        spin_unlock_irqrestore(&dd->dc8051_memlock, flags);

        return ret;
}

/*
 * Write data or code to the 8051 code or data RAM.
 */
static int write_8051(struct hfi1_devdata *dd, int code, u32 start,
                      const u8 *data, u32 len)
{
        u64 reg;
        u32 offset;
        int aligned, count;

        /* check alignment */
        aligned = ((unsigned long)data & 0x7) == 0;

        /* write set-up */
        reg = (code ? DC_DC8051_CFG_RAM_ACCESS_SETUP_RAM_SEL_SMASK : 0ull)
                | DC_DC8051_CFG_RAM_ACCESS_SETUP_AUTO_INCR_ADDR_SMASK;
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, reg);

        reg = ((start & DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_MASK)
                        << DC_DC8051_CFG_RAM_ACCESS_CTRL_ADDRESS_SHIFT)
                | DC_DC8051_CFG_RAM_ACCESS_CTRL_WRITE_ENA_SMASK;
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, reg);

        /* write */
        for (offset = 0; offset < len; offset += 8) {
                int bytes = len - offset;

                if (bytes < 8) {
                        reg = 0;
                        memcpy(&reg, &data[offset], bytes);
                } else if (aligned) {
                        reg = *(u64 *)&data[offset];
                } else {
                        memcpy(&reg, &data[offset], 8);
                }
                write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_WR_DATA, reg);

                /* wait until ACCESS_COMPLETED is set */
                count = 0;
                while ((read_csr(dd, DC_DC8051_CFG_RAM_ACCESS_STATUS)
                    & DC_DC8051_CFG_RAM_ACCESS_STATUS_ACCESS_COMPLETED_SMASK)
                    == 0) {
                        count++;
                        if (count > DC8051_ACCESS_TIMEOUT) {
                                dd_dev_err(dd, "timeout writing 8051 data\n");
                                return -ENXIO;
                        }
                        udelay(1);
                }
        }

        /* turn off write access, auto increment (also sets to data access) */
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_CTRL, 0);
        write_csr(dd, DC_DC8051_CFG_RAM_ACCESS_SETUP, 0);

        return 0;
}

/* return 0 if values match, non-zero and complain otherwise */
static int invalid_header(struct hfi1_devdata *dd, const char *what,
                          u32 actual, u32 expected)
{
        if (actual == expected)
                return 0;

        dd_dev_err(dd,
                   "invalid firmware header field %s: expected 0x%x, actual 0x%x\n",
                   what, expected, actual);
        return 1;
}

/*
 * Verify that the static fields in the CSS header match.
 */
static int verify_css_header(struct hfi1_devdata *dd, struct css_header *css)
{
        /* verify CSS header fields (most sizes are in DW, so add /4) */
        if (invalid_header(dd, "module_type", css->module_type,
                           CSS_MODULE_TYPE) ||
            invalid_header(dd, "header_len", css->header_len,
                           (sizeof(struct firmware_file) / 4)) ||
            invalid_header(dd, "header_version", css->header_version,
                           CSS_HEADER_VERSION) ||
            invalid_header(dd, "module_vendor", css->module_vendor,
                           CSS_MODULE_VENDOR) ||
            invalid_header(dd, "key_size", css->key_size, KEY_SIZE / 4) ||
            invalid_header(dd, "modulus_size", css->modulus_size,
                           KEY_SIZE / 4) ||
            invalid_header(dd, "exponent_size", css->exponent_size,
                           EXPONENT_SIZE / 4)) {
                return -EINVAL;
        }
        return 0;
}

/*
 * Make sure there are at least some bytes after the prefix.
 */
static int payload_check(struct hfi1_devdata *dd, const char *name,
                         long file_size, long prefix_size)
{
        /* make sure we have some payload */
        if (prefix_size >= file_size) {
                dd_dev_err(dd,
                           "firmware \"%s\", size %ld, must be larger than %ld bytes\n",
                           name, file_size, prefix_size);
                return -EINVAL;
        }

        return 0;
}

/*
 * Request the firmware from the system. Extract the pieces and fill in
 * fdet. If successful, the caller will need to call dispose_one_firmware().
 * Returns 0 on success, -ERRNO on error.
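 *
 * Typical use (sketch):
 *
 *      err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
 *      if (err == 0) {
 *              ... use fw_sbus.firmware_ptr / fw_sbus.firmware_len ...
 *              dispose_one_firmware(&fw_sbus);
 *      }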
 */
static int obtain_one_firmware(struct hfi1_devdata *dd, const char *name,
                               struct firmware_details *fdet)
{
        struct css_header *css;
        int ret;

        memset(fdet, 0, sizeof(*fdet));

        ret = request_firmware(&fdet->fw, name, &dd->pcidev->dev);
        if (ret) {
                dd_dev_warn(dd, "cannot find firmware \"%s\", err %d\n",
                            name, ret);
                return ret;
        }

        /* verify the firmware */
        if (fdet->fw->size < sizeof(struct css_header)) {
                dd_dev_err(dd, "firmware \"%s\" is too small\n", name);
                ret = -EINVAL;
                goto done;
        }
        css = (struct css_header *)fdet->fw->data;

        hfi1_cdbg(FIRMWARE, "Firmware %s details:", name);
        hfi1_cdbg(FIRMWARE, "file size: 0x%lx bytes", fdet->fw->size);
        hfi1_cdbg(FIRMWARE, "CSS structure:");
        hfi1_cdbg(FIRMWARE, " module_type 0x%x", css->module_type);
        hfi1_cdbg(FIRMWARE, " header_len 0x%03x (0x%03x bytes)",
                  css->header_len, 4 * css->header_len);
        hfi1_cdbg(FIRMWARE, " header_version 0x%x", css->header_version);
        hfi1_cdbg(FIRMWARE, " module_id 0x%x", css->module_id);
        hfi1_cdbg(FIRMWARE, " module_vendor 0x%x", css->module_vendor);
        hfi1_cdbg(FIRMWARE, " date 0x%x", css->date);
        hfi1_cdbg(FIRMWARE, " size 0x%03x (0x%03x bytes)",
                  css->size, 4 * css->size);
        hfi1_cdbg(FIRMWARE, " key_size 0x%03x (0x%03x bytes)",
                  css->key_size, 4 * css->key_size);
        hfi1_cdbg(FIRMWARE, " modulus_size 0x%03x (0x%03x bytes)",
                  css->modulus_size, 4 * css->modulus_size);
        hfi1_cdbg(FIRMWARE, " exponent_size 0x%03x (0x%03x bytes)",
                  css->exponent_size, 4 * css->exponent_size);
        hfi1_cdbg(FIRMWARE, "firmware size: 0x%lx bytes",
                  fdet->fw->size - sizeof(struct firmware_file));

        /*
         * If the file does not have a valid CSS header, fail.
         * Otherwise, check the CSS size field for an expected size.
         * The augmented file has r2 and mu inserted after the header
         * was generated, so there will be a known difference between
         * the CSS header size and the actual file size. Use this
         * difference to identify an augmented file.
         *
         * Note: css->size is in DWORDs, multiply by 4 to get bytes.
         */
        ret = verify_css_header(dd, css);
        if (ret) {
                dd_dev_info(dd, "Invalid CSS header for \"%s\"\n", name);
        } else if ((css->size * 4) == fdet->fw->size) {
                /* non-augmented firmware file */
                struct firmware_file *ff = (struct firmware_file *)
                                                        fdet->fw->data;

                /* make sure there are bytes in the payload */
                ret = payload_check(dd, name, fdet->fw->size,
                                    sizeof(struct firmware_file));
                if (ret == 0) {
                        fdet->css_header = css;
                        fdet->modulus = ff->modulus;
                        fdet->exponent = ff->exponent;
                        fdet->signature = ff->signature;
                        fdet->r2 = fdet->dummy_header.r2; /* use dummy space */
                        fdet->mu = fdet->dummy_header.mu; /* use dummy space */
                        fdet->firmware_ptr = ff->firmware;
                        fdet->firmware_len = fdet->fw->size -
                                             sizeof(struct firmware_file);
                        /*
                         * Header does not include r2 and mu - generate here.
                         * For now, fail.
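                         * (r2 and mu together account for the extra
                         * AUGMENT_SIZE = KEY_SIZE + MU_SIZE = 264 bytes
                         * that an augmented file carries.)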
                         */
                        dd_dev_err(dd, "driver is unable to validate firmware without r2 and mu (not in firmware file)\n");
                        ret = -EINVAL;
                }
        } else if ((css->size * 4) + AUGMENT_SIZE == fdet->fw->size) {
                /* augmented firmware file */
                struct augmented_firmware_file *aff =
                        (struct augmented_firmware_file *)fdet->fw->data;

                /* make sure there are bytes in the payload */
                ret = payload_check(dd, name, fdet->fw->size,
                                    sizeof(struct augmented_firmware_file));
                if (ret == 0) {
                        fdet->css_header = css;
                        fdet->modulus = aff->modulus;
                        fdet->exponent = aff->exponent;
                        fdet->signature = aff->signature;
                        fdet->r2 = aff->r2;
                        fdet->mu = aff->mu;
                        fdet->firmware_ptr = aff->firmware;
                        fdet->firmware_len = fdet->fw->size -
                                        sizeof(struct augmented_firmware_file);
                }
        } else {
                /* css->size check failed */
                dd_dev_err(dd,
                           "invalid firmware header field size: expected 0x%lx or 0x%lx, actual 0x%x\n",
                           fdet->fw->size / 4,
                           (fdet->fw->size - AUGMENT_SIZE) / 4,
                           css->size);

                ret = -EINVAL;
        }

done:
        /* if returning an error, clean up after ourselves */
        if (ret)
                dispose_one_firmware(fdet);
        return ret;
}

static void dispose_one_firmware(struct firmware_details *fdet)
{
        release_firmware(fdet->fw);
        /* erase all previous information */
        memset(fdet, 0, sizeof(*fdet));
}

/*
 * Obtain the 4 firmwares from the OS. All must be obtained at once or not
 * at all. If called with the firmware state in FW_TRY, use alternate names.
 * On exit, this routine will have set the firmware state to one of FW_TRY,
 * FW_FINAL, or FW_ERR.
 *
 * Must be holding fw_mutex.
 */
static void __obtain_firmware(struct hfi1_devdata *dd)
{
        int err = 0;

        if (fw_state == FW_FINAL)       /* nothing more to obtain */
                return;
        if (fw_state == FW_ERR)         /* already in error */
                return;

        /* fw_state is FW_EMPTY or FW_TRY */
retry:
        if (fw_state == FW_TRY) {
                /*
                 * We tried the original and it failed. Move to the
                 * alternate.
                 */
                dd_dev_warn(dd, "using alternate firmware names\n");
                /*
                 * Let others run. Some systems, when missing firmware, do
                 * something that holds for 30 seconds. If we do that twice
                 * in a row it triggers a task-blocked warning.
                 */
                cond_resched();
                if (fw_8051_load)
                        dispose_one_firmware(&fw_8051);
                if (fw_fabric_serdes_load)
                        dispose_one_firmware(&fw_fabric);
                if (fw_sbus_load)
                        dispose_one_firmware(&fw_sbus);
                if (fw_pcie_serdes_load)
                        dispose_one_firmware(&fw_pcie);
                fw_8051_name = ALT_FW_8051_NAME_ASIC;
                fw_fabric_serdes_name = ALT_FW_FABRIC_NAME;
                fw_sbus_name = ALT_FW_SBUS_NAME;
                fw_pcie_serdes_name = ALT_FW_PCIE_NAME;

                /*
                 * Add a delay before obtaining and loading debug firmware.
                 * Authorization will fail if the delay between firmware
                 * authorization events is shorter than 50us. Add 100us to
                 * make a delay time safe.
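                 * (usleep_range(100, 120) below sleeps for 100-120us,
                 * comfortably above the 50us minimum.)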
                 */
                usleep_range(100, 120);
        }

        if (fw_sbus_load) {
                err = obtain_one_firmware(dd, fw_sbus_name, &fw_sbus);
                if (err)
                        goto done;
        }

        if (fw_pcie_serdes_load) {
                err = obtain_one_firmware(dd, fw_pcie_serdes_name, &fw_pcie);
                if (err)
                        goto done;
        }

        if (fw_fabric_serdes_load) {
                err = obtain_one_firmware(dd, fw_fabric_serdes_name,
                                          &fw_fabric);
                if (err)
                        goto done;
        }

        if (fw_8051_load) {
                err = obtain_one_firmware(dd, fw_8051_name, &fw_8051);
                if (err)
                        goto done;
        }

done:
        if (err) {
                /* oops, had problems obtaining a firmware */
                if (fw_state == FW_EMPTY && dd->icode == ICODE_RTL_SILICON) {
                        /* retry with alternate (RTL only) */
                        fw_state = FW_TRY;
                        goto retry;
                }
                dd_dev_err(dd, "unable to obtain working firmware\n");
                fw_state = FW_ERR;
                fw_err = -ENOENT;
        } else {
                /* success */
                if (fw_state == FW_EMPTY &&
                    dd->icode != ICODE_FUNCTIONAL_SIMULATOR)
                        fw_state = FW_TRY;      /* may retry later */
                else
                        fw_state = FW_FINAL;    /* cannot try again */
        }
}

/*
 * Called by all HFIs when loading their firmware - i.e. device probe time.
 * The first one will do the actual firmware load. Use a mutex to resolve
 * any possible race condition.
 *
 * The call to this routine cannot be moved to driver load because the kernel
 * call request_firmware() requires a device which is only available after
 * the first device probe.
 */
static int obtain_firmware(struct hfi1_devdata *dd)
{
        unsigned long timeout;

        mutex_lock(&fw_mutex);

        /* 40s delay due to long delay on missing firmware on some systems */
        timeout = jiffies + msecs_to_jiffies(40000);
        while (fw_state == FW_TRY) {
                /*
                 * Another device is trying the firmware. Wait until it
                 * decides what works (or not).
                 */
                if (time_after(jiffies, timeout)) {
                        /* waited too long */
                        dd_dev_err(dd, "Timeout waiting for firmware try");
                        fw_state = FW_ERR;
                        fw_err = -ETIMEDOUT;
                        break;
                }
                mutex_unlock(&fw_mutex);
                msleep(20);     /* arbitrary delay */
                mutex_lock(&fw_mutex);
        }
        /* not in FW_TRY state */

        /* set fw_state to FW_TRY, FW_FINAL, or FW_ERR, and fw_err */
        if (fw_state == FW_EMPTY)
                __obtain_firmware(dd);

        mutex_unlock(&fw_mutex);
        return fw_err;
}

/*
 * Called when the driver unloads. The timing is asymmetric with its
 * counterpart, obtain_firmware(). If called at device remove time,
 * then it is conceivable that another device could probe while the
 * firmware is being disposed. The mutexes can be moved to do that
 * safely, but then the firmware would be requested from the OS multiple
 * times.
 *
 * No mutex is needed as the driver is unloading and there cannot be any
 * other callers.
 */
void dispose_firmware(void)
{
        dispose_one_firmware(&fw_8051);
        dispose_one_firmware(&fw_fabric);
        dispose_one_firmware(&fw_pcie);
        dispose_one_firmware(&fw_sbus);

        /* retain the error state, otherwise revert to empty */
        if (fw_state != FW_ERR)
                fw_state = FW_EMPTY;
}

/*
 * Called with the result of a firmware download.
 *
 * Return 1 to retry loading the firmware, 0 to stop.
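 *
 * Callers wrap each download in a retry loop, e.g.:
 *
 *      do {
 *              ret = load_8051_firmware(dd, &fw_8051);
 *      } while (retry_firmware(dd, ret));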
 */
static int retry_firmware(struct hfi1_devdata *dd, int load_result)
{
        int retry;

        mutex_lock(&fw_mutex);

        if (load_result == 0) {
                /*
                 * The load succeeded, so expect all others to do the same.
                 * Do not retry again.
                 */
                if (fw_state == FW_TRY)
                        fw_state = FW_FINAL;
                retry = 0;      /* do NOT retry */
        } else if (fw_state == FW_TRY) {
                /* load failed, obtain alternate firmware */
                __obtain_firmware(dd);
                retry = (fw_state == FW_FINAL);
        } else {
                /* else in FW_FINAL or FW_ERR, no retry in either case */
                retry = 0;
        }

        mutex_unlock(&fw_mutex);
        return retry;
}

/*
 * Write a block of data to a given array CSR. All calls will be in
 * multiples of 8 bytes.
 */
static void write_rsa_data(struct hfi1_devdata *dd, int what,
                           const u8 *data, int nbytes)
{
        int qw_size = nbytes / 8;
        int i;

        if (((unsigned long)data & 0x7) == 0) {
                /* aligned */
                u64 *ptr = (u64 *)data;

                for (i = 0; i < qw_size; i++, ptr++)
                        write_csr(dd, what + (8 * i), *ptr);
        } else {
                /* not aligned */
                for (i = 0; i < qw_size; i++, data += 8) {
                        u64 value;

                        memcpy(&value, data, 8);
                        write_csr(dd, what + (8 * i), value);
                }
        }
}

/*
 * Write a block of data to a given CSR as a stream of writes. All calls will
 * be in multiples of 8 bytes.
 */
static void write_streamed_rsa_data(struct hfi1_devdata *dd, int what,
                                    const u8 *data, int nbytes)
{
        u64 *ptr = (u64 *)data;
        int qw_size = nbytes / 8;

        for (; qw_size > 0; qw_size--, ptr++)
                write_csr(dd, what, *ptr);
}

/*
 * Download the signature and start the RSA mechanism. Wait for
 * RSA_ENGINE_TIMEOUT before giving up.
 */
static int run_rsa(struct hfi1_devdata *dd, const char *who,
                   const u8 *signature)
{
        unsigned long timeout;
        u64 reg;
        u32 status;
        int ret = 0;

        /* write the signature */
        write_rsa_data(dd, MISC_CFG_RSA_SIGNATURE, signature, KEY_SIZE);

        /* initialize RSA */
        write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_INIT);

        /*
         * Make sure the engine is idle and insert a delay between the two
         * writes to MISC_CFG_RSA_CMD.
         */
        status = (read_csr(dd, MISC_CFG_FW_CTRL)
                           & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
                             >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;
        if (status != RSA_STATUS_IDLE) {
                dd_dev_err(dd, "%s security engine not idle - giving up\n",
                           who);
                return -EBUSY;
        }

        /* start RSA */
        write_csr(dd, MISC_CFG_RSA_CMD, RSA_CMD_START);

        /*
         * Look for the result.
         *
         * The RSA engine is hooked up to two MISC errors. The driver
         * masks these errors as they do not respond to the standard
         * error "clear down" mechanism. Look for these errors here and
         * clear them when possible. This routine will exit with the
         * errors of the current run still set.
         *
         * MISC_FW_AUTH_FAILED_ERR
         *      Firmware authorization failed. This can be cleared by
         *      re-initializing the RSA engine, then clearing the status bit.
         *      Do not re-init the RSA engine immediately after a successful
         *      run - this will reset the current authorization.
         *
         * MISC_KEY_MISMATCH_ERR
         *      Key does not match. The only way to clear this is to load
         *      a matching key then clear the status bit. If this error
         *      is raised, it will persist outside of this routine until a
         *      matching key is loaded.
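         *
         * On a successful run the status polled below moves from
         * RSA_STATUS_ACTIVE to RSA_STATUS_DONE; RSA_STATUS_FAILED or a
         * timeout is treated as an authorization failure.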
         */
        timeout = msecs_to_jiffies(RSA_ENGINE_TIMEOUT) + jiffies;
        while (1) {
                status = (read_csr(dd, MISC_CFG_FW_CTRL)
                           & MISC_CFG_FW_CTRL_RSA_STATUS_SMASK)
                             >> MISC_CFG_FW_CTRL_RSA_STATUS_SHIFT;

                if (status == RSA_STATUS_IDLE) {
                        /* should not happen */
                        dd_dev_err(dd, "%s firmware security bad idle state\n",
                                   who);
                        ret = -EINVAL;
                        break;
                } else if (status == RSA_STATUS_DONE) {
                        /* finished successfully */
                        break;
                } else if (status == RSA_STATUS_FAILED) {
                        /* finished unsuccessfully */
                        ret = -EINVAL;
                        break;
                }
                /* else still active */

                if (time_after(jiffies, timeout)) {
                        /*
                         * Timed out while active. We can't reset the engine
                         * if it is stuck active, but run through the
                         * error code to see what error bits are set.
                         */
                        dd_dev_err(dd, "%s firmware security time out\n", who);
                        ret = -ETIMEDOUT;
                        break;
                }

                msleep(20);
        }

        /*
         * Arrive here on success or failure. Clear all RSA engine
         * errors. All current errors will stick - the RSA logic is keeping
         * error high. All previous errors will clear - the RSA logic
         * is not keeping the error high.
         */
        write_csr(dd, MISC_ERR_CLEAR,
                  MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK |
                  MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK);
        /*
         * All that is left are the current errors. Print warnings on
         * authorization failure details, if any. Firmware authorization
         * can be retried, so these are only warnings.
         */
        reg = read_csr(dd, MISC_ERR_STATUS);
        if (ret) {
                if (reg & MISC_ERR_STATUS_MISC_FW_AUTH_FAILED_ERR_SMASK)
                        dd_dev_warn(dd, "%s firmware authorization failed\n",
                                    who);
                if (reg & MISC_ERR_STATUS_MISC_KEY_MISMATCH_ERR_SMASK)
                        dd_dev_warn(dd, "%s firmware key mismatch\n", who);
        }

        return ret;
}

static void load_security_variables(struct hfi1_devdata *dd,
                                    struct firmware_details *fdet)
{
        /* Security variables a. Write the modulus */
        write_rsa_data(dd, MISC_CFG_RSA_MODULUS, fdet->modulus, KEY_SIZE);
        /* Security variables b. Write the r2 */
        write_rsa_data(dd, MISC_CFG_RSA_R2, fdet->r2, KEY_SIZE);
        /* Security variables c. Write the mu */
        write_rsa_data(dd, MISC_CFG_RSA_MU, fdet->mu, MU_SIZE);
        /* Security variables d. Write the header */
        write_streamed_rsa_data(dd, MISC_CFG_SHA_PRELOAD,
                                (u8 *)fdet->css_header,
                                sizeof(struct css_header));
}

/* return the 8051 firmware state */
static inline u32 get_firmware_state(struct hfi1_devdata *dd)
{
        u64 reg = read_csr(dd, DC_DC8051_STS_CUR_STATE);

        return (reg >> DC_DC8051_STS_CUR_STATE_FIRMWARE_SHIFT)
                                & DC_DC8051_STS_CUR_STATE_FIRMWARE_MASK;
}

/*
 * Wait until the firmware is up and ready to take host requests.
 * Return 0 on success, -ETIMEDOUT on timeout.
 */
int wait_fm_ready(struct hfi1_devdata *dd, u32 mstimeout)
{
        unsigned long timeout;

        /* in the simulator, the fake 8051 is always ready */
        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
                return 0;

        timeout = msecs_to_jiffies(mstimeout) + jiffies;
        while (1) {
                if (get_firmware_state(dd) == 0xa0)     /* ready */
                        return 0;
                if (time_after(jiffies, timeout))       /* timed out */
                        return -ETIMEDOUT;
                usleep_range(1950, 2050); /* sleep 2ms-ish */
        }
}

/*
 * Load the 8051 firmware.
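 *
 * Outline of the sequence below: hold the DC8051 in reset, load the
 * security variables, write the firmware image into the 8051 code RAM,
 * run the RSA authorization, release reset, then wait for the firmware
 * to report its ready state.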
 */
static int load_8051_firmware(struct hfi1_devdata *dd,
                              struct firmware_details *fdet)
{
        u64 reg;
        int ret;
        u8 ver_major;
        u8 ver_minor;
        u8 ver_patch;

        /*
         * DC Reset sequence
         * Load DC 8051 firmware
         */
        /*
         * DC reset step 1: Reset DC8051
         */
        reg = DC_DC8051_CFG_RST_M8051W_SMASK
                | DC_DC8051_CFG_RST_CRAM_SMASK
                | DC_DC8051_CFG_RST_DRAM_SMASK
                | DC_DC8051_CFG_RST_IRAM_SMASK
                | DC_DC8051_CFG_RST_SFR_SMASK;
        write_csr(dd, DC_DC8051_CFG_RST, reg);

        /*
         * DC reset step 2 (optional): Load 8051 data memory with link
         * configuration
         */

        /*
         * DC reset step 3: Load DC8051 firmware
         */
        /* release all but the core reset */
        reg = DC_DC8051_CFG_RST_M8051W_SMASK;
        write_csr(dd, DC_DC8051_CFG_RST, reg);

        /* Firmware load step 1 */
        load_security_variables(dd, fdet);

        /*
         * Firmware load step 2. Clear MISC_CFG_FW_CTRL.FW_8051_LOADED
         */
        write_csr(dd, MISC_CFG_FW_CTRL, 0);

        /* Firmware load steps 3-5 */
        ret = write_8051(dd, 1/*code*/, 0, fdet->firmware_ptr,
                         fdet->firmware_len);
        if (ret)
                return ret;

        /*
         * DC reset step 4. Host starts the DC8051 firmware
         */
        /*
         * Firmware load step 6. Set MISC_CFG_FW_CTRL.FW_8051_LOADED
         */
        write_csr(dd, MISC_CFG_FW_CTRL, MISC_CFG_FW_CTRL_FW_8051_LOADED_SMASK);

        /* Firmware load steps 7-10 */
        ret = run_rsa(dd, "8051", fdet->signature);
        if (ret)
                return ret;

        /* clear all reset bits, releasing the 8051 */
        write_csr(dd, DC_DC8051_CFG_RST, 0ull);

        /*
         * DC reset step 5. Wait for firmware to be ready to accept host
         * requests.
         */
        ret = wait_fm_ready(dd, TIMEOUT_8051_START);
        if (ret) { /* timed out */
                dd_dev_err(dd, "8051 start timeout, current state 0x%x\n",
                           get_firmware_state(dd));
                return -ETIMEDOUT;
        }

        read_misc_status(dd, &ver_major, &ver_minor, &ver_patch);
        dd_dev_info(dd, "8051 firmware version %d.%d.%d\n",
                    (int)ver_major, (int)ver_minor, (int)ver_patch);
        dd->dc8051_ver = dc8051_ver(ver_major, ver_minor, ver_patch);
        ret = write_host_interface_version(dd, HOST_INTERFACE_VERSION);
        if (ret != HCMD_SUCCESS) {
                dd_dev_err(dd,
                           "Failed to set host interface version, return 0x%x\n",
                           ret);
                return -EIO;
        }

        return 0;
}

/*
 * Write the SBus request register
 *
 * No need for masking - the arguments are sized exactly.
 */
void sbus_request(struct hfi1_devdata *dd,
                  u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
{
        write_csr(dd, ASIC_CFG_SBUS_REQUEST,
                  ((u64)data_in << ASIC_CFG_SBUS_REQUEST_DATA_IN_SHIFT) |
                  ((u64)command << ASIC_CFG_SBUS_REQUEST_COMMAND_SHIFT) |
                  ((u64)data_addr << ASIC_CFG_SBUS_REQUEST_DATA_ADDR_SHIFT) |
                  ((u64)receiver_addr <<
                   ASIC_CFG_SBUS_REQUEST_RECEIVER_ADDR_SHIFT));
}

/*
 * Read a value from the SBus.
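 *
 * Polls ASIC_STS_SBUS_RESULT (roughly 100 polls of about 1 ms each) until
 * the result code indicates READ_COMPLETE and returns the DATA_OUT field,
 * or 0 if the read never completes.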
 *
 * Requires the caller to be in fast mode
 */
static u32 sbus_read(struct hfi1_devdata *dd, u8 receiver_addr, u8 data_addr,
                     u32 data_in)
{
        u64 reg;
        int retries;
        int success = 0;
        u32 result = 0;
        u32 result_code = 0;

        sbus_request(dd, receiver_addr, data_addr, READ_SBUS_RECEIVER, data_in);

        for (retries = 0; retries < 100; retries++) {
                usleep_range(1000, 1200); /* arbitrary */
                reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
                result_code = (reg >> ASIC_STS_SBUS_RESULT_RESULT_CODE_SHIFT)
                                & ASIC_STS_SBUS_RESULT_RESULT_CODE_MASK;
                if (result_code != SBUS_READ_COMPLETE)
                        continue;

                success = 1;
                result = (reg >> ASIC_STS_SBUS_RESULT_DATA_OUT_SHIFT)
                           & ASIC_STS_SBUS_RESULT_DATA_OUT_MASK;
                break;
        }

        if (!success) {
                dd_dev_err(dd, "%s: read failed, result code 0x%x\n", __func__,
                           result_code);
        }

        return result;
}

/*
 * Turn off the SBus and fabric serdes spicos.
 *
 * + Must be called with SBus fast mode turned on.
 * + Must be called after fabric serdes broadcast is set up.
 * + Must be called before the 8051 is loaded - assumes 8051 is not loaded
 *   when using MISC_CFG_FW_CTRL.
 */
static void turn_off_spicos(struct hfi1_devdata *dd, int flags)
{
        /* only needed on A0 */
        if (!is_ax(dd))
                return;

        dd_dev_info(dd, "Turning off spicos:%s%s\n",
                    flags & SPICO_SBUS ? " SBus" : "",
                    flags & SPICO_FABRIC ? " fabric" : "");

        write_csr(dd, MISC_CFG_FW_CTRL, ENABLE_SPICO_SMASK);
        /* disable SBus spico */
        if (flags & SPICO_SBUS)
                sbus_request(dd, SBUS_MASTER_BROADCAST, 0x01,
                             WRITE_SBUS_RECEIVER, 0x00000040);

        /* disable the fabric serdes spicos */
        if (flags & SPICO_FABRIC)
                sbus_request(dd, fabric_serdes_broadcast[dd->hfi1_id],
                             0x07, WRITE_SBUS_RECEIVER, 0x00000000);
        write_csr(dd, MISC_CFG_FW_CTRL, 0);
}

/*
 * Reset all of the fabric serdes for this HFI in preparation to take the
 * link to Polling.
 *
 * To do a reset, we need to write to the serdes registers. Unfortunately,
 * the fabric serdes download to the other HFI on the ASIC will have turned
 * off the firmware validation on this HFI. This means we can't write to the
 * registers to reset the serdes. Work around this by performing a complete
 * re-download and validation of the fabric serdes firmware. This, as a
 * by-product, will reset the serdes. NOTE: the re-download requires that
 * the 8051 be in the Offline state. I.e. not actively trying to use the
 * serdes. This routine is called at the point where the link is Offline and
 * is getting ready to go to Polling.
 */
void fabric_serdes_reset(struct hfi1_devdata *dd)
{
        int ret;

        if (!fw_fabric_serdes_load)
                return;

        ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
        if (ret) {
                dd_dev_err(dd,
                           "Cannot acquire SBus resource to reset fabric SerDes - perhaps you should reboot\n");
                return;
        }
        set_sbus_fast_mode(dd);

        if (is_ax(dd)) {
                /* A0 serdes do not work with a re-download */
                u8 ra = fabric_serdes_broadcast[dd->hfi1_id];

                /* place SerDes in reset and disable SPICO */
                sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
                /* wait 100 refclk cycles @ 156.25MHz => 640ns */
                udelay(1);
                /* remove SerDes reset */
                sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
                /* turn SPICO enable on */
                sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
        } else {
                turn_off_spicos(dd, SPICO_FABRIC);
                /*
                 * No need for firmware retry - what to download has already
                 * been decided.
                 * No need to pay attention to the load return - the only
                 * failure is a validation failure, which has already been
                 * checked by the initial download.
                 */
                (void)load_fabric_serdes_firmware(dd, &fw_fabric);
        }

        clear_sbus_fast_mode(dd);
        release_chip_resource(dd, CR_SBUS);
}

/* Access to the SBus in this routine should probably be serialized */
int sbus_request_slow(struct hfi1_devdata *dd,
                      u8 receiver_addr, u8 data_addr, u8 command, u32 data_in)
{
        u64 reg, count = 0;

        /* make sure fast mode is clear */
        clear_sbus_fast_mode(dd);

        sbus_request(dd, receiver_addr, data_addr, command, data_in);
        write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
                  ASIC_CFG_SBUS_EXECUTE_EXECUTE_SMASK);
        /* Wait for both DONE and RCV_DATA_VALID to go high */
        reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
        while (!((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
                 (reg & ASIC_STS_SBUS_RESULT_RCV_DATA_VALID_SMASK))) {
                if (count++ >= SBUS_MAX_POLL_COUNT) {
                        u64 counts = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
                        /*
                         * If the loop has timed out, we are OK if DONE bit
                         * is set and RCV_DATA_VALID and EXECUTE counters
                         * are the same. If not, we cannot proceed.
                         */
                        if ((reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) &&
                            (SBUS_COUNTER(counts, RCV_DATA_VALID) ==
                             SBUS_COUNTER(counts, EXECUTE)))
                                break;
                        return -ETIMEDOUT;
                }
                udelay(1);
                reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
        }
        count = 0;
        write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
        /* Wait for DONE to clear after EXECUTE is cleared */
        reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
        while (reg & ASIC_STS_SBUS_RESULT_DONE_SMASK) {
                if (count++ >= SBUS_MAX_POLL_COUNT)
                        return -ETIME;
                udelay(1);
                reg = read_csr(dd, ASIC_STS_SBUS_RESULT);
        }
        return 0;
}

static int load_fabric_serdes_firmware(struct hfi1_devdata *dd,
                                       struct firmware_details *fdet)
{
        int i, err;
        const u8 ra = fabric_serdes_broadcast[dd->hfi1_id]; /* receiver addr */

        dd_dev_info(dd, "Downloading fabric firmware\n");

        /* step 1: load security variables */
        load_security_variables(dd, fdet);
        /* step 2: place SerDes in reset and disable SPICO */
        sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000011);
        /* wait 100 refclk cycles @ 156.25MHz => 640ns */
        udelay(1);
        /* step 3: remove SerDes reset */
        sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000010);
        /* step 4: assert IMEM override */
        sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x40000000);
        /* step 5: download SerDes machine code */
        for (i = 0; i < fdet->firmware_len; i += 4) {
                sbus_request(dd, ra, 0x0a, WRITE_SBUS_RECEIVER,
                             *(u32 *)&fdet->firmware_ptr[i]);
        }
        /* step 6: IMEM override off */
        sbus_request(dd, ra, 0x00, WRITE_SBUS_RECEIVER, 0x00000000);
        /* step 7: turn ECC on */
        sbus_request(dd, ra, 0x0b, WRITE_SBUS_RECEIVER, 0x000c0000);

        /* steps 8-11: run the RSA engine */
        err = run_rsa(dd, "fabric serdes", fdet->signature);
        if (err)
                return err;

        /* step 12: turn SPICO enable on */
        sbus_request(dd, ra, 0x07, WRITE_SBUS_RECEIVER, 0x00000002);
        /* step 13: enable core hardware interrupts */
        sbus_request(dd, ra, 0x08, WRITE_SBUS_RECEIVER, 0x00000000);

        return 0;
}

static int load_sbus_firmware(struct hfi1_devdata *dd,
                              struct firmware_details *fdet)
{
        int i, err;
        const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */

        dd_dev_info(dd, "Downloading SBus firmware\n");

        /* step 1: load security variables */
        load_security_variables(dd, fdet);
        /* step 2: place SPICO into reset and enable off */
        sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x000000c0);
        /* step 3: remove reset, enable off, IMEM_CNTRL_EN on */
        sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000240);
        /* step 4: set starting IMEM address for burst download */
        sbus_request(dd, ra, 0x03, WRITE_SBUS_RECEIVER, 0x80000000);
        /* step 5: download the SBus Master machine code */
        for (i = 0; i < fdet->firmware_len; i += 4) {
                sbus_request(dd, ra, 0x14, WRITE_SBUS_RECEIVER,
                             *(u32 *)&fdet->firmware_ptr[i]);
        }
        /* step 6: set IMEM_CNTL_EN off */
        sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000040);
        /* step 7: turn ECC on */
        sbus_request(dd, ra, 0x16, WRITE_SBUS_RECEIVER, 0x000c0000);

        /* steps 8-11: run the RSA engine */
        err = run_rsa(dd, "SBus", fdet->signature);
        if (err)
                return err;

        /* step 12: set SPICO_ENABLE on */
        sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);

        return 0;
}

static int load_pcie_serdes_firmware(struct hfi1_devdata *dd,
                                     struct firmware_details *fdet)
{
        int i;
        const u8 ra = SBUS_MASTER_BROADCAST; /* receiver address */

        dd_dev_info(dd, "Downloading PCIe firmware\n");

        /* step 1: load security variables */
        load_security_variables(dd, fdet);
        /* step 2: assert single step (halts the SBus Master spico) */
        sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000001);
        /* step 3: enable XDMEM access */
        sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000d40);
        /* step 4: load firmware into SBus Master XDMEM */
        /*
         * NOTE: the dmem address, write_en, and wdata are all pre-packed,
         * we only need to pick up the bytes and write them
         */
        for (i = 0; i < fdet->firmware_len; i += 4) {
                sbus_request(dd, ra, 0x04, WRITE_SBUS_RECEIVER,
                             *(u32 *)&fdet->firmware_ptr[i]);
        }
        /* step 5: disable XDMEM access */
        sbus_request(dd, ra, 0x01, WRITE_SBUS_RECEIVER, 0x00000140);
        /* step 6: allow SBus Spico to run */
        sbus_request(dd, ra, 0x05, WRITE_SBUS_RECEIVER, 0x00000000);

        /*
         * steps 7-11: run RSA, if it succeeds, firmware is available to
         * be swapped
         */
        return run_rsa(dd, "PCIe serdes", fdet->signature);
}

/*
 * Set the given broadcast values on the given list of devices.
 */
static void set_serdes_broadcast(struct hfi1_devdata *dd, u8 bg1, u8 bg2,
                                 const u8 *addrs, int count)
{
        while (--count >= 0) {
                /*
                 * Set BROADCAST_GROUP_1 and BROADCAST_GROUP_2, leave
                 * defaults for everything else. Do not read-modify-write,
                 * per instruction from the manufacturer.
                 *
                 * Register 0xfd:
                 *      bits    what
                 *      -----   ---------------------------------
                 *        0     IGNORE_BROADCAST  (default 0)
                 *      11:4    BROADCAST_GROUP_1 (default 0xff)
                 *      23:16   BROADCAST_GROUP_2 (default 0xff)
                 */
                sbus_request(dd, addrs[count], 0xfd, WRITE_SBUS_RECEIVER,
                             (u32)bg1 << 4 | (u32)bg2 << 16);
        }
}

int acquire_hw_mutex(struct hfi1_devdata *dd)
{
        unsigned long timeout;
        int try = 0;
        u8 mask = 1 << dd->hfi1_id;
        u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);

        if (user == mask) {
                dd_dev_info(dd,
                            "Hardware mutex already acquired, mutex mask %u\n",
                            (u32)mask);
                return 0;
        }

retry:
        timeout = msecs_to_jiffies(HM_TIMEOUT) + jiffies;
        while (1) {
                write_csr(dd, ASIC_CFG_MUTEX, mask);
                user = (u8)read_csr(dd, ASIC_CFG_MUTEX);
                if (user == mask)
                        return 0; /* success */
                if (time_after(jiffies, timeout))
                        break; /* timed out */
                msleep(20);
        }

        /* timed out */
        dd_dev_err(dd,
                   "Unable to acquire hardware mutex, mutex mask %u, my mask %u (%s)\n",
                   (u32)user, (u32)mask, (try == 0) ? "retrying" : "giving up");

        if (try == 0) {
                /* break mutex and retry */
                write_csr(dd, ASIC_CFG_MUTEX, 0);
                try++;
                goto retry;
        }

        return -EBUSY;
}

void release_hw_mutex(struct hfi1_devdata *dd)
{
        u8 mask = 1 << dd->hfi1_id;
        u8 user = (u8)read_csr(dd, ASIC_CFG_MUTEX);

        if (user != mask)
                dd_dev_warn(dd,
                            "Unable to release hardware mutex, mutex mask %u, my mask %u\n",
                            (u32)user, (u32)mask);
        else
                write_csr(dd, ASIC_CFG_MUTEX, 0);
}

/* return the given resource bit(s) as a mask for the given HFI */
static inline u64 resource_mask(u32 hfi1_id, u32 resource)
{
        return ((u64)resource) << (hfi1_id ? CR_DYN_SHIFT : 0);
}

static void fail_mutex_acquire_message(struct hfi1_devdata *dd,
                                       const char *func)
{
        dd_dev_err(dd,
                   "%s: hardware mutex stuck - suggest rebooting the machine\n",
                   func);
}

/*
 * Acquire access to a chip resource.
 *
 * Return 0 on success, -EBUSY if resource busy, -EIO if mutex acquire failed.
 */
static int __acquire_chip_resource(struct hfi1_devdata *dd, u32 resource)
{
        u64 scratch0, all_bits, my_bit;
        int ret;

        if (resource & CR_DYN_MASK) {
                /* a dynamic resource is in use if either HFI has set the bit */
                if (dd->pcidev->device == PCI_DEVICE_ID_INTEL0 &&
                    (resource & (CR_I2C1 | CR_I2C2))) {
                        /* discrete devices must serialize across both chains */
                        all_bits = resource_mask(0, CR_I2C1 | CR_I2C2) |
                                        resource_mask(1, CR_I2C1 | CR_I2C2);
                } else {
                        all_bits = resource_mask(0, resource) |
                                   resource_mask(1, resource);
                }
                my_bit = resource_mask(dd->hfi1_id, resource);
        } else {
                /* non-dynamic resources are not split between HFIs */
                all_bits = resource;
                my_bit = resource;
        }

        /* lock against other callers within the driver wanting a resource */
        mutex_lock(&dd->asic_data->asic_resource_mutex);

        ret = acquire_hw_mutex(dd);
        if (ret) {
                fail_mutex_acquire_message(dd, __func__);
                ret = -EIO;
                goto done;
        }

        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        if (scratch0 & all_bits) {
                ret = -EBUSY;
        } else {
                write_csr(dd, ASIC_CFG_SCRATCH, scratch0 | my_bit);
                /* force write to be visible to other HFI on another OS */
                (void)read_csr(dd, ASIC_CFG_SCRATCH);
        }

        release_hw_mutex(dd);

done:
        mutex_unlock(&dd->asic_data->asic_resource_mutex);
        return ret;
}

/*
 * Acquire access to a chip resource, wait up to mswait milliseconds for
 * the resource to become available.
 *
 * Return 0 on success, -EBUSY if busy (even after wait), -EIO if mutex
 * acquire failed.
 */
int acquire_chip_resource(struct hfi1_devdata *dd, u32 resource, u32 mswait)
{
        unsigned long timeout;
        int ret;

        timeout = jiffies + msecs_to_jiffies(mswait);
        while (1) {
                ret = __acquire_chip_resource(dd, resource);
                if (ret != -EBUSY)
                        return ret;
                /* resource is busy, check our timeout */
                if (time_after_eq(jiffies, timeout))
                        return -EBUSY;
                usleep_range(80, 120);  /* arbitrary delay */
        }
}

/*
 * Release access to a chip resource
 */
void release_chip_resource(struct hfi1_devdata *dd, u32 resource)
{
        u64 scratch0, bit;

        /* only dynamic resources should ever be cleared */
        if (!(resource & CR_DYN_MASK)) {
                dd_dev_err(dd, "%s: invalid resource 0x%x\n", __func__,
                           resource);
                return;
        }
        bit = resource_mask(dd->hfi1_id, resource);

        /* lock against other callers within the driver wanting a resource */
        mutex_lock(&dd->asic_data->asic_resource_mutex);

        if (acquire_hw_mutex(dd)) {
                fail_mutex_acquire_message(dd, __func__);
                goto done;
        }

        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        if ((scratch0 & bit) != 0) {
                scratch0 &= ~bit;
                write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
                /* force write to be visible to other HFI on another OS */
                (void)read_csr(dd, ASIC_CFG_SCRATCH);
        } else {
                dd_dev_warn(dd, "%s: id %d, resource 0x%x: bit not set\n",
                            __func__, dd->hfi1_id, resource);
        }

        release_hw_mutex(dd);

done:
        mutex_unlock(&dd->asic_data->asic_resource_mutex);
}

/*
 * Return true if resource is set, false otherwise. Print a warning
 * if not set and a function is supplied.
 */
bool check_chip_resource(struct hfi1_devdata *dd, u32 resource,
                         const char *func)
{
        u64 scratch0, bit;

        if (resource & CR_DYN_MASK)
                bit = resource_mask(dd->hfi1_id, resource);
        else
                bit = resource;

        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        if ((scratch0 & bit) == 0) {
                if (func)
                        dd_dev_warn(dd,
                                    "%s: id %d, resource 0x%x, not acquired!\n",
                                    func, dd->hfi1_id, resource);
                return false;
        }
        return true;
}

static void clear_chip_resources(struct hfi1_devdata *dd, const char *func)
{
        u64 scratch0;

        /* lock against other callers within the driver wanting a resource */
        mutex_lock(&dd->asic_data->asic_resource_mutex);

        if (acquire_hw_mutex(dd)) {
                fail_mutex_acquire_message(dd, func);
                goto done;
        }

        /* clear all dynamic access bits for this HFI */
        scratch0 = read_csr(dd, ASIC_CFG_SCRATCH);
        scratch0 &= ~resource_mask(dd->hfi1_id, CR_DYN_MASK);
        write_csr(dd, ASIC_CFG_SCRATCH, scratch0);
        /* force write to be visible to other HFI on another OS */
        (void)read_csr(dd, ASIC_CFG_SCRATCH);

        release_hw_mutex(dd);

done:
        mutex_unlock(&dd->asic_data->asic_resource_mutex);
}

void init_chip_resources(struct hfi1_devdata *dd)
{
        /* clear any holds left by us */
        clear_chip_resources(dd, __func__);
}

void finish_chip_resources(struct hfi1_devdata *dd)
{
        /* clear any holds left by us */
        clear_chip_resources(dd, __func__);
}

void set_sbus_fast_mode(struct hfi1_devdata *dd)
{
        write_csr(dd, ASIC_CFG_SBUS_EXECUTE,
                  ASIC_CFG_SBUS_EXECUTE_FAST_MODE_SMASK);
}

void clear_sbus_fast_mode(struct hfi1_devdata *dd)
{
        u64 reg, count = 0;

        reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
        while (SBUS_COUNTER(reg, EXECUTE) !=
               SBUS_COUNTER(reg, RCV_DATA_VALID)) {
                if (count++ >= SBUS_MAX_POLL_COUNT)
                        break;
                udelay(1);
                reg = read_csr(dd, ASIC_STS_SBUS_COUNTERS);
        }
        write_csr(dd, ASIC_CFG_SBUS_EXECUTE, 0);
}

int load_firmware(struct hfi1_devdata *dd)
{
        int ret;

        if (fw_fabric_serdes_load) {
                ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
                if (ret)
                        return ret;

                set_sbus_fast_mode(dd);

                set_serdes_broadcast(dd, all_fabric_serdes_broadcast,
                                     fabric_serdes_broadcast[dd->hfi1_id],
                                     fabric_serdes_addrs[dd->hfi1_id],
                                     NUM_FABRIC_SERDES);
                turn_off_spicos(dd, SPICO_FABRIC);
                do {
                        ret = load_fabric_serdes_firmware(dd, &fw_fabric);
                } while (retry_firmware(dd, ret));

                clear_sbus_fast_mode(dd);
                release_chip_resource(dd, CR_SBUS);
                if (ret)
                        return ret;
        }

        if (fw_8051_load) {
                do {
                        ret = load_8051_firmware(dd, &fw_8051);
                } while (retry_firmware(dd, ret));
                if (ret)
                        return ret;
        }

        dump_fw_version(dd);
        return 0;
}

int hfi1_firmware_init(struct hfi1_devdata *dd)
{
        /* only RTL can use these */
        if (dd->icode != ICODE_RTL_SILICON) {
                fw_fabric_serdes_load = 0;
                fw_pcie_serdes_load = 0;
                fw_sbus_load = 0;
        }

        /* no 8051 or QSFP on simulator */
        if (dd->icode == ICODE_FUNCTIONAL_SIMULATOR)
                fw_8051_load = 0;

        if (!fw_8051_name) {
                if (dd->icode == ICODE_RTL_SILICON)
                        fw_8051_name = DEFAULT_FW_8051_NAME_ASIC;
                else
                        fw_8051_name = DEFAULT_FW_8051_NAME_FPGA;
        }
        if (!fw_fabric_serdes_name)
                fw_fabric_serdes_name = DEFAULT_FW_FABRIC_NAME;
        if (!fw_sbus_name)
                fw_sbus_name = DEFAULT_FW_SBUS_NAME;
        if (!fw_pcie_serdes_name)
                fw_pcie_serdes_name = DEFAULT_FW_PCIE_NAME;

        return obtain_firmware(dd);
}

/*
 * This function is a helper function for parse_platform_config(...) and
 * does not check for validity of the platform configuration cache
 * (because we know it is invalid as we are building up the cache).
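 * It only extracts the format version field from the system table and
 * requires it to be at least 4.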
 * As such, this should not be called from anywhere other than
 * parse_platform_config
 */
static int check_meta_version(struct hfi1_devdata *dd, u32 *system_table)
{
        u32 meta_ver, meta_ver_meta, ver_start, ver_len, mask;
        struct platform_config_cache *pcfgcache = &dd->pcfg_cache;

        if (!system_table)
                return -EINVAL;

        meta_ver_meta =
        *(pcfgcache->config_tables[PLATFORM_CONFIG_SYSTEM_TABLE].table_metadata
        + SYSTEM_TABLE_META_VERSION);

        mask = ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);
        ver_start = meta_ver_meta & mask;

        meta_ver_meta >>= METADATA_TABLE_FIELD_LEN_SHIFT;

        mask = ((1 << METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);
        ver_len = meta_ver_meta & mask;

        ver_start /= 8;
        meta_ver = *((u8 *)system_table + ver_start) & ((1 << ver_len) - 1);

        if (meta_ver < 4) {
                dd_dev_info(
                        dd, "%s:Please update platform config\n", __func__);
                return -EINVAL;
        }
        return 0;
}

int parse_platform_config(struct hfi1_devdata *dd)
{
        struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
        struct hfi1_pportdata *ppd = dd->pport;
        u32 *ptr = NULL;
        u32 header1 = 0, header2 = 0, magic_num = 0, crc = 0, file_length = 0;
        u32 record_idx = 0, table_type = 0, table_length_dwords = 0;
        int ret = -EINVAL; /* assume failure */

        /*
         * For integrated devices that did not fall back to the default file,
         * the SI tuning information for active channels is acquired from the
         * scratch register bitmap, thus there is no platform config to parse.
         * Skip parsing in these situations.
         */
        if (ppd->config_from_scratch)
                return 0;

        if (!dd->platform_config.data) {
                dd_dev_err(dd, "%s: Missing config file\n", __func__);
                ret = -EINVAL;
                goto bail;
        }
        ptr = (u32 *)dd->platform_config.data;

        magic_num = *ptr;
        ptr++;
        if (magic_num != PLATFORM_CONFIG_MAGIC_NUM) {
                dd_dev_err(dd, "%s: Bad config file\n", __func__);
                ret = -EINVAL;
                goto bail;
        }

        /* Field is file size in DWORDs */
        file_length = (*ptr) * 4;

        /*
         * Length can't be larger than partition size. Assume platform
         * config format version 4 is being used. Interpret the file size
         * field as header instead by not moving the pointer.
         */
        if (file_length > MAX_PLATFORM_CONFIG_FILE_SIZE) {
                dd_dev_info(dd,
                            "%s:File length out of bounds, using alternative format\n",
                            __func__);
                file_length = PLATFORM_CONFIG_FORMAT_4_FILE_SIZE;
        } else {
                ptr++;
        }

        if (file_length > dd->platform_config.size) {
                dd_dev_info(dd, "%s:File claims to be larger than read size\n",
                            __func__);
                ret = -EINVAL;
                goto bail;
        } else if (file_length < dd->platform_config.size) {
                dd_dev_info(dd,
                            "%s:File claims to be smaller than read size, continuing\n",
                            __func__);
        }
        /* exactly equal, perfection */

        /*
         * In both cases where we proceed, using the self-reported file length
         * is the safer option. In case of old format a predefined value is
         * being used.
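         *
         * Each record that follows is laid out as:
         *
         *      [header1][header2 = ~header1][table data:
         *      table_length_dwords DWORDs][crc32]
         *
         * where header1 packs record_idx, table_length_dwords and table_type.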
         */
        while (ptr < (u32 *)(dd->platform_config.data + file_length)) {
                header1 = *ptr;
                header2 = *(ptr + 1);
                if (header1 != ~header2) {
                        dd_dev_err(dd, "%s: Failed validation at offset %ld\n",
                                   __func__, (ptr - (u32 *)
                                              dd->platform_config.data));
                        ret = -EINVAL;
                        goto bail;
                }

                record_idx = *ptr &
                        ((1 << PLATFORM_CONFIG_HEADER_RECORD_IDX_LEN_BITS) - 1);

                table_length_dwords = (*ptr >>
                                PLATFORM_CONFIG_HEADER_TABLE_LENGTH_SHIFT) &
                      ((1 << PLATFORM_CONFIG_HEADER_TABLE_LENGTH_LEN_BITS) - 1);

                table_type = (*ptr >> PLATFORM_CONFIG_HEADER_TABLE_TYPE_SHIFT) &
                        ((1 << PLATFORM_CONFIG_HEADER_TABLE_TYPE_LEN_BITS) - 1);

                /* Done with this set of headers */
                ptr += 2;

                if (record_idx) {
                        /* data table */
                        switch (table_type) {
                        case PLATFORM_CONFIG_SYSTEM_TABLE:
                                pcfgcache->config_tables[table_type].num_table =
                                                                        1;
                                ret = check_meta_version(dd, ptr);
                                if (ret)
                                        goto bail;
                                break;
                        case PLATFORM_CONFIG_PORT_TABLE:
                                pcfgcache->config_tables[table_type].num_table =
                                                                        2;
                                break;
                        case PLATFORM_CONFIG_RX_PRESET_TABLE:
                        case PLATFORM_CONFIG_TX_PRESET_TABLE:
                        case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
                        case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
                                pcfgcache->config_tables[table_type].num_table =
                                                        table_length_dwords;
                                break;
                        default:
                                dd_dev_err(dd,
                                           "%s: Unknown data table %d, offset %ld\n",
                                           __func__, table_type,
                                           (ptr - (u32 *)
                                            dd->platform_config.data));
                                ret = -EINVAL;
                                goto bail; /* We don't trust this file now */
                        }
                        pcfgcache->config_tables[table_type].table = ptr;
                } else {
                        /* metadata table */
                        switch (table_type) {
                        case PLATFORM_CONFIG_SYSTEM_TABLE:
                        case PLATFORM_CONFIG_PORT_TABLE:
                        case PLATFORM_CONFIG_RX_PRESET_TABLE:
                        case PLATFORM_CONFIG_TX_PRESET_TABLE:
                        case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
                        case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
                                break;
                        default:
                                dd_dev_err(dd,
                                           "%s: Unknown meta table %d, offset %ld\n",
                                           __func__, table_type,
                                           (ptr -
                                            (u32 *)dd->platform_config.data));
                                ret = -EINVAL;
                                goto bail; /* We don't trust this file now */
                        }
                        pcfgcache->config_tables[table_type].table_metadata =
                                                                        ptr;
                }

                /* Calculate and check table crc */
                crc = crc32_le(~(u32)0, (unsigned char const *)ptr,
                               (table_length_dwords * 4));
                crc ^= ~(u32)0;

                /* Jump the table */
                ptr += table_length_dwords;
                if (crc != *ptr) {
                        dd_dev_err(dd, "%s: Failed CRC check at offset %ld\n",
                                   __func__, (ptr -
                                              (u32 *)dd->platform_config.data));
                        ret = -EINVAL;
                        goto bail;
                }
                /* Jump the CRC DWORD */
                ptr++;
        }

        pcfgcache->cache_valid = 1;
        return 0;
bail:
        memset(pcfgcache, 0, sizeof(struct platform_config_cache));
        return ret;
}

static void get_integrated_platform_config_field(
                struct hfi1_devdata *dd,
                enum platform_config_table_type_encoding table_type,
                int field_index, u32 *data)
{
        struct hfi1_pportdata *ppd = dd->pport;
        u8 *cache = ppd->qsfp_info.cache;
        u32 tx_preset = 0;

        switch (table_type) {
        case PLATFORM_CONFIG_SYSTEM_TABLE:
                if (field_index == SYSTEM_TABLE_QSFP_POWER_CLASS_MAX)
                        *data = ppd->max_power_class;
                else if (field_index == SYSTEM_TABLE_QSFP_ATTENUATION_DEFAULT_25G)
                        *data = ppd->default_atten;
                break;
	case PLATFORM_CONFIG_PORT_TABLE:
		if (field_index == PORT_TABLE_PORT_TYPE)
			*data = ppd->port_type;
		else if (field_index == PORT_TABLE_LOCAL_ATTEN_25G)
			*data = ppd->local_atten;
		else if (field_index == PORT_TABLE_REMOTE_ATTEN_25G)
			*data = ppd->remote_atten;
		break;
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
		if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR_APPLY)
			*data = (ppd->rx_preset & QSFP_RX_CDR_APPLY_SMASK) >>
				QSFP_RX_CDR_APPLY_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP_APPLY)
			*data = (ppd->rx_preset & QSFP_RX_EMP_APPLY_SMASK) >>
				QSFP_RX_EMP_APPLY_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP_APPLY)
			*data = (ppd->rx_preset & QSFP_RX_AMP_APPLY_SMASK) >>
				QSFP_RX_AMP_APPLY_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_CDR)
			*data = (ppd->rx_preset & QSFP_RX_CDR_SMASK) >>
				QSFP_RX_CDR_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_EMP)
			*data = (ppd->rx_preset & QSFP_RX_EMP_SMASK) >>
				QSFP_RX_EMP_SHIFT;
		else if (field_index == RX_PRESET_TABLE_QSFP_RX_AMP)
			*data = (ppd->rx_preset & QSFP_RX_AMP_SMASK) >>
				QSFP_RX_AMP_SHIFT;
		break;
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
		if (cache[QSFP_EQ_INFO_OFFS] & 0x4)
			tx_preset = ppd->tx_preset_eq;
		else
			tx_preset = ppd->tx_preset_noeq;
		if (field_index == TX_PRESET_TABLE_PRECUR)
			*data = (tx_preset & TX_PRECUR_SMASK) >>
				TX_PRECUR_SHIFT;
		else if (field_index == TX_PRESET_TABLE_ATTN)
			*data = (tx_preset & TX_ATTN_SMASK) >>
				TX_ATTN_SHIFT;
		else if (field_index == TX_PRESET_TABLE_POSTCUR)
			*data = (tx_preset & TX_POSTCUR_SMASK) >>
				TX_POSTCUR_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR_APPLY)
			*data = (tx_preset & QSFP_TX_CDR_APPLY_SMASK) >>
				QSFP_TX_CDR_APPLY_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ_APPLY)
			*data = (tx_preset & QSFP_TX_EQ_APPLY_SMASK) >>
				QSFP_TX_EQ_APPLY_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_CDR)
			*data = (tx_preset & QSFP_TX_CDR_SMASK) >>
				QSFP_TX_CDR_SHIFT;
		else if (field_index == TX_PRESET_TABLE_QSFP_TX_EQ)
			*data = (tx_preset & QSFP_TX_EQ_SMASK) >>
				QSFP_TX_EQ_SHIFT;
		break;
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
	default:
		break;
	}
}

/*
 * Look up the metadata for @field in @table of the cached platform
 * configuration: the field's starting bit offset and its length in bits.
 * Returns 0 on success, -EINVAL if the cache is not valid or the field is
 * out of range for the table.
 */
static int get_platform_fw_field_metadata(struct hfi1_devdata *dd, int table,
					  int field, u32 *field_len_bits,
					  u32 *field_start_bits)
{
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	u32 *src_ptr = NULL;

	if (!pcfgcache->cache_valid)
		return -EINVAL;

	switch (table) {
	case PLATFORM_CONFIG_SYSTEM_TABLE:
	case PLATFORM_CONFIG_PORT_TABLE:
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
		if (field && field < platform_config_table_limits[table])
			src_ptr =
			pcfgcache->config_tables[table].table_metadata + field;
		break;
	default:
		dd_dev_info(dd, "%s: Unknown table\n", __func__);
		break;
	}

	if (!src_ptr)
		return -EINVAL;

	if (field_start_bits)
		*field_start_bits = *src_ptr &
		      ((1 << METADATA_TABLE_FIELD_START_LEN_BITS) - 1);

	if (field_len_bits)
		*field_len_bits = (*src_ptr >> METADATA_TABLE_FIELD_LEN_SHIFT)
				& ((1 <<
				    METADATA_TABLE_FIELD_LEN_LEN_BITS) - 1);

	return 0;
}

/* This is the central interface for getting data out of the platform config
 * file. It depends on parse_platform_config() having populated the
 * platform_config_cache in hfi1_devdata, and checks the cache_valid member to
 * validate the sanity of the cache.
 *
 * The non-obvious parameters:
 * @table_index: selects which instance of the table the relevant field is
 * fetched from.
 *
 * This applies to the data tables that have multiple instances. The port
 * table is an exception to this rule as each HFI has only one port, so the
 * relevant table can be distinguished by hfi1_id.
 *
 * @data: pointer to memory that will be populated with the requested field.
 * @len: length of the memory pointed to by @data, in bytes.
 *
 * An illustrative usage sketch appears after dump_fw_version() below.
 */
int get_platform_config_field(struct hfi1_devdata *dd,
			      enum platform_config_table_type_encoding
			      table_type, int table_index, int field_index,
			      u32 *data, u32 len)
{
	int ret = 0, wlen = 0, seek = 0;
	u32 field_len_bits = 0, field_start_bits = 0, *src_ptr = NULL;
	struct platform_config_cache *pcfgcache = &dd->pcfg_cache;
	struct hfi1_pportdata *ppd = dd->pport;

	if (data)
		memset(data, 0, len);
	else
		return -EINVAL;

	if (ppd->config_from_scratch) {
		/*
		 * Use saved configuration from ppd for integrated platforms
		 */
		get_integrated_platform_config_field(dd, table_type,
						     field_index, data);
		return 0;
	}

	ret = get_platform_fw_field_metadata(dd, table_type, field_index,
					     &field_len_bits,
					     &field_start_bits);
	if (ret)
		return -EINVAL;

	/* Convert length to bits */
	len *= 8;

	/* Our metadata function checked cache_valid and field_index for us */
	switch (table_type) {
	case PLATFORM_CONFIG_SYSTEM_TABLE:
		src_ptr = pcfgcache->config_tables[table_type].table;

		if (field_index != SYSTEM_TABLE_QSFP_POWER_CLASS_MAX) {
			if (len < field_len_bits)
				return -EINVAL;

			seek = field_start_bits / 8;
			wlen = field_len_bits / 8;

			src_ptr = (u32 *)((u8 *)src_ptr + seek);

			/*
			 * We expect the field to be byte aligned and whole byte
			 * lengths if we are here
			 */
			memcpy(data, src_ptr, wlen);
			return 0;
		}
		break;
	case PLATFORM_CONFIG_PORT_TABLE:
		/* Port table is 4 DWORDS */
		src_ptr = dd->hfi1_id ?
			pcfgcache->config_tables[table_type].table + 4 :
			pcfgcache->config_tables[table_type].table;
		break;
	case PLATFORM_CONFIG_RX_PRESET_TABLE:
	case PLATFORM_CONFIG_TX_PRESET_TABLE:
	case PLATFORM_CONFIG_QSFP_ATTEN_TABLE:
	case PLATFORM_CONFIG_VARIABLE_SETTINGS_TABLE:
		src_ptr = pcfgcache->config_tables[table_type].table;

		if (table_index <
			pcfgcache->config_tables[table_type].num_table)
			src_ptr += table_index;
		else
			src_ptr = NULL;
		break;
	default:
		dd_dev_info(dd, "%s: Unknown table\n", __func__);
		break;
	}

	if (!src_ptr || len < field_len_bits)
		return -EINVAL;

	src_ptr += (field_start_bits / 32);
	*data = (*src_ptr >> (field_start_bits % 32)) &
		((1 << field_len_bits) - 1);

	return 0;
}

/*
 * Download the firmware needed for the Gen3 PCIe SerDes. An update
 * to the SBus firmware is needed before updating the PCIe firmware.
 *
 * Note: caller must be holding the SBus resource.
 */
int load_pcie_firmware(struct hfi1_devdata *dd)
{
	int ret = 0;

	/* both firmware loads below use the SBus */
	set_sbus_fast_mode(dd);

	if (fw_sbus_load) {
		turn_off_spicos(dd, SPICO_SBUS);
		do {
			ret = load_sbus_firmware(dd, &fw_sbus);
		} while (retry_firmware(dd, ret));
		if (ret)
			goto done;
	}

	if (fw_pcie_serdes_load) {
		dd_dev_info(dd, "Setting PCIe SerDes broadcast\n");
		set_serdes_broadcast(dd, all_pcie_serdes_broadcast,
				     pcie_serdes_broadcast[dd->hfi1_id],
				     pcie_serdes_addrs[dd->hfi1_id],
				     NUM_PCIE_SERDES);
		do {
			ret = load_pcie_serdes_firmware(dd, &fw_pcie);
		} while (retry_firmware(dd, ret));
		if (ret)
			goto done;
	}

done:
	clear_sbus_fast_mode(dd);

	return ret;
}

/*
 * Read the GUID from the hardware, store it in dd.
 */
void read_guid(struct hfi1_devdata *dd)
{
	/* Take the DC out of reset to get a valid GUID value */
	write_csr(dd, CCE_DC_CTRL, 0);
	(void)read_csr(dd, CCE_DC_CTRL);

	dd->base_guid = read_csr(dd, DC_DC8051_CFG_LOCAL_GUID);
	dd_dev_info(dd, "GUID %llx",
		    (unsigned long long)dd->base_guid);
}

/* read and display firmware version info */
static void dump_fw_version(struct hfi1_devdata *dd)
{
	u32 pcie_vers[NUM_PCIE_SERDES];
	u32 fabric_vers[NUM_FABRIC_SERDES];
	u32 sbus_vers;
	int i;
	int all_same;
	int ret;
	u8 rcv_addr;

	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret) {
		dd_dev_err(dd, "Unable to acquire SBus to read firmware versions\n");
		return;
	}

	/* set fast mode */
	set_sbus_fast_mode(dd);

	/* read version for SBus Master */
	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x02, WRITE_SBUS_RECEIVER, 0);
	sbus_request(dd, SBUS_MASTER_BROADCAST, 0x07, WRITE_SBUS_RECEIVER, 0x1);
	/* wait for interrupt to be processed */
	usleep_range(10000, 11000);
	sbus_vers = sbus_read(dd, SBUS_MASTER_BROADCAST, 0x08, 0x1);
	dd_dev_info(dd, "SBus Master firmware version 0x%08x\n", sbus_vers);

	/* read version for PCIe SerDes */
	all_same = 1;
	pcie_vers[0] = 0;
	for (i = 0; i < NUM_PCIE_SERDES; i++) {
		rcv_addr = pcie_serdes_addrs[dd->hfi1_id][i];
		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
		/* wait for interrupt to be processed */
		usleep_range(10000, 11000);
		pcie_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
		if (i > 0 && pcie_vers[0] != pcie_vers[i])
			all_same = 0;
	}

	if (all_same) {
		dd_dev_info(dd, "PCIe SerDes firmware version 0x%x\n",
			    pcie_vers[0]);
	} else {
		dd_dev_warn(dd, "PCIe SerDes do not have the same firmware version\n");
		for (i = 0; i < NUM_PCIE_SERDES; i++) {
			dd_dev_info(dd,
				    "PCIe SerDes lane %d firmware version 0x%x\n",
				    i, pcie_vers[i]);
		}
	}

	/* read version for fabric SerDes */
	all_same = 1;
	fabric_vers[0] = 0;
	for (i = 0; i < NUM_FABRIC_SERDES; i++) {
		rcv_addr = fabric_serdes_addrs[dd->hfi1_id][i];
		sbus_request(dd, rcv_addr, 0x03, WRITE_SBUS_RECEIVER, 0);
		/* wait for interrupt to be processed */
		usleep_range(10000, 11000);
		fabric_vers[i] = sbus_read(dd, rcv_addr, 0x04, 0x0);
		if (i > 0 && fabric_vers[0] != fabric_vers[i])
			all_same = 0;
	}

	if (all_same) {
		dd_dev_info(dd, "Fabric SerDes firmware version 0x%x\n",
			    fabric_vers[0]);
	} else {
		dd_dev_warn(dd, "Fabric SerDes do not have the same firmware version\n");
		for (i = 0; i < NUM_FABRIC_SERDES; i++) {
			dd_dev_info(dd,
				    "Fabric SerDes lane %d firmware version 0x%x\n",
				    i, fabric_vers[i]);
		}
	}

	clear_sbus_fast_mode(dd);
	release_chip_resource(dd, CR_SBUS);
}
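
/*
 * Illustrative sketch, not part of the driver: one way a caller might use
 * get_platform_config_field(), documented above, to pull a single field out
 * of the cached platform configuration. The field and table identifiers are
 * the ones already used in this file; the helper name
 * example_read_port_type() is hypothetical and exists only for illustration.
 */
static inline int example_read_port_type(struct hfi1_devdata *dd, u32 *ptype)
{
	/*
	 * table_index 0: the port table instance is selected by hfi1_id
	 * inside get_platform_config_field(), so the index is ignored.
	 * The length argument is in bytes.
	 */
	return get_platform_config_field(dd, PLATFORM_CONFIG_PORT_TABLE,
					 0, PORT_TABLE_PORT_TYPE,
					 ptype, sizeof(*ptype));
}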
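
/*
 * Illustrative sketch, not part of the driver: load_pcie_firmware() requires
 * the caller to hold the SBus resource (see the note above it). A caller
 * might bracket the call the same way dump_fw_version() brackets its SBus
 * accesses; the wrapper name example_load_pcie_fw_locked() is hypothetical.
 */
static inline int example_load_pcie_fw_locked(struct hfi1_devdata *dd)
{
	int ret;

	/* take ownership of the SBus before touching SBus/PCIe firmware */
	ret = acquire_chip_resource(dd, CR_SBUS, SBUS_TIMEOUT);
	if (ret)
		return ret;
	ret = load_pcie_firmware(dd);
	release_chip_resource(dd, CR_SBUS);
	return ret;
}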