// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_main.c
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"

#include "nfpcore/nfp6000_pcie.h"

#include "nfp_abi.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"

static const char nfp_driver_name[] = "nfp";

static const struct pci_device_id nfp_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP4000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP5000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP4000,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP5000,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids);

int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err == -ENOENT)
			return default_val;
		nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
		return err;
	}

	return val;
}

u8 __iomem *
nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
		 unsigned int min_size, struct nfp_cpp_area **area)
{
	char pf_symbol[256];

	snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
		 nfp_cppcore_pcie_unit(pf->cpp));

	return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}

/* Callers should hold the devlink instance lock */
int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length,
		 void *out_data, u64 out_length)
{
	unsigned long err_at;
	u64 max_data_sz;
	u32 val = 0;
	int n, err;

	if (!pf->mbox)
		return -EOPNOTSUPP;

	max_data_sz = nfp_rtsym_size(pf->mbox) - NFP_MBOX_SYM_MIN_SIZE;

	/* Check if cmd field is clear */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
	if (err || val) {
		nfp_warn(pf->cpp, "failed to issue command (%u): %u, err: %d\n",
			 cmd, val, err);
		return err ?: -EBUSY;
	}

	in_length = min(in_length, max_data_sz);
	n = nfp_rtsym_write(pf->cpp, pf->mbox, NFP_MBOX_DATA, in_data,
			    in_length);
	if (n != in_length)
		return -EIO;
	/* Write data_len and wipe reserved */
	err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, in_length);
	if (err)
		return err;

	/* Read back for ordering */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val);
	if (err)
		return err;

	/* Write cmd and wipe return value */
	err = nfp_rtsym_writeq(pf->cpp, pf->mbox, NFP_MBOX_CMD, cmd);
	if (err)
		return err;

	err_at = jiffies + 5 * HZ;
	while (true) {
		/* Wait for command to go to 0 (NFP_MBOX_NO_CMD) */
		err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_CMD, &val);
		if (err)
			return err;
		if (!val)
			break;

		if (time_is_before_eq_jiffies(err_at))
			return -ETIMEDOUT;

		msleep(5);
	}

	/* Copy output if any (could be error info, do it before reading ret) */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_DATA_LEN, &val);
	if (err)
		return err;

	out_length = min_t(u32, val, min(out_length, max_data_sz));
	n = nfp_rtsym_read(pf->cpp, pf->mbox, NFP_MBOX_DATA,
			   out_data, out_length);
	if (n != out_length)
		return -EIO;

	/* Check if there is an error */
	err = nfp_rtsym_readl(pf->cpp, pf->mbox, NFP_MBOX_RET, &val);
	if (err)
		return err;
	if (val)
		return -val;

	return out_length;
}

static bool nfp_board_ready(struct nfp_pf *pf)
{
	const char *cp;
	long state;
	int err;

	cp = nfp_hwinfo_lookup(pf->hwinfo, "board.state");
	if (!cp)
		return false;

	err = kstrtol(cp, 0, &state);
	if (err < 0)
		return false;

	return state == 15;
}

static int nfp_pf_board_state_wait(struct nfp_pf *pf)
{
	const unsigned long wait_until = jiffies + 10 * HZ;

	while (!nfp_board_ready(pf)) {
		if (time_is_before_eq_jiffies(wait_until)) {
			nfp_err(pf->cpp, "NFP board initialization timeout\n");
timeout\n"); 206 return -EINVAL; 207 } 208 209 nfp_info(pf->cpp, "waiting for board initialization\n"); 210 if (msleep_interruptible(500)) 211 return -ERESTARTSYS; 212 213 /* Refresh cached information */ 214 kfree(pf->hwinfo); 215 pf->hwinfo = nfp_hwinfo_read(pf->cpp); 216 } 217 218 return 0; 219 } 220 221 static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) 222 { 223 int err; 224 225 pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); 226 if (err) { 227 /* For backwards compatibility if symbol not found allow all */ 228 pf->limit_vfs = ~0; 229 if (err == -ENOENT) 230 return 0; 231 232 nfp_warn(pf->cpp, "Warning: VF limit read failed: %d\n", err); 233 return err; 234 } 235 236 err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); 237 if (err) 238 nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); 239 return 0; 240 } 241 242 static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs) 243 { 244 #ifdef CONFIG_PCI_IOV 245 struct nfp_pf *pf = pci_get_drvdata(pdev); 246 struct devlink *devlink; 247 int err; 248 249 if (num_vfs > pf->limit_vfs) { 250 nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n", 251 pf->limit_vfs); 252 return -EINVAL; 253 } 254 255 err = pci_enable_sriov(pdev, num_vfs); 256 if (err) { 257 dev_warn(&pdev->dev, "Failed to enable PCI SR-IOV: %d\n", err); 258 return err; 259 } 260 261 devlink = priv_to_devlink(pf); 262 devl_lock(devlink); 263 264 err = nfp_app_sriov_enable(pf->app, num_vfs); 265 if (err) { 266 dev_warn(&pdev->dev, 267 "App specific PCI SR-IOV configuration failed: %d\n", 268 err); 269 goto err_sriov_disable; 270 } 271 272 pf->num_vfs = num_vfs; 273 274 dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs); 275 276 devl_unlock(devlink); 277 return num_vfs; 278 279 err_sriov_disable: 280 devl_unlock(devlink); 281 pci_disable_sriov(pdev); 282 return err; 283 #endif 284 return 0; 285 } 286 287 static int nfp_pcie_sriov_disable(struct pci_dev *pdev) 288 { 289 #ifdef CONFIG_PCI_IOV 290 struct nfp_pf *pf = pci_get_drvdata(pdev); 291 struct devlink *devlink; 292 293 devlink = priv_to_devlink(pf); 294 devl_lock(devlink); 295 296 /* If the VFs are assigned we cannot shut down SR-IOV without 297 * causing issues, so just leave the hardware available but 298 * disabled 299 */ 300 if (pci_vfs_assigned(pdev)) { 301 dev_warn(&pdev->dev, "Disabling while VFs assigned - VFs will not be deallocated\n"); 302 devl_unlock(devlink); 303 return -EPERM; 304 } 305 306 nfp_app_sriov_disable(pf->app); 307 308 pf->num_vfs = 0; 309 310 devl_unlock(devlink); 311 312 pci_disable_sriov(pdev); 313 dev_dbg(&pdev->dev, "Removed VFs.\n"); 314 #endif 315 return 0; 316 } 317 318 static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs) 319 { 320 if (!pci_get_drvdata(pdev)) 321 return -ENOENT; 322 323 if (num_vfs == 0) 324 return nfp_pcie_sriov_disable(pdev); 325 else 326 return nfp_pcie_sriov_enable(pdev, num_vfs); 327 } 328 329 int nfp_flash_update_common(struct nfp_pf *pf, const struct firmware *fw, 330 struct netlink_ext_ack *extack) 331 { 332 struct device *dev = &pf->pdev->dev; 333 struct nfp_nsp *nsp; 334 int err; 335 336 nsp = nfp_nsp_open(pf->cpp); 337 if (IS_ERR(nsp)) { 338 err = PTR_ERR(nsp); 339 if (extack) 340 NL_SET_ERR_MSG_MOD(extack, "can't access NSP"); 341 else 342 dev_err(dev, "Failed to access the NSP: %d\n", err); 343 return err; 344 } 345 346 err = nfp_nsp_write_flash(nsp, fw); 347 if (err < 0) 348 goto exit_close_nsp; 349 dev_info(dev, "Finished writing flash image\n"); 350 err = 0; 351 352 exit_close_nsp: 
	nfp_nsp_close(nsp);
	return err;
}

static const struct firmware *
nfp_net_fw_request(struct pci_dev *pdev, struct nfp_pf *pf, const char *name)
{
	const struct firmware *fw = NULL;
	int err;

	err = request_firmware_direct(&fw, name, &pdev->dev);
	nfp_info(pf->cpp, " %s: %s\n",
		 name, err ? "not found" : "found");
	if (err)
		return NULL;

	return fw;
}

/**
 * nfp_net_fw_find() - Find the correct firmware image for netdev mode
 * @pdev: PCI Device structure
 * @pf:   NFP PF Device structure
 *
 * Return: firmware if found and requested successfully.
 */
static const struct firmware *
nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf)
{
	struct nfp_eth_table_port *port;
	const struct firmware *fw;
	const char *fw_model;
	char fw_name[256];
	const u8 *serial;
	u16 interface;
	int spc, i, j;

	nfp_info(pf->cpp, "Looking for firmware file in order of priority:\n");

	/* First try to find a firmware image specific for this device */
	interface = nfp_cpp_interface(pf->cpp);
	nfp_cpp_serial(pf->cpp, &serial);
	sprintf(fw_name, "netronome/serial-%pMF-%02x-%02x.nffw",
		serial, interface >> 8, interface & 0xff);
	fw = nfp_net_fw_request(pdev, pf, fw_name);
	if (fw)
		return fw;

	/* Then try the PCI name */
	sprintf(fw_name, "netronome/pci-%s.nffw", pci_name(pdev));
	fw = nfp_net_fw_request(pdev, pf, fw_name);
	if (fw)
		return fw;

	/* Finally try the card type and media */
	if (!pf->eth_tbl) {
		dev_err(&pdev->dev, "Error: can't identify media config\n");
		return NULL;
	}

	fw_model = nfp_hwinfo_lookup(pf->hwinfo, "nffw.partno");
	if (!fw_model)
		fw_model = nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno");
	if (!fw_model) {
		dev_err(&pdev->dev, "Error: can't read part number\n");
		return NULL;
	}

	spc = ARRAY_SIZE(fw_name);
	spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model);

	for (i = 0; spc > 0 && i < pf->eth_tbl->count; i += j) {
		port = &pf->eth_tbl->ports[i];
		j = 1;
		while (i + j < pf->eth_tbl->count &&
		       port->speed == port[j].speed)
			j++;

		spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc,
				"_%dx%d", j, port->speed / 1000);
	}

	if (spc <= 0)
		return NULL;

	spc -= snprintf(&fw_name[ARRAY_SIZE(fw_name) - spc], spc, ".nffw");
	if (spc <= 0)
		return NULL;

	return nfp_net_fw_request(pdev, pf, fw_name);
}

static int
nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp,
			const char *key, const char *default_val, int max_val,
			int *value)
{
	char hwinfo[64];
	long hi_val;
	int err;

	snprintf(hwinfo, sizeof(hwinfo), key);
	err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
					     default_val);
	if (err)
		return err;

	err = kstrtol(hwinfo, 0, &hi_val);
	if (err || hi_val < 0 || hi_val > max_val) {
		dev_warn(&pdev->dev,
			 "Invalid value '%s' from '%s', ignoring\n",
			 hwinfo, key);
		err = kstrtol(default_val, 0, &hi_val);
	}

	*value = hi_val;
	return err;
}

/**
 * nfp_fw_load() - Load the firmware image
 * @pdev: PCI Device structure
 * @pf:   NFP PF Device structure
 * @nsp:  NFP SP handle
 *
 * Return: -ERRNO, 0 for no firmware loaded, 1 for firmware loaded
 */
static int
nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp)
{
	bool do_reset, fw_loaded = false;
	const struct firmware *fw = NULL;
	int err, reset, policy, ifcs = 0;
	char *token, *ptr;
	char hwinfo[64];
	u16 interface;

	snprintf(hwinfo, sizeof(hwinfo), "abi_drv_load_ifc");
	err = nfp_nsp_hwinfo_lookup_optional(nsp, hwinfo, sizeof(hwinfo),
					     NFP_NSP_DRV_LOAD_IFC_DEFAULT);
	if (err)
		return err;

	interface = nfp_cpp_interface(pf->cpp);
	ptr = hwinfo;
	while ((token = strsep(&ptr, ","))) {
		unsigned long interface_hi;

		err = kstrtoul(token, 0, &interface_hi);
		if (err) {
			dev_err(&pdev->dev,
				"Failed to parse interface '%s': %d\n",
				token, err);
			return err;
		}

		ifcs++;
		if (interface == interface_hi)
			break;
	}

	if (!token) {
		dev_info(&pdev->dev, "Firmware will be loaded by partner\n");
		return 0;
	}

	err = nfp_get_fw_policy_value(pdev, nsp, "abi_drv_reset",
				      NFP_NSP_DRV_RESET_DEFAULT,
				      NFP_NSP_DRV_RESET_NEVER, &reset);
	if (err)
		return err;

	err = nfp_get_fw_policy_value(pdev, nsp, "app_fw_from_flash",
				      NFP_NSP_APP_FW_LOAD_DEFAULT,
				      NFP_NSP_APP_FW_LOAD_PREF, &policy);
	if (err)
		return err;

	fw = nfp_net_fw_find(pdev, pf);
	do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS ||
		   (fw && reset == NFP_NSP_DRV_RESET_DISK);

	if (do_reset) {
		dev_info(&pdev->dev, "Soft-resetting the NFP\n");
		err = nfp_nsp_device_soft_reset(nsp);
		if (err < 0) {
			dev_err(&pdev->dev,
				"Failed to soft reset the NFP: %d\n", err);
			goto exit_release_fw;
		}
	}

	if (fw && policy != NFP_NSP_APP_FW_LOAD_FLASH) {
		if (nfp_nsp_has_fw_loaded(nsp) && nfp_nsp_fw_loaded(nsp))
			goto exit_release_fw;

		err = nfp_nsp_load_fw(nsp, fw);
		if (err < 0) {
			dev_err(&pdev->dev, "FW loading failed: %d\n",
				err);
			goto exit_release_fw;
		}
		dev_info(&pdev->dev, "Finished loading FW image\n");
		fw_loaded = true;
	} else if (policy != NFP_NSP_APP_FW_LOAD_DISK &&
		   nfp_nsp_has_stored_fw_load(nsp)) {

		/* Don't propagate this error to stick with legacy driver
		 * behavior, failure will be detected later during init.
		 */
		if (!nfp_nsp_load_stored_fw(nsp))
			dev_info(&pdev->dev, "Finished loading stored FW image\n");

		/* Don't flag the fw_loaded in this case since other devices
		 * may reuse the firmware when configured this way
		 */
	} else {
		dev_warn(&pdev->dev, "Didn't load firmware, please update flash or reconfigure card\n");
	}

exit_release_fw:
	release_firmware(fw);

	/* We don't want to unload firmware when other devices may still be
	 * dependent on it, which could be the case if there are multiple
	 * devices that could load firmware.
	 */
	if (fw_loaded && ifcs == 1)
		pf->unload_fw_on_remove = true;

	return err < 0 ? err : fw_loaded;
}

static void
nfp_nsp_init_ports(struct pci_dev *pdev, struct nfp_pf *pf,
		   struct nfp_nsp *nsp)
{
	bool needs_reinit = false;
	int i;

	pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
	if (!pf->eth_tbl)
		return;

	if (!nfp_nsp_has_mac_reinit(nsp))
		return;

	for (i = 0; i < pf->eth_tbl->count; i++)
		needs_reinit |= pf->eth_tbl->ports[i].override_changed;
	if (!needs_reinit)
		return;

	kfree(pf->eth_tbl);
	if (nfp_nsp_mac_reinit(nsp))
		dev_warn(&pdev->dev, "MAC reinit failed\n");

	pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
}

static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf)
{
	struct nfp_nsp *nsp;
	int err;

	err = nfp_resource_wait(pf->cpp, NFP_RESOURCE_NSP, 30);
	if (err)
		return err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		err = PTR_ERR(nsp);
		dev_err(&pdev->dev, "Failed to access the NSP: %d\n", err);
		return err;
	}

	err = nfp_nsp_wait(nsp);
	if (err < 0)
		goto exit_close_nsp;

	nfp_nsp_init_ports(pdev, pf, nsp);

	pf->nspi = __nfp_nsp_identify(nsp);
	if (pf->nspi)
		dev_info(&pdev->dev, "BSP: %s\n", pf->nspi->version);

	err = nfp_fw_load(pdev, pf, nsp);
	if (err < 0) {
		kfree(pf->nspi);
		kfree(pf->eth_tbl);
		dev_err(&pdev->dev, "Failed to load FW\n");
		goto exit_close_nsp;
	}

	pf->fw_loaded = !!err;
	err = 0;

exit_close_nsp:
	nfp_nsp_close(nsp);

	return err;
}

static void nfp_fw_unload(struct nfp_pf *pf)
{
	struct nfp_nsp *nsp;
	int err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		nfp_err(pf->cpp, "Reset failed, can't open NSP\n");
		return;
	}

	err = nfp_nsp_device_soft_reset(nsp);
	if (err < 0)
		dev_warn(&pf->pdev->dev, "Couldn't unload firmware: %d\n", err);
	else
		dev_info(&pf->pdev->dev, "Firmware safely unloaded\n");

	nfp_nsp_close(nsp);
}

static int nfp_pf_find_rtsyms(struct nfp_pf *pf)
{
	char pf_symbol[256];
	unsigned int pf_id;

	pf_id = nfp_cppcore_pcie_unit(pf->cpp);

	/* Optional per-PCI PF mailbox */
	snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id);
	pf->mbox = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
	if (pf->mbox && nfp_rtsym_size(pf->mbox) < NFP_MBOX_SYM_MIN_SIZE) {
		nfp_err(pf->cpp, "PF mailbox symbol too small: %llu < %d\n",
			nfp_rtsym_size(pf->mbox), NFP_MBOX_SYM_MIN_SIZE);
		return -EINVAL;
	}

	return 0;
}

int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					  NFP_APP_CORE_NIC);
}

static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf)
{
	char name[32];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err != -ENOENT)
			nfp_err(pf->cpp, "Unable to read symbol %s\n", name);

		return 0;
	}

	return val;
}

static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff)
{
	struct nfp_nsp *nsp;
	char hwinfo[32];
	int err;

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff);
	err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo));
	/* Not a fatal error, no need to return error to stop driver from loading */
	if (err) {
		nfp_warn(pf->cpp, "HWinfo(sp_indiff=%d) set failed: %d\n", sp_indiff, err);
	} else {
		/* Need reinit eth_tbl since the eth table state may change
		 * after sp_indiff is configured.
		 */
		kfree(pf->eth_tbl);
		pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp);
	}

	nfp_nsp_close(nsp);
	return 0;
}

static int nfp_pf_nsp_cfg(struct nfp_pf *pf)
{
	bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) ||
			 (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF);

	return nfp_pf_cfg_hwinfo(pf, sp_indiff);
}

static void nfp_pf_nsp_clean(struct nfp_pf *pf)
{
	nfp_pf_cfg_hwinfo(pf, false);
}

static int nfp_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *pci_id)
{
	const struct nfp_dev_info *dev_info;
	struct devlink *devlink;
	struct nfp_pf *pf;
	int err;

	if ((pdev->vendor == PCI_VENDOR_ID_NETRONOME ||
	     pdev->vendor == PCI_VENDOR_ID_CORIGINE) &&
	    (pdev->device == PCI_DEVICE_ID_NFP3800_VF ||
	     pdev->device == PCI_DEVICE_ID_NFP6000_VF))
		dev_warn(&pdev->dev, "Binding NFP VF device to the NFP PF driver, the VF driver is called 'nfp_netvf'\n");

	dev_info = &nfp_dev_info[pci_id->driver_data];

	err = pci_enable_device(pdev);
	if (err < 0)
		return err;

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
	if (err)
		goto err_pci_disable;

	err = pci_request_regions(pdev, nfp_driver_name);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to reserve pci resources.\n");
		goto err_pci_disable;
	}

	devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf), &pdev->dev);
	if (!devlink) {
		err = -ENOMEM;
		goto err_rel_regions;
	}
	pf = devlink_priv(devlink);
	INIT_LIST_HEAD(&pf->vnics);
	INIT_LIST_HEAD(&pf->ports);
	pci_set_drvdata(pdev, pf);
	pf->pdev = pdev;
	pf->dev_info = dev_info;

	pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev));
	if (!pf->wq) {
		err = -ENOMEM;
		goto err_pci_priv_unset;
	}

	pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info);
	if (IS_ERR(pf->cpp)) {
		err = PTR_ERR(pf->cpp);
		goto err_disable_msix;
	}

	err = nfp_resource_table_init(pf->cpp);
	if (err)
		goto err_cpp_free;

	pf->hwinfo = nfp_hwinfo_read(pf->cpp);

	dev_info(&pdev->dev, "Assembly: %s%s%s-%s CPLD: %s\n",
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.vendor"),
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.partno"),
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.serial"),
		 nfp_hwinfo_lookup(pf->hwinfo, "assembly.revision"),
		 nfp_hwinfo_lookup(pf->hwinfo, "cpld.version"));

	err = nfp_pf_board_state_wait(pf);
	if (err)
		goto err_hwinfo_free;

	err = nfp_nsp_init(pdev, pf);
	if (err)
		goto err_hwinfo_free;

	pf->mip = nfp_mip_open(pf->cpp);
	pf->rtbl = __nfp_rtsym_table_read(pf->cpp, pf->mip);

	err = nfp_pf_find_rtsyms(pf);
	if (err)
		goto err_fw_unload;

	pf->dump_flag = NFP_DUMP_NSP_DIAG;
	pf->dumpspec = nfp_net_dump_load_dumpspec(pf->cpp, pf->rtbl);

	err = nfp_pcie_sriov_read_nfd_limit(pf);
	if (err)
		goto err_fw_unload;

	pf->num_vfs = pci_num_vf(pdev);
	if (pf->num_vfs > pf->limit_vfs) {
		dev_err(&pdev->dev,
			"Error: %d VFs already enabled, but loaded FW can only support %d\n",
			pf->num_vfs, pf->limit_vfs);
		err = -EINVAL;
		goto err_fw_unload;
	}

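	/* Push the app-derived NSP hwinfo configuration (sp_indiff) and let
	 * nfp_pf_cfg_hwinfo() refresh the port table before vNICs are created.
	 */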
	err = nfp_pf_nsp_cfg(pf);
	if (err)
		goto err_fw_unload;

	err = nfp_net_pci_probe(pf);
	if (err)
		goto err_nsp_clean;

	err = nfp_hwmon_register(pf);
	if (err) {
		dev_err(&pdev->dev, "Failed to register hwmon info\n");
		goto err_net_remove;
	}

	return 0;

err_net_remove:
	nfp_net_pci_remove(pf);
err_nsp_clean:
	nfp_pf_nsp_clean(pf);
err_fw_unload:
	kfree(pf->rtbl);
	nfp_mip_close(pf->mip);
	if (pf->unload_fw_on_remove)
		nfp_fw_unload(pf);
	kfree(pf->eth_tbl);
	kfree(pf->nspi);
	vfree(pf->dumpspec);
err_hwinfo_free:
	kfree(pf->hwinfo);
err_cpp_free:
	nfp_cpp_free(pf->cpp);
err_disable_msix:
	destroy_workqueue(pf->wq);
err_pci_priv_unset:
	pci_set_drvdata(pdev, NULL);
	devlink_free(devlink);
err_rel_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);

	return err;
}

static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw)
{
	struct nfp_pf *pf;

	pf = pci_get_drvdata(pdev);
	if (!pf)
		return;

	nfp_hwmon_unregister(pf);

	nfp_pcie_sriov_disable(pdev);

	nfp_net_pci_remove(pf);

	nfp_pf_nsp_clean(pf);
	vfree(pf->dumpspec);
	kfree(pf->rtbl);
	nfp_mip_close(pf->mip);
	if (unload_fw && pf->unload_fw_on_remove)
		nfp_fw_unload(pf);

	destroy_workqueue(pf->wq);
	pci_set_drvdata(pdev, NULL);
	kfree(pf->hwinfo);
	nfp_cpp_free(pf->cpp);

	kfree(pf->eth_tbl);
	kfree(pf->nspi);
	devlink_free(priv_to_devlink(pf));
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static void nfp_pci_remove(struct pci_dev *pdev)
{
	__nfp_pci_shutdown(pdev, true);
}

static void nfp_pci_shutdown(struct pci_dev *pdev)
{
	__nfp_pci_shutdown(pdev, false);
}

static struct pci_driver nfp_pci_driver = {
	.name			= nfp_driver_name,
	.id_table		= nfp_pci_device_ids,
	.probe			= nfp_pci_probe,
	.remove			= nfp_pci_remove,
	.shutdown		= nfp_pci_shutdown,
	.sriov_configure	= nfp_pcie_sriov_configure,
};

static int __init nfp_main_init(void)
{
	int err;

	pr_info("%s: NFP PCIe Driver, Copyright (C) 2014-2020 Netronome Systems\n",
		nfp_driver_name);
	pr_info("%s: NFP PCIe Driver, Copyright (C) 2021-2022 Corigine Inc.\n",
		nfp_driver_name);

	nfp_net_debugfs_create();

	err = pci_register_driver(&nfp_pci_driver);
	if (err < 0)
		goto err_destroy_debugfs;

	err = pci_register_driver(&nfp_netvf_pci_driver);
	if (err)
		goto err_unreg_pf;

	return err;

err_unreg_pf:
	pci_unregister_driver(&nfp_pci_driver);
err_destroy_debugfs:
	nfp_net_debugfs_destroy();
	return err;
}

static void __exit nfp_main_exit(void)
{
	pci_unregister_driver(&nfp_netvf_pci_driver);
	pci_unregister_driver(&nfp_pci_driver);
	nfp_net_debugfs_destroy();
}

module_init(nfp_main_init);
module_exit(nfp_main_exit);

MODULE_FIRMWARE("netronome/nic_AMDA0058-0011_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0058-0012_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0081-0001_4x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0096-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_2x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw"); 1002 MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw"); 1003 MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw"); 1004 MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw"); 1005 1006 MODULE_AUTHOR("Corigine, Inc. <oss-drivers@corigine.com>"); 1007 MODULE_LICENSE("GPL"); 1008 MODULE_DESCRIPTION("The Network Flow Processor (NFP) driver."); 1009