// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_dev.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_sriov.h"
#include "nfp_net.h"
#include "nfp_main.h"
#include "nfp_port.h"

/* Size of the control BAR slice mapped per vNIC */
#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @pf:       NFP PF handle
 * @netdev:   net_device to set MAC address on
 * @port:     NFP port structure
 *
 * First try to get the MAC address from NSP ETH table. If that
 * fails generate a random address.
 */
void
nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
		     struct nfp_port *port)
{
	struct nfp_eth_table_port *eth_port;

	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port) {
		/* No ETH table entry for this port - fall back to random */
		eth_hw_addr_random(netdev);
		return;
	}

	/* Use the NSP-provided address as both current and permanent MAC */
	eth_hw_addr_set(netdev, eth_port->mac_addr);
	ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}

/* Look up the ETH table entry with the given index, NULL if absent
 * (or if there is no ETH table at all).
 */
static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].index == index)
			return &eth_tbl->ports[i];

	return NULL;
}

/* Number of data vNICs advertised by FW; defaults to 1 if the
 * rtsym is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

/* App ID advertised by FW; defaults to the core NIC app if the
 * rtsym is absent.
 */
static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					  NFP_APP_CORE_NIC);
}

/* Undo nfp_net_pf_alloc_vnic() for a single vNIC: give the app a chance
 * to release its per-vNIC state, free the port, unlink and free the vNIC.
 */
static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}

/* Free all *data* vNICs on the PF list (the ctrl vNIC, if any, is
 * handled separately in nfp_net_pf_app_clean()).
 */
static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_free_vnic(pf, nn);
}

/* Allocate one vNIC, reading its queue configuration from the control
 * BAR. @needs_netdev selects a data vNIC (with netdev and app state)
 * vs the netdev-less ctrl vNIC. On success the vNIC is appended to
 * pf->vnics. Returns ERR_PTR() on failure.
 */
static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	/* FW describes queue base/count in the per-vNIC control BAR */
	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, pf->dev_info, ctrl_bar, needs_netdev,
			   n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		/* Let the app attach its per-vNIC state (may set nn->port) */
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}

/* Second-stage vNIC init: register the devlink port, bring up the vNIC,
 * add debugfs and app state. Unwinds everything it did on failure.
 */
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	nn->id = id;

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			return err;
	}

	err = nfp_net_init(nn);
	if (err)
		goto err_devlink_port_clean;

	nfp_net_debugfs_vnic_add(nn, pf->ddir);

	if (nn->port)
		nfp_devlink_port_type_eth_set(nn->port);

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_type_clean;
	}

	return 0;

err_devlink_port_type_clean:
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	return err;
}

/* Allocate all data vNICs, one control-BAR slice apiece. vNICs whose
 * port the app marked invalid are dropped immediately. Fails with
 * -ENODEV if no vNIC survives.
 */
static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		/* Each vNIC owns the next slice of the control BAR */
		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID)
			nfp_net_pf_free_vnic(pf, nn);
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}

/* Reverse of nfp_net_pf_init_vnic() - teardown in strict reverse order
 * of the init steps.
 */
static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_type_clear(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
}

/* Allocate MSI-X vectors for all vNICs and distribute them.
 * Requests the full desired count but accepts as few as
 * NFP_NET_MIN_VNIC_IRQS per vNIC; a smaller grant is split evenly.
 */
static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		/* Cap each vNIC at its want, or its fair share of what
		 * remains, whichever is smaller.
		 */
		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}

/* Release the MSI-X vectors and the entry table. */
static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}

/* Run second-stage init on every data vNIC; on failure unwind the
 * vNICs already initialised (in reverse list order).
 */
static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}

/* Allocate and init the app; if the app needs a control vNIC, map its
 * BAR and allocate it too. nfp_app_init() runs under the devlink lock.
 */
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	struct devlink *devlink = priv_to_devlink(pf);
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	devl_lock(devlink);
	err = nfp_app_init(pf->app);
	devl_unlock(devlink);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
				    NFP_PF_CSR_SLICE_SIZE, &pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	/* Ctrl vNIC has no netdev, hence needs_netdev == false */
	pf->ctrl_vnic =	nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}

/* Reverse of nfp_net_pf_app_init(). */
static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);

	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	devl_lock(devlink);
	nfp_app_clean(pf->app);
	devl_unlock(devlink);

	nfp_app_free(pf->app);
	pf->app = NULL;
}

/* Init and open the control vNIC, if the app has one. */
static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}

/* Close and clean the control vNIC, if the app has one. */
static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}

/* Start the app: ctrl vNIC first, then the app proper, then re-enable
 * SR-IOV for any VFs that already exist.
 */
static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}

/* Reverse of nfp_net_pf_app_start(). */
static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}

/* Release all BAR mappings taken in nfp_net_pci_map_mem(); the first
 * three are optional and may be NULL.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}

/* Map all device memory areas the PF driver needs: the data vNIC
 * control BAR (mandatory), MAC stats, VF config tables (all optional,
 * -ENOENT tolerated), and the Queue Controller area (mandatory).
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 min_size, cpp_id;
	u8 __iomem *mem;
	int err;

	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
			       min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	if (pf->eth_tbl) {
		min_size =  NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
		pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
						  "net.macstats", min_size,
						  &pf->mac_stats_bar);
		if (IS_ERR(pf->mac_stats_mem)) {
			/* Optional symbol - only hard-fail on real errors */
			if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
				err = PTR_ERR(pf->mac_stats_mem);
				goto err_unmap_ctrl;
			}
			pf->mac_stats_mem = NULL;
		}
	}

	pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar",
					  NFP_NET_CFG_BAR_SZ * pf->limit_vfs,
					  &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		/* Optional symbol - only hard-fail on real errors */
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					  "_pf%d_net_vf_cfg2",
					  min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		/* Optional symbol - only hard-fail on real errors */
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
	mem = nfp_cpp_map_area(pf->cpp, "net.qc", cpp_id,
			       nfp_qcp_queue_offset(pf->dev_info, 0),
			       pf->dev_info->qc_area_sz, &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}

/* Refresh one port's cached ETH table entry from a freshly read table.
 * Marks the port invalid if FW reports its config was overridden.
 * Caller must hold RTNL.
 */
static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}

/* Re-read the NSP ETH table and resync all port state; remove vNICs
 * whose ports have become invalid. Caller holds the devlink lock.
 */
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	devl_assert_locked(devlink);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		/* Read failed - re-mark all eth ports changed so a later
		 * refresh retries them.
		 */
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}

/* Deferred-work wrapper for nfp_net_refresh_port_table_sync(), taking
 * the devlink lock it requires.
 */
static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);
	struct devlink *devlink = priv_to_devlink(pf);

	devl_lock(devlink);
	nfp_net_refresh_port_table_sync(pf);
	devl_unlock(devlink);
}

/* Mark a port changed and schedule an async refresh of the port table. */
void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}

/* Synchronously refresh a single port's ETH table entry. */
int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}

/*
 * PCI device functions
 */
int nfp_net_pci_probe(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net_fw_version fw_ver;
	u8 __iomem *ctrl_bar, *qc_bar;
	int stride;
	int err;

	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);

	if (!pf->rtbl) {
		nfp_err(pf->cpp, "No %s, giving up.\n",
			pf->fw_loaded ? "symbol table" : "firmware found");
		return -EINVAL;
	}

	/* nfp_net_pf_get_num_ports() may return a negative errno */
	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
	if ((int)pf->max_data_vnics < 0)
		return pf->max_data_vnics;

	err = nfp_net_pci_map_mem(pf);
	if (err)
		return err;

	ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
	qc_bar = nfp_cpp_area_iomem(pf->qc_area);
	if (!ctrl_bar || !qc_bar) {
		err = -EIO;
		goto err_unmap;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			break;
		default:
			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_unmap;
		}
	}

	err = nfp_net_pf_app_init(pf, qc_bar, stride);
	if (err)
		goto err_unmap;

	err = nfp_shared_buf_register(pf);
	if (err)
		goto err_devlink_unreg;

	err = nfp_devlink_params_register(pf);
	if (err)
		goto err_shared_buf_unreg;

	devl_lock(devlink);
	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);

	/* Allocate the vnics and do basic init */
	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride);
	if (err)
		goto err_clean_ddir;

	err = nfp_net_pf_alloc_irqs(pf);
	if (err)
		goto err_free_vnics;

	err = nfp_net_pf_app_start(pf);
	if (err)
		goto err_free_irqs;

	err = nfp_net_pf_init_vnics(pf);
	if (err)
		goto err_stop_app;

	devl_unlock(devlink);
	devlink_register(devlink);

	return 0;

err_stop_app:
	nfp_net_pf_app_stop(pf);
err_free_irqs:
	nfp_net_pf_free_irqs(pf);
err_free_vnics:
	nfp_net_pf_free_vnics(pf);
err_clean_ddir:
	nfp_net_debugfs_dir_clean(&pf->ddir);
	devl_unlock(devlink);
	nfp_devlink_params_unregister(pf);
err_shared_buf_unreg:
	nfp_shared_buf_unregister(pf);
err_devlink_unreg:
	cancel_work_sync(&pf->port_refresh_work);
	nfp_net_pf_app_clean(pf);
err_unmap:
	nfp_net_pci_unmap_mem(pf);
	return err;
}

void nfp_net_pci_remove(struct nfp_pf *pf)
{
	struct devlink *devlink = priv_to_devlink(pf);
	struct nfp_net *nn, *next;

	devlink_unregister(priv_to_devlink(pf));
	devl_lock(devlink);
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	nfp_net_pf_app_stop(pf);
	/* stop app first, to avoid double free of ctrl vNIC's ddir */
	nfp_net_debugfs_dir_clean(&pf->ddir);

	devl_unlock(devlink);

	nfp_devlink_params_unregister(pf);
	nfp_shared_buf_unregister(pf);

	nfp_net_pf_free_irqs(pf);
	nfp_net_pf_app_clean(pf);
	nfp_net_pci_unmap_mem(pf);

	cancel_work_sync(&pf->port_refresh_work);
}