/*
 * Copyright (C) 2015-2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/etherdevice.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
#include <linux/msi.h>
#include <linux/random.h>
#include <linux/rtnetlink.h>

#include "nfpcore/nfp.h"
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_nsp.h"
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_sriov.h"
#include "nfp_net.h"
#include "nfp_main.h"
#include "nfp_port.h"

#define NFP_PF_CSR_SLICE_SIZE	(32 * 1024)

/**
 * nfp_net_get_mac_addr() - Get the MAC address.
 * @pf:   NFP PF handle
 * @port: NFP port structure
 *
 * First try to get the MAC address from the NSP ETH table. If that
 * fails, generate a random address.
 */
void nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port)
{
	struct nfp_eth_table_port *eth_port;

	eth_port = __nfp_port_get_eth_port(port);
	if (!eth_port) {
		eth_hw_addr_random(port->netdev);
		return;
	}

	ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
	ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
}

static struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int index)
{
	int i;

	for (i = 0; eth_tbl && i < eth_tbl->count; i++)
		if (eth_tbl->ports[i].index == index)
			return &eth_tbl->ports[i];

	return NULL;
}

static int
nfp_net_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format,
			       unsigned int default_val)
{
	char name[256];
	int err = 0;
	u64 val;

	snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp));

	val = nfp_rtsym_read_le(pf->rtbl, name, &err);
	if (err) {
		if (err == -ENOENT)
			return default_val;
		nfp_err(pf->cpp, "Unable to read symbol %s\n", name);
		return err;
	}

	return val;
}

static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_net_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}

static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
{
	return nfp_net_pf_rtsym_read_optional(pf, "_pf%u_net_app_id",
					      NFP_APP_CORE_NIC);
}

static u8 __iomem *
nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
		     unsigned int min_size, struct nfp_cpp_area **area)
{
	char pf_symbol[256];

	snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt,
		 nfp_cppcore_pcie_unit(pf->cpp));

	return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area);
}

static void nfp_net_pf_free_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_free(pf->app, nn);
	nfp_port_free(nn->port);
	list_del(&nn->vnic_list);
	pf->num_vnics--;
	nfp_net_free(nn);
}

static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn, *next;

	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_free_vnic(pf, nn);
}

static struct nfp_net *
nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
		      void __iomem *ctrl_bar, void __iomem *qc_bar,
		      int stride, unsigned int id)
{
	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
	struct nfp_net *nn;
	int err;

	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);

	/* Allocate and initialise the vNIC */
	nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings);
	if (IS_ERR(nn))
		return nn;

	nn->app = pf->app;
	nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar);
	nn->dp.ctrl_bar = ctrl_bar;
	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	nn->dp.is_vf = 0;
	nn->stride_rx = stride;
	nn->stride_tx = stride;

	if (needs_netdev) {
		err = nfp_app_vnic_alloc(pf->app, nn, id);
		if (err) {
			nfp_net_free(nn);
			return ERR_PTR(err);
		}
	}

	pf->num_vnics++;
	list_add_tail(&nn->vnic_list, &pf->vnics);

	return nn;
}

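/* Finish bringing up a single vNIC: run common init against its control
 * BAR, add debugfs entries, register the devlink port (if any), and let
 * the app layer hook in for data vNICs.  The error path unwinds each
 * step in reverse order.
 */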
static int
nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
{
	int err;

	err = nfp_net_init(nn);
	if (err)
		return err;

	nfp_net_debugfs_vnic_add(nn, pf->ddir, id);

	if (nn->port) {
		err = nfp_devlink_port_register(pf->app, nn->port);
		if (err)
			goto err_dfs_clean;
	}

	nfp_net_info(nn);

	if (nfp_net_is_data_vnic(nn)) {
		err = nfp_app_vnic_init(pf->app, nn);
		if (err)
			goto err_devlink_port_clean;
	}

	return 0;

err_devlink_port_clean:
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
err_dfs_clean:
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
	return err;
}

static int
nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
		       void __iomem *qc_bar, int stride)
{
	struct nfp_net *nn;
	unsigned int i;
	int err;

	for (i = 0; i < pf->max_data_vnics; i++) {
		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
					   stride, i);
		if (IS_ERR(nn)) {
			err = PTR_ERR(nn);
			goto err_free_prev;
		}

		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;

		/* Kill the vNIC if app init marked it as invalid */
		if (nn->port && nn->port->type == NFP_PORT_INVALID) {
			nfp_net_pf_free_vnic(pf, nn);
			continue;
		}
	}

	if (list_empty(&pf->vnics))
		return -ENODEV;

	return 0;

err_free_prev:
	nfp_net_pf_free_vnics(pf);
	return err;
}

static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
{
	if (nfp_net_is_data_vnic(nn))
		nfp_app_vnic_clean(pf->app, nn);
	if (nn->port)
		nfp_devlink_port_unregister(nn->port);
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_clean(nn);
}

static int nfp_net_pf_alloc_irqs(struct nfp_pf *pf)
{
	unsigned int wanted_irqs, num_irqs, vnics_left, irqs_left;
	struct nfp_net *nn;

	/* Get MSI-X vectors */
	wanted_irqs = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list)
		wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
	pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
				  GFP_KERNEL);
	if (!pf->irq_entries)
		return -ENOMEM;

	num_irqs = nfp_net_irqs_alloc(pf->pdev, pf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS * pf->num_vnics,
				      wanted_irqs);
	if (!num_irqs) {
		nfp_warn(pf->cpp, "Unable to allocate MSI-X vectors\n");
		kfree(pf->irq_entries);
		return -ENOMEM;
	}

	/* Distribute IRQs to vNICs */
	irqs_left = num_irqs;
	vnics_left = pf->num_vnics;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		unsigned int n;

		n = min(NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs,
			DIV_ROUND_UP(irqs_left, vnics_left));
		nfp_net_irqs_assign(nn, &pf->irq_entries[num_irqs - irqs_left],
				    n);
		irqs_left -= n;
		vnics_left--;
	}

	return 0;
}

static void nfp_net_pf_free_irqs(struct nfp_pf *pf)
{
	nfp_net_irqs_disable(pf->pdev);
	kfree(pf->irq_entries);
}

static int nfp_net_pf_init_vnics(struct nfp_pf *pf)
{
	struct nfp_net *nn;
	unsigned int id;
	int err;

	/* Finish vNIC init and register */
	id = 0;
	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		if (!nfp_net_is_data_vnic(nn))
			continue;
		err = nfp_net_pf_init_vnic(pf, nn, id);
		if (err)
			goto err_prev_deinit;

		id++;
	}

	return 0;

err_prev_deinit:
	list_for_each_entry_continue_reverse(nn, &pf->vnics, vnic_list)
		if (nfp_net_is_data_vnic(nn))
			nfp_net_pf_clean_vnic(pf, nn);
	return err;
}

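/* Bring up the app layer: allocate and initialize the app, and if the
 * app needs a control vNIC, map its control BAR symbol and allocate the
 * vNIC for it.
 */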
static int
nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
{
	u8 __iomem *ctrl_bar;
	int err;

	pf->app = nfp_app_alloc(pf, nfp_net_pf_get_app_id(pf));
	if (IS_ERR(pf->app))
		return PTR_ERR(pf->app);

	mutex_lock(&pf->lock);
	err = nfp_app_init(pf->app);
	mutex_unlock(&pf->lock);
	if (err)
		goto err_free;

	if (!nfp_app_needs_ctrl_vnic(pf->app))
		return 0;

	ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%u_net_ctrl_bar",
					NFP_PF_CSR_SLICE_SIZE,
					&pf->ctrl_vnic_bar);
	if (IS_ERR(ctrl_bar)) {
		nfp_err(pf->cpp, "Failed to find ctrl vNIC memory symbol\n");
		err = PTR_ERR(ctrl_bar);
		goto err_app_clean;
	}

	pf->ctrl_vnic = nfp_net_pf_alloc_vnic(pf, false, ctrl_bar, qc_bar,
					      stride, 0);
	if (IS_ERR(pf->ctrl_vnic)) {
		err = PTR_ERR(pf->ctrl_vnic);
		goto err_unmap;
	}

	return 0;

err_unmap:
	nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
err_app_clean:
	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);
err_free:
	nfp_app_free(pf->app);
	pf->app = NULL;
	return err;
}

static void nfp_net_pf_app_clean(struct nfp_pf *pf)
{
	if (pf->ctrl_vnic) {
		nfp_net_pf_free_vnic(pf, pf->ctrl_vnic);
		nfp_cpp_area_release_free(pf->ctrl_vnic_bar);
	}

	mutex_lock(&pf->lock);
	nfp_app_clean(pf->app);
	mutex_unlock(&pf->lock);

	nfp_app_free(pf->app);
	pf->app = NULL;
}

static int nfp_net_pf_app_start_ctrl(struct nfp_pf *pf)
{
	int err;

	if (!pf->ctrl_vnic)
		return 0;
	err = nfp_net_pf_init_vnic(pf, pf->ctrl_vnic, 0);
	if (err)
		return err;

	err = nfp_ctrl_open(pf->ctrl_vnic);
	if (err)
		goto err_clean_ctrl;

	return 0;

err_clean_ctrl:
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
	return err;
}

static void nfp_net_pf_app_stop_ctrl(struct nfp_pf *pf)
{
	if (!pf->ctrl_vnic)
		return;
	nfp_ctrl_close(pf->ctrl_vnic);
	nfp_net_pf_clean_vnic(pf, pf->ctrl_vnic);
}

static int nfp_net_pf_app_start(struct nfp_pf *pf)
{
	int err;

	err = nfp_net_pf_app_start_ctrl(pf);
	if (err)
		return err;

	err = nfp_app_start(pf->app, pf->ctrl_vnic);
	if (err)
		goto err_ctrl_stop;

	if (pf->num_vfs) {
		err = nfp_app_sriov_enable(pf->app, pf->num_vfs);
		if (err)
			goto err_app_stop;
	}

	return 0;

err_app_stop:
	nfp_app_stop(pf->app);
err_ctrl_stop:
	nfp_net_pf_app_stop_ctrl(pf);
	return err;
}

static void nfp_net_pf_app_stop(struct nfp_pf *pf)
{
	if (pf->num_vfs)
		nfp_app_sriov_disable(pf->app);
	nfp_app_stop(pf->app);
	nfp_net_pf_app_stop_ctrl(pf);
}

static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}

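/* Map all device memory the PF driver needs: the per-vNIC control BAR
 * slices, the MAC stats and VF config symbols, and the Queue Controller
 * area.  The MAC stats and VF config symbols are optional; if they are
 * absent (-ENOENT) the corresponding pointers are simply left NULL.
 */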
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u8 __iomem *mem;
	u32 min_size;
	int err;

	min_size = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_net_pf_map_rtsym(pf, "net.bar0", "_pf%d_net_bar0",
				   min_size, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		return PTR_ERR(mem);
	}

	min_size = NFP_MAC_STATS_SIZE * (pf->eth_tbl->max_index + 1);
	pf->mac_stats_mem = nfp_rtsym_map(pf->rtbl, "_mac_stats",
					  "net.macstats", min_size,
					  &pf->mac_stats_bar);
	if (IS_ERR(pf->mac_stats_mem)) {
		if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
			err = PTR_ERR(pf->mac_stats_mem);
			goto err_unmap_ctrl;
		}
		pf->mac_stats_mem = NULL;
	}

	pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
					      "_pf%d_net_vf_bar",
					      NFP_NET_CFG_BAR_SZ *
					      pf->limit_vfs, &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ;
	pf->vfcfg_tbl2 = nfp_net_pf_map_rtsym(pf, "net.vfcfg_tbl2",
					      "_pf%d_net_vf_cfg2",
					      min_size, &pf->vfcfg_tbl2_area);
	if (IS_ERR(pf->vfcfg_tbl2)) {
		if (PTR_ERR(pf->vfcfg_tbl2) != -ENOENT) {
			err = PTR_ERR(pf->vfcfg_tbl2);
			goto err_unmap_vf_cfg;
		}
		pf->vfcfg_tbl2 = NULL;
	}

	mem = nfp_cpp_map_area(pf->cpp, "net.qc", 0, 0,
			       NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
			       &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vfcfg_tbl2;
	}

	return 0;

err_unmap_vfcfg_tbl2:
	if (pf->vfcfg_tbl2_area)
		nfp_cpp_area_release_free(pf->vfcfg_tbl2_area);
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}

static int
nfp_net_eth_port_update(struct nfp_cpp *cpp, struct nfp_port *port,
			struct nfp_eth_table *eth_table)
{
	struct nfp_eth_table_port *eth_port;

	ASSERT_RTNL();

	eth_port = nfp_net_find_port(eth_table, port->eth_id);
	if (!eth_port) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_warn(cpp, "Warning: port #%d not present after reconfig\n",
			 port->eth_id);
		return -EIO;
	}
	if (eth_port->override_changed) {
		nfp_warn(cpp, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n",
			 port->eth_id);
		port->type = NFP_PORT_INVALID;
	}

	memcpy(port->eth_port, eth_port, sizeof(*eth_port));

	return 0;
}

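/* Re-read the NSP ETH table with pf->lock held, propagate the new state
 * to every port under RTNL, resync representors, and tear down any vNIC
 * whose port became invalid.
 */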
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf)
{
	struct nfp_eth_table *eth_table;
	struct nfp_net *nn, *next;
	struct nfp_port *port;
	int err;

	lockdep_assert_held(&pf->lock);

	/* Check for nfp_net_pci_remove() racing against us */
	if (list_empty(&pf->vnics))
		return 0;

	/* Update state of all ports */
	rtnl_lock();
	list_for_each_entry(port, &pf->ports, port_list)
		clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(pf->cpp);
	if (!eth_table) {
		list_for_each_entry(port, &pf->ports, port_list)
			if (__nfp_port_get_eth_port(port))
				set_bit(NFP_PORT_CHANGED, &port->flags);
		rtnl_unlock();
		nfp_err(pf->cpp, "Error refreshing port config!\n");
		return -EIO;
	}

	list_for_each_entry(port, &pf->ports, port_list)
		if (__nfp_port_get_eth_port(port))
			nfp_net_eth_port_update(pf->cpp, port, eth_table);
	rtnl_unlock();

	kfree(eth_table);

	/* Resync repr state. This may cause reprs to be removed. */
	err = nfp_reprs_resync_phys_ports(pf->app);
	if (err)
		return err;

	/* Shoot off the ports which became invalid */
	list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
		if (!nn->port || nn->port->type != NFP_PORT_INVALID)
			continue;

		nfp_net_pf_clean_vnic(pf, nn);
		nfp_net_pf_free_vnic(pf, nn);
	}

	return 0;
}

static void nfp_net_refresh_vnics(struct work_struct *work)
{
	struct nfp_pf *pf = container_of(work, struct nfp_pf,
					 port_refresh_work);

	mutex_lock(&pf->lock);
	nfp_net_refresh_port_table_sync(pf);
	mutex_unlock(&pf->lock);
}

void nfp_net_refresh_port_table(struct nfp_port *port)
{
	struct nfp_pf *pf = port->app->pf;

	set_bit(NFP_PORT_CHANGED, &port->flags);

	queue_work(pf->wq, &pf->port_refresh_work);
}

int nfp_net_refresh_eth_port(struct nfp_port *port)
{
	struct nfp_cpp *cpp = port->app->cpp;
	struct nfp_eth_table *eth_table;
	int ret;

	clear_bit(NFP_PORT_CHANGED, &port->flags);

	eth_table = nfp_eth_read_ports(cpp);
	if (!eth_table) {
		set_bit(NFP_PORT_CHANGED, &port->flags);
		nfp_err(cpp, "Error refreshing port state table!\n");
		return -EIO;
	}

	ret = nfp_net_eth_port_update(cpp, port, eth_table);

	kfree(eth_table);

	return ret;
}

/*
 * PCI device functions
 */
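/* Probe: validate the firmware ABI version, map PF memory, bring up the
 * app layer, then allocate, wire up and initialize all PF vNICs.
 * Unwinds fully on any failure.
 */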
"symbol table" : "firmware found"); 709 return -EINVAL; 710 } 711 712 pf->max_data_vnics = nfp_net_pf_get_num_ports(pf); 713 if ((int)pf->max_data_vnics < 0) 714 return pf->max_data_vnics; 715 716 err = nfp_net_pci_map_mem(pf); 717 if (err) 718 return err; 719 720 ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar); 721 qc_bar = nfp_cpp_area_iomem(pf->qc_area); 722 if (!ctrl_bar || !qc_bar) { 723 err = -EIO; 724 goto err_unmap; 725 } 726 727 nfp_net_get_fw_version(&fw_ver, ctrl_bar); 728 if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { 729 nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", 730 fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor); 731 err = -EINVAL; 732 goto err_unmap; 733 } 734 735 /* Determine stride */ 736 if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { 737 stride = 2; 738 nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); 739 } else { 740 switch (fw_ver.major) { 741 case 1 ... 5: 742 stride = 4; 743 break; 744 default: 745 nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", 746 fw_ver.resv, fw_ver.class, 747 fw_ver.major, fw_ver.minor); 748 err = -EINVAL; 749 goto err_unmap; 750 } 751 } 752 753 err = nfp_net_pf_app_init(pf, qc_bar, stride); 754 if (err) 755 goto err_unmap; 756 757 err = devlink_register(devlink, &pf->pdev->dev); 758 if (err) 759 goto err_app_clean; 760 761 mutex_lock(&pf->lock); 762 pf->ddir = nfp_net_debugfs_device_add(pf->pdev); 763 764 /* Allocate the vnics and do basic init */ 765 err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride); 766 if (err) 767 goto err_clean_ddir; 768 769 err = nfp_net_pf_alloc_irqs(pf); 770 if (err) 771 goto err_free_vnics; 772 773 err = nfp_net_pf_app_start(pf); 774 if (err) 775 goto err_free_irqs; 776 777 err = nfp_net_pf_init_vnics(pf); 778 if (err) 779 goto err_stop_app; 780 781 mutex_unlock(&pf->lock); 782 783 return 0; 784 785 err_stop_app: 786 nfp_net_pf_app_stop(pf); 787 err_free_irqs: 788 nfp_net_pf_free_irqs(pf); 789 err_free_vnics: 790 nfp_net_pf_free_vnics(pf); 791 err_clean_ddir: 792 nfp_net_debugfs_dir_clean(&pf->ddir); 793 mutex_unlock(&pf->lock); 794 cancel_work_sync(&pf->port_refresh_work); 795 devlink_unregister(devlink); 796 err_app_clean: 797 nfp_net_pf_app_clean(pf); 798 err_unmap: 799 nfp_net_pci_unmap_mem(pf); 800 return err; 801 } 802 803 void nfp_net_pci_remove(struct nfp_pf *pf) 804 { 805 struct nfp_net *nn, *next; 806 807 mutex_lock(&pf->lock); 808 list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) { 809 if (!nfp_net_is_data_vnic(nn)) 810 continue; 811 nfp_net_pf_clean_vnic(pf, nn); 812 nfp_net_pf_free_vnic(pf, nn); 813 } 814 815 nfp_net_pf_app_stop(pf); 816 /* stop app first, to avoid double free of ctrl vNIC's ddir */ 817 nfp_net_debugfs_dir_clean(&pf->ddir); 818 819 mutex_unlock(&pf->lock); 820 821 devlink_unregister(priv_to_devlink(pf)); 822 823 nfp_net_pf_free_irqs(pf); 824 nfp_net_pf_app_clean(pf); 825 nfp_net_pci_unmap_mem(pf); 826 827 cancel_work_sync(&pf->port_refresh_work); 828 } 829