// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	int ext_port;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	}

	return 0;
}

static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;

	rcu_read_lock();
	netdev = idr_find(&priv->internal_ports.port_ids, port_id);
	rcu_read_unlock();

	return netdev;
}

static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}
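/* Internal (non-repr) ports, e.g. tunnel devices used as flow destinations,
 * are tracked in the internal_ports IDR above. Lookups run under RCU while
 * allocation takes the spinlock with GFP_ATOMIC, so the mapping can be
 * queried from the offload paths. IDs are confined to the
 * [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID) window (idr_alloc() treats the
 * end as exclusive) and are released from the NETDEV_UNREGISTER notifier
 * above.
 */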
static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}
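/* Firmware port IDs are self-describing: NFP_FLOWER_CMSG_PORT_TYPE selects
 * the namespace, physical ports then carry the port number in
 * NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, while PCIe ports carry the vNIC index
 * in NFP_FLOWER_CMSG_PORT_VNIC plus a PF/VF discriminator in
 * NFP_FLOWER_CMSG_PORT_VNIC_TYPE. nfp_flower_dev_get() below performs the
 * reverse mapping from such an ID back to a netdev.
 */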
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	/* Check if the port is internal. */
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
		if (redir_egress)
			*redir_egress = true;
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
		return nfp_flower_get_netdev_from_internal_port_id(app, port);
	}

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}
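/* Representor creation and destruction follow a reify handshake: each
 * PORT_REIFY message sent by nfp_flower_reprs_reify() is expected to be
 * acked by the firmware. The ack path (in the cmsg receive code) bumps
 * priv->reify_replies and wakes priv->reify_wait_queue, and
 * nfp_flower_wait_repr_reify() returns -EIO if fewer than the expected
 * number of acks arrive within NFP_FL_REPLY_TIMEOUT.
 */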
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}
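/* Note the two-stage unwind in the spawn functions: before
 * nfp_app_reprs_set() publishes the array, errors jump straight to
 * err_reprs_clean since the reprs are still private to this thread.
 * Afterwards they must go through err_reprs_remove, which unpublishes the
 * array via nfp_app_reprs_set(app, type, NULL) first, so no reader can
 * observe a representor that is in the middle of being torn down.
 */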
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set(). This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, e.g. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}
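/* nfp_flower_vnic_init() below brings representors up in dependency order:
 * physical ports first, then the PF representor, then (if SR-IOV is already
 * enabled) the VF representors. nfp_flower_vnic_clean() above tears them
 * down in the reverse order.
 */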
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}
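/* nfp_flower_init() below validates the environment before allocating
 * anything: the eth table and the mac_stats/vf_cfg BARs must exist, the
 * hw_flower_version run-time symbol must match NFP_FLOWER_ALLOWED_VER, and
 * the optional CONFIG_FC_HOST_CTX_SPLIT/COUNT symbols size the stats
 * memory, falling back to one memory unit and 2^17 contexts when absent.
 */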
static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
		} else {
			goto err_lag_clean;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_init(app);

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);

	return 0;

err_lag_clean:
	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_cleanup(app);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}
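/* Physical port MTU changes are a request/ack exchange with the firmware:
 * nfp_flower_repr_change_mtu() below records the requested value under
 * mtu_conf.lock, sends a PORTMOD message with the mtu_only flag set, and
 * sleeps on mtu_conf.wait_q until the firmware ack flips mtu_conf.ack, or
 * NFP_FL_REPLY_TIMEOUT expires, in which case the request is cancelled and
 * -EIO returned.
 */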
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get = eswitch_mode_get,
	.dev_get	= nfp_flower_dev_get,

	.setup_tc	= nfp_flower_setup_tc,
};