// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	int ext_port;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	}

	return 0;
}

static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;

	rcu_read_lock();
	netdev = idr_find(&priv->internal_ports.port_ids, port_id);
	rcu_read_unlock();

	return netdev;
}

static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
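	/* Entries map an allocated internal port ID back to its netdev;
	 * lookups run under RCU, allocation/removal under the lock above.
	 */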
	idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

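	/* The firmware addresses ports with a cmsg port ID: the
	 * NFP_FLOWER_CMSG_PORT_TYPE field selects the port class and the
	 * remaining bits carry class-specific addressing.
	 */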
	/* Check if the port is internal. */
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
		if (redir_egress)
			*redir_egress = true;
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
		return nfp_flower_get_netdev_from_internal_port_id(app, port);
	}

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set(). This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, f.e. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

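	/* One PF representor is spawned here; VF representors are created
	 * now only if VFs are already enabled, otherwise later via
	 * nfp_flower_sriov_enable().
	 */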
	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

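	/* Optional offloads are opt-in: the driver writes 1 to a firmware
	 * rtsym to enable each one; -ENOENT on the write just means the
	 * loaded firmware lacks that feature.
	 */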
	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_ext_feats |= NFP_FL_FEATS_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp, "Flow merge not supported by FW.\n");
		} else {
			goto err_lag_clean;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_init(app);

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);
	app_priv->pre_tun_rule_cnt = 0;

	return 0;

err_lag_clean:
	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_cleanup(app);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

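	/* The firmware's portmod reply (handled by the cmsg rx path) is
	 * expected to set mtu_conf.ack and wake mtu_conf.wait_q once the
	 * requested MTU is echoed back; requested_val guards stale acks.
	 */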
	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_reg_indir_block_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_dev_get,

	.setup_tc	= nfp_flower_setup_tc,
};