/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below. You have the option to license this software under the complete
 * terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	/* Unknown port type - return an out-of-range repr type rather than
	 * a cmsg port type constant, so callers can detect the failure.
	 */
	return __NFP_REPR_TYPE_MAX;
}

static struct net_device *
nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

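/**
 * nfp_flower_reprs_reify() - inform firmware of representor existence state
 * @app:	NFP app handle
 * @type:	type of representors to notify firmware about
 * @exists:	true if the representors are being instantiated,
 *		false if they are being destroyed
 *
 * Sends a PORT_REIFY control message for every registered representor of
 * @type.  Caller must hold @app->pf->lock.
 *
 * Return: number of messages sent on success, negative errno on failure.
 */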
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	if (!tot_repl)
		return 0;

	lockdep_assert_held(&app->pf->lock);
	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
					       atomic_read(replies) >= tot_repl,
					       msecs_to_jiffies(10));
	if (err <= 0) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static int
nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev)
{
	return tc_setup_cb_egdev_register(netdev,
					  nfp_flower_setup_tc_egress_cb,
					  netdev_priv(netdev));
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);

	tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb,
				     netdev_priv(netdev));
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

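/**
 * nfp_flower_spawn_vnic_reprs() - create PF or VF vNIC representors
 * @app:	NFP app handle
 * @vnic_type:	vNIC type to encode in the firmware port ID
 * @repr_type:	representor type to register the netdevs under
 * @cnt:	number of representors to create
 *
 * Allocates @cnt representor netdevs and ports, registers them with
 * nfp_app_reprs_set() and then notifies firmware via PORT_REIFY messages,
 * waiting for the expected number of replies.
 *
 * Return: 0 on success, negative errno on failure.
 */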
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

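/**
 * nfp_flower_spawn_phy_reprs() - create physical port representors
 * @app:	NFP app handle
 * @priv:	flower app private data
 *
 * Creates one representor netdev per entry in the NSP eth table, registers
 * them with nfp_app_reprs_set() and only then sends the MAC_REPR and
 * PORT_REIFY control messages, so that firmware replies for the new
 * representors are not dropped.
 *
 * Return: 0 on success, negative errno on failure.
 */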
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set().  This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, e.g. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any such messages.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

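/**
 * nfp_flower_init() - per-device initialization of the flower app
 * @app:	NFP app handle
 *
 * Validates that the firmware and PF provide everything flower needs
 * (eth table, mac_stats and vf_cfg BARs, a supported hw_flower_version),
 * allocates the app private data, initializes the control message queues
 * and flow metadata, reads the extra feature flags and negotiates LAG
 * offload support with the firmware.
 *
 * Return: 0 on success, negative errno on failure.
 */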
static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	u64 version, features;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features;

	/* Tell the firmware that the driver supports LAG. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_ext_feats |= NFP_FL_FEATS_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		goto err_cleanup_metadata;
	}

	return 0;

err_cleanup_metadata:
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

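/**
 * nfp_flower_repr_change_mtu() - request an MTU change for a representor
 * @app:	NFP app handle
 * @netdev:	representor netdev whose MTU is changing
 * @new_mtu:	requested MTU
 *
 * Physical port MTU changes have to be configured in firmware, so a PORTMOD
 * message carrying the new MTU is sent and the function waits briefly for
 * the firmware to acknowledge it.  Other representor types need no firmware
 * involvement and return immediately.
 *
 * Return: 0 on success, negative errno on failure or missing ack.
 */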
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err, ack;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
				 nfp_flower_check_ack(app_priv),
				 msecs_to_jiffies(10));

	if (!ack) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;

		err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb);
		if (err)
			return err;
	}

	return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG)
		unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb);

	nfp_tunnel_config_stop(app);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_init	= nfp_flower_repr_netdev_init,
	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.repr_get	= nfp_flower_repr_get,

	.setup_tc	= nfp_flower_setup_tc,
};