// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "main.h"

static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
{
	return FIELD_PREP(NFP_ABM_PORTID_TYPE, rtype) |
	       FIELD_PREP(NFP_ABM_PORTID_ID, id);
}

static int
__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
		     u32 handle, unsigned int qs, u32 init_val)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	int ret;

	ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
	memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);

	alink->parent = handle;
	alink->num_qdiscs = qs;
	port->tc_offload_cnt = qs;

	return ret;
}

static void
nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
		   u32 handle, unsigned int qs)
{
	__nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
}

static int
nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	unsigned int i = TC_H_MIN(opt->parent) - 1;

	if (opt->parent == TC_H_ROOT)
		i = 0;
	else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
		i = TC_H_MIN(opt->parent) - 1;
	else
		return -EOPNOTSUPP;

	if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
		return -EOPNOTSUPP;

	return i;
}

static void
nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
		    u32 handle)
{
	unsigned int i;

	for (i = 0; i < alink->num_qdiscs; i++)
		if (handle == alink->qdiscs[i].handle)
			break;
	if (i == alink->num_qdiscs)
		return;

	if (alink->parent == TC_H_ROOT) {
		nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
	} else {
		nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
		memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
	}
}

static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	bool existing;
	int i, err;

	i = nfp_abm_red_find(alink, opt);
	existing = i >= 0;

	if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
		nfp_warn(alink->abm->app->cpp,
			 "RED offload failed - unsupported parameters\n");
		err = -EINVAL;
		goto err_destroy;
	}

	if (existing) {
		if (alink->parent == TC_H_ROOT)
			err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
		else
			err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
		if (err)
			goto err_destroy;
		return 0;
	}

	if (opt->parent == TC_H_ROOT) {
		i = 0;
		err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
					   opt->set.min);
	} else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
		i = TC_H_MIN(opt->parent) - 1;
		err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
	} else {
		return -EINVAL;
	}
	/* Set the handle to try full clean up, in case IO failed */
	alink->qdiscs[i].handle = opt->handle;
	if (err)
		goto err_destroy;

	if (opt->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
	else
		err = nfp_abm_ctrl_read_q_stats(alink, i,
						&alink->qdiscs[i].stats);
	if (err)
		goto err_destroy;

	if (opt->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_xstats(alink,
					       &alink->qdiscs[i].xstats);
	else
		err = nfp_abm_ctrl_read_q_xstats(alink, i,
						 &alink->qdiscs[i].xstats);
	if (err)
		goto err_destroy;

	alink->qdiscs[i].stats.backlog_pkts = 0;
	alink->qdiscs[i].stats.backlog_bytes = 0;

	return 0;
err_destroy:
	/* If the qdisc keeps on living, but we can't offload, undo changes */
	if (existing) {
		opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
		opt->set.qstats->backlog -=
				alink->qdiscs[i].stats.backlog_bytes;
	}
	nfp_abm_red_destroy(netdev, alink, opt->handle);

	return err;
}

static void
nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
		     struct tc_qopt_offload_stats *stats)
{
	_bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
		       new->tx_pkts - old->tx_pkts);
	stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
	stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
	stats->qstats->overlimits += new->overlimits - old->overlimits;
	stats->qstats->drops += new->drops - old->drops;
}

static int
nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	struct nfp_alink_stats *prev_stats;
	struct nfp_alink_stats stats;
	int i, err;

	i = nfp_abm_red_find(alink, opt);
	if (i < 0)
		return i;
	prev_stats = &alink->qdiscs[i].stats;

	if (alink->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_stats(alink, &stats);
	else
		err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
	if (err)
		return err;

	nfp_abm_update_stats(&stats, prev_stats, &opt->stats);

	*prev_stats = stats;

	return 0;
}

static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	struct nfp_alink_xstats *prev_xstats;
	struct nfp_alink_xstats xstats;
	int i, err;

	i = nfp_abm_red_find(alink, opt);
	if (i < 0)
		return i;
	prev_xstats = &alink->qdiscs[i].xstats;

	if (alink->parent == TC_H_ROOT)
		err = nfp_abm_ctrl_read_xstats(alink, &xstats);
	else
		err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
	if (err)
		return err;

	opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
	opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;

	*prev_xstats = xstats;

	return 0;
}

static int
nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
		     struct tc_red_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_RED_REPLACE:
		return nfp_abm_red_replace(netdev, alink, opt);
	case TC_RED_DESTROY:
		nfp_abm_red_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_RED_STATS:
		return nfp_abm_red_stats(alink, opt);
	case TC_RED_XSTATS:
		return nfp_abm_red_xstats(alink, opt);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
{
	struct nfp_alink_stats stats;
	unsigned int i;
	int err;

	for (i = 0; i < alink->num_qdiscs; i++) {
		if (alink->qdiscs[i].handle == TC_H_UNSPEC)
			continue;

		err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
		if (err)
			return err;

		nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
				     &opt->stats);
	}

	return 0;
}

static int
nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_mq_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_MQ_CREATE:
		nfp_abm_reset_root(netdev, alink, opt->handle,
				   alink->total_queues);
		return 0;
	case TC_MQ_DESTROY:
		if (opt->handle == alink->parent)
			nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
		return 0;
	case TC_MQ_STATS:
		return nfp_abm_mq_stats(alink, opt);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
		 enum tc_setup_type type, void *type_data)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_port *port;

	port = nfp_port_from_netdev(netdev);
	if (!port || port->type != NFP_PORT_PF_PORT)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_QDISC_MQ:
		return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
	case TC_SETUP_QDISC_RED:
		return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
{
	enum nfp_repr_type rtype;
	struct nfp_reprs *reprs;
	u8 port;

	rtype = FIELD_GET(NFP_ABM_PORTID_TYPE, port_id);
	port = FIELD_GET(NFP_ABM_PORTID_ID, port_id);

	reprs = rcu_dereference(app->reprs[rtype]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

static int
nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink,
		   enum nfp_port_type ptype)
{
	struct net_device *netdev;
	enum nfp_repr_type rtype;
	struct nfp_reprs *reprs;
	struct nfp_repr *repr;
	struct nfp_port *port;
	unsigned int txqs;
	int err;

	if (ptype == NFP_PORT_PHYS_PORT) {
		rtype = NFP_REPR_TYPE_PHYS_PORT;
		txqs = 1;
	} else {
		rtype = NFP_REPR_TYPE_PF;
		txqs = alink->vnic->max_rx_rings;
	}

	netdev = nfp_repr_alloc_mqs(app, txqs, 1);
	if (!netdev)
		return -ENOMEM;
	repr = netdev_priv(netdev);
	repr->app_priv = alink;

	port = nfp_port_alloc(app, ptype, netdev);
	if (IS_ERR(port)) {
		err = PTR_ERR(port);
		goto err_free_repr;
	}

	if (ptype == NFP_PORT_PHYS_PORT) {
		port->eth_forced = true;
		err = nfp_port_init_phy_port(app->pf, app, port, alink->id);
		if (err)
			goto err_free_port;
	} else {
		port->pf_id = alink->abm->pf_id;
		port->pf_split = app->pf->max_data_vnics > 1;
		port->pf_split_id = alink->id;
		port->vnic = alink->vnic->dp.ctrl_bar;
	}

	SET_NETDEV_DEV(netdev, &alink->vnic->pdev->dev);
	eth_hw_addr_random(netdev);

	err = nfp_repr_init(app, netdev, nfp_abm_portid(rtype, alink->id),
			    port, alink->vnic->dp.netdev);
	if (err)
		goto err_free_port;

	reprs = nfp_reprs_get_locked(app, rtype);
	WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr");
	rcu_assign_pointer(reprs->reprs[alink->id], netdev);

	nfp_info(app->cpp, "%s Port %d Representor(%s) created\n",
		 ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys",
		 alink->id, netdev->name);

	return 0;

err_free_port:
	nfp_port_free(port);
err_free_repr:
	nfp_repr_free(netdev);
	return err;
}

static void
nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink,
		  enum nfp_repr_type rtype)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;

	reprs = nfp_reprs_get_locked(app, rtype);
	netdev = nfp_repr_get_locked(app, reprs, alink->id);
	if (!netdev)
		return;
	rcu_assign_pointer(reprs->reprs[alink->id], NULL);
	synchronize_rcu();
	/* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */
	nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev));
}

static void
nfp_abm_kill_reprs(struct nfp_abm *abm, struct nfp_abm_link *alink)
{
	nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PF);
	nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PHYS_PORT);
}

static void nfp_abm_kill_reprs_all(struct nfp_abm *abm)
{
	struct nfp_pf *pf = abm->app->pf;
	struct nfp_net *nn;

	list_for_each_entry(nn, &pf->vnics, vnic_list)
		nfp_abm_kill_reprs(abm, (struct nfp_abm_link *)nn->app_priv);
}

static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app)
{
	struct nfp_abm *abm = app->priv;

	return abm->eswitch_mode;
}

static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm)
{
	nfp_abm_kill_reprs_all(abm);
	nfp_abm_ctrl_qm_disable(abm);

	abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
	return 0;
}

static void nfp_abm_eswitch_clean_up(struct nfp_abm *abm)
{
	if (abm->eswitch_mode != DEVLINK_ESWITCH_MODE_LEGACY)
		WARN_ON(nfp_abm_eswitch_set_legacy(abm));
}

static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm)
{
	struct nfp_app *app = abm->app;
	struct nfp_pf *pf = app->pf;
	struct nfp_net *nn;
	int err;

	err = nfp_abm_ctrl_qm_enable(abm);
	if (err)
		return err;

	list_for_each_entry(nn, &pf->vnics, vnic_list) {
		struct nfp_abm_link *alink = nn->app_priv;

		err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PHYS_PORT);
		if (err)
			goto err_kill_all_reprs;

		err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PF_PORT);
		if (err)
			goto err_kill_all_reprs;
	}

	abm->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
	return 0;

err_kill_all_reprs:
	nfp_abm_kill_reprs_all(abm);
	nfp_abm_ctrl_qm_disable(abm);
	return err;
}

static int nfp_abm_eswitch_mode_set(struct nfp_app *app, u16 mode)
{
	struct nfp_abm *abm = app->priv;

	if (abm->eswitch_mode == mode)
		return 0;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		return nfp_abm_eswitch_set_legacy(abm);
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		return nfp_abm_eswitch_set_switchdev(abm);
	default:
		return -EINVAL;
	}
}

static void
nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
		     unsigned int id)
{
	struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id];
	u8 mac_addr[ETH_ALEN];
	struct nfp_nsp *nsp;
	char hwinfo[32];
	int err;

	if (id > pf->eth_tbl->count) {
		nfp_warn(pf->cpp, "No entry for persistent MAC address\n");
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac.pf%u",
		 eth_port->eth_index, abm->pf_id);

	nsp = nfp_nsp_open(pf->cpp);
	if (IS_ERR(nsp)) {
		nfp_warn(pf->cpp, "Failed to access the NSP for persistent MAC address: %ld\n",
			 PTR_ERR(nsp));
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
		nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
		eth_hw_addr_random(nn->dp.netdev);
		nfp_nsp_close(nsp);
		return;
	}

	err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
	nfp_nsp_close(nsp);
	if (err) {
		nfp_warn(pf->cpp, "Reading persistent MAC address failed: %d\n",
			 err);
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
		   &mac_addr[0], &mac_addr[1], &mac_addr[2],
		   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
		nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n",
			 hwinfo);
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}

static int
nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_eth_table_port *eth_port = &app->pf->eth_tbl->ports[id];
	struct nfp_abm *abm = app->priv;
	struct nfp_abm_link *alink;
	int err;

	alink = kzalloc(sizeof(*alink), GFP_KERNEL);
	if (!alink)
		return -ENOMEM;
	nn->app_priv = alink;
	alink->abm = abm;
	alink->vnic = nn;
	alink->id = id;
	alink->parent = TC_H_ROOT;
	alink->total_queues = alink->vnic->max_rx_rings;
	alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
				 GFP_KERNEL);
	if (!alink->qdiscs) {
		err = -ENOMEM;
		goto err_free_alink;
	}

	/* This is a multi-host app, make sure MAC/PHY is up, but don't
	 * make the MAC/PHY state follow the state of any of the ports.
	 */
	err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
	if (err < 0)
		goto err_free_qdiscs;

	netif_keep_dst(nn->dp.netdev);

	nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
	nfp_abm_ctrl_read_params(alink);

	return 0;

err_free_qdiscs:
	kvfree(alink->qdiscs);
err_free_alink:
	kfree(alink);
	return err;
}

static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_abm_link *alink = nn->app_priv;

	nfp_abm_kill_reprs(alink->abm, alink);
	kvfree(alink->qdiscs);
	kfree(alink);
}

static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
	struct nfp_repr *repr = netdev_priv(port->netdev);
	struct nfp_abm_link *alink;
	unsigned int i;

	if (port->type != NFP_PORT_PF_PORT)
		return data;
	alink = repr->app_priv;
	for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
		*data++ = nfp_abm_ctrl_stat_non_sto(alink, i);
		*data++ = nfp_abm_ctrl_stat_sto(alink, i);
	}
	return data;
}

static int
nfp_abm_port_get_stats_count(struct nfp_app *app, struct nfp_port *port)
{
	struct nfp_repr *repr = netdev_priv(port->netdev);
	struct nfp_abm_link *alink;

	if (port->type != NFP_PORT_PF_PORT)
		return 0;
	alink = repr->app_priv;
	return alink->vnic->dp.num_r_vecs * 2;
}

static u8 *
nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
			       u8 *data)
{
	struct nfp_repr *repr = netdev_priv(port->netdev);
	struct nfp_abm_link *alink;
	unsigned int i;

	if (port->type != NFP_PORT_PF_PORT)
		return data;
	alink = repr->app_priv;
	for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
		data = nfp_pr_et(data, "q%u_no_wait", i);
		data = nfp_pr_et(data, "q%u_delayed", i);
	}
	return data;
}

static int nfp_abm_init(struct nfp_app *app)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_reprs *reprs;
	struct nfp_abm *abm;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "ABM NIC requires ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}
	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "ABM NIC requires mac_stats symbol\n");
		return -EINVAL;
	}

	abm = kzalloc(sizeof(*abm), GFP_KERNEL);
	if (!abm)
		return -ENOMEM;
	app->priv = abm;
	abm->app = app;

	err = nfp_abm_ctrl_find_addrs(abm);
	if (err)
		goto err_free_abm;

	/* We start in legacy mode, make sure advanced queuing is disabled */
	err = nfp_abm_ctrl_qm_disable(abm);
	if (err)
		goto err_free_abm;

	err = -ENOMEM;
	reprs = nfp_reprs_alloc(pf->max_data_vnics);
	if (!reprs)
		goto err_free_abm;
	RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);

	reprs = nfp_reprs_alloc(pf->max_data_vnics);
	if (!reprs)
		goto err_free_phys;
	RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PF], reprs);

	return 0;

err_free_phys:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_free_abm:
	kfree(abm);
	app->priv = NULL;
	return err;
}

static void nfp_abm_clean(struct nfp_app *app)
{
	struct nfp_abm *abm = app->priv;

	nfp_abm_eswitch_clean_up(abm);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
	kfree(abm);
	app->priv = NULL;
}

const struct nfp_app_type app_abm = {
	.id		= NFP_APP_ACTIVE_BUFFER_MGMT_NIC,
	.name		= "abm",

	.init		= nfp_abm_init,
	.clean		= nfp_abm_clean,

	.vnic_alloc	= nfp_abm_vnic_alloc,
	.vnic_free	= nfp_abm_vnic_free,

	.port_get_stats		= nfp_abm_port_get_stats,
	.port_get_stats_count	= nfp_abm_port_get_stats_count,
	.port_get_stats_strings	= nfp_abm_port_get_stats_strings,

	.setup_tc	= nfp_abm_setup_tc,

	.eswitch_mode_get	= nfp_abm_eswitch_mode_get,
	.eswitch_mode_set	= nfp_abm_eswitch_mode_set,

	.repr_get	= nfp_abm_repr_get,
};