// SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
/*
 * Copyright (C) 2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General License Version 2,
 * June 1991 as shown in the file COPYING in the top-level directory of this
 * source tree or the BSD 2-Clause License provided below. You have the
 * option to license this software under the complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "main.h"

static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id)
{
        return FIELD_PREP(NFP_ABM_PORTID_TYPE, rtype) |
               FIELD_PREP(NFP_ABM_PORTID_ID, id);
}

static int
__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
                     u32 handle, unsigned int qs, u32 init_val)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);
        int ret;

        ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val);
        memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs);

        alink->parent = handle;
        alink->num_qdiscs = qs;
        port->tc_offload_cnt = qs;

        return ret;
}

static void
nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink,
                   u32 handle, unsigned int qs)
{
        __nfp_abm_reset_root(netdev, alink, handle, qs, ~0);
}

static int
nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
        unsigned int i = TC_H_MIN(opt->parent) - 1;

        if (opt->parent == TC_H_ROOT)
                i = 0;
        else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent))
                i = TC_H_MIN(opt->parent) - 1;
        else
                return -EOPNOTSUPP;

        if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle)
                return -EOPNOTSUPP;

        return i;
}

static void
nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
                    u32 handle)
{
        unsigned int i;

        for (i = 0; i < alink->num_qdiscs; i++)
                if (handle == alink->qdiscs[i].handle)
                        break;
        if (i == alink->num_qdiscs)
                return;

        if (alink->parent == TC_H_ROOT) {
                nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
        } else {
                nfp_abm_ctrl_set_q_lvl(alink, i, ~0);
                memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs));
        }
}

static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
                    struct tc_red_qopt_offload *opt)
{
        bool existing;
        int i, err;

        i = nfp_abm_red_find(alink, opt);
        existing = i >= 0;

        if (opt->set.min != opt->set.max || !opt->set.is_ecn) {
                nfp_warn(alink->abm->app->cpp,
                         "RED offload failed - unsupported parameters\n");
                err = -EINVAL;
                goto err_destroy;
        }

        if (existing) {
                if (alink->parent == TC_H_ROOT)
                        err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min);
                else
                        err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
                if (err)
                        goto err_destroy;
                return 0;
        }

        if (opt->parent == TC_H_ROOT) {
                i = 0;
                err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1,
                                           opt->set.min);
        } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) {
                i = TC_H_MIN(opt->parent) - 1;
                err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min);
        } else {
                return -EINVAL;
        }
        /* Set the handle to try full clean up, in case IO failed */
        alink->qdiscs[i].handle = opt->handle;
        if (err)
                goto err_destroy;

        if (opt->parent == TC_H_ROOT)
                err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats);
        else
                err = nfp_abm_ctrl_read_q_stats(alink, i,
                                                &alink->qdiscs[i].stats);
        if (err)
                goto err_destroy;

        if (opt->parent == TC_H_ROOT)
                err = nfp_abm_ctrl_read_xstats(alink,
                                               &alink->qdiscs[i].xstats);
        else
                err = nfp_abm_ctrl_read_q_xstats(alink, i,
                                                 &alink->qdiscs[i].xstats);
        if (err)
                goto err_destroy;

        alink->qdiscs[i].stats.backlog_pkts = 0;
        alink->qdiscs[i].stats.backlog_bytes = 0;

        return 0;
err_destroy:
        /* If the qdisc keeps on living but we can't offload, undo changes */
        if (existing) {
                opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts;
                opt->set.qstats->backlog -=
                                alink->qdiscs[i].stats.backlog_bytes;
        }
        nfp_abm_red_destroy(netdev, alink, opt->handle);

        return err;
}

static void
nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old,
                     struct tc_qopt_offload_stats *stats)
{
        _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes,
                       new->tx_pkts - old->tx_pkts);
        stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts;
        stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes;
        stats->qstats->overlimits += new->overlimits - old->overlimits;
        stats->qstats->drops += new->drops - old->drops;
}

static int
nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
        struct nfp_alink_stats *prev_stats;
        struct nfp_alink_stats stats;
        int i, err;

        i = nfp_abm_red_find(alink, opt);
        if (i < 0)
                return i;
        prev_stats = &alink->qdiscs[i].stats;

        if (alink->parent == TC_H_ROOT)
                err = nfp_abm_ctrl_read_stats(alink, &stats);
        else
                err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
        if (err)
                return err;

        nfp_abm_update_stats(&stats, prev_stats, &opt->stats);

        *prev_stats = stats;

        return 0;
}

static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
        struct nfp_alink_xstats *prev_xstats;
        struct nfp_alink_xstats xstats;
        int i, err;

        i = nfp_abm_red_find(alink, opt);
        if (i < 0)
                return i;
        prev_xstats = &alink->qdiscs[i].xstats;

        if (alink->parent == TC_H_ROOT)
                err = nfp_abm_ctrl_read_xstats(alink, &xstats);
        else
                err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats);
        if (err)
                return err;

        opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked;
        opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop;

        *prev_xstats = xstats;

        return 0;
}

static int
nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
                     struct tc_red_qopt_offload *opt)
{
        switch (opt->command) {
        case TC_RED_REPLACE:
                return nfp_abm_red_replace(netdev, alink, opt);
        case TC_RED_DESTROY:
                nfp_abm_red_destroy(netdev, alink, opt->handle);
                return 0;
        case TC_RED_STATS:
                return nfp_abm_red_stats(alink, opt);
        case TC_RED_XSTATS:
                return nfp_abm_red_xstats(alink, opt);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt)
{
        struct nfp_alink_stats stats;
        unsigned int i;
        int err;

        for (i = 0; i < alink->num_qdiscs; i++) {
                if (alink->qdiscs[i].handle == TC_H_UNSPEC)
                        continue;

                err = nfp_abm_ctrl_read_q_stats(alink, i, &stats);
                if (err)
                        return err;

                nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats,
                                     &opt->stats);
        }

        return 0;
}

static int
nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
                    struct tc_mq_qopt_offload *opt)
{
        switch (opt->command) {
        case TC_MQ_CREATE:
                nfp_abm_reset_root(netdev, alink, opt->handle,
                                   alink->total_queues);
                return 0;
        case TC_MQ_DESTROY:
                if (opt->handle == alink->parent)
                        nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0);
                return 0;
        case TC_MQ_STATS:
                return nfp_abm_mq_stats(alink, opt);
        default:
                return -EOPNOTSUPP;
        }
}

static int
nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev,
                 enum tc_setup_type type, void *type_data)
{
        struct nfp_repr *repr = netdev_priv(netdev);
        struct nfp_port *port;

        port = nfp_port_from_netdev(netdev);
        if (!port || port->type != NFP_PORT_PF_PORT)
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_QDISC_MQ:
                return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data);
        case TC_SETUP_QDISC_RED:
                return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

static struct net_device *nfp_abm_repr_get(struct nfp_app *app, u32 port_id)
{
        enum nfp_repr_type rtype;
        struct nfp_reprs *reprs;
        u8 port;

        rtype = FIELD_GET(NFP_ABM_PORTID_TYPE, port_id);
        port = FIELD_GET(NFP_ABM_PORTID_ID, port_id);

        reprs = rcu_dereference(app->reprs[rtype]);
        if (!reprs)
                return NULL;

        if (port >= reprs->num_reprs)
                return NULL;

        return rcu_dereference(reprs->reprs[port]);
}

static int
nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink,
                   enum nfp_port_type ptype)
{
        struct net_device *netdev;
        enum nfp_repr_type rtype;
        struct nfp_reprs *reprs;
        struct nfp_repr *repr;
        struct nfp_port *port;
        unsigned int txqs;
        int err;

        if (ptype == NFP_PORT_PHYS_PORT) {
                rtype = NFP_REPR_TYPE_PHYS_PORT;
                txqs = 1;
        } else {
                rtype = NFP_REPR_TYPE_PF;
                txqs = alink->vnic->max_rx_rings;
        }

        netdev = nfp_repr_alloc_mqs(app, txqs, 1);
        if (!netdev)
                return -ENOMEM;
        repr = netdev_priv(netdev);
        repr->app_priv = alink;

        port = nfp_port_alloc(app, ptype, netdev);
        if (IS_ERR(port)) {
                err = PTR_ERR(port);
                goto err_free_repr;
        }

        if (ptype == NFP_PORT_PHYS_PORT) {
                port->eth_forced = true;
                err = nfp_port_init_phy_port(app->pf, app, port, alink->id);
                if (err)
                        goto err_free_port;
        } else {
                port->pf_id = alink->abm->pf_id;
                port->pf_split = app->pf->max_data_vnics > 1;
                port->pf_split_id = alink->id;
                port->vnic = alink->vnic->dp.ctrl_bar;
        }

        SET_NETDEV_DEV(netdev, &alink->vnic->pdev->dev);
        eth_hw_addr_random(netdev);

        err = nfp_repr_init(app, netdev, nfp_abm_portid(rtype, alink->id),
                            port, alink->vnic->dp.netdev);
        if (err)
                goto err_free_port;

        reprs = nfp_reprs_get_locked(app, rtype);
        WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr");
        rcu_assign_pointer(reprs->reprs[alink->id], netdev);

        nfp_info(app->cpp, "%s Port %d Representor(%s) created\n",
                 ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys",
                 alink->id, netdev->name);

        return 0;

err_free_port:
        nfp_port_free(port);
err_free_repr:
        nfp_repr_free(netdev);
        return err;
}

static void
nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink,
                  enum nfp_repr_type rtype)
{
        struct net_device *netdev;
        struct nfp_reprs *reprs;

        reprs = nfp_reprs_get_locked(app, rtype);
        netdev = nfp_repr_get_locked(app, reprs, alink->id);
        if (!netdev)
                return;
        rcu_assign_pointer(reprs->reprs[alink->id], NULL);
        synchronize_rcu();
        /* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */
        nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev));
}

static void
nfp_abm_kill_reprs(struct nfp_abm *abm, struct nfp_abm_link *alink)
{
        nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PF);
        nfp_abm_kill_repr(abm->app, alink, NFP_REPR_TYPE_PHYS_PORT);
}

static void nfp_abm_kill_reprs_all(struct nfp_abm *abm)
{
        struct nfp_pf *pf = abm->app->pf;
        struct nfp_net *nn;

        list_for_each_entry(nn, &pf->vnics, vnic_list)
                nfp_abm_kill_reprs(abm, (struct nfp_abm_link *)nn->app_priv);
}

static enum devlink_eswitch_mode nfp_abm_eswitch_mode_get(struct nfp_app *app)
{
        struct nfp_abm *abm = app->priv;

        return abm->eswitch_mode;
}

static int nfp_abm_eswitch_set_legacy(struct nfp_abm *abm)
{
        nfp_abm_kill_reprs_all(abm);
        nfp_abm_ctrl_qm_disable(abm);

        abm->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
        return 0;
}

static void nfp_abm_eswitch_clean_up(struct nfp_abm *abm)
{
        if (abm->eswitch_mode != DEVLINK_ESWITCH_MODE_LEGACY)
                WARN_ON(nfp_abm_eswitch_set_legacy(abm));
}

static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm)
{
        struct nfp_app *app = abm->app;
        struct nfp_pf *pf = app->pf;
        struct nfp_net *nn;
        int err;

        err = nfp_abm_ctrl_qm_enable(abm);
        if (err)
                return err;

        list_for_each_entry(nn, &pf->vnics, vnic_list) {
                struct nfp_abm_link *alink = nn->app_priv;

                err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PHYS_PORT);
                if (err)
                        goto err_kill_all_reprs;

                err = nfp_abm_spawn_repr(app, alink, NFP_PORT_PF_PORT);
                if (err)
                        goto err_kill_all_reprs;
        }

        abm->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
        return 0;

err_kill_all_reprs:
        nfp_abm_kill_reprs_all(abm);
        nfp_abm_ctrl_qm_disable(abm);
        return err;
}

static int nfp_abm_eswitch_mode_set(struct nfp_app *app, u16 mode)
{
        struct nfp_abm *abm = app->priv;

        if (abm->eswitch_mode == mode)
                return 0;

        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                return nfp_abm_eswitch_set_legacy(abm);
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                return nfp_abm_eswitch_set_switchdev(abm);
        default:
                return -EINVAL;
        }
}

static void
nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
                     unsigned int id)
{
        struct nfp_eth_table_port *eth_port = &pf->eth_tbl->ports[id];
        u8 mac_addr[ETH_ALEN];
        struct nfp_nsp *nsp;
        char hwinfo[32];
        int err;

        if (id > pf->eth_tbl->count) {
                nfp_warn(pf->cpp, "No entry for persistent MAC address\n");
                eth_hw_addr_random(nn->dp.netdev);
                return;
        }

        snprintf(hwinfo, sizeof(hwinfo), "eth%u.mac.pf%u",
                 eth_port->eth_index, abm->pf_id);

        nsp = nfp_nsp_open(pf->cpp);
        if (IS_ERR(nsp)) {
                nfp_warn(pf->cpp, "Failed to access the NSP for persistent MAC address: %ld\n",
                         PTR_ERR(nsp));
                eth_hw_addr_random(nn->dp.netdev);
                return;
        }

        if (!nfp_nsp_has_hwinfo_lookup(nsp)) {
                nfp_warn(pf->cpp, "NSP doesn't support PF MAC generation\n");
                eth_hw_addr_random(nn->dp.netdev);
                /* Release the NSP handle before falling back to a random MAC */
                nfp_nsp_close(nsp);
                return;
        }

        err = nfp_nsp_hwinfo_lookup(nsp, hwinfo, sizeof(hwinfo));
        nfp_nsp_close(nsp);
        if (err) {
                nfp_warn(pf->cpp, "Reading persistent MAC address failed: %d\n",
                         err);
                eth_hw_addr_random(nn->dp.netdev);
                return;
        }

        if (sscanf(hwinfo, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
                   &mac_addr[0], &mac_addr[1], &mac_addr[2],
                   &mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
                nfp_warn(pf->cpp, "Can't parse persistent MAC address (%s)\n",
                         hwinfo);
                eth_hw_addr_random(nn->dp.netdev);
                return;
        }

        ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
        ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}

static int
nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
        struct nfp_eth_table_port *eth_port = &app->pf->eth_tbl->ports[id];
        struct nfp_abm *abm = app->priv;
        struct nfp_abm_link *alink;
        int err;

        alink = kzalloc(sizeof(*alink), GFP_KERNEL);
        if (!alink)
                return -ENOMEM;
        nn->app_priv = alink;
        alink->abm = abm;
        alink->vnic = nn;
        alink->id = id;
        alink->parent = TC_H_ROOT;
        alink->total_queues = alink->vnic->max_rx_rings;
        alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs),
                                 GFP_KERNEL);
        if (!alink->qdiscs) {
                err = -ENOMEM;
                goto err_free_alink;
        }

        /* This is a multi-host app, make sure MAC/PHY is up, but don't
         * make the MAC/PHY state follow the state of any of the ports.
         */
        err = nfp_eth_set_configured(app->cpp, eth_port->index, true);
        if (err < 0)
                goto err_free_qdiscs;

        netif_keep_dst(nn->dp.netdev);

        nfp_abm_vnic_set_mac(app->pf, abm, nn, id);
        nfp_abm_ctrl_read_params(alink);

        return 0;

err_free_qdiscs:
        kvfree(alink->qdiscs);
err_free_alink:
        kfree(alink);
        return err;
}

static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
        struct nfp_abm_link *alink = nn->app_priv;

        nfp_abm_kill_reprs(alink->abm, alink);
        kvfree(alink->qdiscs);
        kfree(alink);
}

static u64 *
nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data)
{
        struct nfp_repr *repr = netdev_priv(port->netdev);
        struct nfp_abm_link *alink;
        unsigned int i;

        if (port->type != NFP_PORT_PF_PORT)
                return data;
        alink = repr->app_priv;
        for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
                *data++ = nfp_abm_ctrl_stat_non_sto(alink, i);
                *data++ = nfp_abm_ctrl_stat_sto(alink, i);
        }
        return data;
}

static int
nfp_abm_port_get_stats_count(struct nfp_app *app, struct nfp_port *port)
{
        struct nfp_repr *repr = netdev_priv(port->netdev);
        struct nfp_abm_link *alink;

        if (port->type != NFP_PORT_PF_PORT)
                return 0;
        alink = repr->app_priv;
        return alink->vnic->dp.num_r_vecs * 2;
}

static u8 *
nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port,
                               u8 *data)
{
        struct nfp_repr *repr = netdev_priv(port->netdev);
        struct nfp_abm_link *alink;
        unsigned int i;

        if (port->type != NFP_PORT_PF_PORT)
                return data;
        alink = repr->app_priv;
        for (i = 0; i < alink->vnic->dp.num_r_vecs; i++) {
                data = nfp_pr_et(data, "q%u_no_wait", i);
                data = nfp_pr_et(data, "q%u_delayed", i);
        }
        return data;
}

static int nfp_abm_init(struct nfp_app *app)
{
        struct nfp_pf *pf = app->pf;
        struct nfp_reprs *reprs;
        struct nfp_abm *abm;
        int err;

        if (!pf->eth_tbl) {
                nfp_err(pf->cpp, "ABM NIC requires ETH table\n");
                return -EINVAL;
        }
        if (pf->max_data_vnics != pf->eth_tbl->count) {
                nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
                        pf->max_data_vnics, pf->eth_tbl->count);
                return -EINVAL;
        }
        if (!pf->mac_stats_bar) {
                nfp_warn(app->cpp, "ABM NIC requires mac_stats symbol\n");
                return -EINVAL;
        }

        abm = kzalloc(sizeof(*abm), GFP_KERNEL);
        if (!abm)
                return -ENOMEM;
        app->priv = abm;
        abm->app = app;

        err = nfp_abm_ctrl_find_addrs(abm);
        if (err)
                goto err_free_abm;

        /* We start in legacy mode, make sure advanced queuing is disabled */
        err = nfp_abm_ctrl_qm_disable(abm);
        if (err)
                goto err_free_abm;

        err = -ENOMEM;
        reprs = nfp_reprs_alloc(pf->max_data_vnics);
        if (!reprs)
                goto err_free_abm;
        RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs);

        reprs = nfp_reprs_alloc(pf->max_data_vnics);
        if (!reprs)
                goto err_free_phys;
        RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PF], reprs);

        return 0;

err_free_phys:
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_free_abm:
        kfree(abm);
        app->priv = NULL;
        return err;
}

static void nfp_abm_clean(struct nfp_app *app)
{
        struct nfp_abm *abm = app->priv;

        nfp_abm_eswitch_clean_up(abm);
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
        kfree(abm);
        app->priv = NULL;
}

const struct nfp_app_type app_abm = {
        .id = NFP_APP_ACTIVE_BUFFER_MGMT_NIC,
        .name = "abm",

        .init = nfp_abm_init,
        .clean = nfp_abm_clean,

        .vnic_alloc = nfp_abm_vnic_alloc,
        .vnic_free = nfp_abm_vnic_free,

        .port_get_stats = nfp_abm_port_get_stats,
        .port_get_stats_count = nfp_abm_port_get_stats_count,
        .port_get_stats_strings = nfp_abm_port_get_stats_strings,

        .setup_tc = nfp_abm_setup_tc,

        .eswitch_mode_get = nfp_abm_eswitch_mode_get,
        .eswitch_mode_set = nfp_abm_eswitch_mode_set,

        .repr_get = nfp_abm_repr_get,
};