// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "main.h"

static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
{
	return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
}

static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
{
	return qdisc->children[id] &&
	       qdisc->children[id] != NFP_QDISC_UNTRACKED;
}

static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
{
	return rtnl_dereference(*slot);
}

static void
nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
			struct nfp_alink_stats *child)
{
	parent->tx_pkts += child->tx_pkts;
	parent->tx_bytes += child->tx_bytes;
	parent->backlog_pkts += child->backlog_pkts;
	parent->backlog_bytes += child->backlog_bytes;
	parent->overlimits += child->overlimits;
	parent->drops += child->drops;
}

static void
nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
			 unsigned int queue)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	unsigned int i;
	int err;

	if (!qdisc->offloaded)
		return;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
						&qdisc->red.band[i].stats);
		if (err)
			nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
				i, queue, err);

		err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
						 &qdisc->red.band[i].xstats);
		if (err)
			nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
				i, queue, err);
	}
}

static void
nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	if (qdisc->type != NFP_QDISC_MQ)
		return;

	for (i = 0; i < alink->total_queues; i++)
		if (nfp_abm_qdisc_child_valid(qdisc, i))
			nfp_abm_stats_update_red(alink, qdisc->children[i], i);
}

static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
{
	alink->last_stats_update = time_now;
	if (alink->root_qdisc)
		nfp_abm_stats_update_mq(alink, alink->root_qdisc);
}

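/* Rough sketch of the hierarchy this file tracks in the common case (a
 * RED qdisc may also sit directly at the root):
 *
 *                mq (root)
 *               /    |    \
 *           red    gred    red      <- one leaf per HW queue
 *
 * Stats of non-leaf qdiscs are sums over their RED/GRED leaves, which is
 * why the refresh below is rate limited.
 */
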
static void nfp_abm_stats_update(struct nfp_abm_link *alink)
{
	u64 now;

	/* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
	 * of all their leaves, so we would read the same stat multiple times
	 * for every dump.
	 */
	now = ktime_get();
	if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
		return;

	__nfp_abm_stats_update(alink, now);
}

static void
nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
			      unsigned int start, unsigned int end)
{
	unsigned int i;

	for (i = start; i < end; i++)
		if (nfp_abm_qdisc_child_valid(qdisc, i)) {
			qdisc->children[i]->use_cnt--;
			qdisc->children[i] = NULL;
		}
}

static void
nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	/* Don't complain when qdisc is getting unlinked */
	if (qdisc->use_cnt)
		nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
			 qdisc->handle);

	if (!nfp_abm_qdisc_is_red(qdisc))
		return;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		qdisc->red.band[i].stats.backlog_pkts = 0;
		qdisc->red.band[i].stats.backlog_bytes = 0;
	}
}

static int
__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
		     unsigned int queue, struct nfp_alink_stats *prev_stats,
		     struct nfp_alink_xstats *prev_xstats)
{
	u64 backlog_pkts, backlog_bytes;
	int err;

	/* Don't touch the backlog; it can only be reset after it has
	 * been reported back to the tc qdisc stats.
	 */
	backlog_pkts = prev_stats->backlog_pkts;
	backlog_bytes = prev_stats->backlog_bytes;

	err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED stats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED xstats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	prev_stats->backlog_pkts = backlog_pkts;
	prev_stats->backlog_bytes = backlog_bytes;
	return 0;
}

static int
nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
		   unsigned int queue)
{
	unsigned int i;
	int err;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		err = __nfp_abm_stats_init(alink, i, queue,
					   &qdisc->red.band[i].prev_stats,
					   &qdisc->red.band[i].prev_xstats);
		if (err)
			return err;
	}

	return 0;
}

static void
nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
			    unsigned int queue)
{
	bool good_red, good_gred;
	unsigned int i;

	good_red = qdisc->type == NFP_QDISC_RED &&
		   qdisc->params_ok &&
		   qdisc->use_cnt == 1 &&
		   !alink->has_prio &&
		   !qdisc->children[0];
	good_gred = qdisc->type == NFP_QDISC_GRED &&
		    qdisc->params_ok &&
		    qdisc->use_cnt == 1;
	qdisc->offload_mark = good_red || good_gred;

	/* If we are starting offload, init prev_stats */
	if (qdisc->offload_mark && !qdisc->offloaded)
		if (nfp_abm_stats_init(alink, qdisc, queue))
			qdisc->offload_mark = false;

	if (!qdisc->offload_mark)
		return;

	for (i = 0; i < alink->abm->num_bands; i++)
		nfp_abm_ctrl_set_q_lvl(alink, i, queue,
				       qdisc->red.band[i].threshold);
}

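/* nfp_abm_qdisc_offload_update() below recomputes offload state from
 * scratch: it clears every offload_mark, lets the compile helpers re-mark
 * the qdiscs which still qualify, stops offload for anything whose mark
 * was not renewed, and resets the thresholds of queues left unconfigured.
 */
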
static void
nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
	if (!qdisc->offload_mark)
		return;

	for (i = 0; i < alink->total_queues; i++) {
		struct nfp_qdisc *child = qdisc->children[i];

		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		nfp_abm_offload_compile_red(alink, child, i);
	}
}

void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
{
	struct nfp_abm *abm = alink->abm;
	struct radix_tree_iter iter;
	struct nfp_qdisc *qdisc;
	void __rcu **slot;
	size_t i;

	/* Mark all thresholds as unconfigured */
	for (i = 0; i < abm->num_bands; i++)
		__bitmap_set(abm->threshold_undef,
			     i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
			     alink->total_queues);

	/* Clear offload marks */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		qdisc->offload_mark = false;
	}

	if (alink->root_qdisc)
		nfp_abm_offload_compile_mq(alink, alink->root_qdisc);

	/* Refresh offload status */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		if (!qdisc->offload_mark && qdisc->offloaded)
			nfp_abm_qdisc_offload_stop(alink, qdisc);
		qdisc->offloaded = qdisc->offload_mark;
	}

	/* Reset the unconfigured thresholds */
	for (i = 0; i < abm->num_thresholds; i++)
		if (test_bit(i, abm->threshold_undef))
			__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);

	__nfp_abm_stats_update(alink, ktime_get());
}

static void
nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct nfp_qdisc *qdisc)
{
	struct radix_tree_iter iter;
	unsigned int mq_refs = 0;
	void __rcu **slot;

	if (!qdisc->use_cnt)
		return;
	/* MQ doesn't notify well on destruction, so we need special handling
	 * of MQ's children.
	 */
	if (qdisc->type == NFP_QDISC_MQ &&
	    qdisc == alink->root_qdisc &&
	    netdev->reg_state == NETREG_UNREGISTERING)
		return;

	/* Count refs held by MQ instances and clear pointers */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
		unsigned int i;

		if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
			continue;
		for (i = 0; i < mq->num_children; i++)
			if (mq->children[i] == qdisc) {
				mq->children[i] = NULL;
				mq_refs++;
			}
	}

	WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
	     qdisc->use_cnt, mq_refs);
}

static void
nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
		   struct nfp_qdisc *qdisc)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (!qdisc)
		return;
	nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
	WARN_ON(radix_tree_delete(&alink->qdiscs,
				  TC_H_MAJ(qdisc->handle)) != qdisc);

	kfree(qdisc->children);
	kfree(qdisc);

	port->tc_offload_cnt--;
}

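/* Tracked qdiscs live in a radix tree indexed by the major part of their
 * TC handle; use_cnt counts the places which reference a qdisc (a parent's
 * children table and/or the root pointer).
 */
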
static struct nfp_qdisc *
nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
		    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
		    unsigned int children)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_qdisc *qdisc;
	int err;

	qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
	if (!qdisc)
		return NULL;

	if (children) {
		qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
		if (!qdisc->children)
			goto err_free_qdisc;
	}

	qdisc->netdev = netdev;
	qdisc->type = type;
	qdisc->parent_handle = parent_handle;
	qdisc->handle = handle;
	qdisc->num_children = children;

	err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"Qdisc insertion into radix tree failed: %d\n", err);
		goto err_free_child_tbl;
	}

	port->tc_offload_cnt++;
	return qdisc;

err_free_child_tbl:
	kfree(qdisc->children);
err_free_qdisc:
	kfree(qdisc);
	return NULL;
}

static struct nfp_qdisc *
nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
{
	return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}

static int
nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
		      unsigned int children, struct nfp_qdisc **qdisc)
{
	*qdisc = nfp_abm_qdisc_find(alink, handle);
	if (*qdisc) {
		if (WARN_ON((*qdisc)->type != type))
			return -EINVAL;
		return 1;
	}

	*qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
				     children);
	return *qdisc ? 0 : -ENOMEM;
}

static void
nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
		      u32 handle)
{
	struct nfp_qdisc *qdisc;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return;

	/* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
	if (alink->root_qdisc == qdisc)
		qdisc->use_cnt--;

	nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
	nfp_abm_qdisc_free(netdev, alink, qdisc);

	if (alink->root_qdisc == qdisc) {
		alink->root_qdisc = NULL;
		/* Only a root change matters; other changes are acted upon
		 * via the graft notification.
		 */
		nfp_abm_qdisc_offload_update(alink);
	}
}

static int
nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
		    unsigned int id)
{
	struct nfp_qdisc *parent, *child;

	parent = nfp_abm_qdisc_find(alink, handle);
	if (!parent)
		return 0;

	if (WARN(id >= parent->num_children,
		 "graft child out of bounds %d >= %d\n",
		 id, parent->num_children))
		return -EINVAL;

	nfp_abm_qdisc_unlink_children(parent, id, id + 1);

	child = nfp_abm_qdisc_find(alink, child_handle);
	if (child)
		child->use_cnt++;
	else
		child = NFP_QDISC_UNTRACKED;
	parent->children[id] = child;

	nfp_abm_qdisc_offload_update(alink);

	return 0;
}

static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
			struct nfp_alink_stats *old,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_queue *qstats)
{
	_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
		       new->tx_pkts - old->tx_pkts);
	qstats->qlen += new->backlog_pkts - old->backlog_pkts;
	qstats->backlog += new->backlog_bytes - old->backlog_bytes;
	qstats->overlimits += new->overlimits - old->overlimits;
	qstats->drops += new->drops - old->drops;
}

static void
nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
			    struct nfp_alink_xstats *old,
			    struct red_stats *stats)
{
	stats->forced_mark += new->ecn_marked - old->ecn_marked;
	stats->pdrop += new->pdrop - old->pdrop;
}

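/* Device counters are free-running, while TC expects per-dump increments.
 * Each dump therefore reports the delta against the previously reported
 * snapshot (prev_stats/prev_xstats) and then advances that snapshot.
 */
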
static int
nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
		   struct tc_gred_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc;
	unsigned int i;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;
	/* If the qdisc offload has stopped we may need to adjust the backlog
	 * counters back, so carry on even if the qdisc is not currently
	 * offloaded.
	 */

	for (i = 0; i < qdisc->red.num_bands; i++) {
		if (!stats->xstats[i])
			continue;

		nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
					&qdisc->red.band[i].prev_stats,
					&stats->bstats[i], &stats->qstats[i]);
		qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;

		nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
					    &qdisc->red.band[i].prev_xstats,
					    stats->xstats[i]);
		qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
	}

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_gred_check_params(struct nfp_abm_link *alink,
			  struct tc_gred_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	struct nfp_abm *abm = alink->abm;
	unsigned int i;

	if (opt->set.grio_on || opt->set.wred_on) {
		nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.dp_def != alink->def_band) {
		nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
			 alink->def_band, opt->parent, opt->handle);
		return false;
	}
	if (opt->set.dp_cnt != abm->num_bands) {
		nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
			 abm->num_bands, opt->parent, opt->handle);
		return false;
	}

	for (i = 0; i < abm->num_bands; i++) {
		struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];

		if (!band->present)
			return false;
		if (!band->is_ecn) {
			nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->is_harddrop) {
			nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->min != band->max) {
			nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->min > S32_MAX) {
			nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
				 band->min, S32_MAX, opt->parent, opt->handle,
				 i);
			return false;
		}
	}

	return true;
}

static int
nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		     struct tc_gred_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	unsigned int i;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
				    opt->handle, 0, &qdisc);
	if (ret < 0)
		return ret;

	qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
	if (qdisc->params_ok) {
		qdisc->red.num_bands = opt->set.dp_cnt;
		for (i = 0; i < qdisc->red.num_bands; i++)
			qdisc->red.band[i].threshold = opt->set.tab[i].min;
	}

	if (qdisc->use_cnt)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}

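/* The device applies a single ECN-marking threshold per band rather than
 * a full RED probability curve, hence the min == max requirement above;
 * that shared threshold is what gets programmed via
 * nfp_abm_ctrl_set_q_lvl().
 */
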
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
			  struct tc_gred_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_GRED_REPLACE:
		return nfp_abm_gred_replace(netdev, alink, opt);
	case TC_GRED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_GRED_STATS:
		return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, opt->handle);
	if (!qdisc || !qdisc->offloaded)
		return -EOPNOTSUPP;

	nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
				    &qdisc->red.band[0].prev_xstats,
				    opt->xstats);
	qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
	return 0;
}

static int
nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
		  struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;
	/* If the qdisc offload has stopped we may need to adjust the backlog
	 * counters back, so carry on even if the qdisc is not currently
	 * offloaded.
	 */

	nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
				&qdisc->red.band[0].prev_stats,
				stats->bstats, stats->qstats);
	qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;

	if (!opt->set.is_ecn) {
		nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.is_harddrop) {
		nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min != opt->set.max) {
		nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min > NFP_ABM_LVL_INFINITY) {
		nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
			 opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
			 opt->handle);
		return false;
	}

	return true;
}

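/* For illustration, a setup along these lines (numbers arbitrary; exact
 * iproute2 options may vary by version) satisfies the checks above:
 *
 *   tc qdisc replace dev $DEV root handle 1: mq
 *   tc qdisc replace dev $DEV parent 1:1 handle 10: \
 *           red limit 150000 min 50000 max 50000 avpkt 1500 burst 34 ecn
 *
 * min == max and the ecn flag are required; harddrop must not be set.
 */
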
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
				    opt->handle, 1, &qdisc);
	if (ret < 0)
		return ret;

	/* If limit != 0 the child gets reset */
	if (opt->set.limit) {
		if (nfp_abm_qdisc_child_valid(qdisc, 0))
			qdisc->children[0]->use_cnt--;
		qdisc->children[0] = NULL;
	} else {
		/* A qdisc that was just allocated without a limit will use
		 * noop_qdisc, i.e. a black hole.
		 */
		if (!ret)
			qdisc->children[0] = NFP_QDISC_UNTRACKED;
	}

	qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
	if (qdisc->params_ok) {
		qdisc->red.num_bands = 1;
		qdisc->red.band[0].threshold = opt->set.min;
	}

	if (qdisc->use_cnt == 1)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}

int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_RED_REPLACE:
		return nfp_abm_red_replace(netdev, alink, opt);
	case TC_RED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_RED_STATS:
		return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
	case TC_RED_XSTATS:
		return nfp_abm_red_xstats(alink, opt);
	case TC_RED_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->child_handle, 0);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
		  struct tc_mq_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
				    TC_H_ROOT, opt->handle, alink->total_queues,
				    &qdisc);
	if (ret < 0)
		return ret;

	qdisc->params_ok = true;
	qdisc->offloaded = true;
	nfp_abm_qdisc_offload_update(alink);
	return 0;
}

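/* MQ has no counters of its own; both mq.stats and mq.prev_stats are
 * rebuilt below from the leaf RED/GRED bands on every dump, so the delta
 * handed to TC stays consistent with what the leaves have reported.
 */
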
static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
		 struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc, *red;
	unsigned int i, j;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;

	nfp_abm_stats_update(alink);

	/* MQ stats are summed over the children in the core, so we need
	 * to add up the unreported child values.
	 */
	memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
	memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));

	for (i = 0; i < qdisc->num_children; i++) {
		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
			continue;
		red = qdisc->children[i];

		for (j = 0; j < red->red.num_bands; j++) {
			nfp_abm_stats_propagate(&qdisc->mq.stats,
						&red->red.band[j].stats);
			nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
						&red->red.band[j].prev_stats);
		}
	}

	nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
				stats->bstats, stats->qstats);

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
			struct tc_mq_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_MQ_CREATE:
		return nfp_abm_mq_create(netdev, alink, opt);
	case TC_MQ_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_MQ_STATS:
		return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
	case TC_MQ_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->graft_params.child_handle,
					   opt->graft_params.queue);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct tc_root_qopt_offload *opt)
{
	if (opt->ingress)
		return -EOPNOTSUPP;
	if (alink->root_qdisc)
		alink->root_qdisc->use_cnt--;
	alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
	if (alink->root_qdisc)
		alink->root_qdisc->use_cnt++;

	nfp_abm_qdisc_offload_update(alink);

	return 0;
}