// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2018 Netronome Systems, Inc. */

#include <linux/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "main.h"

static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
{
	return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED;
}

static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
{
	return qdisc->children[id] &&
	       qdisc->children[id] != NFP_QDISC_UNTRACKED;
}

static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
{
	return rtnl_dereference(*slot);
}

static void
nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
			struct nfp_alink_stats *child)
{
	parent->tx_pkts += child->tx_pkts;
	parent->tx_bytes += child->tx_bytes;
	parent->backlog_pkts += child->backlog_pkts;
	parent->backlog_bytes += child->backlog_bytes;
	parent->overlimits += child->overlimits;
	parent->drops += child->drops;
}

static void
nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
			 unsigned int queue)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	unsigned int i;
	int err;

	if (!qdisc->offloaded)
		return;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		err = nfp_abm_ctrl_read_q_stats(alink, i, queue,
						&qdisc->red.band[i].stats);
		if (err)
			nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
				i, queue, err);

		err = nfp_abm_ctrl_read_q_xstats(alink, i, queue,
						 &qdisc->red.band[i].xstats);
		if (err)
			nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
				i, queue, err);
	}
}

static void
nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	if (qdisc->type != NFP_QDISC_MQ)
		return;

	for (i = 0; i < alink->total_queues; i++)
		if (nfp_abm_qdisc_child_valid(qdisc, i))
			nfp_abm_stats_update_red(alink, qdisc->children[i], i);
}

static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
{
	alink->last_stats_update = time_now;
	if (alink->root_qdisc)
		nfp_abm_stats_update_mq(alink, alink->root_qdisc);
}

static void nfp_abm_stats_update(struct nfp_abm_link *alink)
{
	u64 now;

	/* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
	 * of all their leaves, so we would read the same stat multiple times
	 * for every dump.
	 */
	now = ktime_get();
	if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
		return;

	__nfp_abm_stats_update(alink, now);
}

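/* Drop the references a qdisc holds on the tracked children in the
 * [start, end) range and clear the child pointers.
 */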
static void
nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
			      unsigned int start, unsigned int end)
{
	unsigned int i;

	for (i = start; i < end; i++)
		if (nfp_abm_qdisc_child_valid(qdisc, i)) {
			qdisc->children[i]->use_cnt--;
			qdisc->children[i] = NULL;
		}
}

static void
nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	/* Don't complain when the qdisc is getting unlinked */
	if (qdisc->use_cnt)
		nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
			 qdisc->handle);

	if (!nfp_abm_qdisc_is_red(qdisc))
		return;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		qdisc->red.band[i].stats.backlog_pkts = 0;
		qdisc->red.band[i].stats.backlog_bytes = 0;
	}
}

static int
__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
		     unsigned int queue, struct nfp_alink_stats *prev_stats,
		     struct nfp_alink_xstats *prev_xstats)
{
	u64 backlog_pkts, backlog_bytes;
	int err;

	/* Don't touch the backlog; it can only be reset after it has
	 * been reported back to the tc qdisc stats.
	 */
	backlog_pkts = prev_stats->backlog_pkts;
	backlog_bytes = prev_stats->backlog_bytes;

	err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED stats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED xstats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	prev_stats->backlog_pkts = backlog_pkts;
	prev_stats->backlog_bytes = backlog_bytes;
	return 0;
}

static int
nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
		   unsigned int queue)
{
	unsigned int i;
	int err;

	for (i = 0; i < qdisc->red.num_bands; i++) {
		err = __nfp_abm_stats_init(alink, i, queue,
					   &qdisc->red.band[i].prev_stats,
					   &qdisc->red.band[i].prev_xstats);
		if (err)
			return err;
	}

	return 0;
}

static void
nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
			    unsigned int queue)
{
	bool good_red, good_gred;
	unsigned int i;

	good_red = qdisc->type == NFP_QDISC_RED &&
		   qdisc->params_ok &&
		   qdisc->use_cnt == 1 &&
		   !qdisc->children[0];
	good_gred = qdisc->type == NFP_QDISC_GRED &&
		    qdisc->params_ok &&
		    qdisc->use_cnt == 1;
	qdisc->offload_mark = good_red || good_gred;

	/* If we are starting offload, init prev_stats */
	if (qdisc->offload_mark && !qdisc->offloaded)
		if (nfp_abm_stats_init(alink, qdisc, queue))
			qdisc->offload_mark = false;

	if (!qdisc->offload_mark)
		return;

	for (i = 0; i < alink->abm->num_bands; i++)
		nfp_abm_ctrl_set_q_lvl(alink, i, queue,
				       qdisc->red.band[i].threshold);
}

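/* MQ itself is always considered offloadable; the real work is
 * re-evaluating the RED/GRED offload eligibility of every valid child,
 * one per device queue.
 */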
static void
nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
{
	unsigned int i;

	qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
	if (!qdisc->offload_mark)
		return;

	for (i = 0; i < alink->total_queues; i++) {
		struct nfp_qdisc *child = qdisc->children[i];

		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		nfp_abm_offload_compile_red(alink, child, i);
	}
}

void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
{
	struct nfp_abm *abm = alink->abm;
	struct radix_tree_iter iter;
	struct nfp_qdisc *qdisc;
	void __rcu **slot;
	size_t i;

	/* Mark all thresholds as unconfigured */
	for (i = 0; i < abm->num_bands; i++)
		__bitmap_set(abm->threshold_undef,
			     i * NFP_NET_MAX_RX_RINGS + alink->queue_base,
			     alink->total_queues);

	/* Clear offload marks */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		qdisc->offload_mark = false;
	}

	if (alink->root_qdisc)
		nfp_abm_offload_compile_mq(alink, alink->root_qdisc);

	/* Refresh offload status */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		if (!qdisc->offload_mark && qdisc->offloaded)
			nfp_abm_qdisc_offload_stop(alink, qdisc);
		qdisc->offloaded = qdisc->offload_mark;
	}

	/* Reset the unconfigured thresholds */
	for (i = 0; i < abm->num_thresholds; i++)
		if (test_bit(i, abm->threshold_undef))
			__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);

	__nfp_abm_stats_update(alink, ktime_get());
}

static void
nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct nfp_qdisc *qdisc)
{
	struct radix_tree_iter iter;
	unsigned int mq_refs = 0;
	void __rcu **slot;

	if (!qdisc->use_cnt)
		return;
	/* MQ doesn't notify well on destruction, so we need special handling
	 * of MQ's children.
	 */
	if (qdisc->type == NFP_QDISC_MQ &&
	    qdisc == alink->root_qdisc &&
	    netdev->reg_state == NETREG_UNREGISTERING)
		return;

	/* Count refs held by MQ instances and clear pointers */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
		unsigned int i;

		if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
			continue;
		for (i = 0; i < mq->num_children; i++)
			if (mq->children[i] == qdisc) {
				mq->children[i] = NULL;
				mq_refs++;
			}
	}

	WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
	     qdisc->use_cnt, mq_refs);
}

static void
nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
		   struct nfp_qdisc *qdisc)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);

	if (!qdisc)
		return;
	nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
	WARN_ON(radix_tree_delete(&alink->qdiscs,
				  TC_H_MAJ(qdisc->handle)) != qdisc);

	kfree(qdisc->children);
	kfree(qdisc);

	port->tc_offload_cnt--;
}

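/* Allocate a software mirror of a qdisc and insert it into the per-link
 * radix tree, keyed by the major part of its TC handle.
 */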
static struct nfp_qdisc *
nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
		    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
		    unsigned int children)
{
	struct nfp_port *port = nfp_port_from_netdev(netdev);
	struct nfp_qdisc *qdisc;
	int err;

	qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
	if (!qdisc)
		return NULL;

	if (children) {
		qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
		if (!qdisc->children)
			goto err_free_qdisc;
	}

	qdisc->netdev = netdev;
	qdisc->type = type;
	qdisc->parent_handle = parent_handle;
	qdisc->handle = handle;
	qdisc->num_children = children;

	err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"Qdisc insertion into radix tree failed: %d\n", err);
		goto err_free_child_tbl;
	}

	port->tc_offload_cnt++;
	return qdisc;

err_free_child_tbl:
	kfree(qdisc->children);
err_free_qdisc:
	kfree(qdisc);
	return NULL;
}

static struct nfp_qdisc *
nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
{
	return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}

static int
nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
		      unsigned int children, struct nfp_qdisc **qdisc)
{
	*qdisc = nfp_abm_qdisc_find(alink, handle);
	if (*qdisc) {
		if (WARN_ON((*qdisc)->type != type))
			return -EINVAL;
		return 1;
	}

	*qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
				     children);
	return *qdisc ? 0 : -ENOMEM;
}

static void
nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
		      u32 handle)
{
	struct nfp_qdisc *qdisc;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return;

	/* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
	if (alink->root_qdisc == qdisc)
		qdisc->use_cnt--;

	nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
	nfp_abm_qdisc_free(netdev, alink, qdisc);

	if (alink->root_qdisc == qdisc) {
		alink->root_qdisc = NULL;
		/* Only a root change matters; other changes are acted upon via
		 * the graft notification.
		 */
		nfp_abm_qdisc_offload_update(alink);
	}
}

static int
nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
		    unsigned int id)
{
	struct nfp_qdisc *parent, *child;

	parent = nfp_abm_qdisc_find(alink, handle);
	if (!parent)
		return 0;

	if (WARN(id >= parent->num_children,
		 "graft child out of bounds %d >= %d\n",
		 id, parent->num_children))
		return -EINVAL;

	nfp_abm_qdisc_unlink_children(parent, id, id + 1);

	child = nfp_abm_qdisc_find(alink, child_handle);
	if (child)
		child->use_cnt++;
	else
		child = NFP_QDISC_UNTRACKED;
	parent->children[id] = child;

	nfp_abm_qdisc_offload_update(alink);

	return 0;
}

static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
			struct nfp_alink_stats *old,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_queue *qstats)
{
	_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
		       new->tx_pkts - old->tx_pkts);
	qstats->qlen += new->backlog_pkts - old->backlog_pkts;
	qstats->backlog += new->backlog_bytes - old->backlog_bytes;
	qstats->overlimits += new->overlimits - old->overlimits;
	qstats->drops += new->drops - old->drops;
}

static void
nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
			    struct nfp_alink_xstats *old,
			    struct red_stats *stats)
{
	stats->forced_mark += new->ecn_marked - old->ecn_marked;
	stats->pdrop += new->pdrop - old->pdrop;
}

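/* Stats are reported to TC as deltas against the values reported last
 * time, and the freshly read values then become the new baseline
 * (prev_stats/prev_xstats).
 */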
static int
nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle,
		   struct tc_gred_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc;
	unsigned int i;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;
	/* If the qdisc offload has stopped we may need to adjust the backlog
	 * counters back, so carry on even if the qdisc is not currently
	 * offloaded.
	 */

	for (i = 0; i < qdisc->red.num_bands; i++) {
		if (!stats->xstats[i])
			continue;

		nfp_abm_stats_calculate(&qdisc->red.band[i].stats,
					&qdisc->red.band[i].prev_stats,
					&stats->bstats[i], &stats->qstats[i]);
		qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats;

		nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats,
					    &qdisc->red.band[i].prev_xstats,
					    stats->xstats[i]);
		qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats;
	}

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_gred_check_params(struct nfp_abm_link *alink,
			  struct tc_gred_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;
	struct nfp_abm *abm = alink->abm;
	unsigned int i;

	if (opt->set.grio_on || opt->set.wred_on) {
		nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.dp_def != alink->def_band) {
		nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n",
			 alink->def_band, opt->parent, opt->handle);
		return false;
	}
	if (opt->set.dp_cnt != abm->num_bands) {
		nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n",
			 abm->num_bands, opt->parent, opt->handle);
		return false;
	}

	for (i = 0; i < abm->num_bands; i++) {
		struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i];

		if (!band->present)
			return false;
		if (!band->is_ecn) {
			nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->is_harddrop) {
			nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->min != band->max) {
			nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n",
				 opt->parent, opt->handle, i);
			return false;
		}
		if (band->min > S32_MAX) {
			nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n",
				 band->min, S32_MAX, opt->parent, opt->handle,
				 i);
			return false;
		}
	}

	return true;
}

static int
nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		     struct tc_gred_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	unsigned int i;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent,
				    opt->handle, 0, &qdisc);
	if (ret < 0)
		return ret;

	qdisc->params_ok = nfp_abm_gred_check_params(alink, opt);
	if (qdisc->params_ok) {
		qdisc->red.num_bands = opt->set.dp_cnt;
		for (i = 0; i < qdisc->red.num_bands; i++)
			qdisc->red.band[i].threshold = opt->set.tab[i].min;
	}

	if (qdisc->use_cnt)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}

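/* Entry point for GRED offload requests from TC.  Only configurations
 * which pass nfp_abm_gred_check_params() are offloaded: all bands
 * present and ECN-marking, no GRIO/WRED/harddrop, min == max per band.
 * An illustrative (not authoritative) userspace sketch, with the usual
 * per-virtual-queue threshold setup following; see tc-gred(8):
 *   tc qdisc replace dev <netdev> parent <maj>:<q> handle <h>: \
 *           gred setup vqs <num_bands> default <def_band>
 */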
int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink,
			  struct tc_gred_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_GRED_REPLACE:
		return nfp_abm_gred_replace(netdev, alink, opt);
	case TC_GRED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_GRED_STATS:
		return nfp_abm_gred_stats(alink, opt->handle, &opt->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, opt->handle);
	if (!qdisc || !qdisc->offloaded)
		return -EOPNOTSUPP;

	nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats,
				    &qdisc->red.band[0].prev_xstats,
				    opt->xstats);
	qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats;
	return 0;
}

static int
nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
		  struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc;

	nfp_abm_stats_update(alink);

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;
	/* If the qdisc offload has stopped we may need to adjust the backlog
	 * counters back, so carry on even if the qdisc is not currently
	 * offloaded.
	 */

	nfp_abm_stats_calculate(&qdisc->red.band[0].stats,
				&qdisc->red.band[0].prev_stats,
				stats->bstats, stats->qstats);
	qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats;

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;

	if (!opt->set.is_ecn) {
		nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.is_harddrop) {
		nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min != opt->set.max) {
		nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min > NFP_ABM_LVL_INFINITY) {
		nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
			 opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
			 opt->handle);
		return false;
	}

	return true;
}

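/* An illustrative RED configuration the handler below can offload;
 * exact syntax per tc-red(8), and note the driver requires equal
 * min/max thresholds and the ecn flag:
 *   tc qdisc replace dev <netdev> parent <maj>:<q> handle <h>: \
 *           red limit <bytes> min <thrs> max <thrs> avpkt 1000 \
 *           burst <pkts> ecn
 */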
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
				    opt->handle, 1, &qdisc);
	if (ret < 0)
		return ret;

	/* If limit != 0 the child gets reset */
	if (opt->set.limit) {
		if (nfp_abm_qdisc_child_valid(qdisc, 0))
			qdisc->children[0]->use_cnt--;
		qdisc->children[0] = NULL;
	} else {
		/* A qdisc which was just allocated without a limit will use
		 * noop_qdisc, i.e. a black hole.
		 */
		if (!ret)
			qdisc->children[0] = NFP_QDISC_UNTRACKED;
	}

	qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
	if (qdisc->params_ok) {
		qdisc->red.num_bands = 1;
		qdisc->red.band[0].threshold = opt->set.min;
	}

	if (qdisc->use_cnt == 1)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}

int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_RED_REPLACE:
		return nfp_abm_red_replace(netdev, alink, opt);
	case TC_RED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_RED_STATS:
		return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
	case TC_RED_XSTATS:
		return nfp_abm_red_xstats(alink, opt);
	case TC_RED_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->child_handle, 0);
	default:
		return -EOPNOTSUPP;
	}
}

static int
nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
		  struct tc_mq_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
				    TC_H_ROOT, opt->handle, alink->total_queues,
				    &qdisc);
	if (ret < 0)
		return ret;

	qdisc->params_ok = true;
	qdisc->offloaded = true;
	nfp_abm_qdisc_offload_update(alink);
	return 0;
}

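/* TC sums MQ stats over the children in the core; fold the per-band
 * stats of all tracked RED/GRED children into the MQ totals before
 * reporting the delta.
 */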
static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
		 struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc, *red;
	unsigned int i, j;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;

	nfp_abm_stats_update(alink);

	/* MQ stats are summed over the children in the core, so we need
	 * to add up the unreported child values.
	 */
	memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
	memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));

	for (i = 0; i < qdisc->num_children; i++) {
		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
			continue;
		red = qdisc->children[i];

		for (j = 0; j < red->red.num_bands; j++) {
			nfp_abm_stats_propagate(&qdisc->mq.stats,
						&red->red.band[j].stats);
			nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
						&red->red.band[j].prev_stats);
		}
	}

	nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
				stats->bstats, stats->qstats);

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}

int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
			struct tc_mq_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_MQ_CREATE:
		return nfp_abm_mq_create(netdev, alink, opt);
	case TC_MQ_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_MQ_STATS:
		return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
	case TC_MQ_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->graft_params.child_handle,
					   opt->graft_params.queue);
	default:
		return -EOPNOTSUPP;
	}
}

int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct tc_root_qopt_offload *opt)
{
	if (opt->ingress)
		return -EOPNOTSUPP;
	if (alink->root_qdisc)
		alink->root_qdisc->use_cnt--;
	alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
	if (alink->root_qdisc)
		alink->root_qdisc->use_cnt++;

	nfp_abm_qdisc_offload_update(alink);

	return 0;
}