1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2018 Netronome Systems, Inc. */
3 
4 #include <linux/rtnetlink.h>
5 #include <net/pkt_cls.h>
6 #include <net/pkt_sched.h>
7 #include <net/red.h>
8 
9 #include "../nfpcore/nfp_cpp.h"
10 #include "../nfp_app.h"
11 #include "../nfp_main.h"
12 #include "../nfp_net.h"
13 #include "../nfp_port.h"
14 #include "main.h"
15 
16 static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc)
17 {
18 	return qdisc->type == NFP_QDISC_RED;
19 }
20 
21 static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id)
22 {
23 	return qdisc->children[id] &&
24 	       qdisc->children[id] != NFP_QDISC_UNTRACKED;
25 }
26 
/* Dereference a radix tree slot.  All updates to the qdisc tree happen
 * under the RTNL lock, hence rtnl_dereference() rather than a plain
 * rcu_dereference().
 */
static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot)
{
	return rtnl_dereference(*slot);
}
31 
32 static void
33 nfp_abm_stats_propagate(struct nfp_alink_stats *parent,
34 			struct nfp_alink_stats *child)
35 {
36 	parent->tx_pkts		+= child->tx_pkts;
37 	parent->tx_bytes	+= child->tx_bytes;
38 	parent->backlog_pkts	+= child->backlog_pkts;
39 	parent->backlog_bytes	+= child->backlog_bytes;
40 	parent->overlimits	+= child->overlimits;
41 	parent->drops		+= child->drops;
42 }
43 
44 static void
45 nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
46 			 unsigned int queue)
47 {
48 	struct nfp_cpp *cpp = alink->abm->app->cpp;
49 	int err;
50 
51 	if (!qdisc->offloaded)
52 		return;
53 
54 	err = nfp_abm_ctrl_read_q_stats(alink, 0, queue, &qdisc->red.stats);
55 	if (err)
56 		nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n",
57 			0, queue, err);
58 
59 	err = nfp_abm_ctrl_read_q_xstats(alink, 0, queue, &qdisc->red.xstats);
60 	if (err)
61 		nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n",
62 			0, queue, err);
63 }
64 
65 static void
66 nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
67 {
68 	unsigned int i;
69 
70 	if (qdisc->type != NFP_QDISC_MQ)
71 		return;
72 
73 	for (i = 0; i < alink->total_queues; i++)
74 		if (nfp_abm_qdisc_child_valid(qdisc, i))
75 			nfp_abm_stats_update_red(alink, qdisc->children[i], i);
76 }
77 
78 static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now)
79 {
80 	alink->last_stats_update = time_now;
81 	if (alink->root_qdisc)
82 		nfp_abm_stats_update_mq(alink, alink->root_qdisc);
83 }
84 
85 static void nfp_abm_stats_update(struct nfp_abm_link *alink)
86 {
87 	u64 now;
88 
89 	/* Limit the frequency of updates - stats of non-leaf qdiscs are a sum
90 	 * of all their leafs, so we would read the same stat multiple times
91 	 * for every dump.
92 	 */
93 	now = ktime_get();
94 	if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL)
95 		return;
96 
97 	__nfp_abm_stats_update(alink, now);
98 }
99 
100 static void
101 nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc,
102 			      unsigned int start, unsigned int end)
103 {
104 	unsigned int i;
105 
106 	for (i = start; i < end; i++)
107 		if (nfp_abm_qdisc_child_valid(qdisc, i)) {
108 			qdisc->children[i]->use_cnt--;
109 			qdisc->children[i] = NULL;
110 		}
111 }
112 
113 static void
114 nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
115 {
116 	/* Don't complain when qdisc is getting unlinked */
117 	if (qdisc->use_cnt)
118 		nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n",
119 			 qdisc->handle);
120 
121 	if (!nfp_abm_qdisc_is_red(qdisc))
122 		return;
123 
124 	qdisc->red.stats.backlog_pkts = 0;
125 	qdisc->red.stats.backlog_bytes = 0;
126 }
127 
/* Snapshot the current HW counters for (@band, @queue) into @prev_stats
 * and @prev_xstats so subsequent dumps report deltas from this point.
 * Returns 0 on success or a negative error from the control reads.
 */
static int
__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band,
		     unsigned int queue, struct nfp_alink_stats *prev_stats,
		     struct nfp_alink_xstats *prev_xstats)
{
	u64 backlog_pkts, backlog_bytes;
	int err;

	/* Don't touch the backlog, backlog can only be reset after it has
	 * been reported back to the tc qdisc stats.
	 */
	backlog_pkts = prev_stats->backlog_pkts;
	backlog_bytes = prev_stats->backlog_bytes;

	err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED stats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats);
	if (err) {
		nfp_err(alink->abm->app->cpp,
			"RED xstats init (%d, %d) failed with error %d\n",
			band, queue, err);
		return err;
	}

	/* Restore the saved backlog values clobbered by the read above. */
	prev_stats->backlog_pkts = backlog_pkts;
	prev_stats->backlog_bytes = backlog_bytes;
	return 0;
}
162 
163 static int
164 nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
165 		   unsigned int queue)
166 {
167 	return __nfp_abm_stats_init(alink, 0, queue,
168 				    &qdisc->red.prev_stats,
169 				    &qdisc->red.prev_xstats);
170 }
171 
172 static void
173 nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc,
174 			    unsigned int queue)
175 {
176 	qdisc->offload_mark = qdisc->type == NFP_QDISC_RED &&
177 			      qdisc->params_ok &&
178 			      qdisc->use_cnt == 1 &&
179 			      !qdisc->children[0];
180 
181 	/* If we are starting offload init prev_stats */
182 	if (qdisc->offload_mark && !qdisc->offloaded)
183 		if (nfp_abm_stats_init(alink, qdisc, queue))
184 			qdisc->offload_mark = false;
185 
186 	if (!qdisc->offload_mark)
187 		return;
188 
189 	nfp_abm_ctrl_set_q_lvl(alink, 0, queue, qdisc->red.threshold);
190 }
191 
192 static void
193 nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc)
194 {
195 	unsigned int i;
196 
197 	qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ;
198 	if (!qdisc->offload_mark)
199 		return;
200 
201 	for (i = 0; i < alink->total_queues; i++) {
202 		struct nfp_qdisc *child = qdisc->children[i];
203 
204 		if (!nfp_abm_qdisc_child_valid(qdisc, i))
205 			continue;
206 
207 		nfp_abm_offload_compile_red(alink, child, i);
208 	}
209 }
210 
/* Recompute the offload state of the entire qdisc hierarchy of @alink.
 * Mark-and-sweep: clear all offload marks, re-mark what is offloadable
 * starting from the root, stop offload of anything no longer marked,
 * then reset thresholds of queues that ended up unconfigured.
 */
void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink)
{
	struct nfp_abm *abm = alink->abm;
	struct radix_tree_iter iter;
	struct nfp_qdisc *qdisc;
	void __rcu **slot;
	size_t i;

	/* Mark all thresholds as unconfigured */
	__bitmap_set(abm->threshold_undef,
		     alink->queue_base, alink->total_queues);

	/* Clear offload marks */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		qdisc->offload_mark = false;
	}

	/* Re-mark; compiling RED children clears their threshold_undef bit */
	if (alink->root_qdisc)
		nfp_abm_offload_compile_mq(alink, alink->root_qdisc);

	/* Refresh offload status */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		qdisc = nfp_abm_qdisc_tree_deref_slot(slot);
		if (!qdisc->offload_mark && qdisc->offloaded)
			nfp_abm_qdisc_offload_stop(alink, qdisc);
		qdisc->offloaded = qdisc->offload_mark;
	}

	/* Reset the unconfigured thresholds */
	for (i = 0; i < abm->num_thresholds; i++)
		if (test_bit(i, abm->threshold_undef))
			__nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY);

	__nfp_abm_stats_update(alink, ktime_get());
}
247 
/* Before freeing @qdisc, remove any references MQ parents on @netdev
 * still hold to it, and sanity-check that those were the only remaining
 * references.
 */
static void
nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink,
		       struct nfp_qdisc *qdisc)
{
	struct radix_tree_iter iter;
	unsigned int mq_refs = 0;
	void __rcu **slot;

	if (!qdisc->use_cnt)
		return;
	/* MQ doesn't notify well on destruction, we need special handling of
	 * MQ's children.
	 */
	if (qdisc->type == NFP_QDISC_MQ &&
	    qdisc == alink->root_qdisc &&
	    netdev->reg_state == NETREG_UNREGISTERING)
		return;

	/* Count refs held by MQ instances and clear pointers */
	radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) {
		struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot);
		unsigned int i;

		if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev)
			continue;
		for (i = 0; i < mq->num_children; i++)
			if (mq->children[i] == qdisc) {
				mq->children[i] = NULL;
				mq_refs++;
			}
	}

	/* Any imbalance here means a use_cnt accounting bug elsewhere. */
	WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n",
	     qdisc->use_cnt, mq_refs);
}
283 
284 static void
285 nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink,
286 		   struct nfp_qdisc *qdisc)
287 {
288 	struct nfp_port *port = nfp_port_from_netdev(netdev);
289 
290 	if (!qdisc)
291 		return;
292 	nfp_abm_qdisc_clear_mq(netdev, alink, qdisc);
293 	WARN_ON(radix_tree_delete(&alink->qdiscs,
294 				  TC_H_MAJ(qdisc->handle)) != qdisc);
295 
296 	kfree(qdisc->children);
297 	kfree(qdisc);
298 
299 	port->tc_offload_cnt--;
300 }
301 
302 static struct nfp_qdisc *
303 nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink,
304 		    enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
305 		    unsigned int children)
306 {
307 	struct nfp_port *port = nfp_port_from_netdev(netdev);
308 	struct nfp_qdisc *qdisc;
309 	int err;
310 
311 	qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL);
312 	if (!qdisc)
313 		return NULL;
314 
315 	qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL);
316 	if (!qdisc->children)
317 		goto err_free_qdisc;
318 
319 	qdisc->netdev = netdev;
320 	qdisc->type = type;
321 	qdisc->parent_handle = parent_handle;
322 	qdisc->handle = handle;
323 	qdisc->num_children = children;
324 
325 	err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc);
326 	if (err) {
327 		nfp_err(alink->abm->app->cpp,
328 			"Qdisc insertion into radix tree failed: %d\n", err);
329 		goto err_free_child_tbl;
330 	}
331 
332 	port->tc_offload_cnt++;
333 	return qdisc;
334 
335 err_free_child_tbl:
336 	kfree(qdisc->children);
337 err_free_qdisc:
338 	kfree(qdisc);
339 	return NULL;
340 }
341 
/* Look up a tracked qdisc; entries are keyed by the major part of the
 * TC handle.
 */
static struct nfp_qdisc *
nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle)
{
	return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle));
}
347 
348 static int
349 nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink,
350 		      enum nfp_qdisc_type type, u32 parent_handle, u32 handle,
351 		      unsigned int children, struct nfp_qdisc **qdisc)
352 {
353 	*qdisc = nfp_abm_qdisc_find(alink, handle);
354 	if (*qdisc) {
355 		if (WARN_ON((*qdisc)->type != type))
356 			return -EINVAL;
357 		return 1;
358 	}
359 
360 	*qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle,
361 				     children);
362 	return *qdisc ? 0 : -ENOMEM;
363 }
364 
/* Tear down the qdisc identified by @handle, dropping its children's
 * references and, if it was the root, recomputing offload state.
 */
static void
nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink,
		      u32 handle)
{
	struct nfp_qdisc *qdisc;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return;

	/* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */
	if (alink->root_qdisc == qdisc)
		qdisc->use_cnt--;

	nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children);
	nfp_abm_qdisc_free(netdev, alink, qdisc);

	/* NOTE(review): qdisc is freed above - the comparison below only
	 * inspects the pointer value, never dereferences it.
	 */
	if (alink->root_qdisc == qdisc) {
		alink->root_qdisc = NULL;
		/* Only root change matters, other changes are acted upon on
		 * the graft notification.
		 */
		nfp_abm_qdisc_offload_update(alink);
	}
}
390 
391 static int
392 nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
393 		    unsigned int id)
394 {
395 	struct nfp_qdisc *parent, *child;
396 
397 	parent = nfp_abm_qdisc_find(alink, handle);
398 	if (!parent)
399 		return 0;
400 
401 	if (WARN(id >= parent->num_children,
402 		 "graft child out of bound %d >= %d\n",
403 		 id, parent->num_children))
404 		return -EINVAL;
405 
406 	nfp_abm_qdisc_unlink_children(parent, id, id + 1);
407 
408 	child = nfp_abm_qdisc_find(alink, child_handle);
409 	if (child)
410 		child->use_cnt++;
411 	else
412 		child = NFP_QDISC_UNTRACKED;
413 	parent->children[id] = child;
414 
415 	nfp_abm_qdisc_offload_update(alink);
416 
417 	return 0;
418 }
419 
420 static void
421 nfp_abm_stats_calculate(struct nfp_alink_stats *new,
422 			struct nfp_alink_stats *old,
423 			struct gnet_stats_basic_packed *bstats,
424 			struct gnet_stats_queue *qstats)
425 {
426 	_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
427 		       new->tx_pkts - old->tx_pkts);
428 	qstats->qlen += new->backlog_pkts - old->backlog_pkts;
429 	qstats->backlog += new->backlog_bytes - old->backlog_bytes;
430 	qstats->overlimits += new->overlimits - old->overlimits;
431 	qstats->drops += new->drops - old->drops;
432 }
433 
/* Fold RED xstat deltas (@new - @old) into the tc red_stats structure. */
static void
nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new,
			    struct nfp_alink_xstats *old,
			    struct red_stats *stats)
{
	stats->forced_mark += new->ecn_marked - old->ecn_marked;
	stats->pdrop += new->pdrop - old->pdrop;
}
442 
443 static int
444 nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt)
445 {
446 	struct nfp_qdisc *qdisc;
447 
448 	nfp_abm_stats_update(alink);
449 
450 	qdisc = nfp_abm_qdisc_find(alink, opt->handle);
451 	if (!qdisc || !qdisc->offloaded)
452 		return -EOPNOTSUPP;
453 
454 	nfp_abm_stats_red_calculate(&qdisc->red.xstats,
455 				    &qdisc->red.prev_xstats,
456 				    opt->xstats);
457 	qdisc->red.prev_xstats = qdisc->red.xstats;
458 	return 0;
459 }
460 
461 static int
462 nfp_abm_red_stats(struct nfp_abm_link *alink, u32 handle,
463 		  struct tc_qopt_offload_stats *stats)
464 {
465 	struct nfp_qdisc *qdisc;
466 
467 	nfp_abm_stats_update(alink);
468 
469 	qdisc = nfp_abm_qdisc_find(alink, handle);
470 	if (!qdisc)
471 		return -EOPNOTSUPP;
472 	/* If the qdisc offload has stopped we may need to adjust the backlog
473 	 * counters back so carry on even if qdisc is not currently offloaded.
474 	 */
475 
476 	nfp_abm_stats_calculate(&qdisc->red.stats,
477 				&qdisc->red.prev_stats,
478 				stats->bstats, stats->qstats);
479 	qdisc->red.prev_stats = qdisc->red.stats;
480 
481 	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
482 }
483 
/* Validate that a RED configuration is expressible in HW.  The device
 * supports ECN marking only (no drop, no harddrop) with a single
 * threshold (min == max) no larger than NFP_ABM_LVL_INFINITY.
 * Emits a warning naming the first violated constraint.
 */
static bool
nfp_abm_red_check_params(struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	struct nfp_cpp *cpp = alink->abm->app->cpp;

	if (!opt->set.is_ecn) {
		nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.is_harddrop) {
		nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min != opt->set.max) {
		nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n",
			 opt->parent, opt->handle);
		return false;
	}
	if (opt->set.min > NFP_ABM_LVL_INFINITY) {
		nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n",
			 opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent,
			 opt->handle);
		return false;
	}

	return true;
}
514 
/* Create or update a RED qdisc in response to TC_RED_REPLACE.
 * Returns 0 on success or a negative error from qdisc lookup/allocation.
 */
static int
nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink,
		    struct tc_red_qopt_offload *opt)
{
	struct nfp_qdisc *qdisc;
	int ret;

	/* ret == 1 means an existing qdisc was reused, 0 a fresh one */
	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent,
				    opt->handle, 1, &qdisc);
	if (ret < 0)
		return ret;

	/* If limit != 0 child gets reset */
	if (opt->set.limit) {
		if (nfp_abm_qdisc_child_valid(qdisc, 0))
			qdisc->children[0]->use_cnt--;
		qdisc->children[0] = NULL;
	} else {
		/* Qdisc was just allocated without a limit will use noop_qdisc,
		 * i.e. a black hole.
		 */
		if (!ret)
			qdisc->children[0] = NFP_QDISC_UNTRACKED;
	}

	qdisc->params_ok = nfp_abm_red_check_params(alink, opt);
	if (qdisc->params_ok)
		qdisc->red.threshold = opt->set.min;

	/* Only recompute offload when this qdisc's sole user is its parent */
	if (qdisc->use_cnt == 1)
		nfp_abm_qdisc_offload_update(alink);

	return 0;
}
549 
/* Dispatch a tc RED qdisc offload command to the matching handler.
 * Returns 0 on success, -EOPNOTSUPP for unsupported commands or a
 * negative error from the handler.
 */
int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink,
			 struct tc_red_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_RED_REPLACE:
		return nfp_abm_red_replace(netdev, alink, opt);
	case TC_RED_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_RED_STATS:
		return nfp_abm_red_stats(alink, opt->handle, &opt->stats);
	case TC_RED_XSTATS:
		return nfp_abm_red_xstats(alink, opt);
	case TC_RED_GRAFT:
		/* RED has exactly one child slot, always index 0 */
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->child_handle, 0);
	default:
		return -EOPNOTSUPP;
	}
}
570 
571 static int
572 nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink,
573 		  struct tc_mq_qopt_offload *opt)
574 {
575 	struct nfp_qdisc *qdisc;
576 	int ret;
577 
578 	ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ,
579 				    TC_H_ROOT, opt->handle, alink->total_queues,
580 				    &qdisc);
581 	if (ret < 0)
582 		return ret;
583 
584 	qdisc->params_ok = true;
585 	qdisc->offloaded = true;
586 	nfp_abm_qdisc_offload_update(alink);
587 	return 0;
588 }
589 
/* Report MQ stats to tc.  Rebuilds the MQ totals by summing the absolute
 * and previously-reported counters of all tracked RED children, then
 * hands tc the delta.
 */
static int
nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle,
		 struct tc_qopt_offload_stats *stats)
{
	struct nfp_qdisc *qdisc, *red;
	unsigned int i;

	qdisc = nfp_abm_qdisc_find(alink, handle);
	if (!qdisc)
		return -EOPNOTSUPP;

	nfp_abm_stats_update(alink);

	/* MQ stats are summed over the children in the core, so we need
	 * to add up the unreported child values.
	 */
	memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats));
	memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats));

	for (i = 0; i < qdisc->num_children; i++) {
		if (!nfp_abm_qdisc_child_valid(qdisc, i))
			continue;

		if (!nfp_abm_qdisc_is_red(qdisc->children[i]))
			continue;
		red = qdisc->children[i];

		nfp_abm_stats_propagate(&qdisc->mq.stats,
					&red->red.stats);
		nfp_abm_stats_propagate(&qdisc->mq.prev_stats,
					&red->red.prev_stats);
	}

	nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats,
				stats->bstats, stats->qstats);

	return qdisc->offloaded ? 0 : -EOPNOTSUPP;
}
628 
/* Dispatch a tc MQ qdisc offload command to the matching handler.
 * Returns 0 on success, -EOPNOTSUPP for unsupported commands or a
 * negative error from the handler.
 */
int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink,
			struct tc_mq_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_MQ_CREATE:
		return nfp_abm_mq_create(netdev, alink, opt);
	case TC_MQ_DESTROY:
		nfp_abm_qdisc_destroy(netdev, alink, opt->handle);
		return 0;
	case TC_MQ_STATS:
		return nfp_abm_mq_stats(alink, opt->handle, &opt->stats);
	case TC_MQ_GRAFT:
		return nfp_abm_qdisc_graft(alink, opt->handle,
					   opt->graft_params.child_handle,
					   opt->graft_params.queue);
	default:
		return -EOPNOTSUPP;
	}
}
648 
649 int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink,
650 		       struct tc_root_qopt_offload *opt)
651 {
652 	if (opt->ingress)
653 		return -EOPNOTSUPP;
654 	if (alink->root_qdisc)
655 		alink->root_qdisc->use_cnt--;
656 	alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle);
657 	if (alink->root_qdisc)
658 		alink->root_qdisc->use_cnt++;
659 
660 	nfp_abm_qdisc_offload_update(alink);
661 
662 	return 0;
663 }
664