1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */
3 
4 #include <linux/kernel.h>
5 #include <linux/errno.h>
6 #include <linux/netdevice.h>
7 #include <net/pkt_cls.h>
8 #include <net/red.h>
9 
10 #include "spectrum.h"
11 #include "reg.h"
12 
/* Band 0 (the highest-priority band) maps to the numerically highest hardware
 * traffic class, and so on down.
 */
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
/* TC_H_MIN() child class indices are 1-based; convert to a 0-based band. */
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
16 
/* Kind of qdisc offloaded at a given location. Compared against the
 * requesting qdisc's kind before stats/destroy requests are serviced.
 */
enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
};
24 
/* Per-qdisc-type callbacks. Each callback receives the port and the offload
 * slot it operates on; "params" points to the qdisc-type specific offload
 * parameters handed down from the stack.
 */
struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	/* Validate params before any hardware is touched. */
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	/* Apply params to the hardware (create or reconfigure). */
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	/* Tear down the hardware configuration. */
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* Fill generic qdisc stats (bytes/packets/drops/backlog). */
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	/* Fill qdisc-type specific extended stats. */
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	/* Reset the statistics base line to the current HW counters. */
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};
48 
/* State of one offload location (root or one per traffic class). */
struct mlxsw_sp_qdisc {
	u32 handle;	/* TC handle; TC_H_UNSPEC when not offloaded. */
	u8 tclass_num;	/* Hardware traffic class backing this qdisc. */
	u8 prio_bitmap;	/* Priorities currently mapped to this qdisc. */
	/* Extended-stats base line: HW counter values at offload time. */
	union {
		struct red_stats red;
	} xstats_base;
	/* Generic-stats base line; deltas against these are reported. */
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;	/* Tracked in device cells, not bytes. */
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;
};
66 
67 static bool
68 mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
69 		       enum mlxsw_sp_qdisc_type type)
70 {
71 	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
72 	       mlxsw_sp_qdisc->ops->type == type &&
73 	       mlxsw_sp_qdisc->handle == handle;
74 }
75 
76 static struct mlxsw_sp_qdisc *
77 mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
78 		    bool root_only)
79 {
80 	int tclass, child_index;
81 
82 	if (parent == TC_H_ROOT)
83 		return mlxsw_sp_port->root_qdisc;
84 
85 	if (root_only || !mlxsw_sp_port->root_qdisc ||
86 	    !mlxsw_sp_port->root_qdisc->ops ||
87 	    TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
88 	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
89 		return NULL;
90 
91 	child_index = TC_H_MIN(parent);
92 	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
93 	return &mlxsw_sp_port->tclass_qdiscs[tclass];
94 }
95 
96 static struct mlxsw_sp_qdisc *
97 mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
98 {
99 	int i;
100 
101 	if (mlxsw_sp_port->root_qdisc->handle == handle)
102 		return mlxsw_sp_port->root_qdisc;
103 
104 	if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
105 		return NULL;
106 
107 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
108 		if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
109 			return &mlxsw_sp_port->tclass_qdiscs[i];
110 
111 	return NULL;
112 }
113 
114 static int
115 mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
116 		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
117 {
118 	int err = 0;
119 
120 	if (!mlxsw_sp_qdisc)
121 		return 0;
122 
123 	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
124 		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
125 						   mlxsw_sp_qdisc);
126 
127 	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
128 	mlxsw_sp_qdisc->ops = NULL;
129 	return err;
130 }
131 
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* If this location held a qdisc of a different type, remove
		 * it before setting the new one. A qdisc of the same type
		 * can simply be overridden in place.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	/* A handle change means this is a newly-created qdisc rather than a
	 * reconfiguration, so adopt the ops and reset the stats base line.
	 */
	if (mlxsw_sp_qdisc->handle != handle) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	/* An already-offloaded qdisc failed reconfiguration: let the kernel
	 * know it is no longer offloaded before tearing it down.
	 */
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}
171 
172 static int
173 mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
174 			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
175 			 struct tc_qopt_offload_stats *stats_ptr)
176 {
177 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
178 	    mlxsw_sp_qdisc->ops->get_stats)
179 		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
180 						      mlxsw_sp_qdisc,
181 						      stats_ptr);
182 
183 	return -EOPNOTSUPP;
184 }
185 
186 static int
187 mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
188 			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
189 			  void *xstats_ptr)
190 {
191 	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
192 	    mlxsw_sp_qdisc->ops->get_xstats)
193 		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
194 						      mlxsw_sp_qdisc,
195 						      xstats_ptr);
196 
197 	return -EOPNOTSUPP;
198 }
199 
200 static u64
201 mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
202 {
203 	return xstats->backlog[tclass_num] +
204 	       xstats->backlog[tclass_num + 8];
205 }
206 
207 static u64
208 mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
209 {
210 	return xstats->tail_drop[tclass_num] +
211 	       xstats->tail_drop[tclass_num + 8];
212 }
213 
214 static void
215 mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
216 				       u8 prio_bitmap, u64 *tx_packets,
217 				       u64 *tx_bytes)
218 {
219 	int i;
220 
221 	*tx_packets = 0;
222 	*tx_bytes = 0;
223 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
224 		if (prio_bitmap & BIT(i)) {
225 			*tx_packets += xstats->tx_packets[i];
226 			*tx_bytes += xstats->tx_bytes[i];
227 		}
228 	}
229 }
230 
231 static void
232 mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
233 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
234 				u64 *p_tx_bytes, u64 *p_tx_packets,
235 				u64 *p_drops, u64 *p_backlog)
236 {
237 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
238 	struct mlxsw_sp_port_xstats *xstats;
239 	u64 tx_bytes, tx_packets;
240 
241 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
242 	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
243 					       mlxsw_sp_qdisc->prio_bitmap,
244 					       &tx_packets, &tx_bytes);
245 
246 	*p_tx_packets += tx_packets;
247 	*p_tx_bytes += tx_bytes;
248 	*p_drops += xstats->wred_drop[tclass_num] +
249 		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
250 	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
251 }
252 
253 static void
254 mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
255 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
256 			    u64 tx_bytes, u64 tx_packets,
257 			    u64 drops, u64 backlog,
258 			    struct tc_qopt_offload_stats *stats_ptr)
259 {
260 	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;
261 
262 	tx_bytes -= stats_base->tx_bytes;
263 	tx_packets -= stats_base->tx_packets;
264 	drops -= stats_base->drops;
265 	backlog -= stats_base->backlog;
266 
267 	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
268 	stats_ptr->qstats->drops += drops;
269 	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);
270 
271 	stats_base->backlog += backlog;
272 	stats_base->drops += drops;
273 	stats_base->tx_bytes += tx_bytes;
274 	stats_base->tx_packets += tx_packets;
275 }
276 
277 static void
278 mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
279 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
280 			    struct tc_qopt_offload_stats *stats_ptr)
281 {
282 	u64 tx_packets = 0;
283 	u64 tx_bytes = 0;
284 	u64 backlog = 0;
285 	u64 drops = 0;
286 
287 	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
288 					&tx_bytes, &tx_packets,
289 					&drops, &backlog);
290 	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
291 				    tx_bytes, tx_packets, drops, backlog,
292 				    stats_ptr);
293 }
294 
/* Enable WRED/ECN congestion management on one traffic class of a port by
 * programming its congestion profile (CWTP) and then binding the profile to
 * the traffic class (CWTPM).
 */
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	/* Thresholds are rounded up to the device's minimum granularity. */
	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	/* Bind the profile to the traffic class and select ECN marking vs.
	 * dropping.
	 */
	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
320 
/* Disable congestion management on one traffic class by unbinding its
 * congestion profile (CWTPM reset).
 */
static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}
332 
/* Record current HW counter values as the base line for a freshly offloaded
 * RED qdisc, so stats reads report zero-based deltas from this point.
 */
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	/* NOTE(review): unlike the other counters, xstats->ecn is not
	 * indexed by tclass_num -- presumably a port-wide ECN-mark count;
	 * confirm against the stats collection code.
	 */
	red_base->prob_mark = xstats->ecn;
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	/* RED overlimits = early drops + ECN marks; drops = early + tail. */
	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}
359 
360 static int
361 mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
362 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
363 {
364 	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
365 
366 	if (root_qdisc != mlxsw_sp_qdisc)
367 		root_qdisc->stats_base.backlog -=
368 					mlxsw_sp_qdisc->stats_base.backlog;
369 
370 	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
371 						  mlxsw_sp_qdisc->tclass_num);
372 }
373 
374 static int
375 mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
376 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
377 				void *params)
378 {
379 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
380 	struct tc_red_qopt_offload_params *p = params;
381 
382 	if (p->min > p->max) {
383 		dev_err(mlxsw_sp->bus_info->dev,
384 			"spectrum: RED: min %u is bigger then max %u\n", p->min,
385 			p->max);
386 		return -EINVAL;
387 	}
388 	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
389 					GUARANTEED_SHARED_BUFFER)) {
390 		dev_err(mlxsw_sp->bus_info->dev,
391 			"spectrum: RED: max value %u is too big\n", p->max);
392 		return -EINVAL;
393 	}
394 	if (p->min == 0 || p->max == 0) {
395 		dev_err(mlxsw_sp->bus_info->dev,
396 			"spectrum: RED: 0 value is illegal for min and max\n");
397 		return -EINVAL;
398 	}
399 	return 0;
400 }
401 
/* Program the HW WRED/ECN profile from the RED offload parameters. */
static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* calculate probability in percentage: p->probability appears to be
	 * a fraction scaled by 2^32 (hence the two divisions by 1 << 16);
	 * DIV_ROUND_UP keeps tiny probabilities from rounding to zero.
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	/* Thresholds arrive in bytes; the device works in buffer cells. */
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
						 max, prob, p->is_ecn);
}
423 
424 static void
425 mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
426 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
427 			      struct gnet_stats_queue *qstats)
428 {
429 	u64 backlog;
430 
431 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
432 				       mlxsw_sp_qdisc->stats_base.backlog);
433 	qstats->backlog -= backlog;
434 	mlxsw_sp_qdisc->stats_base.backlog = 0;
435 }
436 
437 static void
438 mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
439 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
440 			     void *params)
441 {
442 	struct tc_red_qopt_offload_params *p = params;
443 
444 	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
445 }
446 
/* Report RED extended stats as deltas since the last read, then advance the
 * base line accordingly.
 */
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	/* NOTE(review): u64 counters are diffed into int here -- assumes the
	 * delta between consecutive reads always fits in int; confirm.
	 */
	int early_drops, marks, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->ecn - xstats_base->prob_mark;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
	return 0;
}
474 
475 static int
476 mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
477 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
478 			     struct tc_qopt_offload_stats *stats_ptr)
479 {
480 	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
481 	struct mlxsw_sp_qdisc_stats *stats_base;
482 	struct mlxsw_sp_port_xstats *xstats;
483 	u64 overlimits;
484 
485 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
486 	stats_base = &mlxsw_sp_qdisc->stats_base;
487 
488 	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
489 	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
490 		     stats_base->overlimits;
491 
492 	stats_ptr->qstats->overlimits += overlimits;
493 	stats_base->overlimits += overlimits;
494 
495 	return 0;
496 }
497 
/* Traffic class priorities fall back to when an offloaded mapping is torn
 * down.
 */
#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
499 
/* RED offload ops: implemented with the per-TC WRED/ECN congestion profile. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};
510 
511 int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
512 			  struct tc_red_qopt_offload *p)
513 {
514 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
515 
516 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
517 	if (!mlxsw_sp_qdisc)
518 		return -EOPNOTSUPP;
519 
520 	if (p->command == TC_RED_REPLACE)
521 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
522 					      mlxsw_sp_qdisc,
523 					      &mlxsw_sp_qdisc_ops_red,
524 					      &p->set);
525 
526 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
527 				    MLXSW_SP_QDISC_RED))
528 		return -EOPNOTSUPP;
529 
530 	switch (p->command) {
531 	case TC_RED_DESTROY:
532 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
533 	case TC_RED_XSTATS:
534 		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
535 						 p->xstats);
536 	case TC_RED_STATS:
537 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
538 						&p->stats);
539 	default:
540 		return -EOPNOTSUPP;
541 	}
542 }
543 
544 static void
545 mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
546 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
547 {
548 	u64 backlog_cells = 0;
549 	u64 tx_packets = 0;
550 	u64 tx_bytes = 0;
551 	u64 drops = 0;
552 
553 	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
554 					&tx_bytes, &tx_packets,
555 					&drops, &backlog_cells);
556 
557 	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
558 	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
559 	mlxsw_sp_qdisc->stats_base.drops = drops;
560 	mlxsw_sp_qdisc->stats_base.backlog = 0;
561 }
562 
563 static int
564 mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
565 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
566 {
567 	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
568 
569 	if (root_qdisc != mlxsw_sp_qdisc)
570 		root_qdisc->stats_base.backlog -=
571 					mlxsw_sp_qdisc->stats_base.backlog;
572 
573 	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
574 					     MLXSW_REG_QEEC_HR_SUBGROUP,
575 					     mlxsw_sp_qdisc->tclass_num, 0,
576 					     MLXSW_REG_QEEC_MAS_DIS, 0);
577 }
578 
/* Convert a TBF burst size in bytes into the device's shaper burst-size
 * exponent. Returns 0 and sets *p_burst_size on success, -EINVAL when the
 * value is not representable.
 */
static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
{
	/* TBF burst size is configured in bytes. The ASIC burst size value is
	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
	 */
	u32 bs512 = max_size / 64;
	u8 bs = fls(bs512);

	if (!bs)
		return -EINVAL;
	--bs;

	/* Demand a power of two. */
	if ((1 << bs) != bs512)
		return -EINVAL;

	/* The exponent must lie in the device-supported shaper range. */
	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
		return -EINVAL;

	*p_burst_size = bs;
	return 0;
}
604 
605 static u32
606 mlxsw_sp_qdisc_tbf_max_size(u8 bs)
607 {
608 	return (1U << bs) * 64;
609 }
610 
611 static u64
612 mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
613 {
614 	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
615 	 * Kbits/s.
616 	 */
617 	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
618 }
619 
620 static int
621 mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
622 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
623 				void *params)
624 {
625 	struct tc_tbf_qopt_offload_replace_params *p = params;
626 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
627 	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
628 	u8 burst_size;
629 	int err;
630 
631 	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
632 		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
633 			"spectrum: TBF: rate of %lluKbps must be below %u\n",
634 			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
635 		return -EINVAL;
636 	}
637 
638 	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
639 	if (err) {
640 		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;
641 
642 		dev_err(mlxsw_sp->bus_info->dev,
643 			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
644 			p->max_size,
645 			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
646 			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
647 		return -EINVAL;
648 	}
649 
650 	return 0;
651 }
652 
/* Program the subgroup maximum-rate shaper from the TBF parameters. */
static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	/* Configure subgroup shaper, so that both UC and MC traffic is subject
	 * to shaping. That is unlike RED, however UC queue lengths are going to
	 * be different than MC ones due to different pool and quota
	 * configurations, so the configuration is not applicable. For shaper on
	 * the other hand, subjecting the overall stream to the configured
	 * shaper makes sense. Also note that is what we do for
	 * ieee_setmaxrate().
	 */
	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     rate_kbps, burst_size);
}
681 
682 static void
683 mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
684 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
685 			     void *params)
686 {
687 	struct tc_tbf_qopt_offload_replace_params *p = params;
688 
689 	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
690 }
691 
static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	/* TBF has no extra counters beyond the generic TC ones. */
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	return 0;
}
701 
/* TBF offload ops: implemented with the per-TC subgroup maximum-rate shaper. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
711 
712 int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
713 			  struct tc_tbf_qopt_offload *p)
714 {
715 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
716 
717 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
718 	if (!mlxsw_sp_qdisc)
719 		return -EOPNOTSUPP;
720 
721 	if (p->command == TC_TBF_REPLACE)
722 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
723 					      mlxsw_sp_qdisc,
724 					      &mlxsw_sp_qdisc_ops_tbf,
725 					      &p->replace_params);
726 
727 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
728 				    MLXSW_SP_QDISC_TBF))
729 		return -EOPNOTSUPP;
730 
731 	switch (p->command) {
732 	case TC_TBF_DESTROY:
733 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
734 	case TC_TBF_STATS:
735 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
736 						&p->stats);
737 	default:
738 		return -EOPNOTSUPP;
739 	}
740 }
741 
742 static int
743 __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
744 {
745 	int i;
746 
747 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
748 		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
749 					  MLXSW_SP_PORT_DEFAULT_TCLASS);
750 		mlxsw_sp_port_ets_set(mlxsw_sp_port,
751 				      MLXSW_REG_QEEC_HR_SUBGROUP,
752 				      i, 0, false, 0);
753 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
754 				       &mlxsw_sp_port->tclass_qdiscs[i]);
755 		mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
756 	}
757 
758 	return 0;
759 }
760 
static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	/* PRIO and ETS share the same teardown path. */
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}
767 
768 static int
769 __mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
770 {
771 	if (nbands > IEEE_8021QAZ_MAX_TCS)
772 		return -EOPNOTSUPP;
773 
774 	return 0;
775 }
776 
777 static int
778 mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
779 				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
780 				 void *params)
781 {
782 	struct tc_prio_qopt_offload_params *p = params;
783 
784 	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
785 }
786 
787 static int
788 __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
789 			     unsigned int nbands,
790 			     const unsigned int *quanta,
791 			     const unsigned int *weights,
792 			     const u8 *priomap)
793 {
794 	struct mlxsw_sp_qdisc *child_qdisc;
795 	int tclass, i, band, backlog;
796 	u8 old_priomap;
797 	int err;
798 
799 	for (band = 0; band < nbands; band++) {
800 		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
801 		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
802 		old_priomap = child_qdisc->prio_bitmap;
803 		child_qdisc->prio_bitmap = 0;
804 
805 		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
806 					    MLXSW_REG_QEEC_HR_SUBGROUP,
807 					    tclass, 0, !!quanta[band],
808 					    weights[band]);
809 		if (err)
810 			return err;
811 
812 		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
813 			if (priomap[i] == band) {
814 				child_qdisc->prio_bitmap |= BIT(i);
815 				if (BIT(i) & old_priomap)
816 					continue;
817 				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
818 								i, tclass);
819 				if (err)
820 					return err;
821 			}
822 		}
823 		if (old_priomap != child_qdisc->prio_bitmap &&
824 		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
825 			backlog = child_qdisc->stats_base.backlog;
826 			child_qdisc->ops->clean_stats(mlxsw_sp_port,
827 						      child_qdisc);
828 			child_qdisc->stats_base.backlog = backlog;
829 		}
830 	}
831 	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
832 		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
833 		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
834 		child_qdisc->prio_bitmap = 0;
835 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
836 		mlxsw_sp_port_ets_set(mlxsw_sp_port,
837 				      MLXSW_REG_QEEC_HR_SUBGROUP,
838 				      tclass, 0, false, 0);
839 	}
840 	return 0;
841 }
842 
843 static int
844 mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
845 			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
846 			    void *params)
847 {
848 	struct tc_prio_qopt_offload_params *p = params;
849 	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};
850 
851 	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
852 					    zeroes, zeroes, p->priomap);
853 }
854 
855 static void
856 __mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
857 			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
858 			       struct gnet_stats_queue *qstats)
859 {
860 	u64 backlog;
861 
862 	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
863 				       mlxsw_sp_qdisc->stats_base.backlog);
864 	qstats->backlog -= backlog;
865 }
866 
867 static void
868 mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
869 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
870 			      void *params)
871 {
872 	struct tc_prio_qopt_offload_params *p = params;
873 
874 	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
875 				       p->qstats);
876 }
877 
878 static int
879 mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
880 			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
881 			      struct tc_qopt_offload_stats *stats_ptr)
882 {
883 	struct mlxsw_sp_qdisc *tc_qdisc;
884 	u64 tx_packets = 0;
885 	u64 tx_bytes = 0;
886 	u64 backlog = 0;
887 	u64 drops = 0;
888 	int i;
889 
890 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
891 		tc_qdisc = &mlxsw_sp_port->tclass_qdiscs[i];
892 		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
893 						&tx_bytes, &tx_packets,
894 						&drops, &backlog);
895 	}
896 
897 	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
898 				    tx_bytes, tx_packets, drops, backlog,
899 				    stats_ptr);
900 	return 0;
901 }
902 
903 static void
904 mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
905 					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
906 {
907 	struct mlxsw_sp_qdisc_stats *stats_base;
908 	struct mlxsw_sp_port_xstats *xstats;
909 	struct rtnl_link_stats64 *stats;
910 	int i;
911 
912 	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
913 	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
914 	stats_base = &mlxsw_sp_qdisc->stats_base;
915 
916 	stats_base->tx_packets = stats->tx_packets;
917 	stats_base->tx_bytes = stats->tx_bytes;
918 
919 	stats_base->drops = 0;
920 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
921 		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
922 		stats_base->drops += xstats->wred_drop[i];
923 	}
924 
925 	mlxsw_sp_qdisc->stats_base.backlog = 0;
926 }
927 
/* PRIO offload ops: offloaded through the ETS helpers with zeroed quanta. */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
937 
938 static int
939 mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
940 				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
941 				void *params)
942 {
943 	struct tc_ets_qopt_offload_replace_params *p = params;
944 
945 	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
946 }
947 
948 static int
949 mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
950 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
951 			   void *params)
952 {
953 	struct tc_ets_qopt_offload_replace_params *p = params;
954 
955 	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, p->bands,
956 					    p->quanta, p->weights, p->priomap);
957 }
958 
959 static void
960 mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
961 			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
962 			     void *params)
963 {
964 	struct tc_ets_qopt_offload_replace_params *p = params;
965 
966 	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
967 				       p->qstats);
968 }
969 
static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	/* ETS and PRIO share the same teardown path. */
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}
976 
/* ETS offload ops: mostly shared with PRIO, but forwards real quanta and
 * weights to the hardware.
 */
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
986 
987 /* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
988  * graph is free of cycles). These operations do not change the parent handle
989  * though, which means it can be incomplete (if there is more than one class
990  * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
991  * linked to a different class and then removed from the original class).
992  *
993  * E.g. consider this sequence of operations:
994  *
995  *  # tc qdisc add dev swp1 root handle 1: prio
996  *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
997  *  RED: set bandwidth to 10Mbit
998  *  # tc qdisc link dev swp1 handle 13: parent 1:2
999  *
1000  * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
1001  * child. But RED will still only claim that 1:3 is its parent. If it's removed
1002  * from that band, its only parent will be 1:2, but it will continue to claim
1003  * that it is in fact 1:3.
1004  *
1005  * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
1006  * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
1007  * notification to offload the child Qdisc, based on its parent handle, and use
1008  * the graft operation to validate that the class where the child is actually
1009  * grafted corresponds to the parent handle. If the two don't match, we
1010  * unoffload the child.
1011  */
1012 static int
1013 __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
1014 			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
1015 			   u8 band, u32 child_handle)
1016 {
1017 	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
1018 	struct mlxsw_sp_qdisc *old_qdisc;
1019 
1020 	if (band < IEEE_8021QAZ_MAX_TCS &&
1021 	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == child_handle)
1022 		return 0;
1023 
1024 	if (!child_handle) {
1025 		/* This is an invisible FIFO replacing the original Qdisc.
1026 		 * Ignore it--the original Qdisc's destroy will follow.
1027 		 */
1028 		return 0;
1029 	}
1030 
1031 	/* See if the grafted qdisc is already offloaded on any tclass. If so,
1032 	 * unoffload it.
1033 	 */
1034 	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
1035 						  child_handle);
1036 	if (old_qdisc)
1037 		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
1038 
1039 	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
1040 			       &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
1041 	return -EOPNOTSUPP;
1042 }
1043 
/* PRIO graft handler: thin wrapper that forwards the band and child handle
 * to the graft-validation logic shared with ETS.
 */
static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  struct tc_prio_qopt_offload_graft_params *p)
{
	return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					  p->band, p->child_handle);
}
1052 
1053 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
1054 			   struct tc_prio_qopt_offload *p)
1055 {
1056 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1057 
1058 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1059 	if (!mlxsw_sp_qdisc)
1060 		return -EOPNOTSUPP;
1061 
1062 	if (p->command == TC_PRIO_REPLACE)
1063 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1064 					      mlxsw_sp_qdisc,
1065 					      &mlxsw_sp_qdisc_ops_prio,
1066 					      &p->replace_params);
1067 
1068 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
1069 				    MLXSW_SP_QDISC_PRIO))
1070 		return -EOPNOTSUPP;
1071 
1072 	switch (p->command) {
1073 	case TC_PRIO_DESTROY:
1074 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1075 	case TC_PRIO_STATS:
1076 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1077 						&p->stats);
1078 	case TC_PRIO_GRAFT:
1079 		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1080 						 &p->graft_params);
1081 	default:
1082 		return -EOPNOTSUPP;
1083 	}
1084 }
1085 
1086 int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
1087 			  struct tc_ets_qopt_offload *p)
1088 {
1089 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1090 
1091 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
1092 	if (!mlxsw_sp_qdisc)
1093 		return -EOPNOTSUPP;
1094 
1095 	if (p->command == TC_ETS_REPLACE)
1096 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
1097 					      mlxsw_sp_qdisc,
1098 					      &mlxsw_sp_qdisc_ops_ets,
1099 					      &p->replace_params);
1100 
1101 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
1102 				    MLXSW_SP_QDISC_ETS))
1103 		return -EOPNOTSUPP;
1104 
1105 	switch (p->command) {
1106 	case TC_ETS_DESTROY:
1107 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
1108 	case TC_ETS_STATS:
1109 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
1110 						&p->stats);
1111 	case TC_ETS_GRAFT:
1112 		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
1113 						  p->graft_params.band,
1114 						  p->graft_params.child_handle);
1115 	default:
1116 		return -EOPNOTSUPP;
1117 	}
1118 }
1119 
1120 int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
1121 {
1122 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
1123 	int i;
1124 
1125 	mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
1126 	if (!mlxsw_sp_qdisc)
1127 		goto err_root_qdisc_init;
1128 
1129 	mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
1130 	mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
1131 	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
1132 
1133 	mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
1134 				 sizeof(*mlxsw_sp_qdisc),
1135 				 GFP_KERNEL);
1136 	if (!mlxsw_sp_qdisc)
1137 		goto err_tclass_qdiscs_init;
1138 
1139 	mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
1140 	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
1141 		mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
1142 
1143 	return 0;
1144 
1145 err_tclass_qdiscs_init:
1146 	kfree(mlxsw_sp_port->root_qdisc);
1147 err_root_qdisc_init:
1148 	return -ENOMEM;
1149 }
1150 
/* Release the per-port qdisc bookkeeping allocated by
 * mlxsw_sp_tc_qdisc_init().
 */
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->tclass_qdiscs);
	kfree(mlxsw_sp_port->root_qdisc);
}
1156