// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "spectrum_span.h"
#include "reg.h"

#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
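
/* For example, with IEEE_8021QAZ_MAX_TCS == 8, PRIO band 0 (the highest
 * priority band, i.e. class 1:1, child 1) maps to traffic class 7, while
 * band 7 maps to traffic class 0: higher-priority bands are assigned
 * higher-numbered hardware traffic classes.
 */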

enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
	MLXSW_SP_QDISC_ETS,
	MLXSW_SP_QDISC_TBF,
	MLXSW_SP_QDISC_FIFO,
};

struct mlxsw_sp_qdisc;

struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

struct mlxsw_sp_qdisc {
	u32 handle;
	u8 tclass_num;
	u8 prio_bitmap;
	union {
		struct red_stats red;
	} xstats_base;
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;
};

struct mlxsw_sp_qdisc_state {
	struct mlxsw_sp_qdisc root_qdisc;
	struct mlxsw_sp_qdisc tclass_qdiscs[IEEE_8021QAZ_MAX_TCS];

	/* When a PRIO or ETS qdisc is added, the invisible FIFOs in its bands
	 * are created first. When notifications for these FIFOs arrive, it is
	 * not known what qdisc their parent handle refers to. It could be a
	 * newly-created PRIO that will replace the currently-offloaded one, or
	 * it could be e.g. a RED that will be attached below it.
	 *
	 * As the notifications start to arrive, use them to note what the
	 * future parent handle is, and keep track of which child FIFOs were
	 * seen. Then when the parent is known, retroactively offload those
	 * FIFOs.
	 */
	u32 future_handle;
	bool future_fifos[IEEE_8021QAZ_MAX_TCS];
};
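
/* As an example of the above: replacing the root qdisc with a new PRIO first
 * generates TC_FIFO_REPLACE notifications for the invisible per-band FIFOs
 * (parents 1:1 .. 1:N), and only then the TC_PRIO_REPLACE for the PRIO
 * itself, at which point the cached FIFOs are retroactively offloaded.
 */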

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
		       enum mlxsw_sp_qdisc_type type)
{
	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	       mlxsw_sp_qdisc->ops->type == type &&
	       mlxsw_sp_qdisc->handle == handle;
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
		    bool root_only)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int tclass, child_index;

	if (parent == TC_H_ROOT)
		return &qdisc_state->root_qdisc;

	if (root_only || !qdisc_state ||
	    !qdisc_state->root_qdisc.ops ||
	    TC_H_MAJ(parent) != qdisc_state->root_qdisc.handle ||
	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
		return NULL;

	child_index = TC_H_MIN(parent);
	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
	return &qdisc_state->tclass_qdiscs[tclass];
}

static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int i;

	if (qdisc_state->root_qdisc.handle == handle)
		return &qdisc_state->root_qdisc;

	if (qdisc_state->root_qdisc.handle == TC_H_UNSPEC)
		return NULL;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		if (qdisc_state->tclass_qdiscs[i].handle == handle)
			return &qdisc_state->tclass_qdiscs[i];

	return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	int err_hdroom = 0;
	int err = 0;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = *mlxsw_sp_port->hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_DCB;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
		err_hdroom = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	}

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;

	return err_hdroom ?: err;
}

static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
	struct mlxsw_sp_hdroom orig_hdroom;
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* If this location already holds a qdisc of the same type, its
		 * configuration can simply be overridden. Otherwise, the old
		 * qdisc has to be destroyed before setting up the new one.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);

	orig_hdroom = *mlxsw_sp_port->hdroom;
	if (root_qdisc == mlxsw_sp_qdisc) {
		struct mlxsw_sp_hdroom hdroom = orig_hdroom;

		hdroom.mode = MLXSW_SP_HDROOM_MODE_TC;
		mlxsw_sp_hdroom_prios_reset_buf_idx(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_lossiness(&hdroom);
		mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

		err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
		if (err)
			goto err_hdroom_configure;
	}

	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	/* Check if the Qdisc changed. That includes a situation where an
	 * invisible Qdisc replaces another one, or is being added for the
	 * first time.
	 */
	if (mlxsw_sp_qdisc->handle != handle || handle == TC_H_UNSPEC) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
err_hdroom_configure:
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_stats)
		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
						      mlxsw_sp_qdisc,
						      stats_ptr);

	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_xstats)
		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
						       mlxsw_sp_qdisc,
						       xstats_ptr);

	return -EOPNOTSUPP;
}

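/* The port xstats keep unicast traffic class counters at indices 0..7 and
 * the corresponding multicast ones at indices 8..15 (the ports are
 * configured in MC-aware mode), so both halves are summed to account for
 * the whole traffic class.
 */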
static u64
mlxsw_sp_xstats_backlog(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->backlog[tclass_num] +
	       xstats->backlog[tclass_num + 8];
}

static u64
mlxsw_sp_xstats_tail_drop(struct mlxsw_sp_port_xstats *xstats, int tclass_num)
{
	return xstats->tail_drop[tclass_num] +
	       xstats->tail_drop[tclass_num + 8];
}

static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
				       u8 prio_bitmap, u64 *tx_packets,
				       u64 *tx_bytes)
{
	int i;

	*tx_packets = 0;
	*tx_bytes = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (prio_bitmap & BIT(i)) {
			*tx_packets += xstats->tx_packets[i];
			*tx_bytes += xstats->tx_bytes[i];
		}
	}
}

static void
mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				u64 *p_tx_bytes, u64 *p_tx_packets,
				u64 *p_drops, u64 *p_backlog)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	u64 tx_bytes, tx_packets;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);

	*p_tx_packets += tx_packets;
	*p_tx_bytes += tx_bytes;
	*p_drops += xstats->wred_drop[tclass_num] +
		    mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
	*p_backlog += mlxsw_sp_xstats_backlog(xstats, tclass_num);
}

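/* The HW counters are free-running. stats_base holds a snapshot taken when
 * the qdisc was offloaded (or its stats last cleaned); only the delta
 * against that snapshot is reported to the kernel, and the snapshot is then
 * advanced by the same amount.
 */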
static void
mlxsw_sp_qdisc_update_stats(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    u64 tx_bytes, u64 tx_packets,
			    u64 drops, u64 backlog,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_stats *stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes -= stats_base->tx_bytes;
	tx_packets -= stats_base->tx_packets;
	drops -= stats_base->drops;
	backlog -= stats_base->backlog;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);

	stats_base->backlog += backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
}

static void
mlxsw_sp_qdisc_get_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog);
	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
}

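/* RED/ECN offload uses two registers: CWTP sets up the congestion profile
 * (min/max thresholds and marking probability) for the traffic class, and
 * CWTPM activates that profile, selecting whether packets are dropped
 * (WRED) and/or ECN-marked.
 */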
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_wred, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, is_wred, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
						  mlxsw_sp_qdisc->tclass_num);
}

static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;

	if (p->min > p->max) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: min %u is bigger than max %u\n", p->min,
			p->max);
		return -EINVAL;
	}
	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core,
					GUARANTEED_SHARED_BUFFER)) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: max value %u is too big\n", p->max);
		return -EINVAL;
	}
	if (p->min == 0 || p->max == 0) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: 0 value is illegal for min and max\n");
		return -EINVAL;
	}
	return 0;
}

static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* calculate probability in percentage */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
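	/* p->probability is a fixed-point fraction scaled by 2^32; the two
	 * DIV_ROUND_UP() steps above together divide by 1 << 32. E.g. a
	 * probability of 0x80000000 (that is, 0.5) works out to 50 percent.
	 */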
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num,
						 min, max, prob,
						 !p->is_nodrop, p->is_ecn);
}

static void
mlxsw_sp_qdisc_leaf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct gnet_stats_queue *qstats)
{
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_red_qopt_offload_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
}

static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
		 xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	return 0;
}

static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	u64 overlimits;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
	overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;

	stats_ptr->qstats->overlimits += overlimits;
	stats_base->overlimits += overlimits;

	return 0;
}

#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_RED))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void
mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u64 backlog_cells = 0;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 drops = 0;

	mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
					&tx_bytes, &tx_packets,
					&drops, &backlog_cells);

	mlxsw_sp_qdisc->stats_base.tx_packets = tx_packets;
	mlxsw_sp_qdisc->stats_base.tx_bytes = tx_bytes;
	mlxsw_sp_qdisc->stats_base.drops = drops;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     MLXSW_REG_QEEC_MAS_DIS, 0);
}

static int
mlxsw_sp_qdisc_tbf_bs(struct mlxsw_sp_port *mlxsw_sp_port,
		      u32 max_size, u8 *p_burst_size)
{
	/* TBF burst size is configured in bytes. The ASIC burst size value is
	 * (2 ^ bs) * 512 bits. Convert the TBF bytes to 512-bit units.
	 */
	u32 bs512 = max_size / 64;
	u8 bs = fls(bs512);

	if (!bs)
		return -EINVAL;
	--bs;

	/* Demand a power of two. */
	if ((1 << bs) != bs512)
		return -EINVAL;

	if (bs < mlxsw_sp_port->mlxsw_sp->lowest_shaper_bs ||
	    bs > MLXSW_REG_QEEC_HIGHEST_SHAPER_BS)
		return -EINVAL;

	*p_burst_size = bs;
	return 0;
}

static u32
mlxsw_sp_qdisc_tbf_max_size(u8 bs)
{
	return (1U << bs) * 64;
}
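
/* For example, a TBF max_size of 8192 bytes gives bs512 = 128 == 1 << 7, so
 * the configured burst size is bs = 7, and mlxsw_sp_qdisc_tbf_max_size(7)
 * maps back to 8192 bytes. A max_size of 10000 is rejected, because
 * 10000 / 64 = 156 is not a power of two.
 */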

static u64
mlxsw_sp_qdisc_tbf_rate_kbps(struct tc_tbf_qopt_offload_replace_params *p)
{
	/* TBF interface is in bytes/s, whereas Spectrum ASIC is configured in
	 * Kbits/s.
	 */
	return div_u64(p->rate.rate_bytes_ps, 1000) * 8;
}
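
/* E.g. a rate_bytes_ps of 12500000 (100 Mbit/s) works out to 12500 * 8 =
 * 100000 Kbit/s. Since the division happens first, rates are effectively
 * truncated to whole multiples of 8 Kbit/s.
 */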

static int
mlxsw_sp_qdisc_tbf_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	if (rate_kbps >= MLXSW_REG_QEEC_MAS_DIS) {
		dev_err(mlxsw_sp_port->mlxsw_sp->bus_info->dev,
			"spectrum: TBF: rate of %lluKbps must be below %u\n",
			rate_kbps, MLXSW_REG_QEEC_MAS_DIS);
		return -EINVAL;
	}

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (err) {
		u8 highest_shaper_bs = MLXSW_REG_QEEC_HIGHEST_SHAPER_BS;

		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: TBF: invalid burst size of %u, must be a power of two between %u and %u",
			p->max_size,
			mlxsw_sp_qdisc_tbf_max_size(mlxsw_sp->lowest_shaper_bs),
			mlxsw_sp_qdisc_tbf_max_size(highest_shaper_bs));
		return -EINVAL;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;
	u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
	u8 burst_size;
	int err;

	err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
	if (WARN_ON_ONCE(err))
		/* check_params above was supposed to reject this value. */
		return -EINVAL;

	/* Configure the subgroup shaper, so that both UC and MC traffic are
	 * subject to shaping. This is unlike RED: UC queue lengths will differ
	 * from MC ones due to different pool and quota configurations, so a
	 * RED configuration at the subgroup level would not be applicable. For
	 * a shaper, on the other hand, subjecting the overall stream to the
	 * configured rate makes sense. Note that this is also what is done for
	 * ieee_setmaxrate().
	 */
	return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					     MLXSW_REG_QEEC_HR_SUBGROUP,
					     mlxsw_sp_qdisc->tclass_num, 0,
					     rate_kbps, burst_size);
}

static void
mlxsw_sp_qdisc_tbf_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_tbf_qopt_offload_replace_params *p = params;

	mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);
}

static int
mlxsw_sp_qdisc_get_tbf_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
				    stats_ptr);
	return 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
	.type = MLXSW_SP_QDISC_TBF,
	.check_params = mlxsw_sp_qdisc_tbf_check_params,
	.replace = mlxsw_sp_qdisc_tbf_replace,
	.unoffload = mlxsw_sp_qdisc_tbf_unoffload,
	.destroy = mlxsw_sp_qdisc_tbf_destroy,
	.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};

int mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_tbf_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_TBF_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_tbf,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_TBF))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_TBF_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_TBF_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_qdisc_fifo_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *root_qdisc = &qdisc_state->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;
	return 0;
}

static int
mlxsw_sp_qdisc_fifo_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	return 0;
}

static int
mlxsw_sp_qdisc_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	return 0;
}

static int
mlxsw_sp_qdisc_get_fifo_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
				    stats_ptr);
	return 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
	.type = MLXSW_SP_QDISC_FIFO,
	.check_params = mlxsw_sp_qdisc_fifo_check_params,
	.replace = mlxsw_sp_qdisc_fifo_replace,
	.destroy = mlxsw_sp_qdisc_fifo_destroy,
	.get_stats = mlxsw_sp_qdisc_get_fifo_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};

int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_fifo_qopt_offload *p)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	int tclass, child_index;
	u32 parent_handle;

	/* Invisible FIFOs are tracked in future_handle and future_fifos. Make
	 * sure that no more than one qdisc is created for a port at a time.
	 * RTNL is a simple proxy for that.
	 */
	ASSERT_RTNL();

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
		parent_handle = TC_H_MAJ(p->parent);
		if (parent_handle != qdisc_state->future_handle) {
			/* This notification is for a different Qdisc than
			 * the previous ones. Wipe the future cache.
			 */
			memset(qdisc_state->future_fifos, 0,
			       sizeof(qdisc_state->future_fifos));
			qdisc_state->future_handle = parent_handle;
		}

		child_index = TC_H_MIN(p->parent);
		tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
		if (tclass < IEEE_8021QAZ_MAX_TCS) {
			if (p->command == TC_FIFO_REPLACE)
				qdisc_state->future_fifos[tclass] = true;
			else if (p->command == TC_FIFO_DESTROY)
				qdisc_state->future_fifos[tclass] = false;
		}
	}
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_FIFO_REPLACE) {
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_fifo, NULL);
	}

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_FIFO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_FIFO_DESTROY:
		if (p->handle == mlxsw_sp_qdisc->handle)
			return mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
						      mlxsw_sp_qdisc);
		return 0;
	case TC_FIFO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_FIFO_REPLACE: /* Handled above. */
		break;
	}

	return -EOPNOTSUPP;
}

static int
__mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      i, 0, false, 0);
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &qdisc_state->tclass_qdiscs[i]);
		qdisc_state->tclass_qdiscs[i].prio_bitmap = 0;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}

static int
__mlxsw_sp_qdisc_ets_check_params(unsigned int nbands)
{
	if (nbands > IEEE_8021QAZ_MAX_TCS)
		return -EOPNOTSUPP;

	return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}

static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			     unsigned int nbands,
			     const unsigned int *quanta,
			     const unsigned int *weights,
			     const u8 *priomap)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < nbands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP,
					    tclass, 0, !!quanta[band],
					    weights[band]);
		if (err)
			return err;

		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}

		if (handle == qdisc_state->future_handle &&
		    qdisc_state->future_fifos[tclass]) {
			err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
						     child_qdisc,
						     &mlxsw_sp_qdisc_ops_fifo,
						     NULL);
			if (err)
				return err;
		}
	}
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &qdisc_state->tclass_qdiscs[tclass];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
		mlxsw_sp_port_ets_set(mlxsw_sp_port,
				      MLXSW_REG_QEEC_HR_SUBGROUP,
				      tclass, 0, false, 0);
	}

	qdisc_state->future_handle = TC_H_UNSPEC;
	memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
	return 0;
}

static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	unsigned int zeroes[TCQ_ETS_MAX_BANDS] = {0};

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
					    zeroes, zeroes, p->priomap);
}

static void
__mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			       struct gnet_stats_queue *qstats)
{
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	qstats->backlog -= backlog;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}

static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	struct mlxsw_sp_qdisc *tc_qdisc;
	u64 tx_packets = 0;
	u64 tx_bytes = 0;
	u64 backlog = 0;
	u64 drops = 0;
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		tc_qdisc = &qdisc_state->tclass_qdiscs[i];
		mlxsw_sp_qdisc_collect_tc_stats(mlxsw_sp_port, tc_qdisc,
						&tx_bytes, &tx_packets,
						&drops, &backlog);
	}

	mlxsw_sp_qdisc_update_stats(mlxsw_sp_port->mlxsw_sp, mlxsw_sp_qdisc,
				    tx_bytes, tx_packets, drops, backlog,
				    stats_ptr);
	return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	stats_base->tx_packets = stats->tx_packets;
	stats_base->tx_bytes = stats->tx_bytes;

	stats_base->drops = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		stats_base->drops += mlxsw_sp_xstats_tail_drop(xstats, i);
		stats_base->drops += xstats->wred_drop[i];
	}

	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

static int
mlxsw_sp_qdisc_ets_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}

static int
mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	return __mlxsw_sp_qdisc_ets_replace(mlxsw_sp_port, handle, p->bands,
					    p->quanta, p->weights, p->priomap);
}

static void
mlxsw_sp_qdisc_ets_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_ets_qopt_offload_replace_params *p = params;

	__mlxsw_sp_qdisc_ets_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc,
				       p->qstats);
}

static int
mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	return __mlxsw_sp_qdisc_ets_destroy(mlxsw_sp_port);
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
	.type = MLXSW_SP_QDISC_ETS,
	.check_params = mlxsw_sp_qdisc_ets_check_params,
	.replace = mlxsw_sp_qdisc_ets_replace,
	.unoffload = mlxsw_sp_qdisc_ets_unoffload,
	.destroy = mlxsw_sp_qdisc_ets_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
 * graph is free of cycles). These operations do not change the parent handle
 * though, which means it can be incomplete (if there is more than one class
 * where the Qdisc in question is grafted) or outright wrong (if the Qdisc was
 * linked to a different class and then removed from the original class).
 *
 * E.g. consider this sequence of operations:
 *
 *  # tc qdisc add dev swp1 root handle 1: prio
 *  # tc qdisc add dev swp1 parent 1:3 handle 13: red limit 1000000 avpkt 10000
 *  RED: set bandwidth to 10Mbit
 *  # tc qdisc link dev swp1 handle 13: parent 1:2
 *
 * At this point, both 1:2 and 1:3 have the same RED Qdisc instance as their
 * child. But RED will still only claim that 1:3 is its parent. If it's removed
 * from that band, its only parent will be 1:2, but it will continue to claim
 * that it is in fact 1:3.
 *
 * The notification for child Qdisc replace (e.g. TC_RED_REPLACE) comes before
 * the notification for parent graft (e.g. TC_PRIO_GRAFT). We take the replace
 * notification to offload the child Qdisc, based on its parent handle, and use
 * the graft operation to validate that the class where the child is actually
 * grafted corresponds to the parent handle. If the two don't match, we
 * unoffload the child.
 */
static int
__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   u8 band, u32 child_handle)
{
	struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
	struct mlxsw_sp_qdisc *old_qdisc;

	if (band < IEEE_8021QAZ_MAX_TCS &&
	    qdisc_state->tclass_qdiscs[tclass_num].handle == child_handle)
		return 0;

	if (!child_handle) {
		/* This is an invisible FIFO replacing the original Qdisc.
		 * Ignore it; the original Qdisc's destroy will follow.
		 */
		return 0;
	}

	/* See if the grafted qdisc is already offloaded on any tclass. If so,
	 * unoffload it.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
			       &qdisc_state->tclass_qdiscs[tclass_num]);
	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  struct tc_prio_qopt_offload_graft_params *p)
{
	return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
					  p->band, p->child_handle);
}

int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct tc_prio_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_PRIO_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_prio,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_PRIO))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_PRIO_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_PRIO_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_PRIO_GRAFT:
		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						 &p->graft_params);
	default:
		return -EOPNOTSUPP;
	}
}

int mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_ets_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_ETS_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_ets,
					      &p->replace_params);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_ETS))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_ETS_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_ETS_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	case TC_ETS_GRAFT:
		return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
						  p->graft_params.band,
						  p->graft_params.child_handle);
	default:
		return -EOPNOTSUPP;
	}
}

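/* Qevents: a Qdisc (currently RED) can have tc blocks bound to its events,
 * such as early_drop. Matchall rules on such a block are offloaded as SPAN
 * agents that mirror or trap packets at the corresponding hardware trigger,
 * on the port and traffic class of the Qdisc that the block is bound to.
 */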
struct mlxsw_sp_qevent_block {
	struct list_head binding_list;
	struct list_head mall_entry_list;
	struct mlxsw_sp *mlxsw_sp;
};

struct mlxsw_sp_qevent_binding {
	struct list_head list;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 handle;
	int tclass_num;
	enum mlxsw_sp_span_trigger span_trigger;
};

static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);

static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding,
					  const struct mlxsw_sp_span_agent_parms *agent_parms,
					  int *p_span_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {};
	int span_id;
	int err;

	err = mlxsw_sp_span_agent_get(mlxsw_sp, &span_id, agent_parms);
	if (err)
		return err;

	err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
	if (err)
		goto err_analyzed_port_get;

	trigger_parms.span_id = span_id;
	trigger_parms.probability_rate = 1;
	err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				       &trigger_parms);
	if (err)
		goto err_agent_bind;

	err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
					   qevent_binding->tclass_num);
	if (err)
		goto err_trigger_enable;

	*p_span_id = span_id;
	return 0;

err_trigger_enable:
	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				   &trigger_parms);
err_agent_bind:
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
err_analyzed_port_get:
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
	return err;
}

static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_qevent_binding *qevent_binding,
					     int span_id)
{
	struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
	struct mlxsw_sp_span_trigger_parms trigger_parms = {
		.span_id = span_id,
	};

	mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
				      qevent_binding->tclass_num);
	mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
				   &trigger_parms);
	mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
	mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}

static int mlxsw_sp_qevent_mirror_configure(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_mall_entry *mall_entry,
					    struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.to_dev = mall_entry->mirror.to_dev,
	};

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->mirror.span_id);
}

static void mlxsw_sp_qevent_mirror_deconfigure(struct mlxsw_sp *mlxsw_sp,
					       struct mlxsw_sp_mall_entry *mall_entry,
					       struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->mirror.span_id);
}

static int mlxsw_sp_qevent_trap_configure(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_mall_entry *mall_entry,
					  struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_span_agent_parms agent_parms = {
		.session_id = MLXSW_SP_SPAN_SESSION_ID_BUFFER,
	};
	int err;

	err = mlxsw_sp_trap_group_policer_hw_id_get(mlxsw_sp,
						    DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
						    &agent_parms.policer_enable,
						    &agent_parms.policer_id);
	if (err)
		return err;

	return mlxsw_sp_qevent_span_configure(mlxsw_sp, mall_entry, qevent_binding,
					      &agent_parms, &mall_entry->trap.span_id);
}

static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_mall_entry *mall_entry,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}

static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
					   struct mlxsw_sp_mall_entry *mall_entry,
					   struct mlxsw_sp_qevent_binding *qevent_binding)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_configure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		/* This should have been validated away. */
		WARN_ON(1);
		return -EOPNOTSUPP;
	}
}

static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
					      struct mlxsw_sp_mall_entry *mall_entry,
					      struct mlxsw_sp_qevent_binding *qevent_binding)
{
	switch (mall_entry->type) {
	case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
		return mlxsw_sp_qevent_mirror_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
	case MLXSW_SP_MALL_ACTION_TYPE_TRAP:
		return mlxsw_sp_qevent_trap_deconfigure(mlxsw_sp, mall_entry, qevent_binding);
	default:
		WARN_ON(1);
		return;
	}
}

static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
					     struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	int err;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
		err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
						      qevent_binding);
		if (err)
			goto err_entry_configure;
	}

	return 0;

err_entry_configure:
	list_for_each_entry_continue_reverse(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
	return err;
}

static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qevent_block,
						struct mlxsw_sp_qevent_binding *qevent_binding)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list)
		mlxsw_sp_qevent_entry_deconfigure(qevent_block->mlxsw_sp, mall_entry,
						  qevent_binding);
}

static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;
	int err;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
		err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
		if (err)
			goto err_binding_configure;
	}

	return 0;

err_binding_configure:
	list_for_each_entry_continue_reverse(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	return err;
}

static void mlxsw_sp_qevent_block_deconfigure(struct mlxsw_sp_qevent_block *qevent_block)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;

	list_for_each_entry(qevent_binding, &qevent_block->binding_list, list)
		mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
}

static struct mlxsw_sp_mall_entry *
mlxsw_sp_qevent_mall_entry_find(struct mlxsw_sp_qevent_block *block, unsigned long cookie)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	list_for_each_entry(mall_entry, &block->mall_entry_list, list)
		if (mall_entry->cookie == cookie)
			return mall_entry;

	return NULL;
}

static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_qevent_block *qevent_block,
					struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;
	struct flow_action_entry *act;
	int err;

	/* It should not currently be possible to replace a matchall rule. So
	 * this must be a new rule.
	 */
	if (!list_empty(&qevent_block->mall_entry_list)) {
		NL_SET_ERR_MSG(f->common.extack, "At most one filter supported");
		return -EOPNOTSUPP;
	}
	if (f->rule->action.num_entries != 1) {
		NL_SET_ERR_MSG(f->common.extack, "Only singular actions supported");
		return -EOPNOTSUPP;
	}
	if (f->common.chain_index) {
		NL_SET_ERR_MSG(f->common.extack, "Only chain 0 is supported");
		return -EOPNOTSUPP;
	}
	if (f->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG(f->common.extack, "Protocol matching not supported");
		return -EOPNOTSUPP;
	}

	act = &f->rule->action.entries[0];
	if (!(act->hw_stats & FLOW_ACTION_HW_STATS_DISABLED)) {
		NL_SET_ERR_MSG(f->common.extack, "HW counters not supported on qevents");
		return -EOPNOTSUPP;
	}

	mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
	if (!mall_entry)
		return -ENOMEM;
	mall_entry->cookie = f->cookie;

	if (act->id == FLOW_ACTION_MIRRED) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_MIRROR;
		mall_entry->mirror.to_dev = act->dev;
	} else if (act->id == FLOW_ACTION_TRAP) {
		mall_entry->type = MLXSW_SP_MALL_ACTION_TYPE_TRAP;
	} else {
		NL_SET_ERR_MSG(f->common.extack, "Unsupported action");
		err = -EOPNOTSUPP;
		goto err_unsupported_action;
	}

	list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);

	err = mlxsw_sp_qevent_block_configure(qevent_block);
	if (err)
		goto err_block_configure;

	return 0;

err_block_configure:
	list_del(&mall_entry->list);
err_unsupported_action:
	kfree(mall_entry);
	return err;
}

static void mlxsw_sp_qevent_mall_destroy(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp_mall_entry *mall_entry;

	mall_entry = mlxsw_sp_qevent_mall_entry_find(qevent_block, f->cookie);
	if (!mall_entry)
		return;

	mlxsw_sp_qevent_block_deconfigure(qevent_block);

	list_del(&mall_entry->list);
	kfree(mall_entry);
}

static int mlxsw_sp_qevent_block_mall_cb(struct mlxsw_sp_qevent_block *qevent_block,
					 struct tc_cls_matchall_offload *f)
{
	struct mlxsw_sp *mlxsw_sp = qevent_block->mlxsw_sp;

	switch (f->command) {
	case TC_CLSMATCHALL_REPLACE:
		return mlxsw_sp_qevent_mall_replace(mlxsw_sp, qevent_block, f);
	case TC_CLSMATCHALL_DESTROY:
		mlxsw_sp_qevent_mall_destroy(qevent_block, f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlxsw_sp_qevent_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return mlxsw_sp_qevent_block_mall_cb(qevent_block, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static struct mlxsw_sp_qevent_block *mlxsw_sp_qevent_block_create(struct mlxsw_sp *mlxsw_sp,
								  struct net *net)
{
	struct mlxsw_sp_qevent_block *qevent_block;

	qevent_block = kzalloc(sizeof(*qevent_block), GFP_KERNEL);
	if (!qevent_block)
		return NULL;

	INIT_LIST_HEAD(&qevent_block->binding_list);
	INIT_LIST_HEAD(&qevent_block->mall_entry_list);
	qevent_block->mlxsw_sp = mlxsw_sp;
	return qevent_block;
}

static void
mlxsw_sp_qevent_block_destroy(struct mlxsw_sp_qevent_block *qevent_block)
{
	WARN_ON(!list_empty(&qevent_block->binding_list));
	WARN_ON(!list_empty(&qevent_block->mall_entry_list));
	kfree(qevent_block);
}

static void mlxsw_sp_qevent_block_release(void *cb_priv)
{
	struct mlxsw_sp_qevent_block *qevent_block = cb_priv;

	mlxsw_sp_qevent_block_destroy(qevent_block);
}

static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
			       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp_qevent_binding *binding;

	binding = kzalloc(sizeof(*binding), GFP_KERNEL);
	if (!binding)
		return ERR_PTR(-ENOMEM);

	binding->mlxsw_sp_port = mlxsw_sp_port;
	binding->handle = handle;
	binding->tclass_num = tclass_num;
	binding->span_trigger = span_trigger;
	return binding;
}

static void
mlxsw_sp_qevent_binding_destroy(struct mlxsw_sp_qevent_binding *binding)
{
	kfree(binding);
}

static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       u32 handle,
			       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp_qevent_binding *qevent_binding;

	list_for_each_entry(qevent_binding, &block->binding_list, list)
		if (qevent_binding->mlxsw_sp_port == mlxsw_sp_port &&
		    qevent_binding->handle == handle &&
		    qevent_binding->span_trigger == span_trigger)
			return qevent_binding;
	return NULL;
}

static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
					       struct flow_block_offload *f,
					       enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;
	struct mlxsw_sp_qdisc *qdisc;
	bool register_block = false;
	int err;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb) {
		qevent_block = mlxsw_sp_qevent_block_create(mlxsw_sp, f->net);
		if (!qevent_block)
			return -ENOMEM;
		block_cb = flow_block_cb_alloc(mlxsw_sp_qevent_block_cb, mlxsw_sp, qevent_block,
					       mlxsw_sp_qevent_block_release);
		if (IS_ERR(block_cb)) {
			mlxsw_sp_qevent_block_destroy(qevent_block);
			return PTR_ERR(block_cb);
		}
		register_block = true;
	} else {
		qevent_block = flow_block_cb_priv(block_cb);
	}
	flow_block_cb_incref(block_cb);

	qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port, f->sch->handle);
	if (!qdisc) {
		NL_SET_ERR_MSG(f->extack, "Qdisc not offloaded");
		err = -ENOENT;
		goto err_find_qdisc;
	}

	if (WARN_ON(mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
						   span_trigger))) {
		err = -EEXIST;
		goto err_binding_exists;
	}

	qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
							qdisc->tclass_num, span_trigger);
	if (IS_ERR(qevent_binding)) {
		err = PTR_ERR(qevent_binding);
		goto err_binding_create;
	}

	err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
	if (err)
		goto err_binding_configure;

	list_add(&qevent_binding->list, &qevent_block->binding_list);

	if (register_block) {
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlxsw_sp_qevent_block_cb_list);
	}

	return 0;

err_binding_configure:
	mlxsw_sp_qevent_binding_destroy(qevent_binding);
err_binding_create:
err_binding_exists:
err_find_qdisc:
	if (!flow_block_cb_decref(block_cb))
		flow_block_cb_free(block_cb);
	return err;
}

static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp_port,
						  struct flow_block_offload *f,
						  enum mlxsw_sp_span_trigger span_trigger)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_qevent_binding *qevent_binding;
	struct mlxsw_sp_qevent_block *qevent_block;
	struct flow_block_cb *block_cb;

	block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
	if (!block_cb)
		return;
	qevent_block = flow_block_cb_priv(block_cb);

	qevent_binding = mlxsw_sp_qevent_binding_lookup(qevent_block, mlxsw_sp_port, f->sch->handle,
							span_trigger);
	if (!qevent_binding)
		return;

	list_del(&qevent_binding->list);
	mlxsw_sp_qevent_binding_deconfigure(qevent_block, qevent_binding);
	mlxsw_sp_qevent_binding_destroy(qevent_binding);

	if (!flow_block_cb_decref(block_cb)) {
		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
	}
}

static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct flow_block_offload *f,
					  enum mlxsw_sp_span_trigger span_trigger)
{
	f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
	case FLOW_BLOCK_UNBIND:
		mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
					      struct flow_block_offload *f)
{
	return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
}

int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc_state *qdisc_state;
	int i;

	qdisc_state = kzalloc(sizeof(*qdisc_state), GFP_KERNEL);
	if (!qdisc_state)
		return -ENOMEM;

	qdisc_state->root_qdisc.prio_bitmap = 0xff;
	qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		qdisc_state->tclass_qdiscs[i].tclass_num = i;

	mlxsw_sp_port->qdisc = qdisc_state;
	return 0;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->qdisc);
}