// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/pkt_cls.h>
#include <net/red.h>

#include "spectrum.h"
#include "reg.h"

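/* A PRIO band maps to one of the port's traffic classes. Band 0 is the
 * highest-priority band, so it is mapped to the numerically highest tclass
 * (IEEE_8021QAZ_MAX_TCS - 1) and the last band to tclass 0. Child qdisc
 * locations (the TC_H_MIN part of a parent handle) are 1-based, hence the
 * "child - 1" below.
 */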
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
	MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))

enum mlxsw_sp_qdisc_type {
	MLXSW_SP_QDISC_NO_QDISC,
	MLXSW_SP_QDISC_RED,
	MLXSW_SP_QDISC_PRIO,
};

struct mlxsw_sp_qdisc_ops {
	enum mlxsw_sp_qdisc_type type;
	int (*check_params)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params);
	int (*replace)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
	int (*destroy)(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	int (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr);
	int (*get_xstats)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr);
	void (*clean_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc);
	/* unoffload - to be used for a qdisc that stops being offloaded without
	 * being destroyed.
	 */
	void (*unoffload)(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, void *params);
};

struct mlxsw_sp_qdisc {
	u32 handle;
	u8 tclass_num;
	u8 prio_bitmap;
	union {
		struct red_stats red;
	} xstats_base;
	struct mlxsw_sp_qdisc_stats {
		u64 tx_bytes;
		u64 tx_packets;
		u64 drops;
		u64 overlimits;
		u64 backlog;
	} stats_base;

	struct mlxsw_sp_qdisc_ops *ops;
};

static bool
mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
		       enum mlxsw_sp_qdisc_type type)
{
	return mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	       mlxsw_sp_qdisc->ops->type == type &&
	       mlxsw_sp_qdisc->handle == handle;
}

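/* Resolve a TC parent handle to the mlxsw_sp_qdisc occupying that location:
 * TC_H_ROOT maps to the port's root qdisc; otherwise the major part of the
 * parent must match the root qdisc's handle and the minor part is the
 * 1-based child (band) index, which is translated to a per-tclass qdisc.
 */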
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
		    bool root_only)
{
	int tclass, child_index;

	if (parent == TC_H_ROOT)
		return mlxsw_sp_port->root_qdisc;

	if (root_only || !mlxsw_sp_port->root_qdisc ||
	    !mlxsw_sp_port->root_qdisc->ops ||
	    TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
	    TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
		return NULL;

	child_index = TC_H_MIN(parent);
	tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
	return &mlxsw_sp_port->tclass_qdiscs[tclass];
}

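/* Look up an offloaded qdisc by its handle, checking the root qdisc first and
 * then each per-tclass child qdisc.
 */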
static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
{
	int i;

	if (mlxsw_sp_port->root_qdisc->handle == handle)
		return mlxsw_sp_port->root_qdisc;

	if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
		return NULL;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
			return &mlxsw_sp_port->tclass_qdiscs[i];

	return NULL;
}

static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int err = 0;

	if (!mlxsw_sp_qdisc)
		return 0;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->destroy)
		err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
						   mlxsw_sp_qdisc);

	mlxsw_sp_qdisc->handle = TC_H_UNSPEC;
	mlxsw_sp_qdisc->ops = NULL;
	return err;
}

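/* Offload (or re-configure) a qdisc at the given location: destroy any
 * offloaded qdisc of a different type, validate the new parameters, apply
 * them, and reset the stats base when a new handle takes over the location.
 * On failure the location is un-offloaded and left empty.
 */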
static int
mlxsw_sp_qdisc_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
		       struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
		       struct mlxsw_sp_qdisc_ops *ops, void *params)
{
	int err;

	if (mlxsw_sp_qdisc->ops && mlxsw_sp_qdisc->ops->type != ops->type)
		/* In case this location contained a qdisc of the same type we
		 * can override the old qdisc configuration. Otherwise, we need
		 * to remove the old qdisc before setting the new one.
		 */
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	err = ops->check_params(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_bad_param;

	err = ops->replace(mlxsw_sp_port, mlxsw_sp_qdisc, params);
	if (err)
		goto err_config;

	if (mlxsw_sp_qdisc->handle != handle) {
		mlxsw_sp_qdisc->ops = ops;
		if (ops->clean_stats)
			ops->clean_stats(mlxsw_sp_port, mlxsw_sp_qdisc);
	}

	mlxsw_sp_qdisc->handle = handle;
	return 0;

err_bad_param:
err_config:
	if (mlxsw_sp_qdisc->handle == handle && ops->unoffload)
		ops->unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, params);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	return err;
}

static int
mlxsw_sp_qdisc_get_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			 struct tc_qopt_offload_stats *stats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_stats)
		return mlxsw_sp_qdisc->ops->get_stats(mlxsw_sp_port,
						      mlxsw_sp_qdisc,
						      stats_ptr);

	return -EOPNOTSUPP;
}

static int
mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  void *xstats_ptr)
{
	if (mlxsw_sp_qdisc && mlxsw_sp_qdisc->ops &&
	    mlxsw_sp_qdisc->ops->get_xstats)
		return mlxsw_sp_qdisc->ops->get_xstats(mlxsw_sp_port,
						       mlxsw_sp_qdisc,
						       xstats_ptr);

	return -EOPNOTSUPP;
}

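/* Sum the port's per-priority TX counters for the priorities selected by
 * prio_bitmap, yielding the byte and packet counts attributable to this
 * qdisc's location.
 */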
static void
mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
				       u8 prio_bitmap, u64 *tx_packets,
				       u64 *tx_bytes)
{
	int i;

	*tx_packets = 0;
	*tx_bytes = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (prio_bitmap & BIT(i)) {
			*tx_packets += xstats->tx_packets[i];
			*tx_bytes += xstats->tx_bytes[i];
		}
	}
}

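/* Configure WRED/ECN for a traffic class: program the default CWTP profile
 * with the min/max thresholds (in cells) and the drop probability (in
 * percent), then bind the traffic class to that profile via CWTPM, selecting
 * ECN marking or dropping according to is_ecn.
 */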
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
				  int tclass_num, u32 min, u32 max,
				  u32 probability, bool is_ecn)
{
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];
	char cwtp_cmd[MLXSW_REG_CWTP_LEN];
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	mlxsw_reg_cwtp_pack(cwtp_cmd, mlxsw_sp_port->local_port, tclass_num);
	mlxsw_reg_cwtp_profile_pack(cwtp_cmd, MLXSW_REG_CWTP_DEFAULT_PROFILE,
				    roundup(min, MLXSW_REG_CWTP_MIN_VALUE),
				    roundup(max, MLXSW_REG_CWTP_MIN_VALUE),
				    probability);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtp), cwtp_cmd);
	if (err)
		return err;

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTP_DEFAULT_PROFILE, true, is_ecn);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

static int
mlxsw_sp_tclass_congestion_disable(struct mlxsw_sp_port *mlxsw_sp_port,
				   int tclass_num)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char cwtpm_cmd[MLXSW_REG_CWTPM_LEN];

	mlxsw_reg_cwtpm_pack(cwtpm_cmd, mlxsw_sp_port->local_port, tclass_num,
			     MLXSW_REG_CWTPM_RESET_PROFILE, false, false);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(cwtpm), cwtpm_cmd);
}

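/* Snapshot the current hardware counters as the stats base for a freshly
 * offloaded RED qdisc, so that subsequent stats readouts report only the
 * deltas accumulated while this qdisc has been offloaded.
 */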
static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *red_base;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;
	red_base = &mlxsw_sp_qdisc->xstats_base.red;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &stats_base->tx_packets,
					       &stats_base->tx_bytes);
	red_base->prob_mark = xstats->ecn;
	red_base->prob_drop = xstats->wred_drop[tclass_num];
	red_base->pdrop = xstats->tail_drop[tclass_num];

	stats_base->overlimits = red_base->prob_drop + red_base->prob_mark;
	stats_base->drops = red_base->prob_drop + red_base->pdrop;

	stats_base->backlog = 0;
}

static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;

	if (root_qdisc != mlxsw_sp_qdisc)
		root_qdisc->stats_base.backlog -=
					mlxsw_sp_qdisc->stats_base.backlog;

	return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
						  mlxsw_sp_qdisc->tclass_num);
}

static int
mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;

	if (p->min > p->max) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: min %u is bigger than max %u\n", p->min,
			p->max);
		return -EINVAL;
	}
	if (p->max > MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_BUFFER_SIZE)) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: max value %u is too big\n", p->max);
		return -EINVAL;
	}
	if (p->min == 0 || p->max == 0) {
		dev_err(mlxsw_sp->bus_info->dev,
			"spectrum: RED: 0 value is illegal for min and max\n");
		return -EINVAL;
	}
	return 0;
}

static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			   struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			   void *params)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct tc_red_qopt_offload_params *p = params;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	u32 min, max;
	u64 prob;

	/* Calculate the drop probability in percent: p->probability is a
	 * fixed-point fraction scaled by 2^32, so multiply by 100 and divide
	 * by 2^32 (done as two divisions by 1 << 16).
	 */
	prob = p->probability;
	prob *= 100;
	prob = DIV_ROUND_UP(prob, 1 << 16);
	prob = DIV_ROUND_UP(prob, 1 << 16);
	min = mlxsw_sp_bytes_cells(mlxsw_sp, p->min);
	max = mlxsw_sp_bytes_cells(mlxsw_sp, p->max);
	return mlxsw_sp_tclass_congestion_enable(mlxsw_sp_port, tclass_num, min,
						 max, prob, p->is_ecn);
}

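/* Called when the RED qdisc stops being offloaded but is not destroyed:
 * subtract the backlog currently attributed to this offloaded qdisc
 * (converted from cells to bytes) from the software qstats and reset the
 * base.
 */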
static void
mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     void *params)
{
	struct tc_red_qopt_offload_params *p = params;
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	p->qstats->backlog -= backlog;
	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

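/* Report RED-specific extended stats: early (WRED) drops, ECN marks and tail
 * drops are reported as deltas against the stored base, which is then
 * advanced so the same events are not reported again.
 */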
static int
mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *xstats_ptr)
{
	struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_port_xstats *xstats;
	struct red_stats *res = xstats_ptr;
	int early_drops, marks, pdrops;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;

	early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
	marks = xstats->ecn - xstats_base->prob_mark;
	pdrops = xstats->tail_drop[tclass_num] - xstats_base->pdrop;

	res->pdrop += pdrops;
	res->prob_drop += early_drops;
	res->prob_mark += marks;

	xstats_base->pdrop += pdrops;
	xstats_base->prob_drop += early_drops;
	xstats_base->prob_mark += marks;
	return 0;
}

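/* Fill in the generic qdisc stats for an offloaded RED qdisc: byte/packet
 * counts, overlimits (WRED drops + ECN marks) and drops (WRED + tail drops)
 * are reported as deltas against the stats base; the backlog delta is
 * converted from cells to bytes before being added to the qstats.
 */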
static int
mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			     struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			     struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, overlimits, drops, backlog;
	u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
					       mlxsw_sp_qdisc->prio_bitmap,
					       &tx_packets, &tx_bytes);
	tx_bytes = tx_bytes - stats_base->tx_bytes;
	tx_packets = tx_packets - stats_base->tx_packets;

	overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
		     stats_base->overlimits;
	drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
		stats_base->drops;
	backlog = xstats->backlog[tclass_num];

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->overlimits += overlimits;
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);

	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->overlimits += overlimits;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}

#define MLXSW_SP_PORT_DEFAULT_TCLASS 0

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
	.type = MLXSW_SP_QDISC_RED,
	.check_params = mlxsw_sp_qdisc_red_check_params,
	.replace = mlxsw_sp_qdisc_red_replace,
	.unoffload = mlxsw_sp_qdisc_red_unoffload,
	.destroy = mlxsw_sp_qdisc_red_destroy,
	.get_stats = mlxsw_sp_qdisc_get_red_stats,
	.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
};

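/* Entry point for RED qdisc offload requests: locate the qdisc at the
 * requested parent, handle replace directly, and dispatch destroy/stats/
 * xstats commands only if the handle and type still match the offloaded
 * qdisc.
 */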
int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct tc_red_qopt_offload *p)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;

	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
	if (!mlxsw_sp_qdisc)
		return -EOPNOTSUPP;

	if (p->command == TC_RED_REPLACE)
		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
					      mlxsw_sp_qdisc,
					      &mlxsw_sp_qdisc_ops_red,
					      &p->set);

	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
				    MLXSW_SP_QDISC_RED))
		return -EOPNOTSUPP;

	switch (p->command) {
	case TC_RED_DESTROY:
		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
	case TC_RED_XSTATS:
		return mlxsw_sp_qdisc_get_xstats(mlxsw_sp_port, mlxsw_sp_qdisc,
						 p->xstats);
	case TC_RED_STATS:
		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
						&p->stats);
	default:
		return -EOPNOTSUPP;
	}
}

static int
mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	int i;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
					  MLXSW_SP_PORT_DEFAULT_TCLASS);
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
				       &mlxsw_sp_port->tclass_qdiscs[i]);
		mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
	}

	return 0;
}

static int
mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
				 void *params)
{
	struct tc_prio_qopt_offload_params *p = params;

	if (p->bands > IEEE_8021QAZ_MAX_TCS)
		return -EOPNOTSUPP;

	return 0;
}

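/* Apply a PRIO configuration: for each band, map the priorities assigned to
 * it by the priomap to the band's traffic class, preserving the backlog but
 * resetting the stats base of any child qdisc whose priority set changed.
 * Child qdiscs of bands beyond the new number of bands are destroyed.
 */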
static int
mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
			    struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			    void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	struct mlxsw_sp_qdisc *child_qdisc;
	int tclass, i, band, backlog;
	u8 old_priomap;
	int err;

	for (band = 0; band < p->bands; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		old_priomap = child_qdisc->prio_bitmap;
		child_qdisc->prio_bitmap = 0;
		for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
			if (p->priomap[i] == band) {
				child_qdisc->prio_bitmap |= BIT(i);
				if (BIT(i) & old_priomap)
					continue;
				err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
								i, tclass);
				if (err)
					return err;
			}
		}
		if (old_priomap != child_qdisc->prio_bitmap &&
		    child_qdisc->ops && child_qdisc->ops->clean_stats) {
			backlog = child_qdisc->stats_base.backlog;
			child_qdisc->ops->clean_stats(mlxsw_sp_port,
						      child_qdisc);
			child_qdisc->stats_base.backlog = backlog;
		}
	}
	for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
		tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
		child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
		child_qdisc->prio_bitmap = 0;
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
	}
	return 0;
}

static void
mlxsw_sp_qdisc_prio_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      void *params)
{
	struct tc_prio_qopt_offload_params *p = params;
	u64 backlog;

	backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
				       mlxsw_sp_qdisc->stats_base.backlog);
	p->qstats->backlog -= backlog;
}

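/* Fill in the generic qdisc stats for an offloaded PRIO qdisc: byte/packet
 * counts come from the port-wide TX counters, while drops and backlog are
 * summed over all traffic classes; all values are reported as deltas against
 * the stats base, with backlog converted from cells to bytes.
 */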
static int
mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			      struct tc_qopt_offload_stats *stats_ptr)
{
	u64 tx_bytes, tx_packets, drops = 0, backlog = 0;
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
	tx_packets = stats->tx_packets - stats_base->tx_packets;

	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		drops += xstats->tail_drop[i];
		drops += xstats->wred_drop[i];
		backlog += xstats->backlog[i];
	}
	drops = drops - stats_base->drops;

	_bstats_update(stats_ptr->bstats, tx_bytes, tx_packets);
	stats_ptr->qstats->drops += drops;
	stats_ptr->qstats->backlog +=
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     backlog) -
				mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
						     stats_base->backlog);
	stats_base->backlog = backlog;
	stats_base->drops += drops;
	stats_base->tx_bytes += tx_bytes;
	stats_base->tx_packets += tx_packets;
	return 0;
}

static void
mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
	struct mlxsw_sp_qdisc_stats *stats_base;
	struct mlxsw_sp_port_xstats *xstats;
	struct rtnl_link_stats64 *stats;
	int i;

	xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
	stats = &mlxsw_sp_port->periodic_hw_stats.stats;
	stats_base = &mlxsw_sp_qdisc->stats_base;

	stats_base->tx_packets = stats->tx_packets;
	stats_base->tx_bytes = stats->tx_bytes;

	stats_base->drops = 0;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		stats_base->drops += xstats->tail_drop[i];
		stats_base->drops += xstats->wred_drop[i];
	}

	mlxsw_sp_qdisc->stats_base.backlog = 0;
}

static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
	.type = MLXSW_SP_QDISC_PRIO,
	.check_params = mlxsw_sp_qdisc_prio_check_params,
	.replace = mlxsw_sp_qdisc_prio_replace,
	.unoffload = mlxsw_sp_qdisc_prio_unoffload,
	.destroy = mlxsw_sp_qdisc_prio_destroy,
	.get_stats = mlxsw_sp_qdisc_get_prio_stats,
	.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};

/* Grafting is not supported in mlxsw. It will result in un-offloading of the
 * grafted qdisc as well as the qdisc at the graft's new location.
 * However, if a qdisc is grafted onto the location it already occupies, the
 * graft is ignored completely and does not cause un-offloading.
 */
static int
mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
			  struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
			  struct tc_prio_qopt_offload_graft_params *p)
{
	int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
	struct mlxsw_sp_qdisc *old_qdisc;

	/* Check if the grafted qdisc is already in its "new" location. If so -
	 * nothing needs to be done.
	 */
	if (p->band < IEEE_8021QAZ_MAX_TCS &&
	    mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
		return 0;

	/* See if the grafted qdisc is already offloaded on any tclass. If so,
	 * unoffload it.
	 */
	old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
						  p->child_handle);
	if (old_qdisc)
		mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);

	mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
			       &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
	return -EOPNOTSUPP;
}

665 
666 int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
667 			   struct tc_prio_qopt_offload *p)
668 {
669 	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
670 
671 	mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
672 	if (!mlxsw_sp_qdisc)
673 		return -EOPNOTSUPP;
674 
675 	if (p->command == TC_PRIO_REPLACE)
676 		return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
677 					      mlxsw_sp_qdisc,
678 					      &mlxsw_sp_qdisc_ops_prio,
679 					      &p->replace_params);
680 
681 	if (!mlxsw_sp_qdisc_compare(mlxsw_sp_qdisc, p->handle,
682 				    MLXSW_SP_QDISC_PRIO))
683 		return -EOPNOTSUPP;
684 
685 	switch (p->command) {
686 	case TC_PRIO_DESTROY:
687 		return mlxsw_sp_qdisc_destroy(mlxsw_sp_port, mlxsw_sp_qdisc);
688 	case TC_PRIO_STATS:
689 		return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
690 						&p->stats);
691 	case TC_PRIO_GRAFT:
692 		return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
693 						 &p->graft_params);
694 	default:
695 		return -EOPNOTSUPP;
696 	}
697 }
698 
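/* Allocate the per-port qdisc state: one root qdisc (initially covering all
 * priorities) and one child qdisc per traffic class.
 */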
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
	int i;

	mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
	if (!mlxsw_sp_qdisc)
		goto err_root_qdisc_init;

	mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
	mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
	mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;

	mlxsw_sp_qdisc = kcalloc(IEEE_8021QAZ_MAX_TCS,
				 sizeof(*mlxsw_sp_qdisc),
				 GFP_KERNEL);
	if (!mlxsw_sp_qdisc)
		goto err_tclass_qdiscs_init;

	mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;

	return 0;

err_tclass_qdiscs_init:
	kfree(mlxsw_sp_port->root_qdisc);
err_root_qdisc_init:
	return -ENOMEM;
}

void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
	kfree(mlxsw_sp_port->tclass_qdiscs);
	kfree(mlxsw_sp_port->root_qdisc);
}